Skip to content

Commit

Permalink
feat: add complex/float32/ctor
Browse files Browse the repository at this point in the history
This commit copies the constructor implementation for `complex/float32`
to a new package for the purposes of eventually creating a complex
sub-namespace for single-precision complex numbers.

Ref: #2260
  • Loading branch information
kgryte committed May 25, 2024
1 parent be3061e commit f4cdf4d
Show file tree
Hide file tree
Showing 20 changed files with 2,338 additions and 0 deletions.
538 changes: 538 additions & 0 deletions lib/node_modules/@stdlib/complex/float32/ctor/README.md

Large diffs are not rendered by default.

136 changes: 136 additions & 0 deletions lib/node_modules/@stdlib/complex/float32/ctor/benchmark/benchmark.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
/**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

'use strict';

// MODULES //

var bench = require( '@stdlib/bench' );
var randu = require( '@stdlib/random/base/randu' );
var isnan = require( '@stdlib/math/base/assert/is-nan' );
var pkg = require( './../package.json' ).name;
var Complex64 = require( './../lib' );


// MAIN //

bench( pkg, function benchmark( b ) {
	var out;
	var idx;

	b.tic();
	for ( idx = 0; idx < b.iterations; idx++ ) {
		// Instantiate with loop-dependent parts to defeat constant folding:
		out = new Complex64( idx, idx );
		if ( isnan( out ) ) {
			b.fail( 'should not return NaN' );
		}
	}
	b.toc();

	// Sanity check on the final instance:
	if ( !( out instanceof Complex64 ) ) {
		b.fail( 'should return a complex number' );
	}
	b.pass( 'benchmark finished' );
	b.end();
});

bench( pkg+'::get:real', function benchmark( b ) {
	var cnum;
	var val;
	var idx;

	// One instance reused across iterations; only the accessor is timed:
	cnum = new Complex64( randu(), randu() );

	b.tic();
	for ( idx = 0; idx < b.iterations; idx++ ) {
		val = cnum.re;
		if ( isnan( val ) ) {
			b.fail( 'should not return NaN' );
		}
	}
	b.toc();
	if ( isnan( val ) ) {
		b.fail( 'should not return NaN' );
	}
	b.pass( 'benchmark finished' );
	b.end();
});

bench( pkg+'::get:imag', function benchmark( b ) {
	var cnum;
	var val;
	var idx;

	// One instance reused across iterations; only the accessor is timed:
	cnum = new Complex64( randu(), randu() );

	b.tic();
	for ( idx = 0; idx < b.iterations; idx++ ) {
		val = cnum.im;
		if ( isnan( val ) ) {
			b.fail( 'should not return NaN' );
		}
	}
	b.toc();
	if ( isnan( val ) ) {
		b.fail( 'should not return NaN' );
	}
	b.pass( 'benchmark finished' );
	b.end();
});

bench( pkg+':toString', function benchmark( b ) {
	var str;
	var cnum;
	var idx;

	// One instance reused across iterations; only serialization is timed:
	cnum = new Complex64( randu(), randu() );

	b.tic();
	for ( idx = 0; idx < b.iterations; idx++ ) {
		str = cnum.toString();
		if ( typeof str !== 'string' ) {
			b.fail( 'should return a string' );
		}
	}
	b.toc();
	if ( typeof str !== 'string' ) {
		b.fail( 'should return a string' );
	}
	b.pass( 'benchmark finished' );
	b.end();
});

bench( pkg+':toJSON', function benchmark( b ) {
	var obj;
	var cnum;
	var idx;

	// One instance reused across iterations; only serialization is timed:
	cnum = new Complex64( randu(), randu() );

	b.tic();
	for ( idx = 0; idx < b.iterations; idx++ ) {
		obj = cnum.toJSON();
		if ( typeof obj !== 'object' ) {
			b.fail( 'should return an object' );
		}
	}
	b.toc();
	if ( typeof obj !== 'object' ) {
		b.fail( 'should return an object' );
	}
	b.pass( 'benchmark finished' );
	b.end();
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
julia 1.5
BenchmarkTools 0.5.0
Original file line number Diff line number Diff line change
@@ -0,0 +1,144 @@
#!/usr/bin/env julia
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import BenchmarkTools
using Printf

# Benchmark variables (marked `const`: non-const globals are `Any`-typed and
# would be re-looked-up on every access inside `benchmark()`/`main()`):
const name = "complex64";  # benchmark name reported in TAP output
const repeats = 3;         # number of benchmark repetitions

"""
print_version()
Prints the TAP version.
# Examples
``` julia
julia> print_version()
```
"""
function print_version()
@printf( "TAP version 13\n" );
end

"""
print_summary( total, passing )
Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
# Examples
``` julia
julia> print_summary( 3, 3 )
```
"""
function print_summary( total, passing )
@printf( "#\n" );
@printf( "1..%d\n", total ); # TAP plan
@printf( "# total %d\n", total );
@printf( "# pass %d\n", passing );
@printf( "#\n" );
@printf( "# ok\n" );
end

"""
print_results( iterations, elapsed )
Print benchmark results.
# Arguments
* `iterations`: number of iterations
* `elapsed`: elapsed time (in seconds)
# Examples
``` julia
julia> print_results( 1000000, 0.131009101868 )
```
"""
function print_results( iterations, elapsed )
rate = iterations / elapsed

@printf( " ---\n" );
@printf( " iterations: %d\n", iterations );
@printf( " elapsed: %0.9f\n", elapsed );
@printf( " rate: %0.9f\n", rate );
@printf( " ...\n" );
end

"""
benchmark()
Run a benchmark.
# Notes
* Benchmark results are returned as a two-element array: [ iterations, elapsed ].
* The number of iterations is not the true number of iterations. Instead, an 'iteration' is defined as a 'sample', which is a computed estimate for a single evaluation.
* The elapsed time is in seconds.
# Examples
``` julia
julia> out = benchmark();
```
"""
function benchmark()
t = BenchmarkTools.@benchmark ComplexF32( rand(), rand() ) samples=1e6

# Compute the total "elapsed" time and convert from nanoseconds to seconds:
s = sum( t.times ) / 1.0e9;

# Determine the number of "iterations":
iter = length( t.times );

# Return the results:
[ iter, s ];
end

"""
main()
Run benchmarks.
# Examples
``` julia
julia> main();
```
"""
function main()
print_version();
for i in 1:repeats
@printf( "# julia::%s\n", name );
results = benchmark();
print_results( results[ 1 ], results[ 2 ] );
@printf( "ok %d benchmark finished\n", i );
end
print_summary( repeats, repeats );
end

main();
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Benchmark complex."""

from __future__ import print_function
import timeit

NAME = "complex64"
REPEATS = 3
ITERATIONS = 1000000


def print_version():
    """Write the TAP protocol version line to stdout."""
    header = "TAP version 13"
    print(header)


def print_summary(total, passing):
    """Write the TAP plan and summary counts to stdout.

    # Arguments

    * `total`: total number of tests
    * `passing`: number of passing tests
    """
    rows = [
        "#",
        "1.." + str(total),  # TAP plan
        "# total " + str(total),
        "# pass " + str(passing),
        "#",
        "# ok",
    ]
    for row in rows:
        print(row)


def print_results(elapsed):
    """Write a TAP YAML result block for a single benchmark run.

    Uses the module-level `ITERATIONS` constant for the iteration count.

    # Arguments

    * `elapsed`: elapsed time (in seconds)

    # Examples

    ``` python
    python> print_results(0.131009101868)
    ```
    """
    # Throughput in iterations per second:
    rate = ITERATIONS / elapsed

    rows = [
        " ---",
        " iterations: " + str(ITERATIONS),
        " elapsed: " + str(elapsed),
        " rate: " + str(rate),
        " ...",
    ]
    for row in rows:
        print(row)


def benchmark():
    """Run the benchmark and print benchmark results."""
    # Time construction of a complex number from two random floats:
    timer = timeit.Timer(
        "z = complex(float(random()), float(random()))",
        setup="from random import random;",
    )

    print_version()

    for k in range(REPEATS):
        print("# python::" + NAME)
        elapsed = timer.timeit(number=ITERATIONS)
        print_results(elapsed)
        print("ok " + str(k + 1) + " benchmark finished")

    # All repeats count as passing:
    print_summary(REPEATS, REPEATS)


def main():
    """Entry point: execute the benchmark suite."""
    benchmark()


if __name__ == "__main__":
main()
Loading

0 comments on commit f4cdf4d

Please sign in to comment.