Merge branch 'main' into jcw/bump-c-17
xadupre committed Oct 6, 2023
2 parents 06eefeb + b702c4e commit 92d32af
Showing 13 changed files with 221 additions and 156 deletions.
1 change: 1 addition & 0 deletions .github/workflows/lint.yml
@@ -48,6 +48,7 @@ jobs:
reporter: github-pr-check
level: warning
flags: --linelength=120
+ filter: "-runtime/references"


enforce-style:
14 changes: 7 additions & 7 deletions README.md
@@ -28,18 +28,18 @@ ONNX is [widely supported](http://onnx.ai/supported-tools) and can be found in m

# Learn about the ONNX spec

- * [Overview](docs/Overview.md)
- * [ONNX intermediate representation spec](docs/IR.md)
- * [Versioning principles of the spec](docs/Versioning.md)
- * [Operators documentation](docs/Operators.md) (development version)
+ * [Overview](https://github.com/onnx/onnx/blob/main/docs/Overview.md)
+ * [ONNX intermediate representation spec](https://github.com/onnx/onnx/blob/main/docs/IR.md)
+ * [Versioning principles of the spec](https://github.com/onnx/onnx/blob/main/docs/Versioning.md)
+ * [Operators documentation](https://github.com/onnx/onnx/blob/main/docs/Operators.md)
* [Operators documentation](https://onnx.ai/onnx/operators/index.html) (latest release)
- * [Python API Overview](docs/PythonAPIOverview.md)
+ * [Python API Overview](https://github.com/onnx/onnx/blob/main/docs/PythonAPIOverview.md)

# Programming utilities for working with ONNX Graphs

- * [Shape and Type Inference](docs/ShapeInference.md)
+ * [Shape and Type Inference](https://github.com/onnx/onnx/blob/main/docs/ShapeInference.md)
* [Graph Optimization](https://github.com/onnx/optimizer)
- * [Opset Version Conversion](docs/VersionConverter.md)
+ * [Opset Version Conversion](https://github.com/onnx/onnx/blob/main/docs/docsgen/source/api/version_converter.md)

# Contribute

2 changes: 1 addition & 1 deletion docs/Changelog.md
@@ -23972,7 +23972,7 @@ This version of the operator has been available since version 20 of the default

<dl>
<dt><tt>grid</tt> (differentiable) : T1</dt>
- <dd>output tensor of shape (N, C, H, W, 2) of 2D sample coordinates or (N, C, D, H, W, 3) of 3D sample coordinates.</dd>
+ <dd>output tensor of shape (N, H, W, 2) of 2D sample coordinates or (N, D, H, W, 3) of 3D sample coordinates.</dd>
</dl>

#### Type Constraints
2 changes: 1 addition & 1 deletion docs/Operators.md
@@ -545,7 +545,7 @@ This version of the operator has been available since version 20 of the default

<dl>
<dt><tt>grid</tt> (differentiable) : T1</dt>
- <dd>output tensor of shape (N, C, H, W, 2) of 2D sample coordinates or (N, C, D, H, W, 3) of 3D sample coordinates.</dd>
+ <dd>output tensor of shape (N, H, W, 2) of 2D sample coordinates or (N, D, H, W, 3) of 3D sample coordinates.</dd>
</dl>

#### Type Constraints
19 changes: 19 additions & 0 deletions onnx/common/common.h
@@ -34,3 +34,22 @@
#define ONNX_CATCH(x) catch (x)
#define ONNX_HANDLE_EXCEPTION(func) func()
#endif

+ // Macros to disable the copy and/or assignment methods
+ // These are usually placed in the private: declarations for a class.
+
+ #define ONNX_DISALLOW_COPY(TypeName) TypeName(const TypeName&) = delete
+
+ #define ONNX_DISALLOW_ASSIGNMENT(TypeName) TypeName& operator=(const TypeName&) = delete
+
+ #define ONNX_DISALLOW_COPY_AND_ASSIGNMENT(TypeName) \
+   ONNX_DISALLOW_COPY(TypeName); \
+   ONNX_DISALLOW_ASSIGNMENT(TypeName)
+
+ #define ONNX_DISALLOW_MOVE(TypeName) \
+   TypeName(TypeName&&) = delete; \
+   TypeName& operator=(TypeName&&) = delete
+
+ #define ONNX_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TypeName) \
+   ONNX_DISALLOW_COPY_AND_ASSIGNMENT(TypeName); \
+   ONNX_DISALLOW_MOVE(TypeName)
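
The new macros above are documented as belonging in a class's private section. The following minimal sketch (not part of this commit; the class name is hypothetical and the include assumes the onnx repository root is on the include path) shows how they would typically be applied:

// Hypothetical example only: a class that uses the new macro to delete its
// copy, copy-assignment, and move operations.
#include <cstdio>

#include "onnx/common/common.h"  // assumes the onnx repo root is on the include path

class SchemaRegistry {  // illustrative class name, not from this commit
 public:
  SchemaRegistry() = default;
  void Dump() const {
    std::printf("registry at %p\n", static_cast<const void*>(this));
  }

 private:
  // Expands to deleted copy/move constructors and copy/move assignment
  // operators, so accidental copies fail at compile time.
  ONNX_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(SchemaRegistry);
};

int main() {
  SchemaRegistry registry;
  registry.Dump();
  // SchemaRegistry copy = registry;              // would not compile: copy constructor is deleted
  // SchemaRegistry moved = std::move(registry);  // would not compile: move constructor is deleted
  return 0;
}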
2 changes: 1 addition & 1 deletion onnx/defs/tensor/defs.cc
@@ -2527,7 +2527,7 @@ ONNX_OPERATOR_SET_SCHEMA(
.Output(
0,
"grid",
"output tensor of shape (N, C, H, W, 2) of 2D sample coordinates or (N, C, D, H, W, 3) of 3D sample coordinates.",
"output tensor of shape (N, H, W, 2) of 2D sample coordinates or (N, D, H, W, 3) of 3D sample coordinates.",
"T1",
OpSchema::Single,
true,
7 changes: 4 additions & 3 deletions onnx/onnx-ml.proto
@@ -118,6 +118,8 @@ enum Version {
// An AttributeProto MUST contain the name field, and *only one* of the
// following content fields, effectively enforcing a C/C++ union equivalent.
message AttributeProto {
+ reserved 12, 16 to 19;
+ reserved "v";

// Note: this enum is structurally identical to the OpSchema::AttrType
// enum defined in schema.h. If you rev one, you likely need to rev the other.
@@ -517,8 +519,8 @@ message TensorProto {
// Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear.
// The computation usually happens inside a block quantize / dequantize
// fused by the runtime.
- FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
- FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
+ FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
+ FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
FLOAT8E5M2 = 19; // follows IEEE 754, supports nan, inf, mostly used for gradients
FLOAT8E5M2FNUZ = 20; // follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero

@@ -845,7 +847,6 @@ message FunctionProto {
optional string domain = 10;
}


// For using protobuf-lite
option optimize_for = LITE_RUNTIME;
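
As context for the AttributeProto hunk above, which states that an attribute must carry its name plus exactly one content field: below is a minimal, hypothetical C++ sketch of that rule through the protobuf-generated accessors (the onnx/onnx_pb.h header path and the default onnx namespace are assumptions about a typical build, not part of this commit).

// Hypothetical example only: builds an AttributeProto with the mandatory
// name and a single content field, as required by the comment in the hunk above.
#include <iostream>

#include "onnx/onnx_pb.h"  // assumed entry point for the generated protobuf classes

int main() {
  onnx::AttributeProto alpha;
  alpha.set_name("alpha");                      // the mandatory name field
  alpha.set_type(onnx::AttributeProto::FLOAT);  // records which content field is populated
  alpha.set_f(0.25f);                           // exactly one content field: `f`

  // Populating a second content field (e.g. alpha.set_i(1)) would break the
  // "only one content field" rule, even though protobuf itself would accept it.
  std::cout << alpha.name() << " = " << alpha.f() << std::endl;
  return 0;
}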

7 changes: 4 additions & 3 deletions onnx/onnx-ml.proto3
@@ -118,6 +118,8 @@ enum Version {
// An AttributeProto MUST contain the name field, and *only one* of the
// following content fields, effectively enforcing a C/C++ union equivalent.
message AttributeProto {
+ reserved 12, 16 to 19;
+ reserved "v";

// Note: this enum is structurally identical to the OpSchema::AttrType
// enum defined in schema.h. If you rev one, you likely need to rev the other.
@@ -517,8 +519,8 @@ message TensorProto {
// Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear.
// The computation usually happens inside a block quantize / dequantize
// fused by the runtime.
- FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
- FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
+ FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
+ FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
FLOAT8E5M2 = 19; // follows IEEE 754, supports nan, inf, mostly used for gradients
FLOAT8E5M2FNUZ = 20; // follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero

@@ -845,7 +847,6 @@ message FunctionProto {
string domain = 10;
}


// For using protobuf-lite
option optimize_for = LITE_RUNTIME;

7 changes: 4 additions & 3 deletions onnx/onnx.in.proto
@@ -115,6 +115,8 @@ enum Version {
// An AttributeProto MUST contain the name field, and *only one* of the
// following content fields, effectively enforcing a C/C++ union equivalent.
message AttributeProto {
+ reserved 12, 16 to 19;
+ reserved "v";

// Note: this enum is structurally identical to the OpSchema::AttrType
// enum defined in schema.h. If you rev one, you likely need to rev the other.
@@ -514,8 +516,8 @@ message TensorProto {
// Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear.
// The computation usually happens inside a block quantize / dequantize
// fused by the runtime.
- FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
- FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
+ FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
+ FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
FLOAT8E5M2 = 19; // follows IEEE 754, supports nan, inf, mostly used for gradients
FLOAT8E5M2FNUZ = 20; // follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero

@@ -845,7 +847,3 @@ message FunctionProto {
// the FunctionProto.
optional string domain = 10;
}

7 changes: 4 additions & 3 deletions onnx/onnx.proto
@@ -116,6 +116,8 @@ enum Version {
// An AttributeProto MUST contain the name field, and *only one* of the
// following content fields, effectively enforcing a C/C++ union equivalent.
message AttributeProto {
+ reserved 12, 16 to 19;
+ reserved "v";

// Note: this enum is structurally identical to the OpSchema::AttrType
// enum defined in schema.h. If you rev one, you likely need to rev the other.
@@ -515,8 +517,8 @@ message TensorProto {
// Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear.
// The computation usually happens inside a block quantize / dequantize
// fused by the runtime.
- FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
- FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
+ FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
+ FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
FLOAT8E5M2 = 19; // follows IEEE 754, supports nan, inf, mostly used for gradients
FLOAT8E5M2FNUZ = 20; // follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero

@@ -829,7 +831,6 @@ message FunctionProto {
optional string domain = 10;
}


// For using protobuf-lite
option optimize_for = LITE_RUNTIME;

7 changes: 4 additions & 3 deletions onnx/onnx.proto3
@@ -116,6 +116,8 @@ enum Version {
// An AttributeProto MUST contain the name field, and *only one* of the
// following content fields, effectively enforcing a C/C++ union equivalent.
message AttributeProto {
+ reserved 12, 16 to 19;
+ reserved "v";

// Note: this enum is structurally identical to the OpSchema::AttrType
// enum defined in schema.h. If you rev one, you likely need to rev the other.
@@ -515,8 +517,8 @@ message TensorProto {
// Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear.
// The computation usually happens inside a block quantize / dequantize
// fused by the runtime.
- FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
- FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
+ FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf
+ FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
FLOAT8E5M2 = 19; // follows IEEE 754, supports nan, inf, mostly used for gradients
FLOAT8E5M2FNUZ = 20; // follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero

@@ -829,7 +831,6 @@ message FunctionProto {
string domain = 10;
}


// For using protobuf-lite
option optimize_for = LITE_RUNTIME;
