IRs
PyTorch 2.0 offers two sets of IRs for backends to interface with: Core Aten IR and Prims IR.
Core Aten IR
Core aten ops are the core subset of aten operators that can be used to compose other operators. The core aten IR is fully functional: there are no in-place or _out variants in this opset. In contrast to the Prims IR, core aten ops reuse the existing aten ops defined in native_functions.yaml, and they are not further decomposed into explicit type promotion and broadcasting ops. This opset is designed to serve as the functional IR for interfacing with backends.
Warning
This opset is still under active development; more ops will be added in the future.
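For illustration, here is a minimal sketch of obtaining a core-aten-level graph. It assumes the internal helpers torch.fx.experimental.proxy_tensor.make_fx and torch._decomp.core_aten_decompositions, both present in PyTorch 2.0, though their exact locations may change between releases:

```python
import torch
from torch._decomp import core_aten_decompositions
from torch.fx.experimental.proxy_tensor import make_fx

def f(x, y):
    # silu is not a core aten op; with the core aten decomposition
    # table applied, it should lower to core ops (sigmoid, mul).
    return torch.nn.functional.silu(x) + y

# Trace f while applying the core aten decomposition table.
gm = make_fx(f, decomposition_table=core_aten_decompositions())(
    torch.randn(4), torch.randn(4)
)
gm.graph.print_tabular()  # call_function nodes now target core aten ops
```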
Operator | Schema
---|---
aten._adaptive_avg_pool2d | _adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
aten._adaptive_avg_pool2d_backward | _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
aten._log_softmax | _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
aten._native_batch_norm_legit.no_stats | _native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
aten._softmax | _softmax(Tensor self, int dim, bool half_to_float) -> Tensor
aten._to_copy | _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
aten.abs | abs(Tensor self) -> Tensor
aten.acos | acos(Tensor self) -> Tensor
aten.acosh | acosh(Tensor self) -> Tensor
aten.add.Scalar | add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
aten.add.Tensor | add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
aten.addmm | addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
aten.alias | alias(Tensor(a) self) -> Tensor(a)
aten.amax | amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
aten.amin | amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
aten.arange.start_step | arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten.argmax | argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
aten.argmin | argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
aten.as_strided | as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
aten.asin | asin(Tensor self) -> Tensor
aten.asinh | asinh(Tensor self) -> Tensor
aten.atan | atan(Tensor self) -> Tensor
aten.atanh | atanh(Tensor self) -> Tensor
aten.avg_pool2d | avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
aten.avg_pool2d_backward | avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
aten.bitwise_and.Tensor | bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
aten.bitwise_not | bitwise_not(Tensor self) -> Tensor
aten.bitwise_or.Tensor | bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
aten.bitwise_xor.Tensor | bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
aten.bmm | bmm(Tensor self, Tensor mat2) -> Tensor
aten.cat | cat(Tensor[] tensors, int dim=0) -> Tensor
aten.clamp | clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
aten.clone | clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
aten.col2im | col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
aten.constant_pad_nd | constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
aten.convolution | convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
aten.convolution_backward | convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
aten.cos | cos(Tensor self) -> Tensor
aten.cosh | cosh(Tensor self) -> Tensor
aten.div.Scalar | div.Scalar(Tensor self, Scalar other) -> Tensor
aten.div.Tensor | div.Tensor(Tensor self, Tensor other) -> Tensor
aten.embedding_dense_backward | embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
aten.empty_strided | empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten.eq.Scalar | eq.Scalar(Tensor self, Scalar other) -> Tensor
aten.eq.Tensor | eq.Tensor(Tensor self, Tensor other) -> Tensor
aten.erf | erf(Tensor self) -> Tensor
aten.exp | exp(Tensor self) -> Tensor
aten.expand | expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
aten.fill.Scalar | fill.Scalar(Tensor self, Scalar value) -> Tensor
aten.flip | flip(Tensor self, int[] dims) -> Tensor
aten.floor | floor(Tensor self) -> Tensor
aten.fmod.Tensor | fmod.Tensor(Tensor self, Tensor other) -> Tensor
aten.full | full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten.gather | gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
aten.ge.Scalar | ge.Scalar(Tensor self, Scalar other) -> Tensor
aten.ge.Tensor | ge.Tensor(Tensor self, Tensor other) -> Tensor
aten.gelu | gelu(Tensor self, *, str approximate='none') -> Tensor
aten.grid_sampler_2d | grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
aten.gt.Scalar | gt.Scalar(Tensor self, Scalar other) -> Tensor
aten.gt.Tensor | gt.Tensor(Tensor self, Tensor other) -> Tensor
aten.hardtanh | hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
aten.index_select | index_select(Tensor self, int dim, Tensor index) -> Tensor
aten.isinf | isinf(Tensor self) -> Tensor
aten.isnan | isnan(Tensor self) -> Tensor
aten.le.Scalar | le.Scalar(Tensor self, Scalar other) -> Tensor
aten.le.Tensor | le.Tensor(Tensor self, Tensor other) -> Tensor
aten.leaky_relu | leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
aten.log | log(Tensor self) -> Tensor
aten.logical_and | logical_and(Tensor self, Tensor other) -> Tensor
aten.logical_not | logical_not(Tensor self) -> Tensor
aten.logical_or | logical_or(Tensor self, Tensor other) -> Tensor
aten.lt.Scalar | lt.Scalar(Tensor self, Scalar other) -> Tensor
aten.lt.Tensor | lt.Tensor(Tensor self, Tensor other) -> Tensor
aten.max.dim | max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten.max_pool2d_with_indices | max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
aten.max_pool2d_with_indices_backward | max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
aten.max_pool3d_with_indices | max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
aten.maximum | maximum(Tensor self, Tensor other) -> Tensor
aten.mean.dim | mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
aten.min.dim | min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten.minimum | minimum(Tensor self, Tensor other) -> Tensor
aten.mm | mm(Tensor self, Tensor mat2) -> Tensor
aten.mul.Scalar | mul.Scalar(Tensor self, Scalar other) -> Tensor
aten.mul.Tensor | mul.Tensor(Tensor self, Tensor other) -> Tensor
aten.native_batch_norm | native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
aten.native_dropout | native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
aten.native_group_norm | native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
aten.native_group_norm_backward | native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
aten.native_layer_norm | native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
aten.native_layer_norm_backward | native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
aten.ne.Scalar | ne.Scalar(Tensor self, Scalar other) -> Tensor
aten.ne.Tensor | ne.Tensor(Tensor self, Tensor other) -> Tensor
aten.neg | neg(Tensor self) -> Tensor
aten.nonzero | nonzero(Tensor self) -> Tensor
aten.permute | permute(Tensor(a) self, int[] dims) -> Tensor(a)
aten.pow.Tensor_Scalar | pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
aten.pow.Tensor_Tensor | pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
aten.reciprocal | reciprocal(Tensor self) -> Tensor
aten.reflection_pad2d | reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
aten.relu | relu(Tensor self) -> Tensor
aten.remainder.Tensor | remainder.Tensor(Tensor self, Tensor other) -> Tensor
aten.repeat | repeat(Tensor self, SymInt[] repeats) -> Tensor
aten.replication_pad2d | replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
aten.replication_pad3d | replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
aten.rsqrt | rsqrt(Tensor self) -> Tensor
aten.scalar_tensor | scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
aten.scatter_add | scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
aten.scatter_reduce.two | scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
aten.select.int | select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
aten.sigmoid | sigmoid(Tensor self) -> Tensor
aten.sign | sign(Tensor self) -> Tensor
aten.sin | sin(Tensor self) -> Tensor
aten.sinh | sinh(Tensor self) -> Tensor
aten.slice.Tensor | slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
aten.slice_scatter | slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
aten.sqrt | sqrt(Tensor self) -> Tensor
aten.squeeze.dim | squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
aten.squeeze.dims | squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
aten.sub.Scalar | sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
aten.sub.Tensor | sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
aten.sum.dim_IntList | sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
aten.tanh | tanh(Tensor self) -> Tensor
aten.topk | topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
aten.unsqueeze | unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
aten.upsample_bilinear2d.vec | upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
aten.upsample_nearest2d.vec | upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
aten.var.dim | var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
aten.view | view(Tensor(a) self, SymInt[] size) -> Tensor(a)
aten.where.self | where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
Prims IR
Prims IR is a set of primitive operators that can be used to compose other operators. It is a lower-level opset than the core aten IR, and it further decomposes ops into explicit type promotion and broadcasting ops, prims.convert_element_type and prims.broadcast_in_dim. This opset is designed to interface with compiler backends.
Warning
This opset is still under active development; more ops will be added in the future.
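As a sketch of how a graph can be lowered to this opset, one can trace through the reference implementations in torch._refs using the internal torch._prims.context.TorchRefsMode (an implementation detail that may move or change between releases); type promotion and broadcasting then appear as explicit nodes in the graph:

```python
import torch
from torch._prims.context import TorchRefsMode
from torch.fx.experimental.proxy_tensor import make_fx

def f(x, y):
    # float32 (2, 3) plus float64 (3,): the traced graph should contain
    # explicit prims.convert_element_type and prims.broadcast_in_dim
    # nodes ahead of the prims.add itself.
    return x + y

# Trace while the refs mode redirects torch ops to their prims-based refs.
with TorchRefsMode():
    gm = make_fx(f)(torch.randn(2, 3), torch.randn(3, dtype=torch.float64))
gm.graph.print_tabular()
```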
Operator | Schema
---|---
prims.abs | abs(Tensor self) -> Tensor
prims.acos | acos(Tensor self) -> Tensor
prims.acosh | acosh(Tensor self) -> Tensor
prims.asin | asin(Tensor self) -> Tensor
prims.asinh | asinh(Tensor self) -> Tensor
prims.atan | atan(Tensor self) -> Tensor
prims.atanh | atanh(Tensor self) -> Tensor
prims.cos | cos(Tensor self) -> Tensor
prims.cosh | cosh(Tensor self) -> Tensor
prims.bessel_i0 | bessel_i0(Tensor self) -> Tensor
prims.bessel_i0e | bessel_i0e(Tensor self) -> Tensor
prims.bessel_i1 | bessel_i1(Tensor self) -> Tensor
prims.bessel_i1e | bessel_i1e(Tensor self) -> Tensor
prims.bessel_j0 | bessel_j0(Tensor self) -> Tensor
prims.bessel_j1 | bessel_j1(Tensor self) -> Tensor
prims.bitwise_not | bitwise_not(Tensor self) -> Tensor
prims.cbrt | cbrt(Tensor self) -> Tensor
prims.ceil | ceil(Tensor self) -> Tensor
prims.conj_physical | conj_physical(Tensor self) -> Tensor
prims.digamma | digamma(Tensor self) -> Tensor
prims.erf | erf(Tensor self) -> Tensor
prims.erf_inv | erf_inv(Tensor self) -> Tensor
prims.erfc | erfc(Tensor self) -> Tensor
prims.erfcx | erfcx(Tensor self) -> Tensor
prims.exp | exp(Tensor self) -> Tensor
prims.expm1 | expm1(Tensor self) -> Tensor
prims.exp2 | exp2(Tensor self) -> Tensor
prims.fill | fill(Tensor self, Scalar value) -> Tensor
prims.floor | floor(Tensor self) -> Tensor
prims.imag | imag(Tensor self) -> Tensor
prims.isfinite | isfinite(Tensor self) -> Tensor
prims.lgamma | lgamma(Tensor self) -> Tensor
prims.log | log(Tensor self) -> Tensor
prims.log1p | log1p(Tensor self) -> Tensor
prims.log2 | log2(Tensor self) -> Tensor
prims.log10 | log10(Tensor self) -> Tensor
prims.ndtri | ndtri(Tensor self) -> Tensor
prims.neg | neg(Tensor self) -> Tensor
prims.real | real(Tensor self) -> Tensor
prims.reciprocal | reciprocal(Tensor self) -> Tensor
prims.round | round(Tensor self) -> Tensor
prims.sign | sign(Tensor self) -> Tensor
prims.signbit | signbit(Tensor self) -> Tensor
prims.sin | sin(Tensor self) -> Tensor
prims.sinh | sinh(Tensor self) -> Tensor
prims.spherical_bessel_j0 | spherical_bessel_j0(Tensor self) -> Tensor
prims.sqrt | sqrt(Tensor self) -> Tensor
prims.tan | tan(Tensor self) -> Tensor
prims.tanh | tanh(Tensor self) -> Tensor
prims.trunc | trunc(Tensor self) -> Tensor
prims.add | add(Tensor self, Tensor other) -> Tensor
prims.atan2 | atan2(Tensor self, Tensor other) -> Tensor
prims.bitwise_and | bitwise_and(Tensor self, Tensor other) -> Tensor
prims.bitwise_or | bitwise_or(Tensor self, Tensor other) -> Tensor
prims.bitwise_xor | bitwise_xor(Tensor self, Tensor other) -> Tensor
prims.div | div(Tensor self, Tensor other) -> Tensor
prims.eq | eq(Tensor self, Tensor other) -> Tensor
prims.fmax | fmax(Tensor self, Tensor other) -> Tensor
prims.fmin | fmin(Tensor self, Tensor other) -> Tensor
prims.fmod | fmod(Tensor self, Tensor other) -> Tensor
prims.gcd | gcd(Tensor self, Tensor other) -> Tensor
prims.ge | ge(Tensor self, Tensor other) -> Tensor
prims.gt | gt(Tensor self, Tensor other) -> Tensor
prims.hypot | hypot(Tensor self, Tensor other) -> Tensor
prims.igamma | igamma(Tensor self, Tensor other) -> Tensor
prims.igammac | igammac(Tensor self, Tensor other) -> Tensor
prims.le | le(Tensor self, Tensor other) -> Tensor
prims.lt | lt(Tensor self, Tensor other) -> Tensor
prims.maximum | maximum(Tensor self, Tensor other) -> Tensor
prims.minimum | minimum(Tensor self, Tensor other) -> Tensor
prims.mul | mul(Tensor self, Tensor other) -> Tensor
prims.ne | ne(Tensor self, Tensor other) -> Tensor
prims.nextafter | nextafter(Tensor self, Tensor other) -> Tensor
prims.pow | pow(Tensor self, Tensor other) -> Tensor
prims.remainder | remainder(Tensor self, Tensor other) -> Tensor
prims.rsqrt | rsqrt(Tensor self) -> Tensor
prims.shift_left | shift_left(Tensor self, Tensor other) -> Tensor
prims.shift_right_arithmetic | shift_right_arithmetic(Tensor self, Tensor other) -> Tensor
prims.sub | sub(Tensor self, Tensor other) -> Tensor
prims.zeta | zeta(Tensor self, Tensor other) -> Tensor
prims.as_strided | as_strided(Tensor(a!) a, SymInt[] size, SymInt[] stride, SymInt storage_offset) -> Tensor(a!)
prims.broadcast_in_dim | broadcast_in_dim(Tensor(a) a, SymInt[] shape, int[] broadcast_dimensions) -> Tensor(a)
prims.collapse_view | collapse_view(Tensor(a) a, int start, int end) -> Tensor(a)
prims.conj | conj(Tensor(a) a) -> Tensor(a)
prims.slice | slice(Tensor(a) a, SymInt[] start_indices, SymInt[] limit_indices, SymInt[]? strides=None) -> Tensor(a)
prims.slice_in_dim | slice_in_dim(Tensor(a) a, SymInt start_index, SymInt limit_index, int stride=1, int axis=0) -> Tensor(a)
prims.split_dim | split_dim(Tensor(a) a, int dim, SymInt outer_length) -> Tensor(a)
prims.squeeze | squeeze(Tensor(a) a, int[] dimensions) -> Tensor(a)
prims.transpose | transpose(Tensor(a) a, int[] permutation) -> Tensor(a)
prims.view_of | view_of(Tensor(a) a) -> Tensor
prims.as_strided_scatter | as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt storage_offset) -> Tensor
prims.cat | cat(Tensor[] tensors, int dim) -> Tensor
prims.reshape | reshape(Tensor a, SymInt[] shape) -> Tensor
prims.rev | rev(Tensor a, int[] dims) -> Tensor
prims.where | where(Tensor pred, Tensor a, Tensor b) -> Tensor
prims.clone | clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
prims.convert_element_type | convert_element_type(Tensor a, ScalarType dtype) -> Tensor
prims.device_put | device_put(Tensor a, Device device) -> Tensor
prims.item | item(Tensor a) -> Scalar
prims.maximum_value | maximum_value(ScalarType dtype) -> Scalar
prims.minium_value | minium_value(ScalarType dtype) -> Scalar
prims.copy_strided | copy_strided(Tensor a, SymInt[] stride) -> Tensor
prims.copy_to | copy_to(Tensor(a!) a, Tensor b) -> Tensor(a!)
prims.resize | resize(Tensor(a!) a, SymInt[] shape) -> Tensor(a!)
prims.amax | amax(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor
prims.amin | amin(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor
prims.prod | prod(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor
prims.sum | sum(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor
prims.var | var(Tensor inp, int[]? dims, *, int correction, ScalarType? output_dtype=None) -> Tensor
prims.empty_strided | empty_strided(SymInt[] shape, SymInt[] strides, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor
prims.scalar_tensor | scalar_tensor(Scalar s, *, ScalarType? dtype=None, Device? device=None) -> Tensor
prims.iota | iota(SymInt length, *, SymInt start, SymInt step, ScalarType dtype, Device device, bool requires_grad) -> Tensor
prims.svd | svd(Tensor A, *, bool full_matrices) -> (Tensor U, Tensor S, Tensor Vh)
prims.normal | normal(SymInt[] shape, *, Scalar mean, Scalar std, ScalarType dtype, Device device, bool requires_grad) -> Tensor
prims.uniform | uniform(SymInt[] shape, *, Scalar low, Scalar high, ScalarType dtype, Device device) -> Tensor
prims.fft_r2c | fft_r2c(Tensor self, *, int[] dim, bool onesided) -> Tensor
prims.fft_c2c | fft_c2c(Tensor self, *, int[] dim, bool forward) -> Tensor
prims.fft_c2r | fft_c2r(Tensor self, *, int[] dim, SymInt last_dim_size) -> Tensor