# Dataset Viewer

The dataset has been auto-converted to Parquet.
The preview exposes the following columns:

| Column | Dtype | Observed range |
| --- | --- | --- |
| entry_point | string | length 1–65 |
| original_triton_code | string | length 4.5k–619k |
| python_code | string | length 208–60.9k |
| triton_code | string | length 1.15k–275k |
| repo_name | string | length 7–115 |
| module_name | string | length 1–65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0–18.5k |
| licenses | sequence | length 1–6 |
| stars | int64 | 0–19.8k |
| sha | string | length 40 (fixed) |
| repo_link | string | length 72–180 |
| pytorch_code | string | length 200–4.05k |
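Because the dataset is auto-converted to Parquet, it can be read directly with the `datasets` library. A minimal sketch; `org/dataset-name` is a placeholder, since the preview does not show the repository id:

```python
# Minimal loading sketch. "org/dataset-name" is a placeholder, because the
# preview page does not name the actual dataset repository.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")
row = ds[0]
print(row["entry_point"])          # e.g. "SumAggregator"
print(row["pytorch_code"][:200])   # normalized PyTorch module source
```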
The first rows of the preview follow; long code cells are truncated by the viewer (marked with `...`).

### SumAggregator (uuid 0)

- module_name: SumAggregator · synthetic: false
- repo_name: AlexMinhao/NAS_GNN · licenses: ["Apache-2.0"] · stars: 0
- sha: 89183988a96e1d6baed910ab3843c13282f8b077
- repo_link: https://github.com/AlexMinhao/NAS_GNN/tree/89183988a96e1d6baed910ab3843c13282f8b077

original_triton_code (truncated preview):

```python
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al...
```

python_code:

```python
import torch
import torch.nn as nn

class SumAggregator(nn.Module):
    def __init__(self):
        super(SumAggregator, self).__init__()

    def forward(self, neighbor):
        return torch.sum(neighbor, dim=1)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

triton_code (truncated preview):

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_st...
```

pytorch_code:

```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, neighbor):
        return torch.sum(neighbor, dim=1)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return []
```
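As an illustration of what the `triton_code` column pairs with this module, here is a minimal hand-written Triton kernel for the same reduction, `torch.sum(x, dim=1)` on a `[4, 4, 4, 4]` input. It is a sketch for intuition only, not the Inductor-generated kernel stored in the row; the kernel name and launch parameters are made up:

```python
import torch
import triton
import triton.language as tl

@triton.jit
def sum_dim1_kernel(in_ptr, out_ptr, xnumel, XBLOCK: tl.constexpr):
    # Output element x corresponds to (i0, i2, i3) with x = i0*16 + i2*4 + i3.
    xindex = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)
    xmask = xindex < xnumel
    x0 = xindex % 16   # i2 * 4 + i3
    x1 = xindex // 16  # i0
    base = x1 * 64 + x0
    acc = tl.zeros([XBLOCK], dtype=tl.float32)
    for k in range(4):  # reduce over dim=1, which has stride 16
        acc += tl.load(in_ptr + base + k * 16, mask=xmask, other=0.0)
    tl.store(out_ptr + xindex, acc, mask=xmask)

x = torch.rand(4, 4, 4, 4, device="cuda")
out = torch.empty((4, 4, 4), device="cuda")
sum_dim1_kernel[(1,)](x, out, 64, XBLOCK=64)
assert torch.allclose(out, x.sum(dim=1))
```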
### LinearEmbedding (uuid 1)

- module_name: LinearEmbedding · synthetic: false
- repo_name: Akhil-Raj/Trajectory-Transformer · licenses: ["MIT"] · stars: 0
- sha: dd09fda99443f6afb59d962026573162219ea6a9
- repo_link: https://github.com/Akhil-Raj/Trajectory-Transformer/tree/dd09fda99443f6afb59d962026573162219ea6a9

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_forward']`.

python_code (truncated preview):

```python
import math
import torch
import torch.utils.data
import torch.nn as nn

class LinearEmbedding(nn.Module):
    def __init__(self, inp_size, d_model):
        super(LinearEmbedding, self).__init__()
        self.lut = nn.Linear(inp_size, d_model)
        self.d_model = d_model

    def forward(self, x):
        return ...
```

triton_code (truncated preview):

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dyn...
```

pytorch_code (truncated preview):

```python
import math
import torch
import torch.utils.data
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, inp_size, d_model):
        super().__init__()
        self.lut = nn.Linear(inp_size, d_model)
        self.d_model = d_model

    def forward(self, x):
        return self.lut(x) * math.sqrt(self.d_...
```
### CustomizeLayer (uuid 2)

- module_name: CustomizeLayer · synthetic: false
- repo_name: Abhishekvats1997/Torch-Pruning · licenses: ["MIT"] · stars: 0
- sha: b322a42d1c9032cc9644332d33a9662ca6ed44ac
- repo_link: https://github.com/Abhishekvats1997/Torch-Pruning/tree/b322a42d1c9032cc9644332d33a9662ca6ed44ac

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_forward']`.

python_code (truncated preview):

```python
import torch
import torch.nn as nn

class CustomizeLayer(nn.Module):
    def __init__(self, in_dim):
        super().__init__()
        self.in_dim = in_dim
        self.scale = nn.Parameter(torch.Tensor(self.in_dim))
        self.bias = nn.Parameter(torch.Tensor(self.in_dim))

    def forward(self, x):
        norm ...
```

triton_code (truncated preview):

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_...
```

pytorch_code (truncated preview):

```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, in_dim):
        super().__init__()
        self.in_dim = in_dim
        self.scale = nn.Parameter(torch.Tensor(self.in_dim))
        self.bias = nn.Parameter(torch.Tensor(self.in_dim))

    def forward(self, x):
        norm = x.pow(2...
```
### LayerNorm (uuid 3)

- module_name: LayerNorm · synthetic: false
- repo_name: AWilcke/Dissertation · licenses: ["MIT"] · stars: 0
- sha: b85ad38a7f336ee290d5883f5e942f54e140d0d0
- repo_link: https://github.com/AWilcke/Dissertation/tree/b85ad38a7f336ee290d5883f5e942f54e140d0d0

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_forward']`.

python_code (truncated preview):

```python
import torch
import torch.nn as nn

class LayerNorm(nn.Module):
    def __init__(self, weights, eps=1e-05):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(weights))
        self.beta = nn.Parameter(torch.zeros(weights))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-...
```

triton_code (truncated preview):

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_...
```

pytorch_code (truncated preview):

```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, weights, eps=1e-05):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(weights))
        self.beta = nn.Parameter(torch.zeros(weights))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, k...
```
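The forward pass is cut off in the preview at `mean = x.mean(-1, k...`. Below is a hedged reconstruction of the standard gamma/beta layer-norm computation such a module performs, based only on the visible parameters, not the repo's exact code:

```python
import torch
import torch.nn as nn

class LayerNormSketch(nn.Module):
    # Hedged reconstruction; __init__ mirrors the visible code in the row above.
    def __init__(self, weights, eps=1e-05):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(weights))
        self.beta = nn.Parameter(torch.zeros(weights))
        self.eps = eps

    def forward(self, x):
        # Normalize over the last dimension, then rescale and shift.
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta
```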
### LayerNorm (uuid 4)

- module_name: LayerNorm · synthetic: false
- repo_name: Akhil-Raj/Trajectory-Transformer · licenses: ["MIT"] · stars: 0
- sha: dd09fda99443f6afb59d962026573162219ea6a9
- repo_link: https://github.com/Akhil-Raj/Trajectory-Transformer/tree/dd09fda99443f6afb59d962026573162219ea6a9

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_forward']`.

python_code (truncated preview):

```python
import torch
import torch.utils.data
import torch.nn as nn

class LayerNorm(nn.Module):
    """
    Construct a layernorm module (See citation for details).
    """
    def __init__(self, features, eps=1e-06):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self...
```

triton_code (truncated preview):

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dy...
```

pytorch_code (truncated preview):

```python
import torch
import torch.utils.data
import torch.nn as nn

class Model(nn.Module):
    """
    Construct a layernorm module (See citation for details).
    """
    def __init__(self, features, eps=1e-06):
        super().__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter...
```
### Norm (uuid 5)

- module_name: Norm · synthetic: false
- repo_name: Aalanli/MusicGeneration · licenses: ["MIT"] · stars: 0
- sha: 7d268322d692013d8ac6e70be31741cea519fa28
- repo_link: https://github.com/Aalanli/MusicGeneration/tree/7d268322d692013d8ac6e70be31741cea519fa28

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_forward']`.

python_code (truncated preview):

```python
import torch
import torch.nn as nn

class Norm(nn.Module):
    def __init__(self, n_state, axis=-1, epsilon=1e-05):
        super().__init__()
        self.n_state = n_state
        self.g = nn.Parameter(torch.ones([self.n_state]))
        self.b = nn.Parameter(torch.zeros([self.n_state]))
        self.axis = axis
        ...
```

triton_code (truncated preview):

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_...
```

pytorch_code (truncated preview):

```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, n_state, axis=-1, epsilon=1e-05):
        super().__init__()
        self.n_state = n_state
        self.g = nn.Parameter(torch.ones([self.n_state]))
        self.b = nn.Parameter(torch.zeros([self.n_state]))
        self.axis = axis
        ...
```
### BehlerAngular (uuid 6)

- module_name: BehlerAngular · synthetic: false
- repo_name: AlexanderDKazakov/schnetpack · licenses: ["MIT"] · stars: 0
- sha: 97b82469d977981b500e439a6c93696d8dac8a3f
- repo_link: https://github.com/AlexanderDKazakov/schnetpack/tree/97b82469d977981b500e439a6c93696d8dac8a3f

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_inference']`.

python_code (truncated preview):

```python
import torch
from torch import nn as nn

class BehlerAngular(nn.Module):
    """
    Compute Behler type angular contribution of the angle spanned by three atoms:

    :math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta`

    Sets of zetas with lambdas of -1 and +1 are generated automatically.

    A...
```

triton_code (truncated preview):

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._emp...
```

pytorch_code (truncated preview):

```python
import torch
from torch import nn as nn

class Model(nn.Module):
    """
    Compute Behler type angular contribution of the angle spanned by three atoms:

    :math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta`

    Sets of zetas with lambdas of -1 and +1 are generated automatically.

    Args:
    ...
```
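The docstring gives the angular term in closed form. A small, self-contained sketch of that formula; the function name and stacking convention are illustrative, not taken from schnetpack:

```python
import torch

def behler_angular(cos_theta: torch.Tensor, zeta: float = 1.0) -> torch.Tensor:
    # 2**(1 - zeta) * (1 + lambda * cos(theta))**zeta for lambda in {-1, +1},
    # stacked along a new trailing dimension.
    terms = [2.0 ** (1 - zeta) * (1 + lam * cos_theta) ** zeta
             for lam in (-1.0, 1.0)]
    return torch.stack(terms, dim=-1)
```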
### BottleneckBlock (uuid 7)

- module_name: BottleneckBlock · synthetic: false
- repo_name: Aamer98/FeatureNorm · licenses: ["MIT"] · stars: 0
- sha: fbf3d3b4cef81b3351347d272eb51b6cdd9f0cc5
- repo_link: https://github.com/Aamer98/FeatureNorm/tree/fbf3d3b4cef81b3351347d272eb51b6cdd9f0cc5

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_forward']`.

python_code (truncated preview):

```python
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed

def init_layer(L):
    if isinstance(L, nn.Conv2d):
        n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
        L.weight.data.normal_(0, math.sqrt(2.0 / f...
```

triton_code (truncated preview):

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn a...
```

pytorch_code (truncated preview): identical to python_code above up to the preview cut.
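The `init_layer` helper is cut off mid-expression; the visible part is He (Kaiming) initialization, weight ~ N(0, sqrt(2/n)) with n = k_h * k_w * out_channels. A hedged reconstruction follows; the BatchNorm branch is an assumption, not visible in the preview:

```python
import math
import torch.nn as nn

def init_layer(L):
    # He-style init for conv layers, matching the truncated helper above.
    if isinstance(L, nn.Conv2d):
        n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
        L.weight.data.normal_(0, math.sqrt(2.0 / n))
    elif isinstance(L, nn.BatchNorm2d):
        # Assumed branch: common in this code family, not shown in the preview.
        L.weight.data.fill_(1)
        L.bias.data.fill_(0)
```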
### Mlp (uuid 9)

- module_name: Mlp · synthetic: false
- repo_name: Aalanli/MusicGeneration · licenses: ["MIT"] · stars: 0
- sha: 7d268322d692013d8ac6e70be31741cea519fa28
- repo_link: https://github.com/Aalanli/MusicGeneration/tree/7d268322d692013d8ac6e70be31741cea519fa28

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_forward']`.

python_code (truncated preview):

```python
import torch
import torch.nn as nn

class Conv1d(nn.Module):
    def __init__(self, nf, nx, stdev=0.02):
        super().__init__()
        self.nf = nf
        self.nx = nx
        self.stdev = stdev
        self.w = nn.Parameter(torch.normal(size=[1, self.nx, self.nf],
            mean=0.0, std=self.stdev))
        ...
```

triton_code (truncated preview):

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ...
```

pytorch_code (truncated preview): identical to python_code above up to the preview cut.
### GCN (uuid 10)

- module_name: GCN · synthetic: false
- repo_name: AlexMinhao/NAS_GNN · licenses: ["Apache-2.0"] · stars: 0
- sha: 89183988a96e1d6baed910ab3843c13282f8b077
- repo_link: https://github.com/AlexMinhao/NAS_GNN/tree/89183988a96e1d6baed910ab3843c13282f8b077

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_forward']`.

python_code (truncated preview):

```python
from torch.nn import Module
import math
import torch
import torch.nn.functional as F
import torch.nn as nn

class GraphConvolution(Module):
    """
    A Graph Convolution Layer (GCN)
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in...
```

triton_code (truncated preview):

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn impor...
```

pytorch_code (truncated preview):

```python
from torch.nn import Module
import math
import torch
import torch.nn.functional as F
import torch.nn as nn

class GraphConvolution(Module):
    """
    A Graph Convolution Layer (GCN)
    """
    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        self.in_features = in_feature...
```
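The `GraphConvolution` class is truncated right after its constructor begins. For orientation, here is a textbook GCN layer in the Kipf & Welling style, which this class likely follows; this is a hedged sketch, not the repo's code:

```python
import torch
import torch.nn as nn

class GraphConvolutionSketch(nn.Module):
    # Textbook GCN layer: H' = A_hat @ H @ W + b.
    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(in_features, out_features))
        self.bias = nn.Parameter(torch.zeros(out_features)) if bias else None
        nn.init.xavier_uniform_(self.weight)

    def forward(self, x, adj):
        support = x @ self.weight   # feature transform
        out = adj @ support         # neighborhood aggregation
        return out + self.bias if self.bias is not None else out
```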
### SimpleBlock (uuid 12)

- module_name: SimpleBlock · synthetic: false
- repo_name: Aamer98/FeatureNorm · licenses: ["MIT"] · stars: 0
- sha: fbf3d3b4cef81b3351347d272eb51b6cdd9f0cc5
- repo_link: https://github.com/Aamer98/FeatureNorm/tree/fbf3d3b4cef81b3351347d272eb51b6cdd9f0cc5

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_forward']`.

python_code (truncated preview): the same truncated `init_layer` preamble as in the BottleneckBlock row (same repo).

triton_code (truncated preview): identical to the BottleneckBlock row's triton_code preview.

pytorch_code (truncated preview): identical to python_code above up to the preview cut.
### Aggregate (uuid 13)

- module_name: Aggregate · synthetic: false
- repo_name: AlexanderDKazakov/schnetpack · licenses: ["MIT"] · stars: 0
- sha: 97b82469d977981b500e439a6c93696d8dac8a3f
- repo_link: https://github.com/AlexanderDKazakov/schnetpack/tree/97b82469d977981b500e439a6c93696d8dac8a3f

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_inference']`.

python_code (truncated preview):

```python
import torch
from torch import nn as nn

class Aggregate(nn.Module):
    """Pooling layer based on sum or average with optional masking.

    Args:
        axis (int): axis along which pooling is done.
        mean (bool, optional): if True, use average instead for sum pooling.
        keepdim (bool, optional): whethe...
```

triton_code (truncated preview): identical to the BehlerAngular row's triton_code preview.

pytorch_code (truncated preview):

```python
import torch
from torch import nn as nn

class Model(nn.Module):
    """Pooling layer based on sum or average with optional masking.

    Args:
        axis (int): axis along which pooling is done.
        mean (bool, optional): if True, use average instead for sum pooling.
        keepdim (bool, optional): whether th...
```
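Based on the docstring, here is a functional sketch of the masked sum/average pooling; the mask broadcasting convention is an assumption, not visible in the preview:

```python
import torch

def aggregate(x, axis, mean=False, keepdim=True, mask=None):
    # Hedged sketch of the pooling the Aggregate docstring describes.
    if mask is not None:
        x = x * mask[..., None]  # assumed mask layout: one entry per position
    y = torch.sum(x, dim=axis, keepdim=keepdim)
    if mean:
        if mask is not None:
            n = mask[..., None].sum(dim=axis, keepdim=keepdim).clamp(min=1)
        else:
            n = x.shape[axis]
        y = y / n
    return y
```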
### KaggleAccuracy (uuid 14)

- module_name: KaggleAccuracy · synthetic: false
- repo_name: AlessandroRuzzi/Computational-Intelligence-Lab-2021 · licenses: ["MIT"] · stars: 0
- sha: ed9dae37618e0ca2f01c4e336df4354e77e00c1f
- repo_link: https://github.com/AlessandroRuzzi/Computational-Intelligence-Lab-2021/tree/ed9dae37618e0ca2f01c4e336df4354e77e00c1f

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_inference']`.

python_code (truncated preview):

```python
import torch
from torch import Tensor
from torch import nn

class KaggleAccuracy(nn.Module):
    def __init__(self, threshold: 'float'=0.25, num_patches: 'int'=38,
                 size: 'int'=418) -> None:
        super().__init__()
        self.threshold = threshold
        self.num_patches = num_patches
        self.patch_s...
```

triton_code (truncated preview):

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import Tensor
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C....
```

pytorch_code (truncated preview):

```python
import torch
from torch import Tensor
from torch import nn

class Model(nn.Module):
    def __init__(self, threshold: 'float'=0.25, num_patches: 'int'=38,
                 size: 'int'=418) -> None:
        super().__init__()
        self.threshold = threshold
        self.num_patches = num_patches
        self.patch_size = siz...
```
### CosAttention (uuid 15)

- module_name: CosAttention · synthetic: false
- repo_name: AlexMinhao/NAS_GNN · licenses: ["Apache-2.0"] · stars: 0
- sha: 89183988a96e1d6baed910ab3843c13282f8b077
- repo_link: https://github.com/AlexMinhao/NAS_GNN/tree/89183988a96e1d6baed910ab3843c13282f8b077

original_triton_code (truncated preview): the same Inductor preamble as above, with AOT ID `['0_forward']`.

python_code (truncated preview):

```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter

class ConstAttention(nn.Module):
    def __init__(self, **kwargs):
        super(ConstAttention, self).__init__()

    def forward(self, neighbor_vecs, self_vecs):
        return 1

class GatAttention(ConstAttention):
    ...
```

triton_code (truncated preview):

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter

assert_size_stride = torch._C._dynamo.guards.assert_siz...
```

pytorch_code (truncated preview):

```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter

class ConstAttention(nn.Module):
    def __init__(self, **kwargs):
        super().__init__()

    def forward(self, neighbor_vecs, self_vecs):
        return 1

class GatAttention(ConstAttention):
    def __init__(se...
```
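Since every row pairs `pytorch_code` with Inductor-generated Triton, the stored kernels can in principle be regenerated. A hypothetical sketch; it assumes a CUDA machine, a `row` loaded as in the first example, and a `Model` that takes no constructor arguments (as in the SumAggregator row):

```python
# Hypothetical end-to-end sketch: regenerate Inductor Triton code from a row.
import torch

namespace = {}
exec(row["pytorch_code"], namespace)   # defines Model and get_inputs
model = namespace["Model"]().cuda()
compiled = torch.compile(model)        # Inductor is the default backend
inputs = [t.cuda() for t in namespace["get_inputs"]()]
out = compiled(*inputs)                # first call triggers Triton codegen
# With TORCH_COMPILE_DEBUG=1 set in the environment, the generated kernels are
# written under ./torch_compile_debug/ for comparison against `triton_code`.
```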
End of preview.
README.md exists but its content is empty.

Downloads last month: 5