Dataset schema (column, type, and the viewer's min/max statistics; string and list columns report length ranges):

| column | type | min | max |
|---|---|---|---|
| entry_point | string (length) | 1 | 65 |
| original_triton_code | string (length) | 4.5k | 619k |
| python_code | string (length) | 208 | 60.9k |
| triton_code | string (length) | 1.15k | 275k |
| repo_name | string (length) | 7 | 115 |
| module_name | string (length) | 1 | 65 |
| synthetic | bool (1 class) | | |
| uuid | int64 | 0 | 18.5k |
| licenses | list (length) | 1 | 6 |
| stars | int64 | 0 | 19.8k |
| sha | string (length) | 40 | 40 |
| repo_link | string (length) | 72 | 180 |
| pytorch_code | string (length) | 200 | 4.05k |

Sample rows follow. Long code fields are truncated by the viewer; the trailing `...` is kept as-is.
---
entry_point: SumAggregator
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code:
```python
import torch
import torch.nn as nn

class SumAggregator(nn.Module):
    def __init__(self):
        super(SumAggregator, self).__init__()

    def forward(self, neighbor):
        return torch.sum(neighbor, dim=1)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_st...`
repo_name: AlexMinhao/NAS_GNN
module_name: SumAggregator
synthetic: false
uuid: 0
licenses: ["Apache-2.0"]
stars: 0
sha: 89183988a96e1d6baed910ab3843c13282f8b077
repo_link: https://github.com/AlexMinhao/NAS_GNN/tree/89183988a96e1d6baed910ab3843c13282f8b077
pytorch_code:
```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, neighbor):
        return torch.sum(neighbor, dim=1)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return []
```
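Each row pairs a PyTorch module with the Triton kernels that TorchInductor emits for it. A minimal sketch of how output like the original_triton_code field can be regenerated for this row's module, assuming a CUDA device and PyTorch 2.x (`TORCH_LOGS=output_code` is the standard switch for dumping Inductor's generated code):

```python
import torch
import torch.nn as nn

class SumAggregator(nn.Module):
    def forward(self, neighbor):
        return torch.sum(neighbor, dim=1)

# torch.compile routes through TorchInductor by default; running this
# script with TORCH_LOGS="output_code" prints the generated Triton source.
model = torch.compile(SumAggregator().cuda())
out = model(torch.rand([4, 4, 4, 4], device="cuda"))
print(out.shape)  # torch.Size([4, 4, 4]) after summing over dim=1
```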
---
entry_point: LinearEmbedding
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import math
import torch
import torch.utils.data
import torch.nn as nn

class LinearEmbedding(nn.Module):
    def __init__(self, inp_size, d_model):
        super(LinearEmbedding, self).__init__()
        self.lut = nn.Linear(inp_size, d_model)
        self.d_model = d_model

    def forward(self, x):
        return ...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dyn...`
repo_name: Akhil-Raj/Trajectory-Transformer
module_name: LinearEmbedding
synthetic: false
uuid: 1
licenses: ["MIT"]
stars: 0
sha: dd09fda99443f6afb59d962026573162219ea6a9
repo_link: https://github.com/Akhil-Raj/Trajectory-Transformer/tree/dd09fda99443f6afb59d962026573162219ea6a9
pytorch_code (truncated):
```python
import math
import torch
import torch.utils.data
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, inp_size, d_model):
        super().__init__()
        self.lut = nn.Linear(inp_size, d_model)
        self.d_model = d_model

    def forward(self, x):
        return self.lut(x) * math.sqrt(self.d_...
```
---
entry_point: CustomizeLayer
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn

class CustomizeLayer(nn.Module):
    def __init__(self, in_dim):
        super().__init__()
        self.in_dim = in_dim
        self.scale = nn.Parameter(torch.Tensor(self.in_dim))
        self.bias = nn.Parameter(torch.Tensor(self.in_dim))

    def forward(self, x):
        norm ...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...`
repo_name: Abhishekvats1997/Torch-Pruning
module_name: CustomizeLayer
synthetic: false
uuid: 2
licenses: ["MIT"]
stars: 0
sha: b322a42d1c9032cc9644332d33a9662ca6ed44ac
repo_link: https://github.com/Abhishekvats1997/Torch-Pruning/tree/b322a42d1c9032cc9644332d33a9662ca6ed44ac
pytorch_code (truncated):
```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, in_dim):
        super().__init__()
        self.in_dim = in_dim
        self.scale = nn.Parameter(torch.Tensor(self.in_dim))
        self.bias = nn.Parameter(torch.Tensor(self.in_dim))

    def forward(self, x):
        norm = x.pow(2...
```
---
entry_point: LayerNorm
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn

class LayerNorm(nn.Module):
    def __init__(self, weights, eps=1e-05):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(weights))
        self.beta = nn.Parameter(torch.zeros(weights))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...`
repo_name: AWilcke/Dissertation
module_name: LayerNorm
synthetic: false
uuid: 3
licenses: ["MIT"]
stars: 0
sha: b85ad38a7f336ee290d5883f5e942f54e140d0d0
repo_link: https://github.com/AWilcke/Dissertation/tree/b85ad38a7f336ee290d5883f5e942f54e140d0d0
pytorch_code (truncated):
```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, weights, eps=1e-05):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(weights))
        self.beta = nn.Parameter(torch.zeros(weights))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, k...
```
---
entry_point: LayerNorm
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.utils.data
import torch.nn as nn

class LayerNorm(nn.Module):
    """
    Construct a layernorm module (See citation for details).
    """

    def __init__(self, features, eps=1e-06):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dy...`
repo_name: Akhil-Raj/Trajectory-Transformer
module_name: LayerNorm
synthetic: false
uuid: 4
licenses: ["MIT"]
stars: 0
sha: dd09fda99443f6afb59d962026573162219ea6a9
repo_link: https://github.com/Akhil-Raj/Trajectory-Transformer/tree/dd09fda99443f6afb59d962026573162219ea6a9
pytorch_code (truncated):
```python
import torch
import torch.utils.data
import torch.nn as nn

class Model(nn.Module):
    """
    Construct a layernorm module (See citation for details).
    """

    def __init__(self, features, eps=1e-06):
        super().__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter...
```
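Both LayerNorm rows above truncate inside the forward pass. For reference, a sketch of the standard formulation such modules implement, completing the visible `mean = x.mean(-1, k...` fragment under the usual convention (an illustration, not the rows' verbatim source):

```python
import torch
import torch.nn as nn

class LayerNorm(nn.Module):
    """Normalize over the last dimension, then apply a learned gain and bias."""

    def __init__(self, features, eps=1e-06):
        super().__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # gain
        self.b_2 = nn.Parameter(torch.zeros(features))  # bias
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
```

Note that eps sits outside the square root here (std + eps); the BertLayerNorm row further down uses the TF-style variant with eps inside it.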
---
entry_point: Norm
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn

class Norm(nn.Module):
    def __init__(self, n_state, axis=-1, epsilon=1e-05):
        super().__init__()
        self.n_state = n_state
        self.g = nn.Parameter(torch.ones([self.n_state]))
        self.b = nn.Parameter(torch.zeros([self.n_state]))
        self.axis = axis
        ...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...`
repo_name: Aalanli/MusicGeneration
module_name: Norm
synthetic: false
uuid: 5
licenses: ["MIT"]
stars: 0
sha: 7d268322d692013d8ac6e70be31741cea519fa28
repo_link: https://github.com/Aalanli/MusicGeneration/tree/7d268322d692013d8ac6e70be31741cea519fa28
pytorch_code (truncated):
```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, n_state, axis=-1, epsilon=1e-05):
        super().__init__()
        self.n_state = n_state
        self.g = nn.Parameter(torch.ones([self.n_state]))
        self.b = nn.Parameter(torch.zeros([self.n_state]))
        self.axis = axis
        ...
```
---
entry_point: BehlerAngular
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
from torch import nn as nn

class BehlerAngular(nn.Module):
    """
    Compute Behler type angular contribution of the angle spanned by three atoms:

    :math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta`

    Sets of zetas with lambdas of -1 and +1 are generated automatically.

    A...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._emp...`
repo_name: AlexanderDKazakov/schnetpack
module_name: BehlerAngular
synthetic: false
uuid: 6
licenses: ["MIT"]
stars: 0
sha: 97b82469d977981b500e439a6c93696d8dac8a3f
repo_link: https://github.com/AlexanderDKazakov/schnetpack/tree/97b82469d977981b500e439a6c93696d8dac8a3f
pytorch_code (truncated):
```python
import torch
from torch import nn as nn

class Model(nn.Module):
    """
    Compute Behler type angular contribution of the angle spanned by three atoms:

    :math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta`

    Sets of zetas with lambdas of -1 and +1 are generated automatically.

    Args:
        ...
```
---
entry_point: BottleneckBlock
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed

def init_layer(L):
    if isinstance(L, nn.Conv2d):
        n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
        L.weight.data.normal_(0, math.sqrt(2.0 / f...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn a...`
repo_name: Aamer98/FeatureNorm
module_name: BottleneckBlock
synthetic: false
uuid: 7
licenses: ["MIT"]
stars: 0
sha: fbf3d3b4cef81b3351347d272eb51b6cdd9f0cc5
repo_link: https://github.com/Aamer98/FeatureNorm/tree/fbf3d3b4cef81b3351347d272eb51b6cdd9f0cc5
pytorch_code (truncated):
```python
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed

def init_layer(L):
    if isinstance(L, nn.Conv2d):
        n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
        L.weight.data.normal_(0, math.sqrt(2.0 / f...
```
---
entry_point: Mlp
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn

class Conv1d(nn.Module):
    def __init__(self, nf, nx, stdev=0.02):
        super().__init__()
        self.nf = nf
        self.nx = nx
        self.stdev = stdev
        self.w = nn.Parameter(torch.normal(size=[1, self.nx, self.nf],
            mean=0.0, std=self.stdev))
        ...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...`
repo_name: Aalanli/MusicGeneration
module_name: Mlp
synthetic: false
uuid: 9
licenses: ["MIT"]
stars: 0
sha: 7d268322d692013d8ac6e70be31741cea519fa28
repo_link: https://github.com/Aalanli/MusicGeneration/tree/7d268322d692013d8ac6e70be31741cea519fa28
pytorch_code (truncated):
```python
import torch
import torch.nn as nn

class Conv1d(nn.Module):
    def __init__(self, nf, nx, stdev=0.02):
        super().__init__()
        self.nf = nf
        self.nx = nx
        self.stdev = stdev
        self.w = nn.Parameter(torch.normal(size=[1, self.nx, self.nf],
            mean=0.0, std=self.stdev))
        ...
```
---
entry_point: GCN
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
from torch.nn import Module
import math
import torch
import torch.nn.functional as F
import torch.nn as nn

class GraphConvolution(Module):
    """
    A Graph Convolution Layer (GCN)
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn impor...`
repo_name: AlexMinhao/NAS_GNN
module_name: GCN
synthetic: false
uuid: 10
licenses: ["Apache-2.0"]
stars: 0
sha: 89183988a96e1d6baed910ab3843c13282f8b077
repo_link: https://github.com/AlexMinhao/NAS_GNN/tree/89183988a96e1d6baed910ab3843c13282f8b077
pytorch_code (truncated):
```python
from torch.nn import Module
import math
import torch
import torch.nn.functional as F
import torch.nn as nn

class GraphConvolution(Module):
    """
    A Graph Convolution Layer (GCN)
    """

    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        self.in_features = in_feature...
```
---
entry_point: SimpleBlock
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed

def init_layer(L):
    if isinstance(L, nn.Conv2d):
        n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
        L.weight.data.normal_(0, math.sqrt(2.0 / f...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn a...`
repo_name: Aamer98/FeatureNorm
module_name: SimpleBlock
synthetic: false
uuid: 12
licenses: ["MIT"]
stars: 0
sha: fbf3d3b4cef81b3351347d272eb51b6cdd9f0cc5
repo_link: https://github.com/Aamer98/FeatureNorm/tree/fbf3d3b4cef81b3351347d272eb51b6cdd9f0cc5
pytorch_code (truncated):
```python
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed

def init_layer(L):
    if isinstance(L, nn.Conv2d):
        n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
        L.weight.data.normal_(0, math.sqrt(2.0 / f...
```
---
entry_point: Aggregate
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
from torch import nn as nn

class Aggregate(nn.Module):
    """Pooling layer based on sum or average with optional masking.

    Args:
        axis (int): axis along which pooling is done.
        mean (bool, optional): if True, use average instead for sum pooling.
        keepdim (bool, optional): whethe...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._emp...`
repo_name: AlexanderDKazakov/schnetpack
module_name: Aggregate
synthetic: false
uuid: 13
licenses: ["MIT"]
stars: 0
sha: 97b82469d977981b500e439a6c93696d8dac8a3f
repo_link: https://github.com/AlexanderDKazakov/schnetpack/tree/97b82469d977981b500e439a6c93696d8dac8a3f
pytorch_code (truncated):
```python
import torch
from torch import nn as nn

class Model(nn.Module):
    """Pooling layer based on sum or average with optional masking.

    Args:
        axis (int): axis along which pooling is done.
        mean (bool, optional): if True, use average instead for sum pooling.
        keepdim (bool, optional): whether th...
```
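The Aggregate row's docstring describes sum or average pooling with optional masking, but the code is cut off. A minimal sketch of that behavior, assuming a mask shaped like the input minus the feature axis (the clamp guards against division by zero; this is an assumption, not the row's verbatim source):

```python
import torch
import torch.nn as nn

class Aggregate(nn.Module):
    """Pool over one axis by sum, or by average when mean=True."""

    def __init__(self, axis, mean=False):
        super().__init__()
        self.axis = axis
        self.mean = mean

    def forward(self, input, mask=None):
        if mask is not None:
            input = input * mask[..., None]      # zero out masked entries
        y = torch.sum(input, self.axis)
        if self.mean:
            if mask is not None:
                n = torch.sum(mask, self.axis)   # count unmasked entries
                y = y / torch.clamp(n, min=1.0)[..., None]
            else:
                y = y / input.size(self.axis)
        return y
```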
---
entry_point: KaggleAccuracy
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
from torch import Tensor
from torch import nn

class KaggleAccuracy(nn.Module):
    def __init__(self, threshold: 'float'=0.25, num_patches: 'int'=38,
                 size: 'int'=418) -> None:
        super().__init__()
        self.threshold = threshold
        self.num_patches = num_patches
        self.patch_s...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import Tensor from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C....`
repo_name: AlessandroRuzzi/Computational-Intelligence-Lab-2021
module_name: KaggleAccuracy
synthetic: false
uuid: 14
licenses: ["MIT"]
stars: 0
sha: ed9dae37618e0ca2f01c4e336df4354e77e00c1f
repo_link: https://github.com/AlessandroRuzzi/Computational-Intelligence-Lab-2021/tree/ed9dae37618e0ca2f01c4e336df4354e77e00c1f
pytorch_code (truncated):
```python
import torch
from torch import Tensor
from torch import nn

class Model(nn.Module):
    def __init__(self, threshold: 'float'=0.25, num_patches: 'int'=38,
                 size: 'int'=418) -> None:
        super().__init__()
        self.threshold = threshold
        self.num_patches = num_patches
        self.patch_size = siz...
```
---
entry_point: CosAttention
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter

class ConstAttention(nn.Module):
    def __init__(self, **kwargs):
        super(ConstAttention, self).__init__()

    def forward(self, neighbor_vecs, self_vecs):
        return 1

class GatAttention(ConstAttention):
    ...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_siz...`
repo_name: AlexMinhao/NAS_GNN
module_name: CosAttention
synthetic: false
uuid: 15
licenses: ["Apache-2.0"]
stars: 0
sha: 89183988a96e1d6baed910ab3843c13282f8b077
repo_link: https://github.com/AlexMinhao/NAS_GNN/tree/89183988a96e1d6baed910ab3843c13282f8b077
pytorch_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter

class ConstAttention(nn.Module):
    def __init__(self, **kwargs):
        super().__init__()

    def forward(self, neighbor_vecs, self_vecs):
        return 1

class GatAttention(ConstAttention):
    def __init__(se...
```
---
entry_point: FullyConnectedNet
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class FullyConnectedNet(nn.Module):
    """https://github.com/VainF/Torch-Pruning/issues/21"""

    def __init__(self, input_size, num_classes, HIDDEN_UNITS):
        super().__init__()
        self.fc1 = nn.Linear(input_size, HIDDEN_UNITS)
        se...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...`
repo_name: Abhishekvats1997/Torch-Pruning
module_name: FullyConnectedNet
synthetic: false
uuid: 16
licenses: ["MIT"]
stars: 0
sha: b322a42d1c9032cc9644332d33a9662ca6ed44ac
repo_link: https://github.com/Abhishekvats1997/Torch-Pruning/tree/b322a42d1c9032cc9644332d33a9662ca6ed44ac
pytorch_code (truncated):
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    """https://github.com/VainF/Torch-Pruning/issues/21"""

    def __init__(self, input_size, num_classes, HIDDEN_UNITS):
        super().__init__()
        self.fc1 = nn.Linear(input_size, HIDDEN_UNITS)
        self.fc2 = nn....
```
---
entry_point: GatAttention
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter

class ConstAttention(nn.Module):
    def __init__(self, **kwargs):
        super(ConstAttention, self).__init__()

    def forward(self, neighbor_vecs, self_vecs):
        return 1

class GatAttention(ConstAttention):
    ...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = to...`
repo_name: AlexMinhao/NAS_GNN
module_name: GatAttention
synthetic: false
uuid: 17
licenses: ["Apache-2.0"]
stars: 0
sha: 89183988a96e1d6baed910ab3843c13282f8b077
repo_link: https://github.com/AlexMinhao/NAS_GNN/tree/89183988a96e1d6baed910ab3843c13282f8b077
pytorch_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter

class ConstAttention(nn.Module):
    def __init__(self, **kwargs):
        super().__init__()

    def forward(self, neighbor_vecs, self_vecs):
        return 1

class Model(ConstAttention):
    def __init__(self, num...
```
---
entry_point: TripletLoss
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
import torch.nn as nn

class TripletLoss(nn.Module):
    def __init__(self):
        super(TripletLoss, self).__init__()
        self.margin = 0.5

    def distance(self, x, y):
        diff = torch.abs(x - y)
        diff = torch.pow(diff, 2).sum(-1)
        return diff

    def forward(self, anchor, po...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...`
repo_name: Alonso94/Representation-learning
module_name: TripletLoss
synthetic: false
uuid: 18
licenses: ["MIT"]
stars: 0
sha: c4410b3bc5d2d1de666fba2958c4a7024e2af79f
repo_link: https://github.com/Alonso94/Representation-learning/tree/c4410b3bc5d2d1de666fba2958c4a7024e2af79f
pytorch_code (truncated):
```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.margin = 0.5

    def distance(self, x, y):
        diff = torch.abs(x - y)
        diff = torch.pow(diff, 2).sum(-1)
        return diff

    def forward(self, anchor, pos, neg):
        pos_di...
```
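The TripletLoss forward is truncated right after computing `pos_di...`; the usual margin hinge over the squared distances, as a sketch (the clamp-based formulation is an assumption consistent with the fixed margin of 0.5):

```python
import torch
import torch.nn as nn

class TripletLoss(nn.Module):
    def __init__(self):
        super().__init__()
        self.margin = 0.5

    def distance(self, x, y):
        # squared Euclidean distance over the last dimension
        diff = torch.abs(x - y)
        return torch.pow(diff, 2).sum(-1)

    def forward(self, anchor, pos, neg):
        pos_dist = self.distance(anchor, pos)
        neg_dist = self.distance(anchor, neg)
        # hinge: penalize anchors whose positive is not closer by `margin`
        return torch.clamp(pos_dist - neg_dist + self.margin, min=0.0).mean()
```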
---
entry_point: GatSymAttention
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter

class ConstAttention(nn.Module):
    def __init__(self, **kwargs):
        super(ConstAttention, self).__init__()

    def forward(self, neighbor_vecs, self_vecs):
        return 1

class GatAttention(ConstAttention):
    ...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_siz...`
repo_name: AlexMinhao/NAS_GNN
module_name: GatSymAttention
synthetic: false
uuid: 19
licenses: ["Apache-2.0"]
stars: 0
sha: 89183988a96e1d6baed910ab3843c13282f8b077
repo_link: https://github.com/AlexMinhao/NAS_GNN/tree/89183988a96e1d6baed910ab3843c13282f8b077
pytorch_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter

class ConstAttention(nn.Module):
    def __init__(self, **kwargs):
        super().__init__()

    def forward(self, neighbor_vecs, self_vecs):
        return 1

class GatAttention(ConstAttention):
    def __init__(se...
```
---
entry_point: HuberLoss
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
import torch.nn as nn
import torch.utils.data

class HuberLoss(nn.Module):
    def __init__(self, delta=1):
        super().__init__()
        self.huber_loss_delta1 = nn.SmoothL1Loss()
        self.delta = delta

    def forward(self, x, x_hat):
        loss = self.huber_loss_delta1(x / self.delta, x_ha...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...`
repo_name: AndrewPaulChester/sage-code
module_name: HuberLoss
synthetic: false
uuid: 20
licenses: ["MIT"]
stars: 0
sha: 9fe676bfbcbc6f642eca29b30a1027fba2a426a0
repo_link: https://github.com/AndrewPaulChester/sage-code/tree/9fe676bfbcbc6f642eca29b30a1027fba2a426a0
pytorch_code (truncated):
```python
import torch
import torch.nn as nn
import torch.utils.data

class Model(nn.Module):
    def __init__(self, delta=1):
        super().__init__()
        self.huber_loss_delta1 = nn.SmoothL1Loss()
        self.delta = delta

    def forward(self, x, x_hat):
        loss = self.huber_loss_delta1(x / self.delta, x_hat / ...
```
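The HuberLoss row leans on a standard identity: SmoothL1 is Huber with delta = 1, and Huber_delta(r) = delta^2 * Huber_1(r / delta). A sketch completing the truncated forward under that identity (the final `* self.delta ** 2` is an assumption):

```python
import torch
import torch.nn as nn

class HuberLoss(nn.Module):
    def __init__(self, delta=1):
        super().__init__()
        self.huber_loss_delta1 = nn.SmoothL1Loss()
        self.delta = delta

    def forward(self, x, x_hat):
        # scale inputs by 1/delta, rescale the loss by delta**2:
        # this recovers the Huber loss with parameter delta
        loss = self.huber_loss_delta1(x / self.delta, x_hat / self.delta)
        return loss * self.delta ** 2
```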
---
entry_point: ScaledDotProductAttention
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn

class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropo...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....`
repo_name: AlbertiPot/attention-is-all-you-need-pytorch
module_name: ScaledDotProductAttention
synthetic: false
uuid: 21
licenses: ["MIT"]
stars: 0
sha: c5ec40907db281b85b3bd7a5dd8016940291add0
repo_link: https://github.com/AlbertiPot/attention-is-all-you-need-pytorch/tree/c5ec40907db281b85b3bd7a5dd8016940291add0
pytorch_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn

class Model(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)

    def forward...
```
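Several rows from AlbertiPot/attention-is-all-you-need-pytorch truncate before the attention math. The canonical scaled dot-product computation they wrap, as a sketch (masked positions are filled with a large negative number before the softmax, per the common convention):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)

    def forward(self, q, k, v, mask=None):
        # similarity scores, scaled by the temperature (usually sqrt(d_k))
        attn = torch.matmul(q / self.temperature, k.transpose(-2, -1))
        if mask is not None:
            attn = attn.masked_fill(mask == 0, -1e9)
        attn = self.dropout(F.softmax(attn, dim=-1))
        output = torch.matmul(attn, v)
        return output, attn
```

The MultiHeadAttention, EncoderLayer, and DecoderLayer rows below reuse this block as their inner kernel.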
---
entry_point: ScalarBiasScale
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import init

class ScalarScaleBias(nn.Module):
    def __init__(self, scale=True, scale_init=1.0, bias=True, bias_init=0.0
                 ) -> None:
        super(ScalarScaleBias, self).__init__()
        if scale:
            self.weig...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert...`
repo_name: Ali-Homsi/githubrepo
module_name: ScalarBiasScale
synthetic: false
uuid: 25
licenses: ["Apache-2.0"]
stars: 0
sha: 7163f110193142a97ec05f76ff7d897c6cedb915
repo_link: https://github.com/Ali-Homsi/githubrepo/tree/7163f110193142a97ec05f76ff7d897c6cedb915
pytorch_code (truncated):
```python
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import init

class ScalarScaleBias(nn.Module):
    def __init__(self, scale=True, scale_init=1.0, bias=True, bias_init=0.0
                 ) -> None:
        super().__init__()
        if scale:
            self.weight = Parameter(torch....
```
---
entry_point: MeanStd
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
import torch.nn as nn

class MeanStd(nn.Module):
    def __init__(self):
        super(MeanStd, self).__init__()

    def forward(self, x):
        x = x.view(x.size(0), x.size(1), -1)
        mean_x = torch.mean(x, dim=2)
        var_x = torch.mean(x ** 2, dim=2) - mean_x * mean_x
        return torch.c...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_st...`
repo_name: AnetaKaczynska/video-GAN
module_name: MeanStd
synthetic: false
uuid: 26
licenses: ["BSD-3-Clause"]
stars: 0
sha: e30e54c18265c658a65b1b26b57b4f499b58bfc6
repo_link: https://github.com/AnetaKaczynska/video-GAN/tree/e30e54c18265c658a65b1b26b57b4f499b58bfc6
pytorch_code (truncated):
```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        x = x.view(x.size(0), x.size(1), -1)
        mean_x = torch.mean(x, dim=2)
        var_x = torch.mean(x ** 2, dim=2) - mean_x * mean_x
        return torch.cat([mean_x, var...
```
---
entry_point: L2Norm
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init

class L2Norm(nn.Module):
    def __init__(self, n_channels, scale):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        ...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from math import sqrt as sqrt from itertools import produ...`
repo_name: AlphaGoMK/ssd.pytorch
module_name: L2Norm
synthetic: false
uuid: 27
licenses: ["MIT"]
stars: 0
sha: d9a85041645b6d221fe0531e985c6fc90a612391
repo_link: https://github.com/AlphaGoMK/ssd.pytorch/tree/d9a85041645b6d221fe0531e985c6fc90a612391
pytorch_code (truncated):
```python
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init

class Model(nn.Module):
    def __init__(self, n_channels, scale):
        super().__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps...
```
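The L2Norm row stops mid-constructor. A sketch of the SSD-style channel-wise L2 normalization it implements (initializing the weight to `scale` follows the usual ssd.pytorch convention and is an assumption here):

```python
import torch
import torch.nn as nn
import torch.nn.init as init

class L2Norm(nn.Module):
    def __init__(self, n_channels, scale):
        super().__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        init.constant_(self.weight, self.gamma)

    def forward(self, x):
        # L2 norm over the channel axis of an NCHW tensor
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        x = torch.div(x, norm)
        # learned per-channel rescaling, broadcast over N, H, W
        return self.weight.view(1, -1, 1, 1) * x
```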
---
entry_point: PositionwiseFeedForward
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn

class PositionwiseFeedForward(nn.Module):
    """ A two-feed-forward-layer module """

    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_in, d_hid)
        self.w_2 = nn.Linear(d_hid, d_in)
        ...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....`
repo_name: AlbertiPot/attention-is-all-you-need-pytorch
module_name: PositionwiseFeedForward
synthetic: false
uuid: 28
licenses: ["MIT"]
stars: 0
sha: c5ec40907db281b85b3bd7a5dd8016940291add0
repo_link: https://github.com/AlbertiPot/attention-is-all-you-need-pytorch/tree/c5ec40907db281b85b3bd7a5dd8016940291add0
pytorch_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn

class Model(nn.Module):
    """ A two-feed-forward-layer module """

    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_in, d_hid)
        self.w_2 = nn.Linear(d_hid, d_in)
        self.layer_no...
```
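A sketch of the two-layer feed-forward block this row truncates: expand, ReLU, project back, dropout, then residual add and layer norm (the add-then-norm ordering is an assumption based on that codebase's transformer layout):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class PositionwiseFeedForward(nn.Module):
    """ A two-feed-forward-layer module """

    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_in, d_hid)
        self.w_2 = nn.Linear(d_hid, d_in)
        self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        residual = x
        x = self.w_2(F.relu(self.w_1(x)))      # expand, nonlinearity, project
        x = self.dropout(x)
        return self.layer_norm(x + residual)  # add & norm
```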
---
entry_point: NormalizationLayer
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
import torch.nn as nn

class NormalizationLayer(nn.Module):
    def __init__(self):
        super(NormalizationLayer, self).__init__()

    def forward(self, x, epsilon=1e-08):
        return x * ((x ** 2).mean(dim=1, keepdim=True) + epsilon).rsqrt()

def get_inputs():
    return [torch.rand([4, 4, 4, 4...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...`
repo_name: AnetaKaczynska/video-GAN
module_name: NormalizationLayer
synthetic: false
uuid: 30
licenses: ["BSD-3-Clause"]
stars: 0
sha: e30e54c18265c658a65b1b26b57b4f499b58bfc6
repo_link: https://github.com/AnetaKaczynska/video-GAN/tree/e30e54c18265c658a65b1b26b57b4f499b58bfc6
pytorch_code (truncated):
```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x, epsilon=1e-08):
        return x * ((x ** 2).mean(dim=1, keepdim=True) + epsilon).rsqrt()

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    retu...
```
---
entry_point: Decoder
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn

class Decoder(nn.Module):
    def __init__(self, z_dim):
        super(Decoder, self).__init__()
        self.deconv1 = nn.ConvTranspose2d(z_dim, 128, kernel_size=4,
            stride=1, bias=False)
        self.deconv2 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2,
            ...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...`
repo_name: AbinavRavi/VAE-KL
module_name: Decoder
synthetic: false
uuid: 31
licenses: ["Apache-2.0"]
stars: 0
sha: af7a44b7952c2e5e1be4f3ffa12a3d859f4f4bdc
repo_link: https://github.com/AbinavRavi/VAE-KL/tree/af7a44b7952c2e5e1be4f3ffa12a3d859f4f4bdc
pytorch_code (truncated):
```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, z_dim):
        super().__init__()
        self.deconv1 = nn.ConvTranspose2d(z_dim, 128, kernel_size=4,
            stride=1, bias=False)
        self.deconv2 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2,
            padding=...
```
---
entry_point: LayerNorm
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn
import torch.utils.data

class LayerNorm(nn.Module):
    """
    Simple 1D LayerNorm.
    """

    def __init__(self, features, center=True, scale=False, eps=1e-06):
        super().__init__()
        self.center = center
        self.scale = scale
        self.eps = eps
        if s...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dy...`
repo_name: AndrewPaulChester/sage-code
module_name: LayerNorm
synthetic: false
uuid: 32
licenses: ["MIT"]
stars: 0
sha: 9fe676bfbcbc6f642eca29b30a1027fba2a426a0
repo_link: https://github.com/AndrewPaulChester/sage-code/tree/9fe676bfbcbc6f642eca29b30a1027fba2a426a0
pytorch_code (truncated):
```python
import torch
import torch.nn as nn
import torch.utils.data

class Model(nn.Module):
    """
    Simple 1D LayerNorm.
    """

    def __init__(self, features, center=True, scale=False, eps=1e-06):
        super().__init__()
        self.center = center
        self.scale = scale
        self.eps = eps
        if self....
```
---
entry_point: MaskedMSELoss
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
import torch.nn as nn

class MaskedMSELoss(nn.Module):
    def __init__(self):
        super(MaskedMSELoss, self).__init__()
        self.loss = nn.MSELoss(reduction='sum')

    def forward(self, pred, target, mask):
        """
        pred -> batch*seq_len
        target -> batch*seq_len
        mask -...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride emp...`
repo_name: Anshul044/Project-NN
module_name: MaskedMSELoss
synthetic: false
uuid: 33
licenses: ["MIT"]
stars: 0
sha: ef080846715a95b735f0381e4f60742e40791630
repo_link: https://github.com/Anshul044/Project-NN/tree/ef080846715a95b735f0381e4f60742e40791630
pytorch_code (truncated):
```python
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.loss = nn.MSELoss(reduction='sum')

    def forward(self, pred, target, mask):
        """
        pred -> batch*seq_len
        target -> batch*seq_len
        mask -> batch*seq_len
        """...
```
---
entry_point: SimpleMLP
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data

class SimpleMLP(nn.Module):
    def __init__(self, n_inputs, n_outputs, dropout_probability):
        super(SimpleMLP, self).__init__()
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        self.dropout_pr...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import t...`
repo_name: AlexandreGuilbault/CS294-Hws
module_name: SimpleMLP
synthetic: false
uuid: 34
licenses: ["MIT"]
stars: 0
sha: 2810323c1560949707b71843039ff7c88ae7e596
repo_link: https://github.com/AlexandreGuilbault/CS294-Hws/tree/2810323c1560949707b71843039ff7c88ae7e596
pytorch_code (truncated):
```python
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data

class Model(nn.Module):
    def __init__(self, n_inputs, n_outputs, dropout_probability):
        super().__init__()
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        self.dropout_probability = dropout...
```
---
entry_point: MultiHeadAttention
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn

class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropo...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....`
repo_name: AlbertiPot/attention-is-all-you-need-pytorch
module_name: MultiHeadAttention
synthetic: false
uuid: 35
licenses: ["MIT"]
stars: 0
sha: c5ec40907db281b85b3bd7a5dd8016940291add0
repo_link: https://github.com/AlbertiPot/attention-is-all-you-need-pytorch/tree/c5ec40907db281b85b3bd7a5dd8016940291add0
pytorch_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn

class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropo...
```
---
entry_point: Net
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as tnn

class Net(tnn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = tnn.Conv2d(3, 6, 5)
        self.pool = tnn.MaxPool2d(2, 2)
        self.conv2 = tnn.Conv2d(6, 16, 5)
        self.fc1 = tnn.Linear(16 * 5 * 5, 120)
        self.fc2 = tnn.Linea...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as tnn assert...`
repo_name: AbbasMZ/jittor
module_name: Net
synthetic: false
uuid: 36
licenses: ["Apache-2.0"]
stars: 0
sha: fcec57f70422b52d6b8d0235e29f91fd2212f559
repo_link: https://github.com/AbbasMZ/jittor/tree/fcec57f70422b52d6b8d0235e29f91fd2212f559
pytorch_code (truncated):
```python
import torch
import torch.nn as tnn

class Model(tnn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = tnn.Conv2d(3, 6, 5)
        self.pool = tnn.MaxPool2d(2, 2)
        self.conv2 = tnn.Conv2d(6, 16, 5)
        self.fc1 = tnn.Linear(16 * 5 * 5, 120)
        self.fc2 = tnn.Linear(120, ...
```
---
entry_point: EncoderLayer
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn

class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropo...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....`
repo_name: AlbertiPot/attention-is-all-you-need-pytorch
module_name: EncoderLayer
synthetic: false
uuid: 37
licenses: ["MIT"]
stars: 0
sha: c5ec40907db281b85b3bd7a5dd8016940291add0
repo_link: https://github.com/AlbertiPot/attention-is-all-you-need-pytorch/tree/c5ec40907db281b85b3bd7a5dd8016940291add0
pytorch_code (truncated):
```python
import torch
import torch.nn.functional as F
import torch.nn as nn

class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropo...
```
---
entry_point: Mish
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
from torch import nn

class MishAutoFn(torch.autograd.Function):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function
    - https://arxiv.org/abs/1908.08681

    Experimental memory-efficient variant
    """

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        ...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.gua...`
repo_name: AnikinNN/AI-58
module_name: Mish
synthetic: false
uuid: 38
licenses: ["MIT"]
stars: 0
sha: ca3e3e7fc4551cf23d7f76a3d4edaf0c625883b9
repo_link: https://github.com/AnikinNN/AI-58/tree/ca3e3e7fc4551cf23d7f76a3d4edaf0c625883b9
pytorch_code (truncated):
```python
import torch
from torch import nn

class MishAutoFn(torch.autograd.Function):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function
    - https://arxiv.org/abs/1908.08681

    Experimental memory-efficient variant
    """

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        ...
```
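Mish is x * tanh(softplus(x)); the row's memory-efficient autograd variant is truncated after forward. A sketch of both directions (the backward expression is the standard analytic derivative and an assumption relative to the row's exact source):

```python
import torch
import torch.nn.functional as F

class MishAutoFn(torch.autograd.Function):
    """Mish, recomputing activations in backward instead of storing them."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x.mul(torch.tanh(F.softplus(x)))

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        x_sigmoid = torch.sigmoid(x)
        x_tanh_sp = F.softplus(x).tanh()
        # d/dx [x * tanh(softplus(x))]
        #   = tanh(sp(x)) + x * sigmoid(x) * (1 - tanh(sp(x))^2)
        return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
```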
---
entry_point: SparseDownSampleClose
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data

class SparseDownSampleClose(nn.Module):
    def __init__(self, stride):
        super(SparseDownSampleClose, self).__init__()
        self.pooling = nn.MaxPool2d(stride, stride)
        self.large_number = 600
        ...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.asser...`
repo_name: Anonymous1234321/GuideFormer
module_name: SparseDownSampleClose
synthetic: false
uuid: 39
licenses: ["MIT"]
stars: 0
sha: cccee1c5305977a1bc8d0b8df3f1b6ff66bd1736
repo_link: https://github.com/Anonymous1234321/GuideFormer/tree/cccee1c5305977a1bc8d0b8df3f1b6ff66bd1736
pytorch_code (truncated):
```python
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data

class Model(nn.Module):
    def __init__(self, stride):
        super().__init__()
        self.pooling = nn.MaxPool2d(stride, stride)
        self.large_number = 600

    def forward(self, d, mask):
        encode...
```
---
entry_point: ScalarScaleBias
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import init

class ScalarScaleBias(nn.Module):
    def __init__(self, scale=True, scale_init=1.0, bias=True, bias_init=0.0
                 ) -> None:
        super(ScalarScaleBias, self).__init__()
        if scale:
            self.weig...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert...`
repo_name: Ali-Homsi/githubrepo
module_name: ScalarScaleBias
synthetic: false
uuid: 40
licenses: ["Apache-2.0"]
stars: 0
sha: 7163f110193142a97ec05f76ff7d897c6cedb915
repo_link: https://github.com/Ali-Homsi/githubrepo/tree/7163f110193142a97ec05f76ff7d897c6cedb915
pytorch_code (truncated):
```python
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import init

class Model(nn.Module):
    def __init__(self, scale=True, scale_init=1.0, bias=True, bias_init=0.0
                 ) -> None:
        super().__init__()
        if scale:
            self.weight = Parameter(torch.Tensor(1))...
```
---
entry_point: NormActivation
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch

class NormActivation(torch.nn.Module):
    def __init__(self, dim=-1):
        super().__init__()
        self.dim = dim

    def forward(self, tensor):
        tensor = tensor ** 2
        length = tensor.sum(dim=self.dim, keepdim=True)
        return tensor / length

def get_inputs():
    return [tor...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.j...`
repo_name: AnimeshSinha1309/qroute-router
module_name: NormActivation
synthetic: false
uuid: 41
licenses: ["MIT"]
stars: 0
sha: 56ef1b6c08e721011e1f73a04b0b0229f7bdff1b
repo_link: https://github.com/AnimeshSinha1309/qroute-router/tree/56ef1b6c08e721011e1f73a04b0b0229f7bdff1b
pytorch_code (truncated):
```python
import torch

class Model(torch.nn.Module):
    def __init__(self, dim=-1):
        super().__init__()
        self.dim = dim

    def forward(self, tensor):
        tensor = tensor ** 2
        length = tensor.sum(dim=self.dim, keepdim=True)
        return tensor / length

def get_inputs():
    return [torch.rand([...
```
---
entry_point: BertLayerNorm
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
from torch import nn
import torch.onnx

class BertLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style
        (epsilon inside the square root).
        """
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Paramete...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.onnx assert_size_stride = torch._C._dynamo.gu...`
repo_name: Alwaysproblem/examples-1
module_name: BertLayerNorm
synthetic: false
uuid: 42
licenses: ["MIT"]
stars: 0
sha: 9754fa63ed1931489a21ac1f5b299f945e369a5c
repo_link: https://github.com/Alwaysproblem/examples-1/tree/9754fa63ed1931489a21ac1f5b299f945e369a5c
pytorch_code (truncated):
```python
import torch
from torch import nn
import torch.onnx

class Model(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style
        (epsilon inside the square root).
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        ...
```
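TF-style layer norm puts epsilon inside the square root, in contrast to the (std + eps) variant in the earlier LayerNorm rows. A sketch of the standard BERT implementation this row truncates:

```python
import torch
from torch import nn

class BertLayerNorm(nn.Module):
    """Layernorm in the TF style: epsilon inside the square root."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)  # biased variance
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias
```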
---
entry_point: Upscale2d
original_triton_code (truncated): `# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...`
python_code (truncated):
```python
import torch
from torch import nn

def upscale2d(x, factor=2, gain=1):
    assert x.dim() == 4
    if gain != 1:
        x = x * gain
    if factor != 1:
        shape = x.shape
        x = x.view(shape[0], shape[1], shape[2], 1, shape[3], 1).expand(-1,
            -1, -1, factor, -1, factor)
        x = x.contiguous(...
```
triton_code (truncated): `import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_str...`
repo_name: AnimeshKoratana/blurryface
module_name: Upscale2d
synthetic: false
uuid: 43
licenses: ["Apache-2.0"]
stars: 0
sha: c6cb5feec02f6d5af3acb1678336800390715d65
repo_link: https://github.com/AnimeshKoratana/blurryface/tree/c6cb5feec02f6d5af3acb1678336800390715d65
pytorch_code (truncated):
```python
import torch
from torch import nn

def upscale2d(x, factor=2, gain=1):
    assert x.dim() == 4
    if gain != 1:
        x = x * gain
    if factor != 1:
        shape = x.shape
        x = x.view(shape[0], shape[1], shape[2], 1, shape[3], 1).expand(-1,
            -1, -1, factor, -1, factor)
        x = x.contiguous(...
```
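The upscale2d helper is cut off one reshape from the end; completing the view is mechanical. A sketch (the final reshape is implied by the preceding expand):

```python
import torch

def upscale2d(x, factor=2, gain=1):
    """Nearest-neighbor upsampling of an NCHW tensor by an integer factor."""
    assert x.dim() == 4
    if gain != 1:
        x = x * gain
    if factor != 1:
        shape = x.shape
        # insert singleton axes next to H and W, broadcast them to `factor`,
        # then fold them back in: (N, C, H, W) -> (N, C, H*factor, W*factor)
        x = x.view(shape[0], shape[1], shape[2], 1, shape[3], 1).expand(
            -1, -1, -1, factor, -1, factor)
        x = x.contiguous().view(shape[0], shape[1], shape[2] * factor,
                                shape[3] * factor)
    return x
```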
---
entry_point: AdaIN
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import math
import torch
import torch.nn as nn
from numpy import prod

def getLayerNormalizationFactor(x):
    """
    Get He's constant for the given layer
    https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
    """
    size = x.weight.size()
    fan_in = pro...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....`
repo_name: AnetaKaczynska/video-GAN
module_name: AdaIN
synthetic: false
uuid: 44
licenses: ["BSD-3-Clause"]
stars: 0
sha: e30e54c18265c658a65b1b26b57b4f499b58bfc6
repo_link: https://github.com/AnetaKaczynska/video-GAN/tree/e30e54c18265c658a65b1b26b57b4f499b58bfc6
pytorch_code (truncated):
```python
import math
import torch
import torch.nn as nn
from numpy import prod

def getLayerNormalizationFactor(x):
    """
    Get He's constant for the given layer
    https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
    """
    size = x.weight.size()
    fan_in = pro...
```
---
entry_point: maxout
original_triton_code (truncated): `# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...`
python_code (truncated):
```python
import torch
import torch.nn as nn
import torch.utils.data

class maxout(nn.Module):
    """ maxout network """

    def __init__(self, in_feature, out_feature, pool_size):
        super(maxout, self).__init__()
        self.in_feature = in_feature
        self.out_feature = out_feature
        self.pool_size ...
```
triton_code (truncated): `import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import ...`
repo_name: Angelinaa/KOBE
module_name: maxout
synthetic: false
uuid: 45
licenses: ["MIT"]
stars: 0
sha: 4d25487051e2791a977e59297f70a25e51806466
repo_link: https://github.com/Angelinaa/KOBE/tree/4d25487051e2791a977e59297f70a25e51806466
pytorch_code (truncated):
```python
import torch
import torch.nn as nn
import torch.utils.data

class Model(nn.Module):
    """ maxout network """

    def __init__(self, in_feature, out_feature, pool_size):
        super().__init__()
        self.in_feature = in_feature
        self.out_feature = out_feature
        self.pool_size = pool_size
        ...
```
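The maxout row truncates before the forward pass; the standard construction projects to out_feature * pool_size units and takes a max over the pool axis. A sketch (the exact reshape order is an assumption):

```python
import torch
import torch.nn as nn

class Maxout(nn.Module):
    """Maxout unit: k affine maps, elementwise max over the k candidates."""

    def __init__(self, in_feature, out_feature, pool_size):
        super().__init__()
        self.out_feature = out_feature
        self.pool_size = pool_size
        self.linear = nn.Linear(in_feature, out_feature * pool_size)

    def forward(self, x):
        out = self.linear(x)                                # (..., out*k)
        out = out.view(*x.shape[:-1], self.out_feature, self.pool_size)
        out, _ = out.max(dim=-1)                            # max over k maps
        return out
```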
DecoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional as F import torch.nn as nn class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropo...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
AlbertiPot/attention-is-all-you-need-pytorch
DecoderLayer
false
46
[ "MIT" ]
0
c5ec40907db281b85b3bd7a5dd8016940291add0
https://github.com/AlbertiPot/attention-is-all-you-need-pytorch/tree/c5ec40907db281b85b3bd7a5dd8016940291add0
import torch import torch.nn.functional as F import torch.nn as nn class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropo...
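The DecoderLayer record's visible helper is the standard scaled dot-product attention; a runnable sketch matching the visible constructor (temperature plus attention dropout), with the mask convention an assumption:

import torch
import torch.nn as nn
import torch.nn.functional as F

class ScaledDotProductAttentionSketch(nn.Module):
    # attn = softmax(Q K^T / temperature); output = attn @ V.
    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)

    def forward(self, q, k, v, mask=None):
        scores = torch.matmul(q, k.transpose(-2, -1)) / self.temperature
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)
        attn = self.dropout(F.softmax(scores, dim=-1))
        return torch.matmul(attn, v), attn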
Joiner
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn from torch.nn import functional as F class Joiner(nn.Module): def __init__(self, x_latent_dim, y_latent_dim, hidden_dim): super().__init__() self.fc1 = nn.Linear(x_latent_dim + y_latent_dim, hidden_dim) self.fc2 = nn.Linear(hidden_dim, 1) def forward...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
Andrewzh112/experiments
Joiner
false
47
[ "MIT" ]
0
a35fd9e6157cd9a746f82229c2487539f668716a
https://github.com/Andrewzh112/experiments/tree/a35fd9e6157cd9a746f82229c2487539f668716a
import torch from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, x_latent_dim, y_latent_dim, hidden_dim): super().__init__() self.fc1 = nn.Linear(x_latent_dim + y_latent_dim, hidden_dim) self.fc2 = nn.Linear(hidden_dim, 1) def forward(...
NoiseLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class NoiseLayer(nn.Module): """adds noise. noise is per pixel (constant over channels) with per-channel weight""" def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(channels)) self.noise = None def forward(se...
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C....
AnimeshKoratana/blurryface
NoiseLayer
false
49
[ "Apache-2.0" ]
0
c6cb5feec02f6d5af3acb1678336800390715d65
https://github.com/AnimeshKoratana/blurryface/tree/c6cb5feec02f6d5af3acb1678336800390715d65
import torch from torch import nn class Model(nn.Module): """adds noise. noise is per pixel (constant over channels) with per-channel weight""" def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(channels)) self.noise = None def forward(self, x...
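NoiseLayer's docstring says the noise is per pixel (constant over channels) with a per-channel weight; a sketch consistent with the visible fields (the noise-override branch is an assumption):

import torch
from torch import nn

class NoiseLayerSketch(nn.Module):
    # x + weight[c] * noise, with noise of shape (N, 1, H, W) so the same
    # sample is shared across channels; self.noise lets callers pin it.
    def __init__(self, channels):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(channels))
        self.noise = None

    def forward(self, x, noise=None):
        if noise is None and self.noise is None:
            noise = torch.randn(x.size(0), 1, x.size(2), x.size(3),
                                device=x.device, dtype=x.dtype)
        elif noise is None:
            noise = self.noise
        return x + self.weight.view(1, -1, 1, 1) * noise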
resblock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class mfm(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1): super(mfm, self).__init__() self.out_channels = out_channels if type == 1: self.filter = nn.Conv2d(in_channels, 2 * out_ch...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
AnimeshKoratana/blurryface
resblock
false
50
[ "Apache-2.0" ]
0
c6cb5feec02f6d5af3acb1678336800390715d65
https://github.com/AnimeshKoratana/blurryface/tree/c6cb5feec02f6d5af3acb1678336800390715d65
import torch from torch import nn class mfm(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1): super().__init__() self.out_channels = out_channels if type == 1: self.filter = nn.Conv2d(in_channels, 2 * out_channels, ...
PositionwiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class PositionwiseFeedForward(nn.Module): """ Point-wise Feed-Forward NN, FFN, in fact 1-d convolution """ def __init__(self, d_model, d_ff, dropout=0.1): """ initialization of required functions :param d_model: model size ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Angelinaa/KOBE
PositionwiseFeedForward
false
51
[ "MIT" ]
0
4d25487051e2791a977e59297f70a25e51806466
https://github.com/Angelinaa/KOBE/tree/4d25487051e2791a977e59297f70a25e51806466
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): """ Point-wise Feed-Forward NN, FFN, in fact 1-d convolution """ def __init__(self, d_model, d_ff, dropout=0.1): """ initialization of required functions :param d_model: model size :param d_ff: ...
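The PositionwiseFeedForward docstring describes the FFN as a 1-d convolution; a hedged sketch using kernel-size-1 convolutions (the residual connection and LayerNorm placement are assumptions):

import torch
import torch.nn as nn

class PositionwiseFeedForwardSketch(nn.Module):
    # Point-wise FFN: Conv1d(d_model -> d_ff) -> ReLU -> Conv1d(d_ff -> d_model),
    # applied per position, with dropout, residual, and LayerNorm.
    def __init__(self, d_model, d_ff, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Conv1d(d_model, d_ff, 1)
        self.w_2 = nn.Conv1d(d_ff, d_model, 1)
        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()

    def forward(self, x):
        # x: (batch, seq_len, d_model); Conv1d expects channels first.
        residual = x
        out = self.w_2(self.relu(self.w_1(x.transpose(1, 2)))).transpose(1, 2)
        return self.norm(self.dropout(out) + residual)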
SelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class SelfAttention(nn.Module): def __init__(self, hidden_size, attention_size=100, n_attention_heads=1): super().__init__() self.hidden_size = hidden_size self.attention_size = attention_size self.n_attention_head...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
AnoushkaVyas/TextOutlierDetection
SelfAttention
false
52
[ "MIT" ]
0
290a6800262090998d32c8bbd311e3d53737e2cd
https://github.com/AnoushkaVyas/TextOutlierDetection/tree/290a6800262090998d32c8bbd311e3d53737e2cd
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, hidden_size, attention_size=100, n_attention_heads=1): super().__init__() self.hidden_size = hidden_size self.attention_size = attention_size self.n_attention_heads = n_at...
RelativeAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Attention(nn.Module): def __init__(self, heads, n_state): super().__init__() assert n_state % heads == 0 self.heads = heads self.n_state = n_state self.depth = self.n_state // self.heads def split_heads(self, x: 'torch.Tensor',...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Aalanli/MusicGeneration
RelativeAttention
false
53
[ "MIT" ]
0
7d268322d692013d8ac6e70be31741cea519fa28
https://github.com/Aalanli/MusicGeneration/tree/7d268322d692013d8ac6e70be31741cea519fa28
import torch import torch.nn as nn class Attention(nn.Module): def __init__(self, heads, n_state): super().__init__() assert n_state % heads == 0 self.heads = heads self.n_state = n_state self.depth = self.n_state // self.heads def split_heads(self, x: 'torch.Tensor',...
GeometryFeature
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data class GeometryFeature(nn.Module): def __init__(self): super(GeometryFeature, self).__init__() def forward(self, z, vnorm, unorm, h, w, ch, cw, fh, fw): x = z * (0.5 * h * (vnorm + 1) - ch) ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.asser...
Anonymous1234321/GuideFormer
GeometryFeature
false
55
[ "MIT" ]
0
cccee1c5305977a1bc8d0b8df3f1b6ff66bd1736
https://github.com/Anonymous1234321/GuideFormer/tree/cccee1c5305977a1bc8d0b8df3f1b6ff66bd1736
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data class Model(nn.Module): def __init__(self): super().__init__() def forward(self, z, vnorm, unorm, h, w, ch, cw, fh, fw): x = z * (0.5 * h * (vnorm + 1) - ch) / fh y = z * (0.5 * w *...
group
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class mfm(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1): super(mfm, self).__init__() self.out_channels = out_channels if type == 1: self.filter = nn.Conv2d(in_channels, 2 * out_ch...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
AnimeshKoratana/blurryface
group
false
57
[ "Apache-2.0" ]
0
c6cb5feec02f6d5af3acb1678336800390715d65
https://github.com/AnimeshKoratana/blurryface/tree/c6cb5feec02f6d5af3acb1678336800390715d65
import torch from torch import nn class mfm(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1): super().__init__() self.out_channels = out_channels if type == 1: self.filter = nn.Conv2d(in_channels, 2 * out_channels, ...
ToHalf
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.onnx class ToHalf(torch.nn.Module): def forward(self, tensor): return tensor.half() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_stride...
Alwaysproblem/examples-1
ToHalf
false
58
[ "MIT" ]
0
9754fa63ed1931489a21ac1f5b299f945e369a5c
https://github.com/Alwaysproblem/examples-1/tree/9754fa63ed1931489a21ac1f5b299f945e369a5c
import torch import torch.onnx class Model(torch.nn.Module): def forward(self, tensor): return tensor.half() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
mfm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class mfm(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1): super(mfm, self).__init__() self.out_channels = out_channels if type == 1: self.filter = nn.Conv2d(in_channels, 2 * out_ch...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
AnimeshKoratana/blurryface
mfm
false
59
[ "Apache-2.0" ]
0
c6cb5feec02f6d5af3acb1678336800390715d65
https://github.com/AnimeshKoratana/blurryface/tree/c6cb5feec02f6d5af3acb1678336800390715d65
import torch from torch import nn class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1): super().__init__() self.out_channels = out_channels if type == 1: self.filter = nn.Conv2d(in_channels, 2 * out_channels,...
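The mfm (max-feature-map) records above all share the same core: the filter produces 2 * out_channels and the two halves compete elementwise, as in LightCNN. A runnable sketch of the type=1 convolutional branch visible in the prefix:

import torch
from torch import nn

class MFMSketch(nn.Module):
    # Max-Feature-Map activation: split the doubled channel axis in half
    # and keep the elementwise maximum.
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1):
        super().__init__()
        self.out_channels = out_channels
        self.filter = nn.Conv2d(in_channels, 2 * out_channels,
                                kernel_size=kernel_size, stride=stride,
                                padding=padding)

    def forward(self, x):
        out = self.filter(x)
        a, b = torch.split(out, self.out_channels, dim=1)
        return torch.max(a, b)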
SimpleAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class SimpleAttention(nn.Module): def __init__(self, input_dim): super(SimpleAttention, self).__init__() self.input_dim = input_dim self.scalar = nn.Linear(self.input_dim, 1, bias=False) def forward(self, M, x=None): ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Anshul044/Project-NN
SimpleAttention
false
60
[ "MIT" ]
0
ef080846715a95b735f0381e4f60742e40791630
https://github.com/Anshul044/Project-NN/tree/ef080846715a95b735f0381e4f60742e40791630
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, input_dim): super().__init__() self.input_dim = input_dim self.scalar = nn.Linear(self.input_dim, 1, bias=False) def forward(self, M, x=None): """ M -> (seq_l...
MultiLevelPooling
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class MultiLevelPooling(nn.Module): def __init__(self, levels=[1, 2, 4]): super(MultiLevelPooling, self).__init__() self.Pools = nn.ModuleList([nn.MaxPool2d(i) for i in levels]) def forward(self, x): assert len(x.size()) == 4, 'input shape must be (n,c,w,w)' ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_st...
Asichurter/Few-Shot-Project
MultiLevelPooling
false
61
[ "MIT" ]
0
865cd6aa7b996c518dfa48dcc9ffad90445f9efe
https://github.com/Asichurter/Few-Shot-Project/tree/865cd6aa7b996c518dfa48dcc9ffad90445f9efe
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, levels=[1, 2, 4]): super().__init__() self.Pools = nn.ModuleList([nn.MaxPool2d(i) for i in levels]) def forward(self, x): assert len(x.size()) == 4, 'input shape must be (n,c,w,w)' n = x.size(0) c = x.si...
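MultiLevelPooling's forward is cut off after the size reads; a hedged sketch of the usual completion, pooling at each level and concatenating the flattened results (the concat axis is an assumption):

import torch
import torch.nn as nn

class MultiLevelPoolingSketch(nn.Module):
    # Pool the same (n, c, w, w) input with several MaxPool2d kernels and
    # concatenate the flattened outputs per sample.
    def __init__(self, levels=(1, 2, 4)):
        super().__init__()
        self.pools = nn.ModuleList([nn.MaxPool2d(i) for i in levels])

    def forward(self, x):
        assert x.dim() == 4, 'input shape must be (n, c, w, w)'
        n = x.size(0)
        feats = [pool(x).view(n, -1) for pool in self.pools]
        return torch.cat(feats, dim=1)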
Linear_soft_plus
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Linear_soft_plus(nn.Module): def __init__(self, dim_in, dim_out, bias=True): super().__init__() self.linear = nn.Linear(dim_in, dim_out, bias=bias) self.activation = nn.Softplus() def forward(self, x): out = self.linear(x) out ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math im...
Armand-Morin/AutoML
Linear_soft_plus
false
62
[ "MIT" ]
0
189867e2c7734d9afb87a9f51fd42bd6cc527a64
https://github.com/Armand-Morin/AutoML/tree/189867e2c7734d9afb87a9f51fd42bd6cc527a64
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, dim_in, dim_out, bias=True): super().__init__() self.linear = nn.Linear(dim_in, dim_out, bias=bias) self.activation = nn.Softplus() def forward(self, x): out = self.linear(x) out = self.acti...
ContrastiveLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn.functional as F class ContrastiveLoss(torch.nn.Module): def __init__(self, margin=0.99): super(ContrastiveLoss, self).__init__() self.margin = margin def forward(self, output1, output2, label): euclidean_distance = F.pairwise_distance(output1, output2) ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._...
AssassionXY/HOR
ContrastiveLoss
false
63
[ "Apache-2.0" ]
0
a4c91d90a59eb2b144d827afff626b7eac907320
https://github.com/AssassionXY/HOR/tree/a4c91d90a59eb2b144d827afff626b7eac907320
import torch import torch.nn.functional as F class Model(torch.nn.Module): def __init__(self, margin=0.99): super().__init__() self.margin = margin def forward(self, output1, output2, label): euclidean_distance = F.pairwise_distance(output1, output2) loss_contrastive = torch....
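ContrastiveLoss is the classic Hadsell-style margin loss; a runnable sketch completing the truncated mean expression (which label value marks the dissimilar pair is an assumption):

import torch
import torch.nn.functional as F

class ContrastiveLossSketch(torch.nn.Module):
    # mean((1 - label) * d^2 + label * max(margin - d, 0)^2) over the batch.
    def __init__(self, margin=0.99):
        super().__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        d = F.pairwise_distance(output1, output2)
        return torch.mean((1 - label) * d.pow(2) +
                          label * torch.clamp(self.margin - d, min=0).pow(2))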
Linear_tanh
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Linear_tanh(nn.Module): def __init__(self, dim_in, dim_out, bias=True): super().__init__() self.linear = nn.Linear(dim_in, dim_out, bias=bias) self.activation = nn.Tanh() def forward(self, x): out = self.linear(x) out = self.ac...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...
Armand-Morin/AutoML
Linear_tanh
false
64
[ "MIT" ]
0
189867e2c7734d9afb87a9f51fd42bd6cc527a64
https://github.com/Armand-Morin/AutoML/tree/189867e2c7734d9afb87a9f51fd42bd6cc527a64
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, dim_in, dim_out, bias=True): super().__init__() self.linear = nn.Linear(dim_in, dim_out, bias=bias) self.activation = nn.Tanh() def forward(self, x): out = self.linear(x) out = self.activati...
Linear_leaky_relu
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Linear_leaky_relu(nn.Module): def __init__(self, dim_in, dim_out, bias=True): super().__init__() self.linear = nn.Linear(dim_in, dim_out, bias=bias) self.activation = nn.LeakyReLU() def forward(self, x): out = self.linear(x) ou...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
Armand-Morin/AutoML
Linear_leaky_relu
false
65
[ "MIT" ]
0
189867e2c7734d9afb87a9f51fd42bd6cc527a64
https://github.com/Armand-Morin/AutoML/tree/189867e2c7734d9afb87a9f51fd42bd6cc527a64
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, dim_in, dim_out, bias=True): super().__init__() self.linear = nn.Linear(dim_in, dim_out, bias=bias) self.activation = nn.LeakyReLU() def forward(self, x): out = self.linear(x) out = self.act...
Conv2dBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn import torch.nn.functional as F class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = mome...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import t...
Arthur1511/CAD-COVID
Conv2dBlock
false
66
[ "MIT" ]
0
daab5d70b9f811da41f702e92179a15ca4809fa5
https://github.com/Arthur1511/CAD-COVID/tree/daab5d70b9f811da41f702e92179a15ca4809fa5
import torch from torch import nn import torch.nn.functional as F class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super().__init__() self.num_features = num_features self.eps = eps self.momentum = momentum self.weight = N...
LinearBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parame...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
Arthur1511/CAD-COVID
LinearBlock
false
67
[ "MIT" ]
0
daab5d70b9f811da41f702e92179a15ca4809fa5
https://github.com/Arthur1511/CAD-COVID/tree/daab5d70b9f811da41f702e92179a15ca4809fa5
import torch from torch import nn class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True): super().__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tenso...
Linear_sigmoid
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Linear_sigmoid(nn.Module): def __init__(self, dim_in, dim_out, bias=True): super().__init__() self.linear = nn.Linear(dim_in, dim_out, bias=bias) self.activation = nn.Sigmoid() def forward(self, x): out = self.linear(x) out = s...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
Armand-Morin/AutoML
Linear_sigmoid
false
68
[ "MIT" ]
0
189867e2c7734d9afb87a9f51fd42bd6cc527a64
https://github.com/Armand-Morin/AutoML/tree/189867e2c7734d9afb87a9f51fd42bd6cc527a64
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, dim_in, dim_out, bias=True): super().__init__() self.linear = nn.Linear(dim_in, dim_out, bias=bias) self.activation = nn.Sigmoid() def forward(self, x): out = self.linear(x) out = self.activ...
BiAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data import torch.nn.functional as F class BiAttention(nn.Module): def __init__(self, input_size, dropout): super().__init__() self.dropout = nn.Dropout(p=dropout) self.input_linear = nn.Linear(input_size, 1, bias=False) self.m...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Angelinaa/KOBE
BiAttention
false
69
[ "MIT" ]
0
4d25487051e2791a977e59297f70a25e51806466
https://github.com/Angelinaa/KOBE/tree/4d25487051e2791a977e59297f70a25e51806466
import torch import torch.nn as nn import torch.utils.data import torch.nn.functional as F class Model(nn.Module): def __init__(self, input_size, dropout): super().__init__() self.dropout = nn.Dropout(p=dropout) self.input_linear = nn.Linear(input_size, 1, bias=False) self.memory_...
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import torch.nn as nn import torch.nn.functional as F class Attention(nn.Module): def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1, score_function='dot_product', dropout=0): """ Attention Mechanism :param embed_dim: :param hidden_dim: ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Anshul044/Project-NN
Attention
false
71
[ "MIT" ]
0
ef080846715a95b735f0381e4f60742e40791630
https://github.com/Anshul044/Project-NN/tree/ef080846715a95b735f0381e4f60742e40791630
import math import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1, score_function='dot_product', dropout=0): """ Attention Mechanism :param embed_dim: :param hidden_dim: ...
MaxPool2dDynamicSamePadding
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import math import torch from torch import nn import torch.nn.functional as F class MaxPool2dDynamicSamePadding(nn.MaxPool2d): """2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size. The padding is operated in forward function by calculating dynamically. """ def __init__(self, k...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empt...
AustinCai/gmaxup-augmentation
MaxPool2dDynamicSamePadding
false
72
[ "MIT" ]
0
a64ca0a76eb333e5ce6b217c301d27ca04d73bce
https://github.com/AustinCai/gmaxup-augmentation/tree/a64ca0a76eb333e5ce6b217c301d27ca04d73bce
import math import torch from torch import nn import torch.nn.functional as F class Model(nn.MaxPool2d): """2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size. The padding is operated in forward function by calculating dynamically. """ def __init__(self, kernel_size, stride, pa...
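MaxPool2dDynamicSamePadding computes TensorFlow-'SAME' padding in forward from the incoming size; a sketch of the standard formula used by EfficientNet-style utilities (dilation is assumed to be 1 here):

import math
import torch
from torch import nn
import torch.nn.functional as F

class MaxPool2dSamePaddingSketch(nn.MaxPool2d):
    # Pad in forward() so the output spatial size equals ceil(input / stride).
    def forward(self, x):
        ih, iw = x.size()[-2:]
        ks, st = self.kernel_size, self.stride
        kh, kw = (ks, ks) if isinstance(ks, int) else ks
        sh, sw = (st, st) if isinstance(st, int) else st
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * sh + kh - ih, 0)
        pad_w = max((ow - 1) * sw + kw - iw, 0)
        if pad_h > 0 or pad_w > 0:
            # F.pad order for 4-D input: (w_left, w_right, h_top, h_bottom).
            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2,
                          pad_h // 2, pad_h - pad_h // 2])
        return F.max_pool2d(x, self.kernel_size, self.stride, self.padding,
                            self.dilation, self.ceil_mode, self.return_indices)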
GroupScaling1D
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn class GroupScaling1D(nn.Module): """Scales inputs by the second moment for the entire layer.""" def __init__(self, eps=1e-05, group_num=4): super(GroupScaling1D, self).__init__() self.eps = eps self.group_num = group_num def extra_repr(self): ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
Azerrroth/spacetimeformer
GroupScaling1D
false
73
[ "MIT" ]
0
e822444a6d696a1edb9e446d6f3482a70681be3c
https://github.com/Azerrroth/spacetimeformer/tree/e822444a6d696a1edb9e446d6f3482a70681be3c
import torch from torch import nn class Model(nn.Module): """Scales inputs by the second moment for the entire layer.""" def __init__(self, eps=1e-05, group_num=4): super().__init__() self.eps = eps self.group_num = group_num def extra_repr(self): return f'eps={self.eps},...
adaModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch from torch import Tensor import torch.nn as nn from torch.nn import Parameter class adaConv2d(nn.Module): def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size: 'int', stride: 'int'=1, padding: 'int'=0, dilation: 'int'=1, bias: 'bool'=True): supe...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math i...
Artem531/pytorch-unet
adaModule
false
74
[ "MIT" ]
0
a8048f88f34a59f12f7f74735f03cf3c111a8415
https://github.com/Artem531/pytorch-unet/tree/a8048f88f34a59f12f7f74735f03cf3c111a8415
import math import torch from torch import Tensor import torch.nn as nn from torch.nn import Parameter class adaConv2d(nn.Module): def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size: 'int', stride: 'int'=1, padding: 'int'=0, dilation: 'int'=1, bias: 'bool'=True): supe...
SppPooling
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch as t import torch.nn as nn class SppPooling(nn.Module): def __init__(self, levels=[1, 2, 4]): super(SppPooling, self).__init__() self.Pools = nn.ModuleList([nn.AdaptiveMaxPool2d((i, i)) for i in levels]) def forward(self, x): assert len(x.size())...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride emp...
Asichurter/Few-Shot-Project
SppPooling
false
75
[ "MIT" ]
0
865cd6aa7b996c518dfa48dcc9ffad90445f9efe
https://github.com/Asichurter/Few-Shot-Project/tree/865cd6aa7b996c518dfa48dcc9ffad90445f9efe
import torch import torch as t import torch.nn as nn class Model(nn.Module): def __init__(self, levels=[1, 2, 4]): super().__init__() self.Pools = nn.ModuleList([nn.AdaptiveMaxPool2d((i, i)) for i in levels]) def forward(self, x): assert len(x.size()) == 4, 'input shape must be (n,c,w...
SurfaceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class SurfaceLoss(nn.Module): def __init__(self, epsilon=1e-05, softmax=True): super(SurfaceLoss, self).__init__() self.weight_map = [] def forward(self, x, distmap): x = torch.softmax(x, dim=1) self.weight_map = distmap score = x.fl...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
ArmandB/RITnet
SurfaceLoss
false
76
[ "MIT" ]
0
afe9524fdd795982c7e52761da68af2dfda589ea
https://github.com/ArmandB/RITnet/tree/afe9524fdd795982c7e52761da68af2dfda589ea
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, epsilon=1e-05, softmax=True): super().__init__() self.weight_map = [] def forward(self, x, distmap): x = torch.softmax(x, dim=1) self.weight_map = distmap score = x.flatten(start_dim=2) * di...
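SurfaceLoss is a boundary loss: softmax probabilities weighted by a precomputed distance map; a sketch completing the visible forward (the final mean reduction is an assumption):

import torch
import torch.nn as nn

class SurfaceLossSketch(nn.Module):
    # Elementwise product of per-class probabilities and a distance map,
    # reduced to a scalar.
    def __init__(self, epsilon=1e-05):
        super().__init__()

    def forward(self, x, distmap):
        x = torch.softmax(x, dim=1)
        score = x.flatten(start_dim=2) * distmap.flatten(start_dim=2)
        return score.mean()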
SmoothCrossEntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
from torch.nn import Module import torch from torch.nn.modules.module import Module def cross_entropy(input, target, size_average=True): """ Cross entropy that accepts soft targets Args: pred: predictions for neural network targets: targets, can be soft size_average: if false, sum i...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import M...
AustinCai/gmaxup-augmentation
SmoothCrossEntropyLoss
false
77
[ "MIT" ]
0
a64ca0a76eb333e5ce6b217c301d27ca04d73bce
https://github.com/AustinCai/gmaxup-augmentation/tree/a64ca0a76eb333e5ce6b217c301d27ca04d73bce
from torch.nn import Module import torch from torch.nn.modules.module import Module def cross_entropy(input, target, size_average=True): """ Cross entropy that accepts soft targets Args: pred: predictions for neural network targets: targets, can be soft size_average: if false, sum i...
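The cross_entropy helper above accepts soft targets, which reduces to -sum(target * log_softmax(pred)); a runnable sketch of the behaviour described in the truncated docstring:

import torch
import torch.nn.functional as F

def soft_cross_entropy(input, target, size_average=True):
    # Per-sample loss: -sum_k target_k * log_softmax(input)_k; mean or sum
    # over the batch depending on size_average.
    logprobs = F.log_softmax(input, dim=-1)
    losses = -(target * logprobs).sum(dim=-1)
    return losses.mean() if size_average else losses.sum()

# usage: soft_cross_entropy(torch.randn(8, 10), torch.full((8, 10), 0.1))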
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parame...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
Arthur1511/CAD-COVID
LayerNorm
false
78
[ "MIT" ]
0
daab5d70b9f811da41f702e92179a15ca4809fa5
https://github.com/Arthur1511/CAD-COVID/tree/daab5d70b9f811da41f702e92179a15ca4809fa5
import torch from torch import nn class Model(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True): super().__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(nu...
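This custom LayerNorm normalizes each sample over all non-batch dimensions before an optional per-feature affine; a hedged sketch of the truncated forward (initializing gamma/beta to ones/zeros is an assumption; the visible code allocates uninitialized tensors):

import torch
from torch import nn

class LayerNormSketch(nn.Module):
    # Per-sample normalization over all non-batch dims, then gamma/beta
    # broadcast along the channel axis.
    def __init__(self, num_features, eps=1e-05, affine=True):
        super().__init__()
        self.eps = eps
        self.affine = affine
        if affine:
            self.gamma = nn.Parameter(torch.ones(num_features))
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        tail = [1] * (x.dim() - 1)
        mean = x.view(x.size(0), -1).mean(dim=1).view(-1, *tail)
        std = x.view(x.size(0), -1).std(dim=1).view(-1, *tail)
        x = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x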
Conv2dDynamicSamePadding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch from torch import nn import torch.nn.functional as F class Conv2dDynamicSamePadding(nn.Conv2d): """2D Convolutions like TensorFlow, for a dynamic image size. The padding is operated in forward function by calculating dynamically. """ def __init__(self, in_channels, out_cha...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_st...
AustinCai/gmaxup-augmentation
Conv2dDynamicSamePadding
false
79
[ "MIT" ]
0
a64ca0a76eb333e5ce6b217c301d27ca04d73bce
https://github.com/AustinCai/gmaxup-augmentation/tree/a64ca0a76eb333e5ce6b217c301d27ca04d73bce
import math import torch from torch import nn import torch.nn.functional as F class Model(nn.Conv2d): """2D Convolutions like TensorFlow, for a dynamic image size. The padding is operated in forward function by calculating dynamically. """ def __init__(self, in_channels, out_channels, kernel_size,...
CasualConv1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class CasualConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True): super(CasualConv1d, self).__init__() self.dilation = dilation padding = dilation * (kernel_size - 1) sel...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
Asichurter/Few-Shot-Project
CasualConv1d
false
80
[ "MIT" ]
0
865cd6aa7b996c518dfa48dcc9ffad90445f9efe
https://github.com/Asichurter/Few-Shot-Project/tree/865cd6aa7b996c518dfa48dcc9ffad90445f9efe
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True): super().__init__() self.dilation = dilation padding = dilation * (kernel_size - 1) self.conv1d = nn.Conv1d(in_c...
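CasualConv1d (sic) pads by dilation * (kernel_size - 1), as the visible prefix shows; the standard completion chops the same amount off the end so position t never sees the future. A sketch (stride=1 is the usual case; the slicing for stride > 1 is an assumption):

import torch
import torch.nn as nn

class CausalConv1dSketch(nn.Module):
    # Symmetric padding inside Conv1d, then drop the trailing frames to
    # make the convolution causal.
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, dilation=1, groups=1, bias=True):
        super().__init__()
        self.pad = dilation * (kernel_size - 1)
        self.conv1d = nn.Conv1d(in_channels, out_channels, kernel_size,
                                stride=stride, padding=self.pad,
                                dilation=dilation, groups=groups, bias=bias)

    def forward(self, x):
        # x: (batch, in_channels, seq_len)
        out = self.conv1d(x)
        return out[:, :, :-self.pad] if self.pad > 0 else out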
DenseBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class CasualConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True): super(CasualConv1d, self).__init__() self.dilation = dilation padding = dilation * (kernel_size - 1) sel...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...
Asichurter/Few-Shot-Project
DenseBlock
false
81
[ "MIT" ]
0
865cd6aa7b996c518dfa48dcc9ffad90445f9efe
https://github.com/Asichurter/Few-Shot-Project/tree/865cd6aa7b996c518dfa48dcc9ffad90445f9efe
import torch import torch.nn as nn class CasualConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True): super().__init__() self.dilation = dilation padding = dilation * (kernel_size - 1) self.conv1d = nn.Conv...
Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn import torch.onnx class Block(nn.Module): def __init__(self, in_channels, num_filters, kernel_size, pool_size): super(Block, self).__init__() self.conv = nn.Conv2d(in_channels, num_filters, kernel_size=kernel_size ) self.pool = nn.MaxPool2d(ke...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import t...
Alwaysproblem/examples-1
Block
false
82
[ "MIT" ]
0
9754fa63ed1931489a21ac1f5b299f945e369a5c
https://github.com/Alwaysproblem/examples-1/tree/9754fa63ed1931489a21ac1f5b299f945e369a5c
import torch from torch import nn import torch.onnx class Model(nn.Module): def __init__(self, in_channels, num_filters, kernel_size, pool_size): super().__init__() self.conv = nn.Conv2d(in_channels, num_filters, kernel_size=kernel_size ) self.pool = nn.MaxPool2d(kernel_size=p...
Bc
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.utils class Bc(nn.Module): def __init__(self, nc): super(Bc, self).__init__() self.nn = nn.Linear(nc, 1) def forward(self, input): return torch.sigmoid(self.nn(input)) def get_inputs():...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.utils.data import to...
AyufhSri/GANAccImprover
Bc
false
83
[ "MIT" ]
0
eff3a944bd6e5d9761ec815f28c0d32c87096308
https://github.com/AyufhSri/GANAccImprover/tree/eff3a944bd6e5d9761ec815f28c0d32c87096308
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.utils class Model(nn.Module): def __init__(self, nc): super().__init__() self.nn = nn.Linear(nc, 1) def forward(self, input): return torch.sigmoid(self.nn(input)) def get_inputs(): ...
ATRCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class ATRCell(nn.Module): def __init__(self, input_size, hidden_size): super(ATRCell, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self._W = nn.Parameter(torch.FloatTensor(input_size, hidden_size)) self._W_...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...
Avmb/lm-robustness
ATRCell
false
84
[ "BSD-3-Clause" ]
0
b5417d9aac01bff0d2a56b506eabed899fd718d4
https://github.com/Avmb/lm-robustness/tree/b5417d9aac01bff0d2a56b506eabed899fd718d4
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_size, hidden_size): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self._W = nn.Parameter(torch.FloatTensor(input_size, hidden_size)) self._W_b = nn.Paramete...
TimeEncode
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np class TimeEncode(torch.nn.Module): def __init__(self, dimension): super(TimeEncode, self).__init__() self.dimension = dimension self.w = torch.nn.Linear(1, dimension) self.w.weight = torch.nn.Parameter(torch.from_numpy(1 / 10 ** np. lins...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy ...
Awannaphasch2016/tgn
TimeEncode
false
85
[ "Apache-2.0" ]
0
a0eb4b4759cb44e053dfb6a825ccac1d54dba33f
https://github.com/Awannaphasch2016/tgn/tree/a0eb4b4759cb44e053dfb6a825ccac1d54dba33f
import torch import numpy as np class Model(torch.nn.Module): def __init__(self, dimension): super().__init__() self.dimension = dimension self.w = torch.nn.Linear(1, dimension) self.w.weight = torch.nn.Parameter(torch.from_numpy(1 / 10 ** np. linspace(0, 9, dimension)...
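TimeEncode projects a scalar time delta through fixed log-spaced frequencies, as the visible initializer shows; a runnable sketch (the cos activation and zero bias are the standard TGN choices and are assumed here):

import numpy as np
import torch

class TimeEncodeSketch(torch.nn.Module):
    # phi(t) = cos(w t + b) with w fixed to 1 / 10**linspace(0, 9, d).
    def __init__(self, dimension):
        super().__init__()
        self.dimension = dimension
        self.w = torch.nn.Linear(1, dimension)
        self.w.weight = torch.nn.Parameter(
            torch.from_numpy(1 / 10 ** np.linspace(0, 9, dimension))
            .float().reshape(dimension, 1))
        self.w.bias = torch.nn.Parameter(torch.zeros(dimension))

    def forward(self, t):
        # t: (batch, seq_len) of time deltas.
        return torch.cos(self.w(t.unsqueeze(-1)))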
LargeNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn import torch.nn.functional as F class LargeNN(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.l1 = nn.Linear(in_channels, 1024) self.l2 = nn.Linear(1024, 1024) self.l3 = nn.Linear(1024, out_channels) def for...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
AustinCai/gmaxup-augmentation
LargeNN
false
86
[ "MIT" ]
0
a64ca0a76eb333e5ce6b217c301d27ca04d73bce
https://github.com/AustinCai/gmaxup-augmentation/tree/a64ca0a76eb333e5ce6b217c301d27ca04d73bce
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.l1 = nn.Linear(in_channels, 1024) self.l2 = nn.Linear(1024, 1024) self.l3 = nn.Linear(1024, out_channels) def forwa...
LRNCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class LRNCell(nn.Module): def __init__(self, input_size, hidden_size): super(LRNCell, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self._W = nn.Parameter(torch.FloatTensor(input_size, hidden_size * 3)) self...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...
Avmb/lm-robustness
LRNCell
false
87
[ "BSD-3-Clause" ]
0
b5417d9aac01bff0d2a56b506eabed899fd718d4
https://github.com/Avmb/lm-robustness/tree/b5417d9aac01bff0d2a56b506eabed899fd718d4
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_size, hidden_size): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self._W = nn.Parameter(torch.FloatTensor(input_size, hidden_size * 3)) self._W_b = nn.Para...
FeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class FeedForward(nn.Module): def __init__(self, d_model, d_ff=2048, dropout=0.1): super().__init__() self.linear_1 = nn.Linear(d_model, d_ff) self.dropout = nn.Dropout(dropout) self.linear_2 = nn.Linear(d_ff, d_mo...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
AviVarma/torchASN-Transformer
FeedForward
false
88
[ "MIT" ]
0
55bccf4cdb099cd8e9ac99f5f87f989ce2add983
https://github.com/AviVarma/torchASN-Transformer/tree/55bccf4cdb099cd8e9ac99f5f87f989ce2add983
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, d_model, d_ff=2048, dropout=0.1): super().__init__() self.linear_1 = nn.Linear(d_model, d_ff) self.dropout = nn.Dropout(dropout) self.linear_2 = nn.Linear(d_ff, d_model) ...
MergeLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch class MergeLayer(torch.nn.Module): def __init__(self, dim1, dim2, dim3, dim4): super().__init__() self.fc1 = torch.nn.Linear(dim1 + dim2, dim3) self.fc2 = torch.nn.Linear(dim3, dim4) self.act = torch.nn.ReLU() torch.nn.init.xavier_normal_(self.fc1.weight) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C...
Awannaphasch2016/tgn
MergeLayer
false
89
[ "Apache-2.0" ]
0
a0eb4b4759cb44e053dfb6a825ccac1d54dba33f
https://github.com/Awannaphasch2016/tgn/tree/a0eb4b4759cb44e053dfb6a825ccac1d54dba33f
import torch class Model(torch.nn.Module): def __init__(self, dim1, dim2, dim3, dim4): super().__init__() self.fc1 = torch.nn.Linear(dim1 + dim2, dim3) self.fc2 = torch.nn.Linear(dim3, dim4) self.act = torch.nn.ReLU() torch.nn.init.xavier_normal_(self.fc1.weight) t...
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch class MLP(torch.nn.Module): def __init__(self, dim, drop=0.3): super().__init__() self.fc_1 = torch.nn.Linear(dim, 80) self.fc_2 = torch.nn.Linear(80, 10) self.fc_3 = torch.nn.Linear(10, 1) self.act = torch.nn.ReLU() self.dropout = torch.nn.Dropout(p=d...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C...
Awannaphasch2016/tgn
MLP
false
90
[ "Apache-2.0" ]
0
a0eb4b4759cb44e053dfb6a825ccac1d54dba33f
https://github.com/Awannaphasch2016/tgn/tree/a0eb4b4759cb44e053dfb6a825ccac1d54dba33f
import torch class Model(torch.nn.Module): def __init__(self, dim, drop=0.3): super().__init__() self.fc_1 = torch.nn.Linear(dim, 80) self.fc_2 = torch.nn.Linear(80, 10) self.fc_3 = torch.nn.Linear(10, 1) self.act = torch.nn.ReLU() self.dropout = torch.nn.Dropout(p...
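The MLP record's forward is truncated. A sketch assuming the usual pattern for this kind of head (ReLU plus dropout after each hidden layer, one squeezed scalar score at the end; the squeeze is an assumption):

import torch

class MLP(torch.nn.Module):
    def __init__(self, dim, drop=0.3):
        super().__init__()
        self.fc_1 = torch.nn.Linear(dim, 80)
        self.fc_2 = torch.nn.Linear(80, 10)
        self.fc_3 = torch.nn.Linear(10, 1)
        self.act = torch.nn.ReLU()
        self.dropout = torch.nn.Dropout(p=drop)

    def forward(self, x):
        x = self.dropout(self.act(self.fc_1(x)))
        x = self.dropout(self.act(self.fc_2(x)))
        return self.fc_3(x).squeeze(dim=-1)  # one scalar per sample

score = MLP(4)(torch.rand(8, 4))  # shape (8,)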
DenseNet2D_up_block_concat
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class DenseNet2D_up_block_concat(nn.Module): def __init__(self, skip_channels, input_channels, output_channels, up_stride, dropout=False, prob=0): super(DenseNet2D_up_block_concat, self).__init__() self.conv11 = nn.Conv2d(skip_channels + input_channels, ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
ArmandB/RITnet
DenseNet2D_up_block_concat
false
91
[ "MIT" ]
0
afe9524fdd795982c7e52761da68af2dfda589ea
https://github.com/ArmandB/RITnet/tree/afe9524fdd795982c7e52761da68af2dfda589ea
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, skip_channels, input_channels, output_channels, up_stride, dropout=False, prob=0): super().__init__() self.conv11 = nn.Conv2d(skip_channels + input_channels, output_channels, kernel_size=(1, 1), padd...
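Only conv11's definition survives the truncation of DenseNet2D_up_block_concat. A deliberately simplified sketch of the up-block pattern it implements (upsample the decoder features, concatenate the encoder skip, convolve); the repo's full dense layer list and dropout path are not reconstructed here:

import torch
import torch.nn as nn
import torch.nn.functional as F

class DenseUpBlockConcat(nn.Module):
    def __init__(self, skip_channels, input_channels, output_channels, up_stride):
        super().__init__()
        self.up_stride = up_stride
        self.conv11 = nn.Conv2d(skip_channels + input_channels,
                                output_channels, kernel_size=1)
        self.conv12 = nn.Conv2d(output_channels, output_channels,
                                kernel_size=3, padding=1)

    def forward(self, prev_feature_map, x):
        # Upsample, fuse with the skip connection, then a 1x1 -> 3x3 conv pair.
        x = F.interpolate(x, scale_factor=self.up_stride, mode='nearest')
        x = torch.cat([x, prev_feature_map], dim=1)
        return F.leaky_relu(self.conv12(F.leaky_relu(self.conv11(x))))

y = DenseUpBlockConcat(4, 4, 8, 2)(torch.rand(1, 4, 8, 8), torch.rand(1, 4, 4, 4))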
ScaleNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class ScaleNorm(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.scale = dim ** -0.5 self.g = nn.Parameter(torch.ones(1)) self.eps = eps def forward(self, x): n = torch.norm(x, dim=-1, keepdim=True).clamp(min=se...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_...
Azerrroth/spacetimeformer
ScaleNorm
false
92
[ "MIT" ]
0
e822444a6d696a1edb9e446d6f3482a70681be3c
https://github.com/Azerrroth/spacetimeformer/tree/e822444a6d696a1edb9e446d6f3482a70681be3c
import torch from torch import nn class Model(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.scale = dim ** -0.5 self.g = nn.Parameter(torch.ones(1)) self.eps = eps def forward(self, x): n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.e...
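ScaleNorm is almost fully visible; only the tail of forward is truncated. Completing it with the standard ScaleNorm formula (normalise by the clamped vector norm times dim**-0.5, then apply the single learned gain g):

import torch
from torch import nn

class ScaleNorm(nn.Module):
    def __init__(self, dim, eps=1e-05):
        super().__init__()
        self.scale = dim ** -0.5
        self.g = nn.Parameter(torch.ones(1))
        self.eps = eps

    def forward(self, x):
        # One scalar gain g replaces LayerNorm's per-feature parameters.
        n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps) * self.scale
        return x / n * self.g

y = ScaleNorm(4)(torch.rand(2, 3, 4))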
ShakeResNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch from torch import nn import torch.nn.functional as F from torch.autograd import Variable class ShakeShake(torch.autograd.Function): @staticmethod def forward(ctx, x1, x2, training=True): if training: alpha = torch.FloatTensor(x1.size(0)).uniform_() alp...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math from torch import...
AustinCai/gmaxup-augmentation
ShakeResNet
false
93
[ "MIT" ]
0
a64ca0a76eb333e5ce6b217c301d27ca04d73bce
https://github.com/AustinCai/gmaxup-augmentation/tree/a64ca0a76eb333e5ce6b217c301d27ca04d73bce
import math import torch from torch import nn import torch.nn.functional as F from torch.autograd import Variable class ShakeShake(torch.autograd.Function): @staticmethod def forward(ctx, x1, x2, training=True): if training: alpha = torch.FloatTensor(x1.size(0)).uniform_() alp...
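The visible part of the ShakeResNet record is the ShakeShake autograd Function, truncated mid-forward. A modernised sketch of what it computes: a per-sample random convex combination of two branches in forward, with an independent random mix in backward (the original uses FloatTensor/Variable idioms):

import torch
from torch.autograd import Function

class ShakeShake(Function):
    @staticmethod
    def forward(ctx, x1, x2, training=True):
        if training:
            alpha = torch.rand(x1.size(0), 1, 1, 1, device=x1.device)
        else:
            alpha = torch.full((x1.size(0), 1, 1, 1), 0.5, device=x1.device)
        return alpha * x1 + (1 - alpha) * x2

    @staticmethod
    def backward(ctx, grad_output):
        # Fresh random mix on the way back: the "shake" in shake-shake.
        beta = torch.rand(grad_output.size(0), 1, 1, 1,
                          device=grad_output.device)
        return beta * grad_output, (1 - beta) * grad_output, None

x1 = torch.rand(4, 3, 8, 8, requires_grad=True)
x2 = torch.rand(4, 3, 8, 8, requires_grad=True)
ShakeShake.apply(x1, x2, True).sum().backward()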
ShakeResNeXt
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch from torch import nn import torch.nn.functional as F from torch.autograd import Variable class ShakeShake(torch.autograd.Function): @staticmethod def forward(ctx, x1, x2, training=True): if training: alpha = torch.FloatTensor(x1.size(0)).uniform_() alp...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math from torch import...
AustinCai/gmaxup-augmentation
ShakeResNeXt
false
94
[ "MIT" ]
0
a64ca0a76eb333e5ce6b217c301d27ca04d73bce
https://github.com/AustinCai/gmaxup-augmentation/tree/a64ca0a76eb333e5ce6b217c301d27ca04d73bce
import math import torch from torch import nn import torch.nn.functional as F from torch.autograd import Variable class ShakeShake(torch.autograd.Function): @staticmethod def forward(ctx, x1, x2, training=True): if training: alpha = torch.FloatTensor(x1.size(0)).uniform_() alp...
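ShakeResNeXt shares the same truncated ShakeShake preamble as the previous record. Rather than repeat it, here is a hypothetical two-branch residual block showing how such a network consumes the shake mix (forward-only shake for brevity; the repo's grouped ResNeXt convolutions are not reconstructed):

import torch
import torch.nn as nn

class TwoBranchShake(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.branch1 = nn.Conv2d(channels, channels, 3, padding=1)
        self.branch2 = nn.Conv2d(channels, channels, 3, padding=1)

    def forward(self, x):
        h1, h2 = self.branch1(x), self.branch2(x)
        if self.training:
            # Per-sample random convex combination of the two branches.
            alpha = torch.rand(x.size(0), 1, 1, 1, device=x.device)
        else:
            alpha = x.new_full((x.size(0), 1, 1, 1), 0.5)
        return x + alpha * h1 + (1 - alpha) * h2

y = TwoBranchShake(3)(torch.rand(2, 3, 8, 8))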
Norm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Norm(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.size = d_model self.alpha = nn.Parameter(torch.ones(self.size)) self.bias = nn.Parameter(torch.zeros(self.size)) self.eps = eps def forward(self, ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...
AviVarma/torchASN-Transformer
Norm
false
95
[ "MIT" ]
0
55bccf4cdb099cd8e9ac99f5f87f989ce2add983
https://github.com/AviVarma/torchASN-Transformer/tree/55bccf4cdb099cd8e9ac99f5f87f989ce2add983
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, d_model, eps=1e-06): super().__init__() self.size = d_model self.alpha = nn.Parameter(torch.ones(self.size)) self.bias = nn.Parameter(torch.zeros(self.size)) self.eps = eps def forward(self,...
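Norm's forward is truncated. A sketch completing it with the usual annotated-Transformer layer norm (learned gain alpha and bias, statistics over the last axis):

import torch
import torch.nn as nn

class Norm(nn.Module):
    def __init__(self, d_model, eps=1e-06):
        super().__init__()
        self.size = d_model
        self.alpha = nn.Parameter(torch.ones(self.size))
        self.bias = nn.Parameter(torch.zeros(self.size))
        self.eps = eps

    def forward(self, x):
        # Standardise each position over its features, then rescale and shift.
        mean = x.mean(dim=-1, keepdim=True)
        std = x.std(dim=-1, keepdim=True)
        return self.alpha * (x - mean) / (std + self.eps) + self.bias

y = Norm(4)(torch.rand(2, 3, 4))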
ConvBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional as F class ConvBlock(torch.nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.conv2d = torch.nn.Conv2d(in_channels=in_channels, out_channels =out_channels, kernel_size=3, padding=1) self.batchnorm2d = torc...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
ArmandNM/meta-learning
ConvBlock
false
96
[ "MIT" ]
0
173fcd4b929168e9bd7948581293020a3a932857
https://github.com/ArmandNM/meta-learning/tree/173fcd4b929168e9bd7948581293020a3a932857
import torch import torch.nn.functional as F class Model(torch.nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.conv2d = torch.nn.Conv2d(in_channels=in_channels, out_channels =out_channels, kernel_size=3, padding=1) self.batchnorm2d = torch.nn...
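ConvBlock's forward is cut off after the batchnorm definition. A sketch assuming the common few-shot "Conv4" block ordering (conv, batchnorm, ReLU, 2x2 max-pool); the pooling step is an assumption:

import torch
import torch.nn.functional as F

class ConvBlock(torch.nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
                                      kernel_size=3, padding=1)
        self.batchnorm2d = torch.nn.BatchNorm2d(out_channels)

    def forward(self, x):
        # The 3x3 conv keeps spatial size; the pool halves it.
        return F.max_pool2d(F.relu(self.batchnorm2d(self.conv2d(x))), 2)

y = ConvBlock(3, 16)(torch.rand(2, 3, 8, 8))  # -> (2, 16, 4, 4)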
L2
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class L2(nn.Module): def __init__(self): super(L2, self).__init__() def forward(self, output, target): lossvalue = torch.norm(output - target, p=2, dim=1).mean() return lossvalue def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...
B06901052/deep-stabilization
L2
false
97
[ "Apache-2.0" ]
0
b6030b463cf1f1128660e900669f43e742aa2651
https://github.com/B06901052/deep-stabilization/tree/b6030b463cf1f1128660e900669f43e742aa2651
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, output, target): lossvalue = torch.norm(output - target, p=2, dim=1).mean() return lossvalue def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4,...
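The L2 record's forward is fully visible: it averages per-pixel Euclidean norms taken over dim=1, the channel axis (e.g. endpoint error for 2-channel flow fields). A worked check of that equivalence:

import torch

output, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = torch.norm(output - target, p=2, dim=1).mean()
# Same thing spelled out: sqrt of the channel-wise sum of squares, then mean.
explicit = ((output - target) ** 2).sum(dim=1).sqrt().mean()
assert torch.allclose(loss, explicit, atol=1e-6)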
MLP_multiple_class
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch class MLP_multiple_class(torch.nn.Module): def __init__(self, dim, n_labels, drop=0.3): super().__init__() self.fc_1 = torch.nn.Linear(dim, 80) self.fc_2 = torch.nn.Linear(80, 10) self.fc_3 = torch.nn.Linear(10, n_labels) self.act = torch.nn.ReLU() sel...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C...
Awannaphasch2016/tgn
MLP_multiple_class
false
98
[ "Apache-2.0" ]
0
a0eb4b4759cb44e053dfb6a825ccac1d54dba33f
https://github.com/Awannaphasch2016/tgn/tree/a0eb4b4759cb44e053dfb6a825ccac1d54dba33f
import torch class Model(torch.nn.Module): def __init__(self, dim, n_labels, drop=0.3): super().__init__() self.fc_1 = torch.nn.Linear(dim, 80) self.fc_2 = torch.nn.Linear(80, 10) self.fc_3 = torch.nn.Linear(10, n_labels) self.act = torch.nn.ReLU() self.dropout = t...
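MLP_multiple_class is truncated like the MLP record; the only visible difference is that fc_3 emits n_labels outputs. A hypothetical completion that returns raw logits (whether the original applies a final softmax is not recoverable from the dump):

import torch

class MLPMultiClass(torch.nn.Module):
    def __init__(self, dim, n_labels, drop=0.3):
        super().__init__()
        self.fc_1 = torch.nn.Linear(dim, 80)
        self.fc_2 = torch.nn.Linear(80, 10)
        self.fc_3 = torch.nn.Linear(10, n_labels)
        self.act = torch.nn.ReLU()
        self.dropout = torch.nn.Dropout(p=drop)

    def forward(self, x):
        x = self.dropout(self.act(self.fc_1(x)))
        x = self.dropout(self.act(self.fc_2(x)))
        return self.fc_3(x)  # raw logits; pair with CrossEntropyLoss

logits = MLPMultiClass(4, 5)(torch.rand(8, 4))  # (8, 5)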
L1
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class L1(nn.Module): def __init__(self): super(L1, self).__init__() def forward(self, output, target): lossvalue = torch.abs(output - target).mean() return lossvalue def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
B06901052/deep-stabilization
L1
false
99
[ "Apache-2.0" ]
0
b6030b463cf1f1128660e900669f43e742aa2651
https://github.com/B06901052/deep-stabilization/tree/b6030b463cf1f1128660e900669f43e742aa2651
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, output, target): lossvalue = torch.abs(output - target).mean() return lossvalue def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] de...
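L1's forward is fully visible: a mean absolute error. It matches PyTorch's built-in mean-reduced L1 loss:

import torch
import torch.nn.functional as F

output, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = torch.abs(output - target).mean()
assert torch.allclose(loss, F.l1_loss(output, target), atol=1e-6)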
SmallNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn import torch.nn.functional as F class SmallNN(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.l1 = nn.Linear(in_channels, 32) self.l2 = nn.Linear(32, 32) self.l3 = nn.Linear(32, out_channels) def forward(sel...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
AustinCai/gmaxup-augmentation
SmallNN
false
100
[ "MIT" ]
0
a64ca0a76eb333e5ce6b217c301d27ca04d73bce
https://github.com/AustinCai/gmaxup-augmentation/tree/a64ca0a76eb333e5ce6b217c301d27ca04d73bce
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.l1 = nn.Linear(in_channels, 32) self.l2 = nn.Linear(32, 32) self.l3 = nn.Linear(32, out_channels) def forward(self,...
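SmallNN's forward is truncated after the layer definitions. A sketch assuming ReLU between the hidden layers and a raw final output:

import torch
from torch import nn
import torch.nn.functional as F

class SmallNN(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.l1 = nn.Linear(in_channels, 32)
        self.l2 = nn.Linear(32, 32)
        self.l3 = nn.Linear(32, out_channels)

    def forward(self, x):
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        return self.l3(x)

y = SmallNN(4, 2)(torch.rand(8, 4))  # (8, 2)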
LinearModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class LinearModel(nn.Module): def __init__(self, context_points: 'int'): super().__init__() self.window = context_points self.linear = nn.Linear(context_points, 1) def forward(self, y_c): _bs, _length, d_y = y_c.shape inp = y_c[:, -se...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_st...
Azerrroth/spacetimeformer
LinearModel
false
101
[ "MIT" ]
0
e822444a6d696a1edb9e446d6f3482a70681be3c
https://github.com/Azerrroth/spacetimeformer/tree/e822444a6d696a1edb9e446d6f3482a70681be3c
import torch from torch import nn class Model(nn.Module): def __init__(self, context_points: 'int'): super().__init__() self.window = context_points self.linear = nn.Linear(context_points, 1) def forward(self, y_c): _bs, _length, d_y = y_c.shape inp = y_c[:, -self.win...
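LinearModel's forward breaks off at the window slice. A sketch completing the obvious reading: keep the last context_points timesteps and regress one step per variable with the shared Linear(context_points, 1); the transposes are assumptions about the layout:

import torch
from torch import nn

class LinearModel(nn.Module):
    def __init__(self, context_points: int):
        super().__init__()
        self.window = context_points
        self.linear = nn.Linear(context_points, 1)

    def forward(self, y_c):
        bs, length, d_y = y_c.shape
        inp = y_c[:, -self.window:, :]           # (bs, window, d_y)
        inp = inp.transpose(1, 2)                # (bs, d_y, window)
        return self.linear(inp).transpose(1, 2)  # (bs, 1, d_y)

y = LinearModel(3)(torch.rand(2, 5, 4))  # (2, 1, 4)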
AdMSoftmaxLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class AdMSoftmaxLoss(nn.Module): def __init__(self, in_features, out_features, s=30.0, m=0.4): """ AM Softmax Loss """ super(AdMSoftmaxLoss, self).__init__() self.s = s self.m = m self.in_fe...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
B06901052/s3prl
AdMSoftmaxLoss
false
102
[ "MIT" ]
0
5f63d2df043d2d7c81580cd042fa2cea34746f48
https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_features, out_features, s=30.0, m=0.4): """ AM Softmax Loss """ super().__init__() self.s = s self.m = m self.in_features = in_features ...
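AdMSoftmaxLoss is truncated before the loss computation. A sketch using the cross-entropy form of Additive-Margin softmax (cosine logits from normalised weights and embeddings, margin m subtracted from the target class, everything scaled by s), which is mathematically equivalent to the usual numerator/denominator formulation:

import torch
import torch.nn as nn
import torch.nn.functional as F

class AdMSoftmaxLoss(nn.Module):
    def __init__(self, in_features, out_features, s=30.0, m=0.4):
        super().__init__()
        self.s, self.m = s, m
        self.fc = nn.Linear(in_features, out_features, bias=False)

    def forward(self, x, labels):
        # Unit-normalise class weights and embeddings so logits are cosines.
        for W in self.fc.parameters():
            W.data = F.normalize(W.data, dim=1)
        x = F.normalize(x, dim=1)
        cos = self.fc(x)
        margin = F.one_hot(labels, cos.size(1)) * self.m
        return F.cross_entropy(self.s * (cos - margin), labels)

loss = AdMSoftmaxLoss(8, 5)(torch.rand(4, 8), torch.tensor([0, 1, 2, 3]))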
OutConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class OutConv(nn.Module): def __init__(self, in_channels, out_channels): super(OutConv, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) def forward(self, x): return self.conv(x) def get_inputs(): return [torch....
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
B06901052/deep-stabilization
OutConv
false
103
[ "Apache-2.0" ]
0
b6030b463cf1f1128660e900669f43e742aa2651
https://github.com/B06901052/deep-stabilization/tree/b6030b463cf1f1128660e900669f43e742aa2651
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) def forward(self, x): return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, ...
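OutConv is complete as shown: a 1x1 convolution used as a segmentation head. It only mixes channels; spatial dimensions pass through unchanged:

import torch
import torch.nn as nn

head = nn.Conv2d(4, 2, kernel_size=1)
print(head(torch.rand(1, 4, 8, 8)).shape)  # torch.Size([1, 2, 8, 8])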
Follow_loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch.autograd import Variable def torch_norm_quat(quat, USE_CUDA=True): batch_size = quat.size()[0] quat_out = Variable(torch.zeros((batch_size, 4), requires_grad=True)) if USE_CUDA is True: quat_out = quat_out for i in range(batch_size): norm_quat = torch.norm(quat[...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_s...
B06901052/deep-stabilization
Follow_loss
false
104
[ "Apache-2.0" ]
0
b6030b463cf1f1128660e900669f43e742aa2651
https://github.com/B06901052/deep-stabilization/tree/b6030b463cf1f1128660e900669f43e742aa2651
import torch from torch.autograd import Variable def torch_norm_quat(quat, USE_CUDA=True): batch_size = quat.size()[0] quat_out = Variable(torch.zeros((batch_size, 4), requires_grad=True)) if USE_CUDA is True: quat_out = quat_out for i in range(batch_size): norm_quat = torch.norm(quat[...
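The torch_norm_quat helper in the Follow_loss record is truncated, loops over the batch one quaternion at a time, and contains a visible no-op ("quat_out = quat_out" under USE_CUDA, which reads like a dropped .cuda() call). A vectorised sketch of what it computes, unit-normalising each (batch, 4) quaternion; the eps floor is an assumption standing in for the loop's norm check:

import torch

def torch_norm_quat(quat, eps=1e-8):
    # Normalise every quaternion in the batch to unit length at once.
    norm = quat.norm(dim=1, keepdim=True).clamp(min=eps)
    return quat / norm

q = torch_norm_quat(torch.rand(4, 4))
print(q.norm(dim=1))  # all ~1.0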
Model
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_dim, output_class_num, **kwargs): super(Model, self).__init__() self.linear = nn.Linear(input_dim, output_class_num) def forward(self, features): pooled = features.mean(dim=1) predicted = self...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
B06901052/s3prl
Model
false
105
[ "MIT" ]
0
5f63d2df043d2d7c81580cd042fa2cea34746f48
https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_dim, output_class_num, **kwargs): super(Model, self).__init__() self.linear = nn.Linear(input_dim, output_class_num) def forward(self, features): pooled = features.mean(dim=1) predicted = self...
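The Model record (a mean-pooling linear probe) is truncated mid-forward, but the visible lines pin the behaviour down: pool over the time axis, then classify:

import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, input_dim, output_class_num, **kwargs):
        super().__init__()
        self.linear = nn.Linear(input_dim, output_class_num)

    def forward(self, features):
        # (batch, time, dim) -> (batch, dim) -> (batch, classes)
        pooled = features.mean(dim=1)
        return self.linear(pooled)

logits = Model(4, 3)(torch.rand(2, 10, 4))  # (2, 3)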
ChannelNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class ChannelNorm(nn.Module): def __init__(self, numFeatures, epsilon=1e-05, affine=True): super(ChannelNorm, self).__init__() if affine: self.weight = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) self.bias = nn...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...
B06901052/s3prl
ChannelNorm
false
106
[ "MIT" ]
0
5f63d2df043d2d7c81580cd042fa2cea34746f48
https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, numFeatures, epsilon=1e-05, affine=True): super().__init__() if affine: self.weight = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) self.bias = nn.parameter.Parameter(to...
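ChannelNorm's statistics and forward are truncated. A sketch normalising over the channel axis of a (batch, channels, time) tensor with optional affine parameters; the original allocates its weight/bias uninitialised with torch.Tensor, so the ones/zeros init here is a substitution for runnability:

import torch
import torch.nn as nn

class ChannelNorm(nn.Module):
    def __init__(self, num_features, epsilon=1e-05, affine=True):
        super().__init__()
        self.epsilon = epsilon
        if affine:
            self.weight = nn.Parameter(torch.ones(1, num_features, 1))
            self.bias = nn.Parameter(torch.zeros(1, num_features, 1))
        else:
            self.weight = self.bias = None

    def forward(self, x):
        mean = x.mean(dim=1, keepdim=True)
        var = x.var(dim=1, keepdim=True, unbiased=False)
        x = (x - mean) / torch.sqrt(var + self.epsilon)
        if self.weight is not None:
            x = x * self.weight + self.bias
        return x

y = ChannelNorm(4)(torch.rand(2, 4, 16))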
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import torch.nn as nn import torch.nn.functional as F def attention(q, k, v, d_k, mask=None, dropout=None): scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask == 0, -1000000000.0...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
AviVarma/torchASN-Transformer
MultiHeadAttention
false
107
[ "MIT" ]
0
55bccf4cdb099cd8e9ac99f5f87f989ce2add983
https://github.com/AviVarma/torchASN-Transformer/tree/55bccf4cdb099cd8e9ac99f5f87f989ce2add983
import math import torch import torch.nn as nn import torch.nn.functional as F def attention(q, k, v, d_k, mask=None, dropout=None): scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask == 0, -1000000000.0...
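The attention helper in the MultiHeadAttention record is cut off inside masked_fill. Completed with the standard scaled dot-product steps (softmax over the key axis, optional dropout, weighted sum of values):

import math
import torch
import torch.nn.functional as F

def attention(q, k, v, d_k, mask=None, dropout=None):
    scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        mask = mask.unsqueeze(1)
        scores = scores.masked_fill(mask == 0, -1e9)
    scores = F.softmax(scores, dim=-1)
    if dropout is not None:
        scores = dropout(scores)
    return torch.matmul(scores, v)

q = k = v = torch.rand(2, 4, 5, 8)  # (batch, heads, seq, d_k)
out = attention(q, k, v, d_k=8)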
GatedConv1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch class MaskedConv1d(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, bias=True, causal=True): if causal: padding = (kernel_size - 1) * dilation ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch a...
B0BBB/seq2seq.pytorch
GatedConv1d
false
108
[ "MIT" ]
0
54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4
https://github.com/B0BBB/seq2seq.pytorch/tree/54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch class MaskedConv1d(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, bias=True, causal=True): if causal: padding = (kernel_size - 1) * dilation ...
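GatedConv1d's record only shows the MaskedConv1d parent computing causal padding. A sketch of the gated causal convolution it builds on top: one conv emits 2*out channels, half of which gate the other half through a sigmoid (the exact split order in the repo is not recoverable from the dump):

import torch
import torch.nn as nn

class GatedConv1d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, dilation=1):
        super().__init__()
        self.pad = (kernel_size - 1) * dilation
        self.conv = nn.Conv1d(in_channels, 2 * out_channels, kernel_size,
                              dilation=dilation, padding=self.pad)

    def forward(self, x):
        y = self.conv(x)
        if self.pad:
            y = y[:, :, :-self.pad]  # drop the future-looking tail (causal)
        out, gate = y.chunk(2, dim=1)
        return out * torch.sigmoid(gate)

y = GatedConv1d(4, 8, kernel_size=3)(torch.rand(2, 4, 16))  # (2, 8, 16)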
LayerNorm1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch class LayerNorm1d(nn.Module): def __init__(self, num_features, eps=1e-06, affine=True): super(LayerNorm1d, self).__init__() self.eps = eps self.num_features = num_features self.affine = aff...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.optim import torch ...
B0BBB/seq2seq.pytorch
LayerNorm1d
false
109
[ "MIT" ]
0
54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4
https://github.com/B0BBB/seq2seq.pytorch/tree/54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch class Model(nn.Module): def __init__(self, num_features, eps=1e-06, affine=True): super().__init__() self.eps = eps self.num_features = num_features self.affine = affine if self.aff...
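LayerNorm1d's forward and parameter init are truncated. A sketch normalising each position over its feature vector, with affine parameters initialised to identity (the original's init is not visible):

import torch
import torch.nn as nn

class LayerNorm1d(nn.Module):
    def __init__(self, num_features, eps=1e-06, affine=True):
        super().__init__()
        self.eps = eps
        if affine:
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))
        else:
            self.weight = self.bias = None

    def forward(self, x):
        mean = x.mean(dim=-1, keepdim=True)
        std = x.std(dim=-1, keepdim=True)
        x = (x - mean) / (std + self.eps)
        if self.weight is not None:
            x = x * self.weight + self.bias
        return x

y = LayerNorm1d(4)(torch.rand(2, 3, 4))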