from torch.optim.lr_scheduler import CosineAnnealingLR as _CosineAnnealingLR

from colossalai.registry import LR_SCHEDULERS

from .delayed import DelayerScheduler, WarmupDelayerScheduler, WarmupScheduler


@LR_SCHEDULERS.register_module
class CosineAnnealingLR(_CosineAnnealingLR):
    r"""Set the learning rate of each p...

from torch.optim.lr_scheduler import _LRScheduler

from colossalai.registry import LR_SCHEDULERS


@LR_SCHEDULERS.register_module
class LinearWarmupLR(_LRScheduler):
    """Linearly warmup learning rate and then linearly decay.

    Args:
        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
        to...

from torch.optim.lr_scheduler import OneCycleLR as _OneCycleLR

from colossalai.registry import LR_SCHEDULERS


@LR_SCHEDULERS.register_module
class OneCycleLR(_OneCycleLR):
    r"""Sets the learning rate of each parameter group according to the
    1cycle learning rate policy. The 1cycle policy anneals the learning ...

from typing import List

from torch.optim.lr_scheduler import MultiStepLR as _MultiStepLR

from colossalai.registry import LR_SCHEDULERS

from .delayed import WarmupScheduler


@LR_SCHEDULERS.register_module
class MultiStepLR(_MultiStepLR):
    """Decays the learning rate of each parameter group by gamma once the nu...

from .cosine import CosineAnnealingLR, CosineAnnealingWarmupLR, FlatAnnealingLR, FlatAnnealingWarmupLR
from .linear import LinearWarmupLR
from .multistep import MultiStepLR, MultiStepWarmupLR
from .onecycle import OneCycleLR
from .poly import PolynomialLR, PolynomialWarmupLR
from .torch import LambdaLR, MultiplicativeL...

from torch.optim.lr_scheduler import _LRScheduler

from colossalai.registry import LR_SCHEDULERS

from .delayed import WarmupScheduler


@LR_SCHEDULERS.register_module
class PolynomialLR(_LRScheduler):
    """Polynomial learning rate scheduler.

    Args:
        optimizer (:class:`torch.optim.Optimizer`): Wrapped optim...

from torch.optim.lr_scheduler import _LRScheduler


class _enable_get_lr_call:

    def __init__(self, o):
        self.o = o

    def __enter__(self):
        self.o._get_lr_called_within_step = True
        return self

    def __exit__(self, type, value, traceback):
        self.o._get_lr_called_within_step = False ...
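# A minimal usage sketch (assumed, not from the original file): _enable_get_lr_call
# mirrors the guard PyTorch's schedulers use so that get_lr() can tell whether it
# was invoked from inside step():
#
#     with _enable_get_lr_call(scheduler):
#         values = scheduler.get_lr()    # flagged as called within step()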
from torch.optim.lr_scheduler import LambdaLR as _LambdaLR
from torch.optim.lr_scheduler import MultiplicativeLR as _MultiplicativeLR
from torch.optim.lr_scheduler import StepLR as _StepLR
from torch.optim.lr_scheduler import ExponentialLR as _ExponentialLR

from colossalai.registry import LR_SCHEDULERS


@LR_SCHEDULER...

from typing import List, Optional

import torch.nn.functional as F

from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor import ColoTensor, distspec, ColoTensorSpec, ReplicaSpec
from ._utils import GeneralTensor, convert_to_colo_tensor


@colo_op_impl(F.layer_norm)
def colo_layernorm(
        input_te...

import torch.nn.functional as F
from typing import Optional
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor import ComputePattern, ColoTensorSpec, ComputePattern, ComputeSpec, ColoTensor, ShardSpec, \
    ReplicaSpec
from ._utils import GeneralTensor, convert_to_colo_tensor, reduce_input ...

from copy import deepcopy
from typing import Optional

import torch.nn.functional as F

from colossalai.tensor import ColoTensor, ColoTensorSpec, ComputePattern, ComputeSpec, ReplicaSpec, ShardSpec
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor.sharding_spec import ShardingSpec

from ._uti...

from .addmm import colo_addmm
from .batch_norm import colo_batch_norm
from .element_wise import *
from .embedding import colo_embedding
from .embedding_bag import colo_embedding_bag
from .layernorm import colo_layernorm
from .linear import colo_linear
from .loss import colo_cross_entropy
from .view import colo_view
import torch.nn.functional as F
from typing import Optional
from torch import Tensor
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor import ComputePattern, ComputePattern, ComputeSpec, ColoTensor, distspec, ColoTensorSpec, \
    ShardSpec, ReplicaSpec
from ._utils import GeneralTensor, conv...

import torch
import torch.nn.functional as F
from torch import Tensor
from colossalai.tensor import ColoTensor, ColoTensorSpec
from colossalai.tensor.op_wrapper import colo_op_impl
from ._utils import GeneralTensor, convert_to_colo_tensor


def register_elementwise_op(op):

    @colo_op_impl(op)
    def elementwise_o...

import torch
import torch.nn.functional as F
from typing import Optional
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor import ColoTensor, ColoTensorSpec
from colossalai.nn.loss.loss_1d import VocabParallelCrossEntropyLoss1D
from ._utils import GeneralTensor, convert_to_colo_tensor


@colo...

import math

import torch

from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor import ColoTensor, ColoTensorSpec, ReplicaSpec
from typing import Optional, Union


def _all_int(my_iter):
    return all(isinstance(i, int) for i in my_iter)


def _get_valid_shape(shape):
    if isinstance(shape, lis...

from typing import Optional

import torch.nn.functional as F

from colossalai.tensor import ColoTensor, ColoTensorSpec, ReplicaSpec
from colossalai.tensor.op_wrapper import colo_op_impl
from ._utils import GeneralTensor, convert_to_colo_tensor


@colo_op_impl(F.batch_norm)
def colo_batch_norm(
        input: GeneralTensor...

import torch
from typing import Union, Optional, List
from colossalai.tensor import ColoTensor

import torch
import torch.distributed as dist
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.nn.layer.utils import divide
from colossalai.tensor import ProcessGroup, ColoTensorSpec

Genera...

import torch

from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor import ComputePattern, ComputePattern, ComputeSpec, ColoTensor
from colossalai.tensor import distspec, ColoTensorSpec, ShardSpec, ReplicaSpec
from ._utils import GeneralTensor, Number, convert_to_colo_tensor
from ._utils import re...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch.nn as nn

from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from contextlib import contextmanager


class ParallelLayer(nn.Module):

    global_state_dict: bool = True

    def __init__(self):
        super().__init...

from .colossalai_layer import *
from .parallel_1d import *
from .parallel_2d import *
from .parallel_2p5d import *
from .parallel_3d import *
from .parallel_sequence import *
from .moe import *
from .utils import *
from .vanilla import *
from .wrapper import *

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch
from torch import distributed as dist

from colossalai.communication import ring_forward
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_sequence._utils import _cal...

from ._operation import RingQK, RingAV
from .layers import TransformerSelfAttentionRing

__all__ = ['TransformerSelfAttentionRing', 'RingAV', 'RingQK']

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import math

import colossalai
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter

from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-


def _calc_incoming_device_range(i, rank, world_size, sub_seq_length):
    device_of_incoming_k = (rank - i - 1) % world_size
    start_idx = sub_seq_length * device_of_incoming_k
    end_idx = sub_seq_length * (device_of_incoming_k + 1)
    return start_idx, end_idx


d...
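# Worked example for _calc_incoming_device_range (values assumed for illustration):
# with world_size=4 and sub_seq_length=128, at ring step i=0 rank 0 receives the
# sub-sequence held by device (0 - 0 - 1) % 4 == 3, i.e. the slice [384, 512).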
import torch.nn as nn
import torch.distributed as dist
from typing import List, Tuple, Union
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc


class PipelineSharedModuleWrapper:

    def __init__(self, pipeline_ranks: Union[List[int], Tuple[int]]) -> None:
        assert le...

from .pipeline_wrapper import PipelineSharedModuleWrapper

__all__ = ['PipelineSharedModuleWrapper']

from typing import Any, Tuple

import torch
import torch.distributed as dist
from colossalai.communication.collective import (all_gather, all_reduce, reduce_scatter)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_devic...

from ._operation import reduce_by_batch_2p5d, split_batch_2p5d
from .layers import (Classifier2p5D, Embedding2p5D, LayerNorm2p5D, Linear2p5D, PatchEmbedding2p5D,
                     VocabParallelClassifier2p5D, VocabParallelEmbedding2p5D)

__all__ = [
    'split_batch_2p5d', 'reduce_by_batch_2p5d', 'Linear2p5D', 'Laye...

import math
from collections import OrderedDict
from typing import Callable

import torch
import torch.nn as nn
import torch.nn.functional as F

from colossalai.communication import broadcast
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as gpc
from colossalai.global_variab...

from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env


def get_tesseract_dim_dep_from_env():
    try:
        tesseract_dim = env.tesseract_dim
        tesseract_dep = env.tesseract_dep
        asse...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from typing import Optional, Tuple

import torch
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd

from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter
from colossalai.constants import INPUT_GROUP_3D, WE...
from ._operation import reduce_by_batch_3d, split_batch_3d, split_tensor_3d
from .layers import (Classifier3D, Embedding3D, LayerNorm3D, Linear3D, PatchEmbedding3D,
                     VocabParallelClassifier3D, VocabParallelEmbedding3D)

__all__ = [
    'reduce_by_batch_3d', 'split_tensor_3d', 'split_batch_3d', 'Line...

import math
from collections import OrderedDict
from typing import Callable

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter

from colossalai.communication import all_reduce, broadcast
from colossalai.constants import INPUT_GROUP_3D, INPUT_X_WEI...

from collections import OrderedDict
from functools import partial

import torch
from torch import Tensor

from colossalai.constants import INPUT_GROUP_3D, INPUT_X_WEIGHT_3D, OUTPUT_GROUP_3D, OUTPUT_X_WEIGHT_3D, WEIGHT_GROUP_3D
from colossalai.core import global_context as gpc
from colossalai.global_variables import ten...

from .common import (ACT2FN, CheckpointModule, _ntuple, divide, get_tensor_parallel_mode,
                     set_tensor_parallel_attribute_by_partition, set_tensor_parallel_attribute_by_size, to_2tuple)

__all__ = [
    'CheckpointModule', 'divide', 'ACT2FN', 'set_tensor_parallel_attribute_by_size', 'set_tensor_p...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import collections.abc
from itertools import repeat

import numpy as np
import torch
from colossalai.constants import IS_TENSOR_PARALLEL, NUM_PARTITIONS
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.utils import checkpoint
from torch ...

import torch
import torch.distributed as dist
from colossalai.core import global_context as gpc

try:
    import fused_mix_prec_layer_norm_cuda
except:
    fused_mix_prec_layer_norm_cuda = None


class FusedLayerNormAffineFunction1D(torch.autograd.Function):
    r"""Layernorm

    Args:
        input: input matrix.
    ...

from .layers import (Classifier1D, Dropout1D, Embedding1D, LayerNorm1D, Linear1D, Linear1D_Col, Linear1D_Row,
                     PatchEmbedding1D, VocabParallelClassifier1D, VocabParallelEmbedding1D)

__all__ = [
    'Linear1D', 'Linear1D_Col', 'Linear1D_Row', 'Embedding1D', 'Dropout1D', 'Classifier1D', 'VocabParalle...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import math
from collections import OrderedDict
from typing import Callable, Tuple

import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn.parameter import Parameter

from colossalai.communication import broadcast
from colossalai.context impo...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch
import torch.distributed as dist
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env

from ..utils import divide


def set_parallel_input(input_parallel: bool):
    env.parallel_input_1d = inpu...

from typing import Any, Optional, Tuple

import torch
import torch.distributed as dist
from colossalai.communication.collective import (all_gather, all_reduce, reduce, reduce_scatter)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import...

from ._operation import reduce_by_batch_2d, split_batch_2d
from .layers import (Classifier2D, Embedding2D, LayerNorm2D, Linear2D, PatchEmbedding2D,
                     VocabParallelClassifier2D, VocabParallelEmbedding2D)

__all__ = [
    'split_batch_2d', 'reduce_by_batch_2d', 'Linear2D', 'LayerNorm2D', 'Classifier2D'...

import math
from collections import OrderedDict
from typing import Callable

import torch
import torch.nn as nn
import torch.nn.functional as F

from colossalai.communication import broadcast
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as gpc
from colossalai.global_variab...

from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env


def get_summa_dim_from_env() -> int:
    try:
        summa_dim = env.summa_dim
        assert summa_dim > 0, 'SUMMA_DIM must be larger than ze...

import math
from typing import Callable

from colossalai.utils import get_current_device
from torch import dtype, nn

from ... import init as init
from ..parallel_1d import Embedding1D, PatchEmbedding1D, VocabParallelEmbedding1D
from ..parallel_2d import Embedding2D, PatchEmbedding2D, VocabParallelEmbedding2D
from ..pa...
import inspect
import math
from typing import Callable

from torch import dtype, nn

from colossalai.utils import get_current_device

from ... import init as init
from ..parallel_1d import *
from ..parallel_2d import *
from ..parallel_2p5d import *
from ..parallel_3d import *
from ..utils import get_tensor_parallel_mod...

from ._utils import partition_batch
from .dropout import Dropout
from .embedding import Embedding, PatchEmbedding
from .linear import Classifier, Linear
from .normalization import LayerNorm

__all__ = ['Linear', 'Classifier', 'Embedding', 'PatchEmbedding', 'LayerNorm', 'Dropout', 'partition_batch']

import torch.nn as nn

from colossalai.context import ParallelMode, seed

from ..parallel_1d import *
from ..utils import get_tensor_parallel_mode
from ._utils import ColossalaiModule


class Dropout(ColossalaiModule):
    """Dropout layer of colossalai.

    Args:
        p (float, optional): probability of an element ...

from colossalai.utils import get_current_device
from torch import nn

from ..parallel_1d import LayerNorm1D
from ..parallel_2d import LayerNorm2D
from ..parallel_2p5d import LayerNorm2p5D
from ..parallel_3d import LayerNorm3D
from ..utils import get_tensor_parallel_mode
from ..vanilla import VanillaLayerNorm
from ._uti...

import torch.nn as nn
from torch import Tensor

from ..parallel_2d._operation import split_batch_2d
from ..parallel_2p5d._operation import split_batch_2p5d
from ..parallel_3d._operation import split_batch_3d
from ..utils import get_tensor_parallel_mode

_parallel_split_batch = {'2d': split_batch_2d, '2.5d': split_batch...

from .layers import (
    DropPath,
    VanillaClassifier,
    VanillaLayerNorm,
    VanillaLinear,
    VanillaPatchEmbedding,
    WrappedDropout,
    WrappedDropPath,
)

__all__ = [
    "VanillaLayerNorm",
    "VanillaPatchEmbedding",
    "VanillaClassifier",
    "DropPath",
    "WrappedDropout",
    "WrappedDropPath",
    "VanillaLinear...

import math
from typing import Callable

import torch
import torch.nn.functional as F
from torch import Tensor
from torch import nn as nn
from torch.nn.parameter import Parameter

from colossalai.context import seed
from colossalai.nn import init as init
from colossalai.registry import LAYERS
from colossalai.utils.cuda...
from typing import Any, Optional, Tuple

import torch
import torch.distributed as dist
from torch import Tensor
from torch.distributed import ProcessGroup

COL_MOE_KERNEL_FLAG = False
try:
    from colossalai._C import moe
except:
    moe = None


def build_moe_if_not_prebuilt():
    # load moe kernel during runtime i...

from .experts import Experts, FFNExperts, TPExperts
from .layers import MoeLayer, MoeModule
from .routers import MoeRouter, Top1Router, Top2Router
from .utils import NormalNoiseGenerator, UniformNoiseGenerator, build_ffn_experts

__all__ = [
    'Experts', 'FFNExperts', 'TPExperts', 'Top1Router', 'Top2Router', 'MoeLaye...

import torch
import torch.nn.functional as F

from colossalai.utils import get_current_device
from colossalai.context.moe_context import MOE_CONTEXT
from .experts import FFNExperts, TPExperts


class ForceFP32Parameter(torch.nn.Parameter):

    def half(self, memory_format=None):
        return self.data.clone()


class...

import math

import torch
import torch.nn as nn

from colossalai.context import ParallelMode, seed
from colossalai.utils import get_current_device
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.zero.init_ctx import no_shard_zero_decrator
from typing import Type


class MoeExperts(nn.Module):
    ...

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.utils import get_current_device
from colossalai.nn.layer.moe._operation import COL_MOE_KERNEL_FLAG, AllToAll, AllGather, \
    ReduceScatter, MoeDispatch, MoeCombine
fro...

import math
from abc import ABC

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from colossalai.utils import get_current_device
from colossalai.context import MOE_CONTEXT
from colossalai.nn.layer.moe._operation import moe_cumsum
from typing import Callable, Optional ...
from torch import nn

from ._utils import calc_acc
from .accuracy_2d import Accuracy2D
from .accuracy_2p5d import Accuracy2p5D
from .accuracy_3d import Accuracy3D
from colossalai.nn.layer.utils import get_tensor_parallel_mode

_parallel_accuracy = {
    '2d': Accuracy2D,
    '2.5d': Accuracy2p5D,
    '3d': Accuracy3D,
...

import torch
from colossalai.nn.layer.parallel_2d import reduce_by_batch_2d, split_batch_2d
from torch import nn

from ._utils import calc_acc


class Accuracy2D(nn.Module):
    """Accuracy for 2D parallelism
    """

    def __init__(self):
        super().__init__()

    def forward(self, logits, targets):
        ""...

import torch
from colossalai.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D
from colossalai.nn.layer.parallel_3d import reduce_by_batch_3d, split_tensor_3d
from colossalai.nn.layer.parallel_3d._utils import get_parallel_mode_from_env
from torch import nn

from ._utils import calc_acc


class Accuracy3D(nn.Module):
    ...

import torch
from colossalai.nn.layer.parallel_2p5d import reduce_by_batch_2p5d, split_batch_2p5d
from torch import nn

from ._utils import calc_acc


class Accuracy2p5D(nn.Module):
    """Accuracy for 2p5D parallelism
    """

    def __init__(self):
        super().__init__()

    def forward(self, logits, targets): ...

import torch


def calc_acc(logits, targets):
    preds = torch.argmax(logits, dim=-1)
    correct = torch.sum(targets == preds)
    return correct
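# A quick sanity check for calc_acc, assuming plain unsharded tensors:
#
#     logits = torch.tensor([[2.0, 0.5], [0.1, 1.0], [3.0, -1.0]])
#     targets = torch.tensor([0, 1, 1])
#     calc_acc(logits, targets)    # tensor(2): rows 0 and 1 are predicted correctly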
import os
import warnings
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Type, Union

import torch
import torch.nn as nn
from torch.nn.modules.module import _addindent

try:
    from torch.fx.graph import Graph, PythonCode, _custom_builtins, _is_from_torch, _PyTreeCodeGen
    from torch.fx....

# meta patch from https://github.com/pytorch/pytorch/blob/master/torch/_meta_registrations.py
# should be activated for PyTorch version 1.12.0 and below
# refer to https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/native_functions.yaml
# for more meta_registrations

from typing import Callable, List, ...

import operator

import torch
from torch.fx.proxy import Proxy, Attribute
from typing import List, Union, Any
from colossalai.fx.tracer.meta_patch import meta_patched_function

__all__ = ['ColoProxy']


class ColoProxy(Proxy):
    """
    ColoProxy is a proxy class which uses meta tensor to handle data-dependent control...

from ._compatibility import compatibility, is_compatible_with_meta
from .graph_module import ColoGraphModule
from .passes import MetaInfoProp, metainfo_trace
from .tracer import ColoTracer, meta_trace, symbolic_trace

from typing import Callable

import torch

try:
    from . import _meta_registrations
    META_COMPATIBILITY = True
except:
    META_COMPATIBILITY = False


def compatibility(is_backward_compatible: bool = False) -> Callable:
    """A decorator to make a function compatible with different versions of PyTorch.

    Args...

import enum
import functools
import inspect
import operator
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

import torch
from torch.fx import Graph, Node, Proxy, Tracer
from torch.utils._pytree import tree_map

from colossalai.fx import ColoGraphModule, c...

class PatchRegistry:

    def __init__(self, name):
        self.name = name
        self.store = {}

    def register(self, source):

        def wrapper(func):
            self.store[source] = func
            return func

        return wrapper

    def get(self, source):
        assert source in self.store ...
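# Illustrative use of PatchRegistry with hypothetical names (demo_registry,
# patched_linear); this is the decorator pattern that presumably populates the
# meta_patched_function / meta_patched_module registries used by the tracer:
#
#     demo_registry = PatchRegistry(name='demo')
#
#     @demo_registry.register(torch.nn.Linear)
#     def patched_linear(self, input):
#         ...
#
#     demo_registry.get(torch.nn.Linear) is patched_linear    # True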
from colossalai.fx.tracer.meta_patch.patched_function.python_ops import operator_getitem

from ._meta_trace import meta_trace
from ._symbolic_trace import symbolic_trace
from .tracer import ColoTracer

from typing import List, Union, Any

from ..proxy import ColoProxy, ColoAttribute

import torch
from .meta_patch import meta_patched_function, meta_patched_module

__all__ = ['is_element_in_list', 'extract_meta']


def is_element_in_list(elements: Union[List[Any], Any], list_: List[Any]):
    if isinstance(elements, (tup...

from typing import Any, Callable, Dict, Optional, Union

import torch

from colossalai.fx import ColoGraphModule
from colossalai.fx._compatibility import compatibility

from .tracer import ColoTracer


@compatibility(is_backward_compatible=True)
def symbolic_trace(
    root: Union[torch.nn.Module, Callable[..., Any]],
    ...
#!/usr/bin/env python
"""
tracer.py:
    Implements a tracer which supports control flow and user-defined meta arguments.
    The implementation is partly inspired by HuggingFace's fx tracer.
"""

import enum
import functools
import inspect
import operator
from contextlib import contextmanager
from typing import Any, Dict,...
import torch
from torch.fx import Graph, Node
from torch.utils._pytree import tree_map


def normalize_tuple(x):
    if not isinstance(x, tuple):
        return (x,)
    return x


def is_autogradable(x):
    return isinstance(x, torch.Tensor) and x.is_floating_point()


def meta_trace(module: torch.nn.Module, fake_dev...

from .patched_function import *
from .patched_module import *

import math

import torch

from ...registry import meta_patched_module


@meta_patched_module.register(torch.nn.Conv1d)
def torch_nn_conv1d(self, input):
    # the output shape is calculated using the formula stated
    # at https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html#torch.nn.Conv1d
    l_in = input...

import torch

from ...registry import meta_patched_module


@meta_patched_module.register(torch.nn.Embedding)
def torch_nn_embedding(self, input):
    result_shape = input.shape + (self.embedding_dim,)
    return torch.empty(result_shape, device='meta')
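# Shape check for the embedding patch above: an index tensor of shape (2, 5) fed
# through an Embedding with embedding_dim=8 yields a meta tensor of shape
# (2, 5, 8), i.e. input.shape + (self.embedding_dim,), without allocating real storage.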
import torch

from ...registry import meta_patched_module


@meta_patched_module.register(torch.nn.Linear)
def torch_nn_linear(self, input):
    last_dim = input.shape[-1]
    assert last_dim == self.in_features, f'Expected hidden size {self.in_features} but got {last_dim} for the torch.nn.Linear patch'
    return torc...

import math

import torch

from ...registry import meta_patched_module


@meta_patched_module.register(torch.nn.AvgPool1d)
def torch_nn_avgpool1d(self, input):
    num_dim = input.dim()
    assert num_dim in [2, 3], f'expected the input to have 2 or 3 dimensions, but got {num_dim} dimensions'
    l_in = input.shape[-1...

from .activation_function import *
from .convolution import *
from .embedding import *
from .linear import *
from .normalization import *
from .pooling import *
from .rnn import *

import torch

from ...registry import meta_patched_module


@meta_patched_module.register(torch.nn.ReLU)
@meta_patched_module.register(torch.nn.Sigmoid)
@meta_patched_module.register(torch.nn.GELU)
@meta_patched_module.register(torch.nn.Tanh)
@meta_patched_module.register(torch.nn.ReLU6)
@meta_patched_module.register(t...

import torch

from ...registry import meta_patched_module


@meta_patched_module.register(torch.nn.LayerNorm)
@meta_patched_module.register(torch.nn.GroupNorm)
@meta_patched_module.register(torch.nn.BatchNorm1d)
@meta_patched_module.register(torch.nn.BatchNorm2d)
@meta_patched_module.register(torch.nn.BatchNorm3d)
def ...

from typing import Optional

import torch

from ...registry import meta_patched_module


@meta_patched_module.register(torch.nn.GRU)
@meta_patched_module.register(torch.nn.RNN)
def torch_nn_rnn(self, input, hx):
    assert input.shape[-1] == self.input_size, f'Expected input to have input size {self.input_size...

import torch

from ...registry import meta_patched_function


@meta_patched_function.register(torch.matmul)
@meta_patched_function.register('matmul')    # for built-in op @
def torch_matmul(input, other, *, out=None):
    # copied from huggingface.utils.fx
    d1 = input.dim()
    d2 = other.dim()
    shape = None
    ...
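# The shape rules torch_matmul reproduces (per torch.matmul semantics): 1-D @ 1-D
# gives a scalar, 2-D @ 2-D is an ordinary matrix product, and leading batch
# dimensions broadcast, e.g. (2, 3, 4) @ (4, 5) -> (2, 3, 5).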
import collections
import math
from itertools import repeat

import torch

from ...registry import meta_patched_function


def _ntuple(n, name="parse"):

    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return tuple(x)
        return tuple(repeat(x, n))

    parse.__name__ = name
    re...
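# _ntuple mirrors torch's internal helper: _ntuple(2)(3) returns (3, 3), while an
# iterable passes through, e.g. _ntuple(2)((3, 5)) -> (3, 5); the patched functions
# presumably use it to normalize kernel_size/stride-style arguments.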
import torch

from ...registry import meta_patched_function


@meta_patched_function.register(torch.nn.functional.embedding)
def torch_nn_functional_embedding(input,
                                   weight,
                                   padding_idx=None,
                                   max_norm=None,
                                   ...

from .activation_function import *
from .arithmetic import *
from .convolution import *
from .embedding import *
from .normalization import *
from .torch_ops import *

import operator

import torch

from colossalai.fx.proxy import ColoProxy

from ...registry import meta_patched_function


@meta_patched_function.register(operator.getitem)
def operator_getitem(a, b):
    # copied from huggingface.utils.fx
    def to_concrete(t):
        if isinstance(t, torch.Tensor):
            concr...

import torch

from ...registry import meta_patched_function


@meta_patched_function.register(torch.arange)
def torch_arange(*args, **kwargs):
    n = len(args)
    step = 1
    if n == 1:
        start = 0
        end = args[0]
    elif n == 2:
        start, end = args
    else:
        start, end, step = args
    if...
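# The dispatch above reproduces torch.arange's positional forms: torch_arange(5)
# gives start=0, end=5, step=1, and torch_arange(2, 8, 3) unpacks all three; the
# truncated remainder presumably derives the output length, ceil((end - start) / step).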
import torch

from ...registry import meta_patched_function


@meta_patched_function.register(torch.nn.functional.relu)
def torch_nn_func_relu(input, inplace=False):
    return torch.empty(input.shape, device='meta')

import torch

from ...registry import meta_patched_function


@meta_patched_function.register(torch.nn.functional.layer_norm)
def torch_nn_func_layernorm(input, normalized_shape, weight=None, bias=None, eps=1e-05):
    return torch.empty(input.shape, device='meta')


@meta_patched_function.register(torch.nn.functional....

from .patched_bias_addition_function import *
from .patched_bias_addition_module import *
import operator
from abc import ABC, abstractmethod

import torch
import torch.nn.functional as F


class BiasAdditionFunc(ABC):
    """
    This class is used to construct the restructured computation graph for
    call_func nodes with bias addition inside.
    """

    def __init__(self, tracer, target, args, kwargs, s...
import operator

import torch
import torch.nn.functional as F

from ...registry import bias_addition_function
from .bias_addition_function import LinearBasedBiasFunc


@bias_addition_function.register(F.linear)
class Linear(LinearBasedBiasFunc):

    def extract_kwargs_from_origin_func(self):
        assert 'bias' in s...

from .addbmm import Addbmm
from .addmm import Addmm
from .bias_addition_function import BiasAdditionFunc, LinearBasedBiasFunc, func_to_func_dict, method_to_func_dict
from .linear import Linear

import operator

import torch
import torch.nn.functional as F

from ...registry import bias_addition_function, bias_addition_method
from .bias_addition_function import LinearBasedBiasFunc


@bias_addition_method.register(torch.Tensor.addbmm)
@bias_addition_function.register(torch.addbmm)
class Addbmm(LinearBasedBiasFun...

import operator

import torch
import torch.nn.functional as F

from ...registry import bias_addition_function, bias_addition_method
from .bias_addition_function import LinearBasedBiasFunc


@bias_addition_method.register(torch.Tensor.addmm)
@bias_addition_function.register(torch.addmm)
class Addmm(LinearBasedBiasFunc):
...
import operator
from abc import ABC, abstractmethod

import torch
import torch.nn.functional as F


class BiasAdditionModule(ABC):
    """
    This class is used to construct the restructured computation graph for
    call_module nodes with bias addition inside.
    """

    def __init__(self, tracer, target, args, kwarg...