import torch
from colossalai.fx.tracer.meta_patch import patched_function
from functools import partial


def _run(data, patch_fn):
    try:
        output = patch_fn(data)
        return output
    except Exception as e:
        return e


def _assert_output_shape(data, patch_fn, expect_exception, output_shape):
    o...

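# The assertion helper above is truncated. A plausible completion (an
# assumption for illustration, not the repository's exact code): run the
# patched function, then either expect an exception or validate that the
# result is a meta tensor of the expected shape.
def _assert_output_shape(data, patch_fn, expect_exception, output_shape):
    output = _run(data, patch_fn)
    if expect_exception:
        # the patched function should have rejected this input
        assert isinstance(output, Exception)
    else:
        assert not isinstance(output, Exception)
        assert output.is_meta
        assert output.shape == output_shape
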
import torch
import torch.nn as nn
from torch.fx import GraphModule
from colossalai.fx import ColoTracer as Tracer


class ControlFlowModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(10, 10)
        self.linear2 = nn.Linear(10, 10)

    def forward(self, x, y):
        ...

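# The forward body is truncated; presumably it branches on `y`, which
# symbolic tracing cannot follow. A hedged sketch (the branch body below is
# an assumption) of how such a model is traced by pinning the condition:
#
#     def forward(self, x, y):
#         if y:
#             return self.linear1(x)
#         return self.linear2(x)
#
model = ControlFlowModel()
tracer = Tracer()
# specialize the data-dependent branch so the traced graph is static
graph = tracer.trace(model, concrete_args={'y': True})
gm = GraphModule(model, graph, model.__class__.__name__)
gm.recompile()
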
import torch
from colossalai.fx.tracer.meta_patch import patched_module


def _run(data, module, patch_fn):
    try:
        if isinstance(data, dict):
            output = patch_fn(module, **data)
        elif isinstance(data, (tuple, list)):
            output = patch_fn(module, *data)
        else:
            ...

import pytest
import timm.models as tm
import torch
from colossalai.fx import symbolic_trace


def trace_and_compare(model_cls, data, meta_args=None):
    # trace
    model = model_cls()
    # convert to eval for inference
    # it is important to set it to eval mode before tracing
    # without this statement, the t...

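# The comment above trails off: in train mode, dropout and batch-norm make
# eager and traced outputs diverge. A minimal sketch of the remaining flow,
# assuming the helper compares both outputs with torch.testing.assert_close:
def trace_and_compare(model_cls, data, meta_args=None):
    model = model_cls()
    # eval mode freezes dropout/batch-norm so both paths are deterministic
    model.eval()
    gm = symbolic_trace(model, meta_args=meta_args)
    with torch.no_grad():
        non_fx_out = model(data)
        fx_out = gm(data)
    torch.testing.assert_close(fx_out, non_fx_out)
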
import torch
from torchaudio_utils import trace_and_compare
from torchaudio.models import ConvTasNet, DeepSpeech, Wav2Letter, WaveRNN
from torchaudio.models.wavernn import MelResNet, UpsampleNetwork
import pytest


def test_wave2letter_waveform():
    batch_size = 2
    num_features = 1
    num_classes = 40
    input_l...

import torch
from colossalai.fx import symbolic_trace


def trace_and_compare(model, data_gen, need_meta=False, need_concrete=False, kwargs_transform=False):
    data = data_gen()
    concrete_args = data if need_concrete else {}
    meta_args = {k: v.to('meta') for k, v in data.items()} if need_meta else {}
    mode...

import torch
from torchaudio.models import Tacotron2
from torchaudio_utils import trace_and_compare
import pytest


def _get_tacotron2_model(n_mels, decoder_max_step=2000, gate_threshold=0.5):
    return Tacotron2(
        mask_padding=False,
        n_mels=n_mels,
        n_symbol=20,
        n_frames_per_step=1,
        ...

import torch
from torchaudio.models.wav2vec2 import (
    hubert_base,
    hubert_large,
    hubert_xlarge,
    wav2vec2_base,
    wav2vec2_large,
    wav2vec2_large_lv60k,
)
from torchaudio_utils import trace_and_compare
import pytest

MODEL_LIST = [
    hubert_base,
    hubert_large,
    hubert_xlarge,
    wav2vec2_b...

import torch
from torchaudio_utils import trace_and_compare
from torchaudio.models import Emformer, Conformer
import pytest


def test_conformer():
    input_dim = 80
    batch_size = 10
    num_frames = 400
    num_heads = 4
    ffn_dim = 128
    num_layers = 4
    depthwise_conv_kernel_size = 31
    model = Conforme...

import torch
import torchvision
import torchvision.models as tm
from packaging import version
from colossalai.fx import symbolic_trace


def test_torchvision_models():
    MODEL_LIST = [
        tm.vgg11, tm.resnet18, tm.densenet121, tm.mobilenet_v3_small, tm.resnext50_32x4d, tm.wide_resnet50_2,
        tm.regnet_x_16...

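    # (sketch) the truncated list presumably continues; the loop below is an
    # assumed completion that traces each model and checks agreement
    data = torch.rand(2, 3, 224, 224)
    for model_cls in MODEL_LIST:
        model = model_cls()
        model.eval()    # deterministic BN/dropout before tracing
        gm = symbolic_trace(model)
        with torch.no_grad():
            torch.testing.assert_close(gm(data), model(data))
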
import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

BATCH_SIZE = 1
SEQ_LENGTH = 16


def test_t5():
    MODEL_LIST = [
        transformers.T5Model,
        transformers.T5ForConditionalGeneration,
        transformers.T5EncoderModel,
    ]
    config = transforme...

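    # (sketch) assumed continuation: a small config, a token generator, and
    # the shared comparison helper; the hyper-parameters are illustrative
    config = transformers.T5Config(d_model=128, num_layers=2)

    def data_gen():
        input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        decoder_input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        return dict(input_ids=input_ids, decoder_input_ids=decoder_input_ids)

    for model_cls in MODEL_LIST:
        model = model_cls(config=config)
        trace_model_and_compare_output(model, data_gen)
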
import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

BATCH_SIZE = 1
SEQ_LENGTH = 16


# TODO: remove this skip once we handle the latest gpt model
@pytest.mark.skip
def test_gpt():
    MODEL_LIST = [
        transformers.GPT2Model,
        transformers.GPT2LMHeadMod...

import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

BATCH_SIZE = 2
SEQ_LENGTH = 16


def test_single_sentence_bert():
    MODEL_LIST = [
        transformers.BertModel,
        transformers.BertForPreTraining,
        transformers.BertLMHeadModel,
        transform...

import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

BATCH_SIZE = 2
SEQ_LENGTH = 16


def test_single_sentence_albert():
    MODEL_LIST = [
        transformers.AlbertModel,
        transformers.AlbertForPreTraining,
        transformers.AlbertForMaskedLM,
        t...

import torch
from torch.fx import GraphModule
from torch.utils._pytree import tree_flatten
from colossalai.fx import symbolic_trace


def trace_model_and_compare_output(model, data_gen):
    # must turn on eval mode to ensure the output is consistent
    model.eval()
    try:
        kwargs = d...

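        # (sketch) assumed remainder: run the eager model, trace with meta
        # tensors, and compare the dict-like HF outputs key by key
        kwargs = data_gen()
        non_fx_out = model(**kwargs)
        meta_args = {k: v.to('meta') for k, v in kwargs.items()}
        gm = symbolic_trace(model, meta_args=meta_args)
        fx_out = gm(**kwargs)
        for key in non_fx_out:
            if torch.is_tensor(non_fx_out[key]):
                assert torch.allclose(fx_out[key], non_fx_out[key]), \
                    f'{key} diverges between eager and traced execution'
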
import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

BATCH_SIZE = 1
SEQ_LENGTH = 16


def test_opt():
    MODEL_LIST = [
        transformers.OPTModel,
        transformers.OPTForCausalLM,
    ]
    config = transformers.OPTConfig(hidden_size=128, num_hidden_layers...

import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

from colossalai.fx import symbolic_trace

try:
    import diffusers
    HAS_DIFFUSERS = True
except ImportError:
    HAS_DIFFUSERS = False

BATCH_SIZE = 2
SEQ_LENGTH = 5
HEIGHT = 224
WIDTH = 224
IN_CHANNELS = 3
LA...

import torch

from colossalai.fx import symbolic_trace

try:
    from torchrec.models import dlrm
    from torchrec.modules.embedding_configs import EmbeddingBagConfig
    from torchrec.modules.embedding_modules import EmbeddingBagCollection
    from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
    ...

import pytest
import torch

from colossalai.fx import symbolic_trace

try:
    from torchrec.models import deepfm
    from torchrec.modules.embedding_configs import EmbeddingBagConfig
    from torchrec.modules.embedding_modules import EmbeddingBagCollection
    from torchrec.sparse.jagged_tensor import KeyedJaggedTenso...

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.fx import GraphModule
from torch.utils.checkpoint import checkpoint
import colossalai
from colossalai.core import global_context as gpc
from colossalai.fx import ColoTracer
from colossalai.fx.graph_module import Co...

import copy
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.fx import GraphModule
import colossalai
from colossalai.core import global_context as gpc
from colossalai.fx import ColoTracer
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.uti...

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.fx import GraphModule
from torch.utils.checkpoint import checkpoint
import colossalai
from colossalai.core import global_context as gpc
from colossalai.fx import ColoTracer
from colossalai.fx.graph_module import Co...

from typing import Optional, Tuple, Union
import torch
import torch.fx
import torchvision.models as tm
from gpt_utils import gpt2_medium, gpt2_xl
from torch.fx import symbolic_trace
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.fx.profiler import calculate_fwd_out, calculate_fwd_tmp, is...

import torch
import torch.nn as nn
from transformers import GPT2Config, GPT2LMHeadModel


class GPTLMModel(nn.Module):

    def __init__(self,
                 hidden_size=768,
                 num_layers=12,
                 num_attention_heads=12,
                 max_seq_len=1024,
                 vocab_size=50257,
                 ...

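# (sketch) a plausible full version of the truncated wrapper above, assumed
# for illustration: map the constructor arguments onto GPT2Config and return
# logits from the wrapped GPT2LMHeadModel
class GPTLMModelSketch(nn.Module):

    def __init__(self, hidden_size=768, num_layers=12, num_attention_heads=12,
                 max_seq_len=1024, vocab_size=50257, checkpoint=False):
        super().__init__()
        self.checkpoint = checkpoint
        self.model = GPT2LMHeadModel(
            GPT2Config(n_embd=hidden_size,
                       n_layer=num_layers,
                       n_head=num_attention_heads,
                       n_positions=max_seq_len,
                       vocab_size=vocab_size))
        if checkpoint:
            # trade compute for memory on the transformer blocks
            self.model.gradient_checkpointing_enable()

    def forward(self, input_ids, attention_mask):
        # return logits only; use_cache must be off under checkpointing
        return self.model(input_ids=input_ids,
                          attention_mask=attention_mask,
                          use_cache=not self.checkpoint)[0]
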
import pytest
import timm.models as tm
import torch
from timm_utils import split_model_and_compare_output


@pytest.mark.skip('balance split v2 is not ready')
def test_timm_models_without_control_flow():
    MODEL_LIST = [
        tm.resnest.resnest50d,
        tm.beit.beit_base_patch16_224,
        tm.cait.cait_s24_2...

import torch
from torch.fx import symbolic_trace
from torch.fx import GraphModule
from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass
from colossalai.fx import ColoTracer
import inspect
import random
import numpy as np

MANUAL_SEED = 0
random.seed(MANUAL_SEED)
np.ran...

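# (sketch) how the imported passes plausibly compose in the truncated helper,
# assuming the two-step API: a balance pass annotates the graph with split
# points, then the split pass cuts it into per-stage submodules
def split_model_and_compare_output_sketch(model, data, pp_size=2):
    model.eval()
    tracer = ColoTracer()
    graph = tracer.trace(model)
    gm = GraphModule(model, graph, model.__class__.__name__)
    # insert split nodes so each partition holds a similar parameter count
    annotated_gm = balanced_split_pass(gm, pp_size)
    split_gm, split_submodules = split_with_split_nodes_pass(annotated_gm)
    with torch.no_grad():
        torch.testing.assert_close(split_gm(data), model(data))
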
import inspect
import random
import numpy as np
import pytest
import torch
import torchvision
import torchvision.models as tm
from packaging import version
from torch.fx import GraphModule
from colossalai.fx import ColoTracer
from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_spli...

import torch
from torch.fx import GraphModule
from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass
from colossalai.fx import ColoTracer
from colossalai.pipeline.middleware import Partition, PartitionInputVal, PartitionOutputVal, Topo
from colossalai.pipeline.middlewar...

import pytest
import torch
import transformers
from topo_utils import split_model_and_get_DAG, check_topo, MLP

BATCH_SIZE = 1
SEQ_LENGTH = 16


def test_opt():
    MODEL_LIST = [
        MLP,
        transformers.OPTModel,
    ]
    CONFIGS = [
        {'dim': 10, 'layers': 12},
        transformers.OPTConfig(vocab_si...

import pytest
import torch
import transformers
from hf_utils import split_model_and_compare_output

BATCH_SIZE = 1
SEQ_LENGTH = 16


@pytest.mark.skip('balance split v2 is not ready')
def test_t5():
    MODEL_LIST = [
        transformers.T5Model,
        transformers.T5ForConditionalGeneration,
        transformers.T5...

import pytest
import torch
import transformers
from hf_utils import split_model_and_compare_output

BATCH_SIZE = 1
SEQ_LENGTH = 16


@pytest.mark.skip('balance split v2 is not ready')
def test_opt():
    MODEL_LIST = [
        transformers.OPTModel,
        transformers.OPTForCausalLM,
    ]
    config = transformers....

import torch
from torch.fx import symbolic_trace
from torch.fx import GraphModule
from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass
from colossalai.fx import ColoTracer
import inspect
import random
import numpy as np

MANUAL_SEED = 0
random.seed(MANUAL_SEED)
np.ran...

import pytest
import torch
import transformers
from hf_utils import split_model_and_compare_output

BATCH_SIZE = 64
SEQ_LENGTH = 16
NUM_EPOCHS = 2
NUM_CHUNKS = 1


@pytest.mark.skip('balance split v2 is not ready')
def test_gpt():
    MODEL_LIST = [
        transformers.GPT2Model,
        transformers.GPT2LMHeadModel,
        ...

import pytest
import torch
import transformers
from hf_utils import split_model_and_compare_output

BATCH_SIZE = 2
SEQ_LENGTH = 16


@pytest.mark.skip('balance split v2 is not ready')
def test_single_sentence_albert():
    MODEL_LIST = [
        transformers.AlbertModel,
        transformers.AlbertForPreTraining,
        ...

import pytest
import torch
import transformers
from hf_utils import split_model_and_compare_output

BATCH_SIZE = 2
SEQ_LENGTH = 16


@pytest.mark.skip('balance split v2 is not ready')
def test_single_sentence_bert():
    MODEL_LIST = [
        transformers.BertModel,
        transformers.BertForPreTraining,
        tra...

import pytest
import timm.models as tmm
import torch
import torchvision.models as tm

from colossalai.fx._compatibility import is_compatible_with_meta

if is_compatible_with_meta():
    from colossalai.fx import meta_trace

tm_models = [
    tm.vgg11,
    tm.resnet18,
    tm.densenet121,
    tm.mobilenet_v3_small,
    t...

import pytest
import timm.models as tmm
import torch
import torchvision.models as tm

from colossalai.fx._compatibility import is_compatible_with_meta

if is_compatible_with_meta():
    from colossalai.fx.profiler import MetaTensor

tm_models = [
    tm.vgg11,
    tm.resnet18,
    tm.densenet121,
    tm.mobilenet_v3_sma...

from typing import Any, Callable, Union

import pytest
import torch
import torch.nn as nn

from colossalai.fx._compatibility import is_compatible_with_meta

if is_compatible_with_meta():
    from colossalai.fx.profiler import MetaTensor

aten = torch.ops.aten

registered_meta = {
    ('aten.convolution.default', True): ...

from functools import partial
import colossalai
import pytest
import torch.multiprocessing as mp
from colossalai.amp import AMP_TYPE
from colossalai.core import global_context as gpc
from colossalai.utils import free_port
from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.tes...

import os
from functools import partial
from pathlib import Path
import colossalai
from colossalai.testing.utils import rerun_if_address_is_in_use
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.core import global_context as gpc
from colossalai.logging import get_dis...

import torch
from timm.models.beit import Beit

from colossalai.utils.cuda import get_current_device

from .registry import non_distributed_component_funcs
from .utils.dummy_data_generator import DummyDataGenerator


class DummyDataLoader(DummyDataGenerator):
    img_size = 64
    num_channel = 3
    num_class = 10
    ...

#!/usr/bin/env python
import torch
import torch.nn as nn

from colossalai.nn import CheckpointModule

from .registry import non_distributed_component_funcs
from .utils.dummy_data_generator import DummyDataGenerator


class NetWithRepeatedlyComputedLayers(CheckpointModule):
    """
    This model is to test with layers...

import torch
import torch.nn as nn
from transformers import GPT2Config, GPT2LMHeadModel

from colossalai.utils.cuda import get_current_device

from .registry import non_distributed_component_funcs
from .utils.dummy_data_generator import DummyDataGenerator


class DummyDataLoader(DummyDataGenerator):
    vocab_size = 12...

#!/usr/bin/env python


class Registry:

    def __init__(self):
        self._registry = dict()

    def register(self, name):
        assert name not in self._registry

        def _register(callable_):
            self._registry[name] = callable_

        return _register

    def get_callable(self, name: str):
        ...

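# (sketch) assumed completion of the truncated lookup, shown with a usage
# example; the subclass name and the assertion message are illustrative
class RegistrySketch(Registry):

    def get_callable(self, name: str):
        assert name in self._registry, f'{name} is not registered'
        return self._registry[name]


non_distributed_component_funcs = RegistrySketch()


@non_distributed_component_funcs.register(name='demo')
def get_demo_components():
    return 'model, data, criterion, ...'


# the decorator stores the factory and returns it unchanged
assert non_distributed_component_funcs.get_callable('demo') is get_demo_components
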
import torch
import torch.nn as nn

from colossalai.nn import CheckpointModule
from colossalai.utils.cuda import get_current_device

from .registry import non_distributed_component_funcs
from .utils.dummy_data_generator import DummyDataGenerator


class SimpleNet(CheckpointModule):
    """
    In this no-leaf module, i...

import torch
import torch.nn as nn
import torch.nn.functional as F

from colossalai.nn import CheckpointModule

from .registry import non_distributed_component_funcs
from .utils import DummyDataGenerator


class SubNet(nn.Module):

    def __init__(self, out_features) -> None:
        super().__init__()
        self.bi...

import torch
import transformers
from packaging import version
from transformers import AlbertConfig, AlbertForSequenceClassification

from .bert import get_bert_data_loader
from .registry import non_distributed_component_funcs


@non_distributed_component_funcs.register(name='albert')
def get_training_components():
    ...

from . import (
    beit,
    bert,
    gpt2,
    hanging_param_model,
    inline_op_model,
    nested_model,
    repeated_computed_layers,
    resnet,
    simple_net,
)
from .utils import run_fwd_bwd
from . import albert    # isort:skip

__all__ = [
    'bert', 'gpt2', 'hanging_param_model', 'inline_op_model', 'neste...

from torchvision.models import resnet18
from .registry import non_distributed_component_funcs
from pathlib import Path
import os
import torch
from torchvision.transforms import transforms
from torchvision.datasets import CIFAR10
from colossalai.utils import get_dataloader


def get_cifar10_dataloader(train):
    # buil...

import torch
import torch.nn as nn
import torch.nn.functional as F

from colossalai.nn import CheckpointModule

from .registry import non_distributed_component_funcs
from .utils.dummy_data_generator import DummyDataGenerator


class InlineOpModule(CheckpointModule):
    """
    a module with inline Ops
    """

    def...

import torch
import torch.nn as nn
import torch.nn.functional as F

from colossalai.nn import CheckpointModule

from .registry import non_distributed_component_funcs
from .utils.dummy_data_generator import DummyDataGenerator


class HangingParamModule(CheckpointModule):
    """
    Hanging parameter: a parameter does n...

import torch
import transformers
from packaging import version
from torch.utils.data import SequentialSampler
from transformers import BertConfig, BertForSequenceClassification

from .registry import non_distributed_component_funcs


def get_bert_data_loader(
    n_class,
    batch_size,
    total_samples,
    ...

from abc import ABC, abstractmethod


class DummyDataGenerator(ABC):

    def __init__(self, length=10):
        self.length = length

    @abstractmethod
    def generate(self):
        pass

    def __iter__(self):
        self.step = 0
        return self

    def __next__(self):
        if self.step < self.length:
            ...

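            # (sketch) the iterator contract makes the truncated remainder
            # clear, assuming generate() produces one batch per step
            self.step += 1
            return self.generate()
        raise StopIteration

    def __len__(self):
        # lets DataLoader-style consumers size the epoch (assumed)
        return self.length
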
from .dummy_data_generator import DummyDataGenerator
from .executor import run_fwd_bwd
import torch


def run_fwd_bwd(model, data, label, criterion, optimizer=None) -> torch.Tensor:
    """run_fwd_bwd
    run fwd and bwd for the model

    Args:
        model (torch.nn.Module): a PyTorch model
        data (torch.Tensor): input data
        label (torch.Tensor): label
        criterion (Optional[Callable...

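    # (sketch) assumed body implied by the signature: forward, loss via
    # criterion (or the model itself), then backward through the optimizer
    # if it owns the backward pass, as colossalai's wrapped optimizers do
    if criterion:
        y = model(data)
        loss = criterion(y.float(), label)
    else:
        # some test models compute their own loss from (data, label)
        loss = model(data, label)
    loss = loss.float()
    if optimizer:
        optimizer.backward(loss)
    else:
        loss.backward()
    return loss
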
import math

import torch
import torch.nn as nn

from colossalai.testing import parameterize
from colossalai.utils import multi_tensor_applier


def torch_adam_update(
    step,
    lr,
    beta1,
    beta2,
    eps,
    weight_decay,
    param,
    grad,
    exp_avg,
    exp_avg_sq,
    use_ada...

import pytest
import torch

from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.nn.optimizer import CPUAdam, HybridAdam


def move_some_params_to_cuda(model, torch_model):
    model.embed.weight.data = model.embed.weight.cuda()
    torch_model.embed.weight.data = model.embed.wei...

import torch
import torch.nn as nn
from torch.optim.adam import Adam
from torch.optim import AdamW

from colossalai.nn.optimizer.hybrid_adam import HybridAdam
from colossalai.testing import parameterize

RE = 1024


@parameterize('adamw', [False, True])
@parameterize('device', ['cpu', 'cuda:0'])
@parameterize('p_dtype'...

import math

import torch

from colossalai.testing import parameterize


def torch_adam_update(
    step,
    lr,
    beta1,
    beta2,
    eps,
    weight_decay,
    param,
    grad,
    exp_avg,
    exp_avg_sq,
    use_adamw,
):
    bias_correction1 = 1 - beta1**step
    bias_correction2 = 1 - beta2**step
    if wei...

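    # (sketch) the remainder is the standard Adam/AdamW reference update:
    # decoupled decay for AdamW, L2-style decay folded into the gradient
    # otherwise, then bias-corrected first/second moment updates
    if weight_decay != 0:
        if use_adamw:
            param.mul_(1 - lr * weight_decay)
        else:
            grad = grad.add(param, alpha=weight_decay)

    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
    param.addcdiv_(exp_avg, denom, value=-lr / bias_correction1)
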
import torch
import torch.nn as nn
from torch.optim.adam import Adam
from torch.optim import AdamW

from colossalai.nn.optimizer.fused_adam import FusedAdam
from colossalai.testing import parameterize


class FC(nn.Module):

    def __init__(self) -> None:
        super().__init__()
        self.fc = nn.Sequential(nn.L...

from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.tensor import ColoTensorSpec, ProcessGroup, ColoTensor
from test...

import torch
import pytest
import colossalai
import torch.nn.functional as F
import torch.multiprocessing as mp
from functools import partial
from colossalai.tensor import ColoTensor, ProcessGroup, ColoTensorSpec
from colossalai.utils import get_current_device
from colossalai.testing import rerun_if_address_is_in_use
f...

from torch.nn import functional as F
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.tensor import ColoParameter, ColoTensorSpec, ProcessGroup
f...

import colossalai
import torch
import pytest
import torch.nn as nn
import torch.multiprocessing as mp
from colossalai.tensor import ColoTensor, ProcessGroup
from colossalai.tensor import ColoTensorSpec
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from functools import...

from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port, get_current_device
from colossalai.tensor import ColoTensorSpec, ProcessGroup,...

import torch
import pytest
import colossalai
import torch.nn.functional as F
import torch.multiprocessing as mp
from functools import partial
from colossalai.tensor import ColoTensor, ProcessGroup, ColoTensorSpec, ShardSpec
from colossalai.utils import get_current_device
from torch.nn import Parameter
from colossalai.t...

from torch.nn import functional as F
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.tensor import ColoTensorSpec, ProcessGroup, ColoTensor
from...

from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.amp.amp_type import AMP_TYPE
from colossalai.logging import get_dist_logger
from colossalai.trainer import Trainer
from colossalai.utils import MultiTimer, free_port
from tests.components_to_te...

import os
from functools import partial
from pathlib import Path
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.engine.schedule import Pipel...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.communication import (recv_backward, recv_forward, recv_obj_meta, send_backward,
                                       send_backw...

# referenced from Megatron and used to verify communication
import os
import os.path as osp
from functools import partial
from pathlib import Path
import colossalai
import pytest
import torch
import torch.nn as nn
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.co...

from functools import partial
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.communication import all_gather, all_reduce, reduce_scatter
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initiali...

from functools import partial
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.communication.p2p import send_forward, recv_forward, send_backward, recv_backward, send_forward_recv_backward, send_backward_recv_forward
from colossalai.context import ParallelM...

from functools import partial
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.communication.p2p_v2 import send_forward, recv_forward, send_backward, recv_backward, init_process_group
from colossalai.context import ParallelMode, Initializer_Pipeline
from co...

from functools import partial
from typing import List
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.communication.p2p_v2 import _send_object, _recv_object, init_process_group
from colossalai.context import ParallelMode
from colossalai.core import global_...

from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import colossalai
from colossalai.amp import convert_to_apex_amp
from colossalai.context import MOE_CONTEXT
from colossalai.engine.gradient_handler import MoeGradientHandler
from colossalai.nn import MoeLoss
from colossalai.nn...

from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import colossalai
from colossalai.context import MOE_CONTEXT
from colossalai.engine.gradient_handler import MoeGradientHandler
from colossalai.nn import MoeLoss
from colossalai.testing import assert_equal_in_group, parameteriz...

from functools import partial
import pytest
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import colossalai
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import...

from functools import partial
import pytest
import torch.nn as nn
import torch.multiprocessing as mp
import torch.distributed as dist
import colossalai
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import Experts
from colossalai.context.moe_context import MOE_CONTEXT
from colos...

from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
from colossalai.testing import parameterize
from colossalai.utils import free_port
from colossalai.context import MOE_CONTEXT
from colossalai.tensor import ColoParameter
from c...

from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.nn import CheckpointModule
from colossalai.logging import get_dist_logger
from colossalai.testing import parameterize
from colossalai.utils import free_port
from colossala...

from functools import partial
import pytest
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import torch.distributed as dist
import colossalai
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import Top1Router, UniformNoiseGenerator, MoeLayer, Experts
from co...

from typing import Any, Dict, List
import torch
import torch.fx
import colossalai
from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE
from colossalai.core import global_context as gpc
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.passes.meta_info_prop import MetaInfoProp...

from functools import partial
from typing import List, Tuple

import pytest
import torch
import torch.multiprocessing as mp

try:
    from diffusers import UNet2DModel
    MODELS = [UNet2DModel]
    HAS_REPO = True
except ImportError:
    MODELS = []
    HAS_REPO = False

from test_autochunk_diffuser_utils import run_test

from co...

from functools import partial
from typing import Dict, List, Tuple

import pytest
import torch
import torch.fx
import torch.multiprocessing as mp

try:
    from fastfold.model.nn.evoformer import EvoformerBlock
    HAS_REPO = True
except ImportError:
    HAS_REPO = False

from test_autochunk_alphafold_utils import run_test

from c...

from functools import partial
from typing import List, Tuple

import pytest
import torch
import torch.fx
import torch.multiprocessing as mp

try:
    from fastfold.model.nn.evoformer import EvoformerStack
    HAS_REPO = True
except ImportError:
    HAS_REPO = False

from test_autochunk_alphafold_utils import run_test

from colossa...

import time
from typing import Any, Dict, List
import torch
import torch.fx
import colossalai
from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.utils import free_por...

from functools import partial
from typing import Dict, List, Tuple

import pytest
import torch
import torch.fx
import torch.multiprocessing as mp

try:
    from fastfold.model.nn.evoformer import ExtraMSABlock
    HAS_REPO = True
except ImportError:
    HAS_REPO = False

from test_autochunk_alphafold_utils import run_test

from col...

from typing import Any, Dict, List
import torch
import torch.fx
import colossalai
from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE
from colossalai.autochunk.utils import flat_list
from colossalai.core import global_context as gpc
from colossalai.fx.graph_module import ColoGraphModule
from coloss...

from typing import Any, Dict, List
import torch
import torch.fx
import colossalai
from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE
from colossalai.core import global_context as gpc
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.passes.meta_info_prop import MetaInfoProp...

import time
from typing import Any, Dict, List
import torch
import torch.fx
import colossalai
from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.fx.profiler import pa...

from functools import partial
from typing import List, Tuple

import pytest
import torch
import torch.multiprocessing as mp

try:
    from transformers import GPT2Config, GPT2Model
    MODELS = [GPT2Model]
    HAS_REPO = True
except ImportError:
    MODELS = []
    HAS_REPO = False

from test_autochunk_transformer_utils import run...

# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import ...

from typing import Optional


class TensorParallelEnv(object):
    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = object.__new__(cls)
        return cls._instance

    def __init__(self, *args, **kwargs):
        self.load(*args, *...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import argparse
import os
import pprint
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch.nn.modules.loss import _Loss
from torch.nn.parallel import DistributedDataParallel as...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

ALLOWED_MODES = [None, '1d', '2d', '2.5d', '3d', 'sequence']
TENSOR_PARALLEL_MODE = 'tensor_parallel_mode'

# initializer
INITIALIZER_MAPPING = {
    'data': 'Initializer_Data',
    'tensor': 'Initializer_Tensor',
    'pipeline': 'Initializer_Pipeline',
    'embedding': ...

from .initialize import (
    get_default_parser,
    initialize,
    launch,
    launch_from_openmpi,
    launch_from_slurm,
    launch_from_torch,
)

try:
    # .version will be created by setup.py
    from .version import __version__
except ModuleNotFoundError:
    # this will only happen if the user did not run `pi...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from colossalai.context.parallel_context import global_context
__all__ = ['global_context']

from typing import List, Dict, Tuple
import os
import threading

from torch.distributed import rpc
import torch.distributed as dist

from colossalai.tensor import ProcessGroup


class PipelineProcessGroup:
    # TODO: flexible API for DP size and TP size
    # In the future design mode, dp_degree and tp_degree should ...

import torch
import inspect

from colossalai.utils.model.utils import InsertPostInitMethodToModuleSubClasses

from .utils import partition_uniform, partition_balanced, build_kwargs_for_function, \
    build_kwargs_for_module, exec_func_with_kwargs, exec_funcs_with_kwargs, \
    call_module, custo...
