Dataset column: python_code (string, lengths 0 to 456k)
import copy import operator from functools import reduce from typing import List from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem from colossalai.auto_parallel.tensor_shard.utils import ( enumerate_all_possible_1d_sharding, enumerate_all_possible_...
from typing import Dict, List from torch.fx import Node from colossalai.auto_parallel.tensor_shard.sharding_strategy import ( MemoryCost, OperationData, ShardingStrategy, TrainCycleItem, ) from colossalai.device.device_mesh import DeviceMesh from .strategy_generator import OutputStrategyGenerator __...
import operator from functools import reduce from typing import List import torch from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem from colossalai.auto_parallel.tensor_shard.utils import ( enumerate_all_possible_1d_sharding, enumerate_all_possibl...
import copy from typing import List from colossalai.auto_parallel.tensor_shard.sharding_strategy import ( CommAction, CommType, MemoryCost, ShardingStrategy, TrainCycleItem, ) from colossalai.tensor.shape_consistency import CollectiveCommPattern from colossalai.tensor.sharding_spec import ShardingS...
""" This file will not be automatically imported by `colossalai.testing` as this file has a dependency on `pytest`. Therefore, you need to explicitly import this file `from colossalai.testing.pytest_wrapper import <func>`.from """ import pytest import os def run_on_environment_flag(name: str): """ Condition...
from .comparison import assert_equal, assert_not_equal, assert_close, assert_close_loose, assert_equal_in_group from .utils import parameterize, rerun_on_exception, rerun_if_address_is_in_use, skip_if_not_enough_gpus __all__ = [ 'assert_equal', 'assert_not_equal', 'assert_close', 'assert_close_loose', 'assert_equa...
import random import numpy as np import torch def seed_all(seed, cuda_deterministic=False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) if cuda_deterministic: # slower...
import torch import torch.distributed as dist from torch import Tensor from torch.distributed import ProcessGroup from torch.testing import assert_close def assert_equal(a: Tensor, b: Tensor): assert torch.all(a == b), f'expected a and b to be equal but they are not, {a} vs {b}' def assert_not_equal(a: Tensor, ...
import re import torch from typing import Callable, List, Any from functools import partial from inspect import signature from packaging import version def parameterize(argument: str, values: List[Any]) -> Callable: """ This function is to simulate the same behavior as pytest.mark.parameterize. As we want...
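The snippet above describes `parameterize` as a pytest-style parametrization helper. Below is a minimal usage sketch, assuming the decorator calls the wrapped function once per listed value and passes it by keyword, as the docstring's comparison to pytest suggests; the test body and values are hypothetical.

from colossalai.testing import parameterize


# Hypothetical test: the decorator is assumed to invoke the body once per value,
# passing it as the keyword argument named by the first decorator argument.
@parameterize('world_size', [1, 2, 4])
def check_world_size(world_size):
    assert world_size in (1, 2, 4)


check_world_size()    # expected to run the body three times, once per value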
from typing import Tuple import torch import torch.nn as nn from colossalai.logging import get_dist_logger from colossalai.zero.sharded_model.sharded_model_v2 import ShardedModelV2 from colossalai.zero.sharded_optim import LowLevelZeroOptimizer, ShardedOptimizerV2 from ..nn.optimizer.zero_optimizer import ZeroOptimi...
from .init_context import ZeroInitContext, no_shard_zero_context, no_shard_zero_decrator

__all__ = ['ZeroInitContext', 'no_shard_zero_context', 'no_shard_zero_decrator']
import contextlib import functools from typing import Optional from contextlib import AbstractContextManager import torch import torch.nn as nn import torch.distributed as dist from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.context.singleton...
from functools import partial from typing import Optional import torch import torch.distributed as dist from torch.optim import Optimizer from colossalai.amp.naive_amp.grad_scaler import DynamicGradScaler from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.log...
from .low_level_optim import LowLevelZeroOptimizer
from .sharded_optim_v2 import ShardedOptimizerV2

__all__ = ['ShardedOptimizerV2', 'LowLevelZeroOptimizer']
from enum import Enum from os import stat from typing import Dict, Optional, Tuple import torch import torch.distributed as dist import torch.nn as nn from colossalai.amp.naive_amp.grad_scaler import DynamicGradScaler from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context ...
import math from typing import Optional import torch import torch.distributed as dist from torch._six import inf from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from colossalai.tensor import ColoParameter from colossalai.utils import is_model_parallel_parameter def flatten(input_): ret...
from typing import List from torch import Tensor from torch.distributed import ProcessGroup from .base_store import BaseStore class ParameterStore(BaseStore): def __init__(self, torch_pg: ProcessGroup): super().__init__(torch_pg) # param partitioning data structures self._fp16_param_to_...
from typing import List from torch import Tensor from .base_store import BaseStore class GradientStore(BaseStore): def __init__(self, *args): super().__init__(*args) # bookkeeping data structures self._averaged_gradients = dict() # for backward reduction hooks self._grad...
from .bucket_store import BucketStore
from .gradient_store import GradientStore
from .parameter_store import ParameterStore
from .tensor_bucket import TensorBucket

__all__ = ['GradientStore', 'ParameterStore', 'BucketStore', 'TensorBucket']
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors class TensorBucket: def __init__(self, size): self._max_size = size self._current_size = 0 self._bucket = [] @property def max_size(self): return self._max_size @property def current_size(s...
import torch.distributed as dist from torch.distributed import ProcessGroup class BaseStore: def __init__(self, torch_pg: ProcessGroup): self._world_size = dist.get_world_size(group=torch_pg) self._local_rank = dist.get_rank(group=torch_pg) @property def world_size(self): return ...
from torch.distributed import ProcessGroup from .base_store import BaseStore class BucketStore(BaseStore): def __init__(self, torch_pg: ProcessGroup): super().__init__(torch_pg) self._params = dict() self._num_elements_in_bucket = dict() self.reset() def num_elements_in_buc...
from typing import Optional import torch import torch.distributed as dist from colossalai.gemini.memory_tracer import MemStatsCollector from colossalai.gemini.ophooks import BaseOpHook from colossalai.gemini.stateful_tensor import TensorState from colossalai.gemini.stateful_tensor_mgr import StatefulTensorMgr from co...
from .zero_hook import ZeroHook

__all__ = ['ZeroHook']
from contextlib import contextmanager from enum import Enum from functools import partial from typing import List import torch from colossalai.gemini import TensorState from colossalai.gemini.gemini_mgr import GeminiManager from colossalai.tensor.param_op_hook import ColoParamOpHook from colossalai.utils import is_dd...
import torch from colossalai.gemini.stateful_tensor import StatefulTensor, TensorState class ShardedTensor(StatefulTensor): def __init__(self, tensor: torch.Tensor, state: TensorState = TensorState.HOLD) -> None: r""" A tensor sharded in multiple processes. Constructed from an existing torch.Tens...
import torch from typing import Optional, Tuple from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor from colossalai.gemini.tensor_utils import colo_tensor_mem_usage from colossalai.gemini.stateful_tensor import StatefulTensor, TensorState from typing import List EMPTY_TENSOR_DICT = {} def get_empt...
from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2

__all__ = ['ShardedTensor', 'ShardedParamV2']
from typing import List, Optional import torch import torch.distributed as dist from colossalai.utils import get_current_device from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor from torch._utils import _flatten_dense_tensors as flatten from .tensor_shard_strategy import TensorShardStrategy cla...
from .base_shard_strategy import BaseShardStrategy
from .bucket_tensor_shard_strategy import BucketTensorShardStrategy
from .tensor_shard_strategy import TensorShardStrategy

__all__ = ['BaseShardStrategy', 'TensorShardStrategy', 'BucketTensorShardStrategy']
from abc import ABC, abstractmethod from typing import List, Optional import torch.distributed as dist from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor class BaseShardStrategy(ABC): def __init__(self) -> None: """Abstract shard strategy. Used to shard a tensor across multiple GPUs. ...
import torch import torch.nn.functional as F from typing import Tuple def get_shard(tensor: torch.Tensor, rank: int, world_size: int) -> Tuple[torch.Tensor, int]: """Return the local shard of a full tensor.""" # Shard using torch.chunk to match all-gather/reduce-scatter. chunks = list(torch.flatten(tensor...
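The `get_shard` helper above flattens a tensor and chunks it so every rank can later take part in an even all-gather or reduce-scatter. The sketch below illustrates that chunk-and-pad idea under the same assumption; `shard_with_pad` is a simplified stand-in written for illustration, not the original implementation.

import torch
import torch.nn.functional as F


def shard_with_pad(tensor: torch.Tensor, rank: int, world_size: int):
    # flatten and split into (at most) world_size chunks
    chunks = list(torch.flatten(tensor).chunk(world_size))
    # make sure every rank gets a chunk, even if the tensor is tiny
    while len(chunks) < world_size:
        chunks.append(chunks[0].new_empty(0))
    # pad the local chunk to the size of the largest chunk so all shards line up
    num_to_pad = chunks[0].numel() - chunks[rank].numel()
    shard = F.pad(chunks[rank], [0, num_to_pad]) if num_to_pad > 0 else chunks[rank].clone()
    return shard, num_to_pad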
from typing import List, Optional import torch import torch.distributed as dist from colossalai.utils import get_current_device from colossalai.zero.shard_utils import BaseShardStrategy from colossalai.zero.shard_utils.commons import get_shard from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor from...
from .sharded_model_v2 import ShardedModelV2

__all__ = ['ShardedModelV2']
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. import functools import os from typing import Callable, Dict, List, Optional, Tuple import torch import torch.distributed as dist from torch ...
import torch from colossalai.zero.sharded_model import ShardedModelV2 import copy def col_model_deepcopy(sharded_model: ShardedModelV2, other_model: torch.nn.Module): """ Copy the parameters of the ShardedModelV2 to other_model. Note that other_model has to have the same structure as sharded_model. """ for zero_param, param in...
import functools import itertools from collections import OrderedDict from copy import deepcopy from typing import Any, Iterator, Optional, Tuple import torch import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from torch.nn.parameter import Parameter from colossalai.cont...
from typing import Any, Callable, List, Tuple import torch import torch.nn.functional as F from typing import Union from colossalai.gemini.stateful_tensor import StatefulTensor def get_gradient_predivide_factor(world_size: int) -> float: factor: int = 1 while world_size % factor == 0 and world_size / factor ...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from .amp_type import AMP_TYPE from colossalai.context import Config import torch.nn as nn from torch.optim import Optimizer from torch.nn.modules.loss import _Loss from .torch_amp import convert_to_torch_amp from .apex_amp import convert_to_apex_amp from .naive_amp impo...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from enum import Enum


class AMP_TYPE(Enum):
    APEX = 'apex'
    TORCH = 'torch'
    NAIVE = 'naive'
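A hedged sketch of how `AMP_TYPE` might drive mode selection. The three converter functions come from the `colossalai.amp` snippets in this listing; the dispatcher itself (`select_amp_converter`) is a hypothetical helper for illustration.

from colossalai.amp import AMP_TYPE


def select_amp_converter(mode: AMP_TYPE):
    # imports are deferred so that e.g. apex is only required when actually used
    if mode == AMP_TYPE.TORCH:
        from colossalai.amp.torch_amp import convert_to_torch_amp
        return convert_to_torch_amp
    if mode == AMP_TYPE.APEX:
        from colossalai.amp.apex_amp import convert_to_apex_amp
        return convert_to_apex_amp
    if mode == AMP_TYPE.NAIVE:
        from colossalai.amp.naive_amp import convert_to_naive_amp
        return convert_to_naive_amp
    raise ValueError(f'unsupported AMP mode: {mode}')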
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch import torch.distributed as dist from torch.distributed import ProcessGroup from torch.optim import Optimizer from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.kernel.op_builder import FusedOptimBu...
import inspect import torch.nn as nn from torch.optim import Optimizer from colossalai.utils import is_no_pp_or_last_stage from ._fp16_optimizer import FP16Optimizer from .grad_scaler import ConstantGradScaler, DynamicGradScaler from .naive_amp import NaiveAMPModel, NaiveAMPOptimizer def convert_to_naive_amp(model...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Any import torch import torch.distributed as dist import torch.nn as nn from torch import Tensor from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from torch.distributed import ReduceOp from torch.optim import Optimizer from c...
from typing import List from torch import Tensor def has_inf_or_nan(tensor): """Check if tensor has inf or nan values. Args: tensor (:class:`torch.Tensor`): a torch tensor object Returns: bool: Whether the tensor has inf or nan. True for yes and False for no. """ try: # ...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from .base_grad_scaler import BaseGradScaler __all__ = ['ConstantGradScaler'] class ConstantGradScaler(BaseGradScaler): """A gradient scaler which uses constant loss scale Args: initial_scale (float): the initial loss scale verbose (bool): whet...
from .base_grad_scaler import BaseGradScaler
from .constant_grad_scaler import ConstantGradScaler
from .dynamic_grad_scaler import DynamicGradScaler

__all__ = ['BaseGradScaler', 'ConstantGradScaler', 'DynamicGradScaler']
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC, abstractmethod from typing import Dict import torch from torch import Tensor from colossalai.logging import get_dist_logger __all__ = ['BaseGradScaler'] class BaseGradScaler(ABC): """A base class for the gradient scaler. Args: i...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Optional import torch from .base_grad_scaler import BaseGradScaler __all__ = ['DynamicGradScaler'] class DynamicGradScaler(BaseGradScaler): """A gradient scaler which uses dynamic loss scale Args: initial_scale (float): the initia...
#!/usr/bin/env python # -*- encoding: utf-8 -*- # modified from https://github.com/pytorch/pytorch/blob/master/torch/cuda/amp/grad_scaler.py # to support tensor parallel import warnings from collections import abc, defaultdict from enum import Enum from typing import Any, Dict, List, Optional, Tuple import torch impo...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.cuda.amp as torch_amp import torch.nn as nn from torch import Tensor from torch.nn.modules.loss import _Loss from torch.optim import Optimizer from colossalai.nn.optimizer import ColossalaiOptimizer from colossalai.utils import clip_grad_norm_fp32 from ._g...
from typing import Optional import torch.nn as nn from torch.nn.modules.loss import _Loss from torch.optim import Optimizer from colossalai.context import Config from .torch_amp import TorchAMPLoss, TorchAMPModel, TorchAMPOptimizer def convert_to_torch_amp(model: nn.Module, optimizer: Opti...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.nn as nn try: import apex.amp as apex_amp except ImportError: pass from torch import Tensor from colossalai.nn.optimizer import ColossalaiOptimizer from colossalai.utils import clip_grad_norm_fp32 class ApexAMPOptimizer(ColossalaiOptimizer): ...
import torch.nn as nn from torch.optim import Optimizer from .apex_amp import ApexAMPOptimizer def convert_to_apex_amp(model: nn.Module, optimizer: Optimizer, amp_config): r"""A helper function to wrap training components with Apex AMP modules Args: model (:class:`torch.nn.Module`): your model objec...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from types import ModuleType from typing import List class Registry: """This is a registry class used to register classes and modules so that a universal object builder can be enabled. Args: name (str): The name of the registry. third_par...
import torch.distributed.optim as dist_optim import torch.nn as nn import torch.optim as optim from .registry import Registry LAYERS = Registry("layers", third_party_library=[nn]) MODELS = Registry("models") OPTIMIZERS = Registry("optimizers", third_party_library=[optim, dist_optim]) DATASETS = Registry("datasets") D...
from .alpha_beta_profiler import AlphaBetaProfiler
from .calc_pipeline_strategy import alpa_dp

__all__ = ['AlphaBetaProfiler', 'alpa_dp']
import math import time from typing import Dict, List, Tuple import torch import torch.distributed as dist from colossalai.logging import get_dist_logger GB = int((1 << 30)) BYTE = 4 FRAMEWORK_LATENCY = 0 class AlphaBetaProfiler: ''' Profile alpha and beta value for a given device list. Usage: ...
from math import pow import numpy as np def get_submesh_choices(num_hosts, num_devices_per_host, mode="new"): submesh_choices = [] i = 1 p = -1 while i <= num_devices_per_host: i *= 2 p += 1 assert pow(2, p) == num_devices_per_host, ("Only supports the cases where num_devices_per_...
import operator from functools import reduce from typing import List, Tuple import torch import torch.distributed as dist class DeviceMesh: """A logical view of a physical mesh. The logical view is used in the search process. A physical mesh can have multiple logical views. (e.g., a 2x8 physical mesh ...
from enum import Enum class ComputePattern(Enum): TP1D = 0 TP2D = 1 TP2P5D = 2 TP3D = 3 class ComputeSpec(object): """ComputeSpec The specification for the computation pattern Args: compute_pattern (ComputePattern): an Enum instance for compute pattern. """ def __init__(sel...
from abc import ABC, abstractmethod from contextlib import contextmanager from typing import Any, List, Tuple import torch from colossalai.tensor.colo_tensor import ColoTensor from colossalai.tensor.tensor_spec import ColoTensorSpec class ColoParamOpHook(ABC): """ Hook which is triggered by each operation w...
from . import distspec from .colo_parameter import ColoParameter from .colo_tensor import ColoTensor from .comm_spec import CollectiveCommPattern, CommSpec from .compute_spec import ComputePattern, ComputeSpec from .dist_spec_mgr import DistSpecManager from .distspec import ReplicaSpec, ShardSpec from .param_op_hook im...
from typing import Optional import torch from colossalai.tensor.colo_tensor import ColoTensor from colossalai.tensor.const import TensorType from colossalai.tensor.param_op_hook import ColoParamOpHookManager from colossalai.tensor.tensor_spec import ColoTensorSpec def filter_colo_parameters(*args, **kwargs): pa...
from dataclasses import dataclass from typing import Optional from colossalai.tensor.distspec import DistPlacementPattern, _DistSpec from colossalai.tensor.process_group import ProcessGroup from .compute_spec import ComputeSpec @dataclass class ColoTensorSpec: """ ColoTensorSpec A data class for specificat...
import operator from enum import Enum from functools import reduce import torch import torch.distributed as dist from torch.distributed import ReduceOp __all__ = [ 'CollectiveCommPattern', 'CommSpec', ] def _all_gather(tensor, comm_spec): ''' Implement all gather operation on device mesh based on in...
import math from copy import deepcopy from dataclasses import dataclass from typing import Dict, List, Tuple import numpy as np import torch from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, TrainCycleItem from colossalai.context.singleton_meta import SingletonMeta from colossalai.tensor...
from enum import Enum from typing import List __all__ = ['ReplicaSpec', 'ShardSpec'] class DistPlacementPattern(Enum): REPLICATE = 'r' SHARD = 's' class _DistSpec: """_DistSpec A class that indicates a distributed specification. The DistSpec only works for the tensor parallel process groups. B...
from typing import ( Callable, Dict, ) import functools # Custom sharded ops _COLOSSAL_OPS: Dict[str, Callable] = {} def _register_colo_op(op, func): global _COLOSSAL_OPS _COLOSSAL_OPS[op] = func def colo_op_impl(func): """ Provides a way for users to write their own custom operator. This ...
from typing import Dict, Iterator, List, Tuple, Union import torch import torch.nn as nn from colossalai.tensor.colo_tensor import ColoTensor def all_gather_simulator(target_pair): ''' Simulating all-gather operation, analyze the communication cost and simulate the influence of the DimSpec. We don'...
import math from copy import copy from functools import lru_cache from typing import Callable, Optional, Set import torch from colossalai.tensor.dist_spec_mgr import DistSpecManager from colossalai.tensor.distspec import DistPlacementPattern, ReplicaSpec, _DistSpec from colossalai.tensor.process_group import ProcessG...
from contextlib import contextmanager import torch import torch.distributed as dist # from colossalai.nn.layer.utils import divide from numpy import prod from packaging import version from colossalai.logging import get_dist_logger from colossalai.tensor.distspec import _DistSpec from colossalai.tensor.process_group i...
import operator from copy import deepcopy from functools import reduce import torch from colossalai.device.device_mesh import DeviceMesh from .utils import merge_same_dim_mesh_list __all__ = ['_DimSpec', 'ShardingException', 'ShardingSpec'] ALLGATHER_COST = 20 SHARD_COST = 5 STEP_PENALTY = 6 NAN = 'nan' class _D...
from typing import List, Optional import torch from colossalai.context.singleton_meta import SingletonMeta from colossalai.logging import get_dist_logger class PyTorchProcessGroupDict(metaclass=SingletonMeta): def __init__(self): # distributed settings # use this dict to record all Pytorch Proc...
from enum import Enum


class TensorType(Enum):
    MODEL = 0
    NONMODEL = 1    # mainly activations
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch import torch.distributed as dist from torch import Tensor from torch.distributed import ReduceOp from colossalai.context import ParallelMode from colossalai.core import global_context as gpc _all_gather_func = dist._all_gather_base \ if "all_gather_int...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import List, Tuple, Union import torch import torch.distributed as dist from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.utils import get_current_device from functools import reduce i...
from .collective import all_gather, reduce_scatter, all_reduce, broadcast, reduce from .p2p import (send_forward, send_forward_recv_forward, send_backward_recv_forward, send_backward, send_backward_recv_backward, send_forward_recv_backward, send_forward_backward_recv_forward_backward, ...
import torch import torch.distributed as dist from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.utils import get_current_device from typing import Union, List, Tuple TensorShape = Union[torch.Size, List[int], Tuple[int]] def send_meta_helper(...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import io import pickle from typing import Any, List, Tuple, Union import torch import torch.distributed as dist from torch.distributed import ProcessGroupNCCL from torch.distributed import distributed_c10d as c10d from colossalai.context.parallel_mode import ParallelM...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.utils import get_current_device, synchronize def ring_forward(tensor_send_next: torch.Tensor, parallel_mode: ParallelMode) -> torch...
from .builder import build_from_config, build_from_registry, build_gradient_handler

__all__ = ['build_gradient_handler', 'build_from_config', 'build_from_registry']
#!/usr/bin/env python # -*- encoding: utf-8 -*- import inspect from colossalai.registry import * def build_from_config(module, config: dict): """Returns an object of :class:`module` constructed from `config`. Args: module: A python or user-defined class config: A python dict containing info...
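Putting the `Registry` and `build_from_config` snippets together, here is a hedged usage sketch. The decorator-style registration mirrors the `@GRADIENT_HANDLER.register_module` usage shown later in this listing; `ToyNet` and its config are hypothetical, and the construction semantics of `build_from_config` are assumed from its docstring.

import torch.nn as nn

from colossalai.builder import build_from_config
from colossalai.registry import MODELS


@MODELS.register_module
class ToyNet(nn.Module):

    def __init__(self, hidden_dim: int = 16):
        super().__init__()
        self.proj = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x):
        return self.proj(x)


# build an instance of the registered class from a plain config dict
model = build_from_config(ToyNet, dict(hidden_dim=32))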
from ._base_engine import Engine
from .gradient_handler import *

__all__ = ['Engine']
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import List, Iterable from torch.nn import Module from torch.nn.modules.loss import _Loss from colossalai.logging import get_dist_logger from torch import Tensor from colossalai.gemini.ophooks import register_ophooks_recursively, BaseOpHook from colossalai.e...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC, abstractmethod import torch from typing import Iterable, Callable from colossalai.logging import get_dist_logger from colossalai.utils import get_current_device class BaseSchedule(ABC): """A basic helper class to control the process of traini...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Tuple, Iterable from colossalai import engine import colossalai.communication.p2p_v2 as comm import torch.cuda from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.utils.cuda impor...
from ._base_schedule import BaseSchedule
from ._pipeline_schedule import PipelineSchedule, InterleavedPipelineSchedule, get_tensor_shape
from ._non_pipeline_schedule import NonPipelineSchedule

__all__ = ['BaseSchedule', 'NonPipelineSchedule', 'PipelineSchedule', 'InterleavedPipelineSchedule', 'get_tensor_shape']
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Iterable import torch import inspect from ._base_schedule import BaseSchedule from colossalai.utils import conditional_context from typing import Callable class NonPipelineSchedule(BaseSchedule): """A helper schedule class for no pipeline parall...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import inspect from typing import Callable, List, Tuple, Union import colossalai.communication as comm import torch.cuda from colossalai.amp.naive_amp import NaiveAMPModel from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_conte...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC, abstractmethod class BaseGradientHandler(ABC): """A basic helper class to handle all-reduce operations of gradients across different parallel groups before optimization. Args: model (Module): Model where the gradients accumula...
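A hedged sketch of a user-defined handler built on the abstract base above, following the registration pattern of the concrete handlers that come next. It assumes the abstract method is named `handle_gradient` and that the base class stores the model as `self._model`; both are assumptions about the truncated base class.

import torch.distributed as dist

from colossalai.engine import BaseGradientHandler
from colossalai.registry import GRADIENT_HANDLER


@GRADIENT_HANDLER.register_module
class NaiveAllReduceGradientHandler(BaseGradientHandler):
    """All-reduce every gradient tensor individually (no bucketing)."""

    def handle_gradient(self):
        # assumption: self._model is the wrapped model held by the base class
        for param in self._model.parameters():
            if param.grad is not None:
                dist.all_reduce(param.grad.data)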
from ._base_gradient_handler import BaseGradientHandler from ._data_parallel_gradient_handler import DataParallelGradientHandler from ._zero_gradient_handler import ZeROGradientHandler from ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler from ._pipeline_parallel_gradient_handler import Pipel...
from colossalai.core import global_context as gpc from colossalai.registry import GRADIENT_HANDLER from ._base_gradient_handler import BaseGradientHandler from ...context.parallel_mode import ParallelMode from .utils import bucket_allreduce @GRADIENT_HANDLER.register_module class DataParallelGradientHandler(BaseGradi...
#!/usr/bin/env python from collections import defaultdict import torch import torch.distributed as dist from colossalai.core import global_context as gpc from colossalai.registry import GRADIENT_HANDLER from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from ._base_gradient_handler import Base...
from typing import Iterable import torch.distributed as dist import torch.nn as nn from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors def bucket_allreduce(param_list: Iterable[nn.Parameter], group=None): # get communication world size comm_size = dist.get_world_size(group) # bucket...
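The `bucket_allreduce` utility above flattens gradients into contiguous buffers before issuing the collective. Below is a simplified sketch of that flatten / all-reduce / unflatten pattern; it drops the per-dtype bucketing of the original, and `allreduce_grads` is a hypothetical helper.

import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors


def allreduce_grads(params, group=None):
    grads = [p.grad.data for p in params if p.grad is not None]
    if not grads:
        return
    # flatten into one contiguous buffer so a single collective call is issued
    flat = _flatten_dense_tensors(grads)
    dist.all_reduce(flat, group=group)
    flat.div_(dist.get_world_size(group=group))
    # scatter the reduced, averaged values back into the original gradient tensors
    for grad, reduced in zip(grads, _unflatten_dense_tensors(flat, grads)):
        grad.copy_(reduced)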
from colossalai.core import global_context as gpc from colossalai.registry import GRADIENT_HANDLER from colossalai.utils.moe import get_moe_epsize_param_dict from ._base_gradient_handler import BaseGradientHandler from ...context.parallel_mode import ParallelMode from .utils import bucket_allreduce from colossalai.cont...
from colossalai.registry import GRADIENT_HANDLER from ._base_gradient_handler import BaseGradientHandler @GRADIENT_HANDLER.register_module class ZeROGradientHandler(BaseGradientHandler): """A helper class to handle all-reduce operations in a data parallel group. An all-reduce collective communication will be o...
from colossalai.core import global_context as gpc from colossalai.registry import GRADIENT_HANDLER from ._base_gradient_handler import BaseGradientHandler from ...context.parallel_mode import ParallelMode from .utils import bucket_allreduce @GRADIENT_HANDLER.register_module class SequenceParallelGradientHandler(BaseG...
import torch.nn as nn from typing import List from colossalai.engine import BaseGradientHandler from typing import Iterable from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler from ._gradient_accumulation import GradAccumDataloader, GradAccumOptimizer, GradAccumLrSchedulerByStep, GradAcc...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Union import torch.nn as nn from torch import Tensor from typing import Iterable, Any, Tuple from colossalai.nn.optimizer import ColossalaiOptimizer from torch.nn.parallel.distributed import DistributedDataParallel from torch.optim import Optimizer fro...