python_code — string column, lengths 0 to 456k
from .pipelinable import PipelinableContext, PipelinableModel from .layer_spec import LayerSpec __all__ = ['PipelinableModel', 'PipelinableContext', 'LayerSpec']
import heapq import inspect import torch from colossalai.logging import get_dist_logger from colossalai.nn.layer.utils import CheckpointModule from typing import List from collections import OrderedDict def _binary_partition(weights: List, start: int, end: int): """Returns the binary partition position of `weigh...
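The row above defines `_binary_partition`, which picks the split position that best balances the total weight of the two halves. A minimal standalone sketch of that idea (my own illustration of the concept, not the ColossalAI implementation):

```python
from typing import List

def binary_partition_sketch(weights: List[float], start: int, end: int) -> int:
    """Return mid in (start, end) so that sum(weights[start:mid]) and
    sum(weights[mid:end]) are as balanced as possible."""
    total = sum(weights[start:end])
    acc, best_mid, best_cost = 0.0, start + 1, float('inf')
    for i in range(start, end - 1):
        acc += weights[i]
        cost = max(acc, total - acc)      # weight of the heavier half
        if cost < best_cost:
            best_cost, best_mid = cost, i + 1
    return best_mid

# binary_partition_sketch([2, 2, 3, 1], 0, 4) -> 2, i.e. [2, 2] | [3, 1]
```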
import torch from colossalai.utils.model.utils import call_to_str class LayerSpec: """ """ def __init__(self, typename, *module_args, **module_kwargs): self.typename = typename self.module_args = module_args self.module_kwargs = module_kwargs self.children = None ...
from .topo import Topo, Partition, PartitionOutputVal, PartitionInputVal __all__ = ['Topo', 'Partition', 'PartitionOutputVal', 'PartitionInputVal']
from typing import Dict, List from dataclasses import dataclass # This file includes data structure used by Pipeline Middleware. @dataclass class ValPosition: partition_id: int offset: int def __str__(self) -> str: res = f'[partition_id:{self.partition_id},offset:{self.offset}]' retur...
from .fx import get_topology as get_fx_topology __all__ = ['get_fx_topology']
from torch.fx.graph_module import GraphModule from colossalai.pipeline.middleware.topo import Partition, PartitionInputVal, PartitionOutputVal, Topo import torch def partition_name_to_id(partition_name, is_input=False, is_output=False): if is_input: partition_id = 0 elif is_output: partition_id...
from ._pipeline_schedule import FillDrainPipelineEngine, OneFOneBPipelineEngine, ChimeraPipelineEngine from .utils import pytree_map __all__ = ['FillDrainPipelineEngine', 'OneFOneBPipelineEngine', 'ChimeraPipelineEngine', 'pytree_map']
import inspect import math import threading from abc import ABC, abstractmethod from enum import Enum from functools import partial from typing import Any, Callable, Dict, List, Tuple import torch import torch.distributed.rpc as rpc from torch import autograd, nn, optim from torch._C._distributed_rpc import PyRRef fro...
import argparse import os import warnings from typing import Any, Callable, Dict, List, Tuple, Type, Union import torch import torch.distributed.rpc as rpc import torch.multiprocessing as mp from torch._C._distributed_rpc import _is_current_rpc_agent_set from torch.futures import Future from colossalai.initialize imp...
import threading from typing import Callable, Dict, List import torch import torch.distributed as dist from torch._C._distributed_rpc import PyRRef from torch.futures import Future from colossalai.pipeline.pipeline_process_group import ppg from colossalai.pipeline.rpc._pipeline_base import Phase, PipelineEngineBase, ...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import inspect import sys from importlib.machinery import SourceFileLoader from pathlib import Path from colossalai.logging import get_dist_logger class Config(dict): """This is a wrapper class for dict objects so that values of which can be accessed as attribu...
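The Config class above is a dict wrapper whose values can also be read as attributes. A tiny sketch of that access pattern (illustration only, not the real class):

```python
class AttrDict(dict):
    """dict whose items can also be read and written as attributes."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as exc:
            raise AttributeError(name) from exc

    def __setattr__(self, name, value):
        self[name] = value

cfg = AttrDict(parallel={'pipeline': 2})
assert cfg.parallel['pipeline'] == 2   # attribute access
cfg.fp16 = True                        # stored as a normal dict item
assert cfg['fp16'] is True
```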
from .config import Config, ConfigException from .parallel_context import ParallelContext from .parallel_mode import ParallelMode from .moe_context import MOE_CONTEXT from .process_group_initializer import * from .random import *
#!/usr/bin/env python # -*- encoding: utf-8 -*- import random import socket from collections import Counter from threading import local from typing import Union import numpy as np import torch import torch.distributed as dist from colossalai.constants import ALLOWED_MODES, INITIALIZER_MAPPING from colossalai.context....
from typing import Tuple import torch import torch.distributed as dist from colossalai.context.parallel_mode import ParallelMode from colossalai.context.singleton_meta import SingletonMeta from colossalai.tensor import ProcessGroup def _check_sanity(): from colossalai.core import global_context as gpc if gp...
class SingletonMeta(type): """ The Singleton class can be implemented in different ways in Python. Some possible methods include: base class, decorator, metaclass. We will use the metaclass because it is best suited for this purpose. """ _instances = {} def __call__(cls, *args, **kwargs): ...
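As the docstring above says, the singleton is implemented with a metaclass: every instantiation of a class that uses it returns the same cached object. A minimal runnable sketch of that pattern:

```python
class SingletonMetaSketch(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # create the instance only on the first call, then reuse it
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class GlobalState(metaclass=SingletonMetaSketch):
    pass

assert GlobalState() is GlobalState()
```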
#!/usr/bin/env python # -*- encoding: utf-8 -*- from enum import Enum # parallel modes class ParallelMode(Enum): """This is an enumeration class containing all possible parallel modes. """ GLOBAL = 'global' # common parallel DATA = 'data' # model parallel - containing tensor and pipeline p...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from torch import distributed as dist from colossalai.registry import DIST_GROUP_INITIALIZER from ..parallel_mode import ParallelMode from .process_group_initializer import ProcessGroupInitializer @DIST_GROUP_INITIALIZER.register_module class Initializer_Pipeline(Pro...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import math import torch.distributed as dist from colossalai.global_variables import tensor_parallel_env as env from colossalai.registry import DIST_GROUP_INITIALIZER from ..parallel_mode import ParallelMode from .process_group_initializer import ProcessGroupInitialize...
import math import torch.distributed as dist from colossalai.global_variables import tensor_parallel_env as env from colossalai.registry import DIST_GROUP_INITIALIZER from ..parallel_mode import ParallelMode from .process_group_initializer import ProcessGroupInitializer def _check_summa_env_var(summa_dim): # c...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.distributed as dist from colossalai.global_variables import tensor_parallel_env as env from colossalai.registry import DIST_GROUP_INITIALIZER from ..parallel_mode import ParallelMode from .process_group_initializer import ProcessGroupInitializer @DIST_GRO...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.distributed as dist from colossalai.registry import DIST_GROUP_INITIALIZER from .process_group_initializer import ProcessGroupInitializer from ..parallel_mode import ParallelMode @DIST_GROUP_INITIALIZER.register_module class Initializer_Tensor(ProcessGrou...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.distributed as dist from colossalai.registry import DIST_GROUP_INITIALIZER from .process_group_initializer import ProcessGroupInitializer from ..parallel_mode import ParallelMode @DIST_GROUP_INITIALIZER.register_module class Initializer_Model(ProcessGroupI...
from .initializer_1d import Initializer_1D from .initializer_2d import Initializer_2D from .initializer_2p5d import Initializer_2p5D from .initializer_3d import Initializer_3D from .initializer_data import Initializer_Data from .initializer_pipeline import Initializer_Pipeline from .initializer_sequence import Initiali...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC, abstractmethod from colossalai.context import Config class ProcessGroupInitializer(ABC): """An object that knows the parallelism configuration and initializes the parallel groups. Args: rank (int): The rank of the current process. ...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import math import torch.distributed as dist from colossalai.context import Config from colossalai.global_variables import tensor_parallel_env as env from colossalai.registry import DIST_GROUP_INITIALIZER from ..parallel_mode import ParallelMode from .process_group_ini...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from torch import distributed as dist from colossalai.registry import DIST_GROUP_INITIALIZER from .process_group_initializer import ProcessGroupInitializer from ..parallel_mode import ParallelMode @DIST_GROUP_INITIALIZER.register_module class Initializer_Data(ProcessG...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.distributed as dist from colossalai.registry import DIST_GROUP_INITIALIZER from ..parallel_mode import ParallelMode from .initializer_tensor import Initializer_Tensor from .process_group_initializer import ProcessGroupInitializer @DIST_GROUP_INITIALIZER.r...
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch from torch import Tensor from colossalai.context.parallel_mode import ParallelMode class SeedManager: """This class is a manager of all random seeds involved in the system. Note: The parallel_mode should be included in ``ParallelMode``. ...
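SeedManager keeps one RNG state per parallel mode and switches between them. A hedged sketch of that bookkeeping (my own illustration, assuming one CUDA RNG state is saved per named mode; the real class is keyed by ``ParallelMode`` and does more):

```python
import torch

class SeedManagerSketch:
    def __init__(self):
        self._states = {}      # mode name -> saved CUDA RNG state
        self._current = None

    def add_seed(self, mode: str, seed: int):
        saved = torch.cuda.get_rng_state()
        torch.cuda.manual_seed(seed)                   # seed this mode's stream
        self._states[mode] = torch.cuda.get_rng_state()
        torch.cuda.set_rng_state(saved)                # restore the caller's state

    def set_mode(self, mode: str):
        if self._current is not None:
            self._states[self._current] = torch.cuda.get_rng_state()
        torch.cuda.set_rng_state(self._states[mode])
        self._current = mode
```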
#!/usr/bin/env python # -*- encoding: utf-8 -*- import functools from contextlib import contextmanager import torch.cuda from torch import Tensor from .seed_manager import SeedManager from ..parallel_mode import ParallelMode _SEED_MANAGER = SeedManager() def get_seeds(): """Returns the seeds of the seed manag...
from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states, sync_states, moe_set_seed, reset_seeds) __all__ = [ 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states', 'sync_sta...
from enum import Enum from typing import Optional import torch from typing import Union from colossalai.gemini.gemini_context import GeminiMemoryManager def sizeof_tensor(tensor: torch.Tensor): return tensor.numel() * tensor.element_size() class TensorState(Enum): FREE = 0 HOLD = 1 HOLD_AFTER_FWD =...
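sizeof_tensor above is simply numel times element size; a quick check of the formula:

```python
import torch

t = torch.zeros(1024, 1024, dtype=torch.float16)
# 1024 * 1024 elements * 2 bytes each = 2 MiB
assert t.numel() * t.element_size() == 2 * 1024 * 1024
```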
from enum import EnumMeta class GeminiMemoryManager(object): def __init__(self, states_cls: EnumMeta): super().__init__() self.states_cls = states_cls self._cnter = 0 # the counter of instances self.total_mem = dict() self.state_mem = dict() self.state_mem['cpu...
import functools from abc import ABC, abstractmethod from time import time from typing import Dict, List, Optional, Tuple, Type import torch from colossalai.gemini.chunk import Chunk, ChunkManager from colossalai.gemini.memory_tracer import ChunkMemStatsCollector from colossalai.utils import get_current_device from c...
import functools from time import time from typing import List, Optional, Tuple import torch from colossalai.gemini.chunk import Chunk, ChunkManager from colossalai.gemini.memory_tracer import MemStats from .memory_tracer import ChunkMemStatsCollector from .placement_policy import PlacementPolicyFactory class Gemi...
from .chunk import ChunkManager, TensorInfo, TensorState, search_chunk_configuration from .gemini_mgr import GeminiManager from .stateful_tensor_mgr import StatefulTensorMgr from .tensor_placement_policy import TensorPlacementPolicyFactory __all__ = [ 'StatefulTensorMgr', 'TensorPlacementPolicyFactory', 'GeminiMan...
import torch from colossalai.gemini.stateful_tensor import StatefulTensor from typing import Union, Tuple def is_storage_empty(tensor: torch.Tensor) -> bool: return tensor.storage().size() == 0 def free_storage(tensor: torch.Tensor) -> None: if not is_storage_empty(tensor): tensor.storage().resize_(...
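The helpers above free and re-allocate a tensor's underlying storage without destroying the tensor object. A small demonstration of that trick using the same storage calls (re-allocation leaves the data uninitialized):

```python
import torch

t = torch.randn(4, 4)
t.storage().resize_(0)              # free_storage: drop the payload
assert t.storage().size() == 0      # is_storage_empty(t) would be True
t.storage().resize_(t.numel())      # alloc_storage: bring the memory back
```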
from abc import ABC, abstractmethod from time import time from typing import List, Optional import torch from colossalai.utils import get_current_device from colossalai.utils.memory import colo_device_memory_capacity from colossalai.gemini.tensor_utils import colo_model_data_tensor_move_inline, colo_tensor_mem_usage f...
import functools import torch import types from colossalai.utils.cuda import get_current_device from colossalai.gemini.tensor_utils import colo_model_data_tensor_move_inline, colo_tensor_mem_usage from colossalai.gemini.stateful_tensor import StatefulTensor, TensorState from colossalai.gemini.tensor_placement_policy im...
from .chunk import Chunk, ChunkFullError, TensorInfo, TensorState from .manager import ChunkManager from .search_utils import classify_params_by_dp_degree, search_chunk_configuration from .utils import init_chunk_manager __all__ = ['Chunk', 'ChunkManager', 'classify_params_by_dp_degree', 'search_chunk_configuration', ...
from dataclasses import dataclass from enum import Enum from typing import Dict, List, Optional import torch import torch.distributed as dist from colossalai.tensor import ProcessGroup as ColoProcessGroup from colossalai.utils import get_current_device class TensorState(Enum): FREE = 0 COMPUTE = 1 HOLD ...
from time import time from typing import Optional import torch import torch.distributed as dist import torch.nn as nn from colossalai.gemini.chunk import ChunkManager from colossalai.gemini.chunk.search_utils import search_chunk_configuration from colossalai.utils import is_ddp_ignored def safe_div(a, b): if a ...
from collections import deque from typing import Deque, Dict, Iterable, List, Optional, Set, Tuple import torch from colossalai.gemini.chunk import Chunk, ChunkFullError, TensorState from colossalai.tensor import ColoTensor from colossalai.utils import get_current_device class ChunkManager: """ A manager cl...
import math from typing import Dict, List, Optional, Tuple import numpy as np import torch.distributed as dist import torch.nn as nn from colossalai.gemini.memory_tracer import MemStats, OrderedParamGenerator from colossalai.tensor import ColoParameter from colossalai.utils import is_ddp_ignored def _filter_exlarge...
import torch.nn from colossalai.gemini.memory_tracer import MemStats from colossalai.gemini.ophooks.runtime_mem_tracer_hook import GradMemStats, GradMemTracerHook, ParamMemTracerHook from colossalai.nn.parallel.data_parallel import _cast_float from colossalai.tensor.param_op_hook import ColoParamOpHookManager __all__...
from typing import Optional import torch import torch.nn as nn from torch.fx import symbolic_trace from colossalai.fx.passes.meta_info_prop import MetaInfoProp from colossalai.fx.profiler import calculate_fwd_out, calculate_fwd_tmp, is_compatible_with_meta from colossalai.gemini.chunk import ChunkManager if is_compa...
from typing import Optional from colossalai.gemini.chunk import ChunkManager from colossalai.gemini.memory_tracer import MemStats from colossalai.utils import get_current_device from colossalai.utils.memory import colo_device_memory_capacity from .memstats_collector import MemStatsCollector class ChunkMemStatsColle...
from .param_runtime_order import OrderedParamGenerator # isort:skip from .memory_stats import MemStats # isort:skip from .memory_monitor import AsyncMemoryMonitor, SyncCudaMemoryMonitor # isort:skip from .memstats_collector import MemStatsCollector # isort:skip from .chunk_memstats_collector import ChunkMem...
from abc import ABC import torch class ParamGenerator(ABC): def append(self, param: torch.nn.Parameter): pass def generate(self): pass def clear(self): pass class OrderedParamGenerator(ParamGenerator): """OrderedParamGenerator Contains the order of parameters visited ...
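OrderedParamGenerator records parameters in the order they are first visited. A hedged sketch of a generator matching the ParamGenerator interface above (illustration only):

```python
import torch

class OrderedParamGeneratorSketch:
    def __init__(self):
        self._param_order = []       # first-visit order
        self._seen_ids = set()

    def append(self, param: torch.nn.Parameter):
        if id(param) not in self._seen_ids:
            self._seen_ids.add(id(param))
            self._param_order.append(param)

    def generate(self):
        yield from self._param_order

    def clear(self):
        self._param_order.clear()
        self._seen_ids.clear()
```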
from typing import Optional, Tuple import torch def colo_model_optimizer_usage(optim) -> Tuple[int, int]: """Trace the optimizer memory usage Args: optim (ShardedOptimV2): an instance of ShardedOptimV2 Returns: Tuple[int, int]: cuda/cpu memory usage in Byte """ if optim is None...
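colo_model_optimizer_usage reports the optimizer-state footprint split into CUDA and CPU bytes. A hedged sketch of that accounting for a plain torch optimizer (the real function also understands ShardedOptimV2 internals):

```python
from typing import Tuple
import torch

def optimizer_mem_usage_sketch(optim: torch.optim.Optimizer) -> Tuple[int, int]:
    """Return (cuda_bytes, cpu_bytes) held by the optimizer's state tensors."""
    if optim is None:
        return 0, 0
    cuda_mem = cpu_mem = 0
    for state in optim.state.values():
        for value in state.values():
            if isinstance(value, torch.Tensor):
                nbytes = value.numel() * value.element_size()
                if value.device.type == 'cuda':
                    cuda_mem += nbytes
                else:
                    cpu_mem += nbytes
    return cuda_mem, cpu_mem
```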
from typing import Any, Dict, List, Optional import torch from colossalai.gemini.memory_tracer import OrderedParamGenerator class MemStats(object): def __init__(self) -> None: """ Store the non model data statistics used for Gemini and ZeroOptimizer. """ # (preop_step, List[para...
import json from abc import abstractmethod from concurrent.futures import ThreadPoolExecutor from time import sleep, time import torch from colossalai.utils import colo_device_memory_used, get_current_device class MemoryMonitor: """Base class for all types of memory monitor. All monitors should have a list ...
import time from typing import List, Optional import torch from colossalai.gemini.memory_tracer import SyncCudaMemoryMonitor from colossalai.gemini.stateful_tensor import StatefulTensor from colossalai.utils.memory import colo_device_memory_used from .memory_stats import MemStats class MemStatsCollector: """ ...
from ._param_hookmgr import BaseParamHookMgr __all__ = ["BaseParamHookMgr"]
from typing import Callable, List import torch import functools class BaseParamHookMgr(object): def __init__(self, param_list: List[torch.nn.Parameter]) -> None: r""" register backward hook on every parameters of module """ self._param_list = param_list self._hook_list = [...
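BaseParamHookMgr registers a backward hook on every parameter of a module. One simple way to do that with Tensor.register_hook (a hedged sketch, not necessarily the library's mechanism):

```python
from typing import Callable, List
import torch

class ParamHookMgrSketch:
    def __init__(self, param_list: List[torch.nn.Parameter]):
        self._param_list = param_list
        self._handles = []

    def register_backward_hooks(self, hook: Callable[[torch.Tensor], None]):
        # the hook fires once per parameter when its gradient is produced
        for p in self._param_list:
            if p.requires_grad:
                self._handles.append(p.register_hook(hook))

    def remove_hooks(self):
        for h in self._handles:
            h.remove()
        self._handles.clear()

# usage sketch:
# mgr = ParamHookMgrSketch(list(model.parameters()))
# mgr.register_backward_hooks(lambda grad: print(grad.shape))
```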
from .utils import BaseOpHook, register_ophooks_recursively __all__ = ["BaseOpHook", "register_ophooks_recursively"]
from contextlib import contextmanager from enum import Enum from functools import partial from typing import List import torch from colossalai.gemini.memory_tracer import MemStats, SyncCudaMemoryMonitor from colossalai.gemini.tensor_utils import alloc_storage, free_storage from colossalai.tensor.param_op_hook import ...
import torch from typing import List, Callable, Optional from abc import ABC, abstractmethod class BaseOpHook(ABC): """This class allows users to add customized operations before and after the execution of a PyTorch submodule""" def __init__(self): pass @abstractmethod def ...
import torch from colossalai.registry import OPHOOKS from . import BaseOpHook @OPHOOKS.register_module class ShardParamHook(BaseOpHook): """ A hook to process the sharded param before and after the FWD and BWD operators execute. """ def __init__(self): super().__init__() def niter(self): ...
import torch from colossalai.registry import OPHOOKS from . import BaseOpHook @OPHOOKS.register_module class ShardGradMemTracerHook(BaseOpHook): """ A hook to process the sharded param before and after the FWD and BWD operators execute. """ def __init__(self): super().__init__() def pre_fwd_...
from ._ops import * from .layer import * from .loss import * from .lr_scheduler import * from .metric import * from .optimizer import *
import math import warnings from torch import Tensor import torch.nn as nn def zeros_(): """Return the initializer filling the input Tensor with the scalar zeros""" def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None): return nn.init.zeros_(tensor) return initializer def o...
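The initializers in this file are factories: calling zeros_() returns a closure that is later applied to a tensor, with fan_in/fan_out accepted for a uniform interface. A short usage example, assuming the zeros_ defined above is in scope:

```python
import torch

init_fn = zeros_()                 # build the initializer once
w = torch.empty(3, 4)
init_fn(w, fan_in=4, fan_out=3)    # fan arguments are ignored by zeros_
assert torch.all(w == 0)
```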
from typing import Optional import torch from colossalai.gemini.chunk import init_chunk_manager from colossalai.gemini.gemini_mgr import GeminiManager from colossalai.gemini.memory_tracer import MemStats from .data_parallel import ZeroDDP class GeminiDDP(ZeroDDP): def __init__(self, module: t...
import itertools from collections import OrderedDict from functools import partial from typing import Dict, Iterable, List, Optional, Set import torch import torch.distributed as dist import torch.nn as nn from colossalai.gemini.chunk import Chunk, ChunkManager, TensorState from colossalai.gemini.gemini_mgr import Ge...
from .data_parallel import ColoDDP, ZeroDDP from .gemini_parallel import GeminiDDP from .zero_wrapper import zero_model_wrapper, zero_optim_wrapper __all__ = ['ColoDDP', 'ZeroDDP', 'GeminiDDP', 'zero_model_wrapper', 'zero_optim_wrapper']
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. import functools from typing import Callable, Dict, List, Optional, Tuple import torch import torch.distributed as dist from torch import Ten...
from collections import OrderedDict from copy import copy from typing import Optional, Set import torch import torch.distributed as dist import torch.nn as nn from colossalai.gemini.chunk import Chunk from colossalai.utils import get_current_device def get_temp_total_chunk_on_cuda(chunk: Chunk): if chunk.is_gat...
from copy import copy from typing import Dict, Optional import torch import torch.nn as nn from .gemini_parallel import GeminiDDP def zero_model_wrapper(model: nn.Module, zero_stage: int = 1, gemini_config: Optional[Dict] = None): """This wrapper function is used to wrap your training model for ZeRO DDP. E...
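Going by the signature shown above (and the export in the package __init__ a few rows earlier), a minimal usage sketch of zero_model_wrapper; the keys accepted by gemini_config are not shown here, so this stays with the defaults, and a colossalai distributed launch is assumed to have been initialized first:

```python
import torch.nn as nn
from colossalai.nn.parallel import zero_model_wrapper

model = nn.Linear(16, 16)
zero_model = zero_model_wrapper(model, zero_stage=1)   # wrap the model for ZeRO DDP training
```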
from .colo_module import ColoModule from colossalai.tensor import ComputePattern, distspec, ProcessGroup, ShardSpec class ColoEmbedding(ColoModule): def __init__(self): super(ColoEmbedding, self).__init__() self._register_shard_params(['weight']) def register(self, compute_pattern, pg: Proce...
from .colo_module import ColoModule from colossalai.tensor import ComputePattern, distspec, ProcessGroup, ShardSpec class ColoLinear(ColoModule): def __init__(self): super(ColoLinear, self).__init__() self._register_shard_params(['weight', 'bias']) def register(self, compute_pattern, pg: Pro...
from .colo_module import ColoModule from .linear import ColoLinear from .embedding import ColoEmbedding from .module_utils import register_colo_module, is_colo_module, get_colo_module, init_colo_module, check_colo_module from .cache_embedding import CachedEmbeddingBag, ParallelCachedEmbeddingBag, CachedParamMgr, Limit...
from typing import Dict from colossalai.tensor import ColoParameter, ComputeSpec, ProcessGroup from colossalai.tensor import distspec from . import ColoModule import torch _COLOSSAL_MODULES: Dict[type, ColoModule] = {} def register_colo_module(module_type: type, colo_module: ColoModule): global _COLOSSAL_MODULES...
from colossalai.tensor.distspec import _DistSpec from colossalai.tensor import ComputePattern from typing import List, Dict class ColoModule(object): def __init__(self): self._shard_params: List[str] = [] self._allowed_patterns: Dict[ComputePattern, Dict[str, Dict[str, _DistSpec]]] = {} def ...
import numpy as np import torch from torch.profiler import record_function from typing import List, Optional from contexttimer import Timer from .copyer import LimitBuffIndexCopyer from enum import Enum import sys from contextlib import contextmanager class EvictionStrategy(Enum): LFU = 1 # dataset aware evic...
import torch from torch import LongTensor class LimitBuffIndexCopyer(object): """LimitBuffIndexCopyer Index Copy using limited temp buffer on CUDA. Args: size (int): buffer size """ def __init__(self, size: int) -> None: self._buff_size = size @torch.no_grad() def index...
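LimitBuffIndexCopyer copies selected rows through a bounded staging buffer so the temporary CUDA allocation never exceeds a fixed size. A hedged sketch of that chunked copy (illustration only, assuming source and target share a dtype):

```python
import torch

@torch.no_grad()
def limited_index_copy_sketch(src: torch.Tensor, src_idx: torch.Tensor,
                              tgt: torch.Tensor, tgt_idx: torch.Tensor,
                              buff_size: int) -> None:
    """Copy src[src_idx] into tgt[tgt_idx] in chunks of at most buff_size rows."""
    for start in range(0, src_idx.numel(), buff_size):
        s = src_idx[start:start + buff_size].to(src.device)
        t = tgt_idx[start:start + buff_size].to(tgt.device)
        buff = src.index_select(0, s).to(tgt.device)   # bounded staging buffer
        tgt.index_copy_(0, t, buff)
```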
import torch import torch.nn.functional as F from typing import List, Optional, Iterator, Tuple from .cached_embedding import CachedEmbeddingBag from colossalai.nn._ops._utils import dual_all_to_all from colossalai.tensor import ColoParameter, ShardSpec, ComputePattern, ProcessGroup, ColoTensorSpec, ColoTensor from ....
import torch class TablewiseEmbeddingBagConfig: ''' example: def prepare_tablewise_config(args, cache_ratio, ...): embedding_bag_config_list: List[TablewiseEmbeddingBagConfig] = [] ... return embedding_bag_config_list ''' def __init__(self, num_embeddings:...
from .cache_mgr import CachedParamMgr, EvictionStrategy from .copyer import LimitBuffIndexCopyer from .cached_embedding import CachedEmbeddingBag from .parallel_cached_embedding import ParallelCachedEmbeddingBag from .embedding_config import TablewiseEmbeddingBagConfig from .parallel_cached_embedding_tablewise import P...
import torch import torch.nn.functional as F from typing import List, Optional, Iterator, Tuple, Union from .base_embedding import BaseEmbeddingBag from .cache_mgr import CachedParamMgr, EvictionStrategy from torch.nn.parameter import Parameter class CachedEmbeddingBag(BaseEmbeddingBag): """CachedEmbeddingBag ...
import torch import torch.distributed as dist import torch.nn as nn from torch.profiler import record_function from .cached_embedding import CachedEmbeddingBag from colossalai.tensor import ProcessGroup from colossalai.nn._ops._utils import dual_all_to_all_tablewise from .embedding_config import TablewiseEmbeddingBag...
import abc import torch.nn as nn class BaseEmbeddingBag(abc.ABC, nn.Module): def __init__( self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2., scale_grad_by_freq=False, sparse=False, mode='mean', inclu...
import torch import torch.distributed as dist import torch.nn.functional as F from .cached_embedding import CachedEmbeddingBag from .cache_mgr import EvictionStrategy from .embedding_config import TablewiseEmbeddingBagConfig from colossalai.tensor import ProcessGroup from colossalai.nn._ops._utils import dual_all_to_a...
# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_adam.py import torch from colossalai.registry import OPTIMIZERS from colossalai.utils import multi_tensor_applier @OPTIMIZERS.register_module class FusedAdam(torch.optim.Optimizer): """Implements Adam algorithm. `FusedAdam` req...
""" Adapted from the pytorch-lamb library at https://github.com/cybertronai/pytorch-lamb """ import torch from torch.optim import Optimizer from colossalai.registry import OPTIMIZERS @OPTIMIZERS.register_module class Lamb(Optimizer): r"""Implements Lamb algorithm. It has been proposed in `Large Batch Optimi...
import math import warnings from enum import Enum from typing import Any, Dict, Set, Tuple import torch import torch.distributed as dist from torch.nn import Parameter from torch.optim import Optimizer from colossalai.amp.naive_amp.grad_scaler import DynamicGradScaler from colossalai.gemini.chunk import Chunk, ChunkM...
"""Adapted from https://github.com/NUS-HPC-AI-Lab/LARS-ImageNet-PyTorch/blob/main/lars.py""" from typing import Iterable import torch from torch.optim import Optimizer from colossalai.registry import OPTIMIZERS @OPTIMIZERS.register_module class Lars(Optimizer): r"""Implements the LARS optimizer from `"Large ba...
from .colossalai_optimizer import ColossalaiOptimizer from .fused_adam import FusedAdam from .fused_lamb import FusedLAMB from .fused_sgd import FusedSGD from .lamb import Lamb from .lars import Lars from .cpu_adam import CPUAdam from .hybrid_adam import HybridAdam __all__ = ['ColossalaiOptimizer', 'FusedLAMB', 'Fused...
import math from typing import Optional import torch from colossalai.kernel.op_builder import CPUAdamBuilder from colossalai.registry import OPTIMIZERS from .nvme_optimizer import NVMeOptimizer @OPTIMIZERS.register_module class CPUAdam(NVMeOptimizer): """Implements Adam algorithm. Supports parameters upda...
import torch import torch.nn as nn from torch import Tensor from torch.optim import Optimizer from colossalai.utils import clip_grad_norm_fp32 class ColossalaiOptimizer(Optimizer): def __init__(self, optim: Optimizer): self.optim = optim @property def param_groups(self): return self.opti...
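ColossalaiOptimizer wraps an inner torch optimizer and forwards the standard interface to it. A runnable sketch of that delegation pattern (illustration only):

```python
import torch
from torch.optim import Optimizer

class OptimizerWrapperSketch:
    def __init__(self, optim: Optimizer):
        self.optim = optim

    @property
    def param_groups(self):
        return self.optim.param_groups

    def zero_grad(self, *args, **kwargs):
        self.optim.zero_grad(*args, **kwargs)

    def step(self, *args, **kwargs):
        return self.optim.step(*args, **kwargs)

model = torch.nn.Linear(4, 4)
opt = OptimizerWrapperSketch(torch.optim.SGD(model.parameters(), lr=0.1))
model(torch.randn(2, 4)).sum().backward()
opt.step()
opt.zero_grad()
```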
from typing import Any import torch from colossalai.nn.optimizer import HybridAdam from colossalai.nn.optimizer.zero_optimizer import ZeroOptimizer __all__ = ['GeminiAdamOptimizer'] class GeminiAdamOptimizer(ZeroOptimizer): def __init__(self, model: torch.nn.Module, **defaults: Any) -> None: optimizer...
# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_lamb.py import torch from colossalai.registry import OPTIMIZERS from colossalai.utils import multi_tensor_applier @OPTIMIZERS.register_module class FusedLAMB(torch.optim.Optimizer): """Implements LAMB algorithm. `FusedLAMB` req...
import torch import os import tempfile import math from torch.nn.parameter import Parameter from typing import Optional, List, Dict, Callable class NVMeOptimizer(torch.optim.Optimizer): """A base class for offloading optimizer states. Args: params: parameters defaults (dict): default dict ...
from typing import Any, Optional import torch from colossalai.kernel.op_builder import CPUAdamBuilder, FusedOptimBuilder from colossalai.registry import OPTIMIZERS from colossalai.utils import multi_tensor_applier from .nvme_optimizer import NVMeOptimizer @OPTIMIZERS.register_module class HybridAdam(NVMeOptimizer)...
# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_sgd.py import torch from torch.optim.optimizer import Optimizer, required from colossalai.registry import OPTIMIZERS from colossalai.utils import multi_tensor_applier @OPTIMIZERS.register_module class FusedSGD(Optimizer): r"""Implem...
import torch.nn as nn from colossalai.registry import LOSSES from torch.nn.modules.loss import _Loss from colossalai.context.moe_context import MOE_CONTEXT @LOSSES.register_module class MoeCrossEntropyLoss(_Loss): r"""torch.nn.CrossEntropyLoss with an added auxiliary loss. Args: input (:class:`torch.ten...
from colossalai.global_variables import tensor_parallel_env as env from colossalai.nn.layer.utils import get_tensor_parallel_mode from torch import nn from torch.nn.modules.loss import * from torch.nn.modules.loss import _Loss from .loss_1d import VocabParallelCrossEntropyLoss1D from .loss_2d import CrossEntropyLoss2D...
import torch import torch.distributed as dist from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.registry import LOSSES from torch.cuda.amp import custom_bwd, custom_fwd from torch.nn.modules.loss import _Loss class _VocabParallelCrossEntropy1D(torch.autograd...
import torch import torch.distributed as dist from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.nn.layer.parallel_2d import reduce_by_batch_2d, split_batch_2d from colossalai.nn.layer.parallel_2d._utils import assert_summa_initialization from colossalai.regist...
import torch import torch.distributed as dist from colossalai.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D, OUTPUT_GROUP_3D from colossalai.core import global_context as gpc from colossalai.nn.layer.parallel_3d import reduce_by_batch_3d, split_tensor_3d from colossalai.nn.layer.parallel_3d._utils import get_paralle...
import torch import torch.distributed as dist from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.nn.layer.parallel_2p5d import reduce_by_batch_2p5d, split_batch_2p5d from colossalai.nn.layer.parallel_2p5d._utils import assert_tesseract_initialization from colos...