import torch
import torch.nn.functional as F

from ...registry import bias_addition_module
from .bias_addition_module import BiasAdditionModule


@bias_addition_module.register(torch.nn.Linear)
class BiasAdditionLinear(BiasAdditionModule):

    def extract_kwargs_from_mod(self):
        return {}

    def generate(self...
from .bias_addition_module import *
from .conv import *
from .linear import *
import torch
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _reverse_repeat_tuple, _single, _triple

from ...registry import bias_addition_module
from .bias_addition_module import BiasAdditionModule


@bias_addition_module.register(torch.nn.Conv1d)
@bias_addition_module.register(torch.nn.Conv...
import torch
from torch.fx.graph_module import GraphModule
from typing import Callable, List, Dict, Any, Optional
from torch.fx._compatibility import compatibility
from packaging import version
import inspect


@compatibility(is_backward_compatible=True)
class Partition:
    """
    Adapted from https://github.com/pyto...
from dataclasses import asdict
from typing import Any, Dict, List, NamedTuple, Tuple

import torch
import torch.fx
from torch.fx.node import Argument, Node, Target
from torch.utils._pytree import tree_map

from colossalai.fx._compatibility import compatibility, is_compatible_with_meta
from colossalai.fx.profiler import...
from .adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass
from .concrete_info_prop import ConcreteInfoProp
from .meta_info_prop import MetaInfoProp, metainfo_trace
from .shard_1d_pass import column_shard_linear_pass, row_shard_linear_pass
import torch
from typing import Dict
from torch.fx.node import Node, map_arg
from torch.fx.graph import Graph


def get_comm_size(prev_partition, next_partition):
    """
    Given two partitions (parent and child), calculate the communication size between the two.
    """
    # Keep tracking the communication size ...
from dataclasses import asdict
from typing import Any, Dict, List, NamedTuple, Optional, Tuple

import torch
import torch.fx
from torch.fx.node import Argument, Node, Target
from torch.utils._pytree import tree_flatten

from colossalai.fx._compatibility import compatibility
from colossalai.fx.profiler import GraphInfo,...
import torch
import torch.nn as nn
import operator
from colossalai.tensor import ProcessGroup
from colossalai.tensor.distspec import ShardSpec
from colossalai.tensor.compute_spec import ComputePattern, ComputeSpec

ELEMENTWISE_MODULE_OP = [torch.nn.Dropout, torch.nn.ReLU]
ELEMENTWISE_FUNC_OP = [
    torch.add,
    operator...
import torch
from torch.fx.graph_module import GraphModule
from typing import Callable, List, Dict, Any, Optional
from torch.fx._compatibility import compatibility
from packaging import version
from colossalai.fx.passes.meta_info_prop import TensorMetadata
import inspect
from typing import List
from colossalai.fx.passe...
import torch
from torch.fx import symbolic_trace
from torch.fx.node import Node
from colossalai.fx.passes.split_module import split_module


def pipe_split():
    pass


def avgcompute_split_pass(gm: torch.fx.GraphModule, pp_size: int):
    """
    In avgcompute_split_pass, we split the module by its forward FLOPs.
    """
    ...
import torch
from typing import List
from torch.fx import symbolic_trace
from torch.fx.node import Node
from colossalai.fx.passes.split_module import split_module
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.sharding_...
import copy
import math
from typing import List, Tuple

import torch

from colossalai.fx import is_compatible_with_meta
from colossalai.fx.codegen.activation_checkpoint_codegen import \
    _find_nested_ckpt_regions
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.passes.algorithms.ckpt_solver_r...
from .ckpt_solver_chen import chen_greedy
from .linearize import linearize
from .ckpt_solver_rotor import solver_rotor
from .ckpt_solver_pofo import solver_pofo
import math


def _discretize(mem_unit, values):
    return [math.ceil(value / mem_unit) for value in values]


class Chain:

    def __init__(self, fw, bw, cw, cbw, ftmp, btmp, check=True):
        self.fweight = fw
        self.bweight = bw
        self.cweight = cw
        self.cbweight = cbw
        self.fwd_mem_tm...
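# A minimal sketch of how _discretize above behaves (values are
# illustrative): each memory value is rounded up to a whole number of
# mem_unit-sized blocks, yielding the small integer grid that the
# checkpointing solver can run its dynamic program on.
# math.ceil(100 / 1024) == 1, math.ceil(2500 / 1024) == 3.
assert _discretize(1024, [100, 1024, 2500]) == [1, 1, 3]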
import math
import sys
from typing import List, Tuple

from torch.fx import Node

from colossalai.fx.codegen.activation_checkpoint_codegen import _find_nested_ckpt_regions
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.profiler import activation_size, calculate_fwd_out, calculate_fwd_tmp, par...
import math
from typing import List, Set, Tuple

import torch
from torch.fx import GraphModule, Node

from colossalai.fx.profiler import calculate_fwd_in, calculate_fwd_tmp

__all__ = ['chen_greedy']
CKPT_OP = ['call_module', 'call_method', 'call_function', 'get_attr']


def _all_potential_ckpt_nodes(gm: GraphModule) -...
from typing import List, Any
from torch.fx import GraphModule, Node
from colossalai.fx.profiler import is_inplace

# Common nodes are nodes that can be treated as attributes and that remain
# unchanged throughout the whole model. They are used several times by
# different blocks of the model, so it is hard for us ...
from setuptools import setup, Extension
import os

this_dir = os.path.dirname(os.path.abspath(__file__))
ext_modules = [Extension(
    'dynamic_programs_C_version',
    sources=[os.path.join(this_dir, 'dynamic_programs.c')],
)]

setup(
    name='rotor c extension',
    version='0.1',
    description='rotor c extension ...
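# A sketch of the standard setuptools workflow for building and importing
# this C extension (generic commands, not specific to this repo):
#
#   python setup.py build_ext --inplace
#
# after which the compiled module imports like any other:
#
#   import dynamic_programs_C_version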
from .activation_checkpoint_codegen import *
from typing import Any, Callable, Dict, Iterable, List, Tuple

import torch

import colossalai

try:
    from torch.fx.graph import (
        CodeGen,
        PythonCode,
        _custom_builtins,
        _CustomBuiltin,
        _format_target,
        _is_from_torch,
        _Namespace,
        _origin_type_map,
    ...
import torch

__all__ = ['ALIAS_ATEN', 'INPLACE_NEW', 'INPLACE_MATH_ATEN', 'CLONE_ATEN', 'RELU_LIKE_OPS', 'RELU_LIKE_MOD']

aten = torch.ops.aten

ALIAS_ATEN = [
    aten.detach.default,
    aten.t.default,
    aten.transpose.int,
    aten.view.default,
    aten._unsafe_view.default,
    aten._reshape_alias.default,
]
...
from .._compatibility import is_compatible_with_meta

if is_compatible_with_meta():
    from .opcount import flop_mapping
    from .profiler import profile_function, profile_method, profile_module
    from .shard_utils import (
        calculate_bwd_time,
        calculate_fwd_in,
        calculate_fwd_out,
        cal...
import uuid

import torch
from torch.types import _bool, _device, _dtype
from torch.utils._pytree import tree_flatten, tree_map

from .._compatibility import compatibility
from .constants import ALIAS_ATEN

__all__ = ['MetaTensor']


def set_data_ptr(x):
    if isinstance(x, torch.Tensor):
        if not x.data_ptr():
            ...
# adapted from https://github.com/facebookresearch/fvcore/blob/main/fvcore/nn/jit_handles.py
# ideas from https://pastebin.com/AkvAyJBw
import operator
from functools import partial, reduce
from numbers import Number
from typing import Any, Callable, List

import torch
from packaging import version

aten = torch.ops.a...
from dataclasses import dataclass, field
from enum import Enum
from functools import partial
from typing import Dict, List

from torch.fx import Graph, Node

from .._compatibility import compatibility
from .memory_utils import activation_size, is_inplace


class Phase(Enum):
    FORWARD = 0
    BACKWARD = 1
    PLACEHO...
import time
from functools import partial
from typing import Any, Callable, Dict, Tuple

import torch
from torch.fx import Graph, Node
from torch.fx.node import Argument, Target
from torch.nn.parameter import Parameter
from torch.utils._pytree import tree_map

from .._compatibility import compatibility
from .constants ...
import torch
from torch.fx import Node

from .._compatibility import compatibility, is_compatible_with_meta
from .memory_utils import activation_size

if is_compatible_with_meta():
    from .constants import OUTPUT_SAVED_MOD, OUTPUT_SAVED_OPS

__all__ = ["calculate_fwd_in", "calculate_fwd_tmp", "calculate_fwd_out"]

@...
from typing import Dict, List, Tuple, Union

import torch
from torch.fx import GraphModule, Node

from .._compatibility import compatibility, is_compatible_with_meta

__all__ = ['activation_size', 'parameter_size', 'is_inplace']


@compatibility(is_backward_compatible=True)
def activation_size(out: Union[torch.Tensor, ...
class ProfilerRegistry:

    def __init__(self, name):
        self.name = name
        self.store = {}

    def register(self, source):

        def wrapper(func):
            self.store[source] = func
            return func

        return wrapper

    def get(self, source):
        assert source in self.store ...
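# A minimal usage sketch of the registry above (the key and function below
# are hypothetical): register() returns a decorator that maps a source key
# to the decorated function, and the truncated get() presumably returns the
# stored entry after the membership assert.
demo_registry = ProfilerRegistry(name='demo')


@demo_registry.register('relu')
def relu_profiler(self, input):
    return 0, 0    # (flops, macs), following the profiler convention


assert demo_registry.store['relu'] is relu_profiler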
from operator import add, floordiv, getitem, mul, neg, pos, setitem, sub

import torch

__all__ = ['INPLACE_OPS', 'INPLACE_METHOD', 'NON_INPLACE_METHOD']

# TODO fill out the inplace ops
INPLACE_OPS = [
    add,
    sub,
    mul,
    floordiv,
    neg,
    pos,
    getitem,
    setitem,
    getattr,
    torch.Tensor.cp...
from .profiler import profile_function, profile_method, profile_module
from .profiler_function import *
from .profiler_module import *
from .registry import meta_profiler_function, meta_profiler_module
from .shard_utils import calculate_fwd_in, calculate_fwd_out, calculate_fwd_tmp
from dataclasses import dataclass
from typing import Any, Callable, Dict, Tuple

import torch
from torch.fx.node import Argument, Target

from ..._compatibility import compatibility
from ..memory_utils import activation_size
from .constants import INPLACE_METHOD, INPLACE_OPS, NON_INPLACE_METHOD
from .registry import me...
# for PyTorch 1.11 compatibility
from typing import Dict, List, Tuple, Union

import torch
from torch.fx import GraphModule, Node

from ..._compatibility import compatibility

__all__ = ["calculate_fwd_in", "calculate_fwd_tmp", "calculate_fwd_out"]


@compatibility(is_backward_compatible=True)
def calculate_fwd_in...
from typing import Optional, Tuple

import torch

from ..registry import meta_profiler_module


# TODO: the memory cost of this module is hard to compute
@meta_profiler_module.register(torch.nn.MultiheadAttention)
def torch_nn_msa(self: torch.nn.MultiheadAttention, query: torch.Tensor, key: torch.Tens...
import operator
from functools import reduce
import math
from typing import Tuple

import torch

from ..registry import meta_profiler_module


@meta_profiler_module.register(torch.nn.Conv1d)
def torch_nn_conv1d(self: torch.nn.Conv1d, input: torch.Tensor) -> Tuple[int, int]:
    # the output shape is calculated using the ...
from typing import Tuple

import torch

from ..registry import meta_profiler_module


@meta_profiler_module.register(torch.nn.Embedding)
def torch_nn_embedding(self: torch.nn.Embedding, input: torch.Tensor) -> Tuple[int, int]:
    # nn.Embedding is a dictionary lookup, so technically it has 0 FLOPs. (https://discuss.pyto...
from typing import Tuple

import torch

from ..registry import meta_profiler_module


@meta_profiler_module.register(torch.nn.Linear)
@meta_profiler_module.register(torch.nn.modules.linear.NonDynamicallyQuantizableLinear)
def torch_nn_linear(self: torch.nn.Linear, input: torch.Tensor) -> Tuple[int, int]:
    out_features...
from typing import Tuple

import torch

from ..registry import meta_profiler_module


@meta_profiler_module.register(torch.nn.AvgPool1d)
@meta_profiler_module.register(torch.nn.AvgPool2d)
@meta_profiler_module.register(torch.nn.AvgPool3d)
@meta_profiler_module.register(torch.nn.MaxPool1d)
@meta_profiler_module.register(t...
from .activation_function import *
from .attention import *
from .convolution import *
from .dropout import *
from .embedding import *
from .linear import *
from .normalization import *
from .pooling import *
from .rnn import *
from .torch_op import *
import operator

import torch

from ..registry import meta_profiler_module
from typing import Optional, Tuple, Union


@meta_profiler_module.register(torch.nn.Flatten)
def torch_nn_flatten(self: torch.nn.Flatten, input: torch.Tensor) -> Tuple[int, int]:
    flops = 0
    macs = 0
    return flops, macs
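# A sketch of extending this pattern to a user-defined module (MyCustomScale
# and its cost model are hypothetical, purely to illustrate the
# register-and-return-(flops, macs) convention used throughout this package):
#
# @meta_profiler_module.register(MyCustomScale)
# def my_custom_scale(self: MyCustomScale, input: torch.Tensor) -> Tuple[int, int]:
#     flops = input.numel()    # assumed cost model: one multiply per element
#     macs = 0
#     return flops, macs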
from typing import Tuple

import torch

from ..registry import meta_profiler_module


@meta_profiler_module.register(torch.nn.Dropout)
def torch_nn_dropout(self: torch.nn.Module, input: torch.Tensor) -> Tuple[int, int]:
    # nn.Dropout does no multiply-accumulate work, so it is counted as 0 FLOPs. (https://discuss.pytorch.org...
from typing import Tuple

import torch

from ..registry import meta_profiler_module

# TODO: different activations have different FLOP counts; currently unused.
_multiplier = {
    torch.nn.ReLU: 1,
    torch.nn.PReLU: 4,
    torch.nn.Sigmoid: 4,
    torch.nn.Tanh: 5,
    torch.nn.LeakyReLU: 3,
    torch.nn.ELU: 4,
    tor...
from typing import Tuple, Union

import torch

from ..registry import meta_profiler_module


@meta_profiler_module.register(torch.nn.InstanceNorm1d)
@meta_profiler_module.register(torch.nn.InstanceNorm2d)
@meta_profiler_module.register(torch.nn.InstanceNorm3d)
@meta_profiler_module.register(torch.nn.LayerNorm)
@meta_prof...
from functools import reduce
import operator

import torch

from ..registry import meta_profiler_module
from typing import Optional, Tuple, Union


def _rnn_flops(flops: int, macs: int, module: torch.nn.RNNBase, w_ih: torch.Tensor,
               w_hh: torch.Tensor) -> Tuple[int, int]:
    # copied from https://github.co...
import operator
from functools import reduce
from typing import Any, Optional, Tuple, Union

import torch

from ..registry import meta_profiler_function


def _elementwise_flops_compute(input, other):
    # copied from https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/profiling/flops_profiler/profiler.py#L763
    ...
import torch
from typing import Optional

from ..registry import meta_profiler_function


@meta_profiler_function.register(torch.nn.functional.embedding)
def torch_nn_functional_embedding(
    input: torch.Tensor,
    weight: torch.Tensor,
    padding_idx: Optional[int] = None,
    max_norm: Optional[float] = None,
    ...
from typing import Tuple

import torch

from ..registry import meta_profiler_function


@meta_profiler_function.register(torch.nn.functional.linear)
def torch_nn_linear(input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor = None) -> Tuple[int, int]:
    out_features = weight.shape[0]
    macs = torch.numel(input...
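# A worked example of the MAC count this truncated formula builds up to,
# assuming the usual convention macs = numel(input) * out_features and
# 1 MAC = 2 FLOPs: for input of shape (8, 512) and weight of shape
# (1024, 512), macs = 8 * 512 * 1024 = 4_194_304 and flops = 8_388_608.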
from typing import Tuple, Union

import torch

from ..registry import meta_profiler_function


@meta_profiler_function.register(torch.nn.functional.avg_pool1d)
@meta_profiler_function.register(torch.nn.functional.avg_pool2d)
@meta_profiler_function.register(torch.nn.functional.avg_pool3d)
@meta_profiler_function.register...
from .activation_function import *
from .arithmetic import *
from .embedding import *
from .linear import *
from .normalization import *
from .pooling import *
from .python_ops import *
from .torch_ops import *
import operator
from typing import Any, Tuple

import torch

from ..registry import meta_profiler_function


@meta_profiler_function.register(operator.getitem)
def operator_getitem(a: Any, b: Any) -> Tuple[int, int]:
    flops = 0
    macs = 0
    return flops, macs


@meta_profiler_function.register(getattr)
def python_...
from functools import reduce
import operator
from typing import Any, Optional, Tuple

import torch

from ..registry import meta_profiler_function


@meta_profiler_function.register(torch.arange)
@meta_profiler_function.register(torch.finfo)
@meta_profiler_function.register(torch.permute)
@meta_profiler_function.register(...
from typing import Tuple

import torch

from ..registry import meta_profiler_function

# TODO: different activations have different FLOP counts; currently unused.
_multiplier = {
    torch.nn.functional.relu: 1,
    torch.nn.functional.prelu: 4,
    torch.nn.functional.sigmoid: 4,
    torch.nn.functional.tanh: 5,
    torch...
from typing import List, Optional, Tuple

import torch

from ..registry import meta_profiler_function


@meta_profiler_function.register(torch.nn.functional.instance_norm)
def torch_nn_func_instancenorm(
    input: torch.Tensor,
    running_mean: Optional[torch.Tensor] = None,
    running_var: Optional[torch.Tensor] = No...
import torch
import gc
import psutil
from collections import namedtuple

from colossalai.context.parallel_mode import ParallelMode
from colossalai.utils import get_current_device
from colossalai.core import global_context as gpc
from colossalai.logging import ge...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import time
from typing import Tuple

from .cuda import synchronize


class Timer:
    """A timer object which helps to log execution times and provides
    different tools to assess them.
    """

    def __init__(self):
        self._started = False
        self._s...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch
from torch.utils.checkpoint import check_backward_validity, detach_variable

from colossalai.context.random import get_states, get_current_mode, set_seed_states, set_mode, sync_states
from .cuda import get_current_device

import weakref


def copy_to_device(...
from .activation_checkpoint import checkpoint
from .checkpointing import load_checkpoint, save_checkpoint
from .common import (
    clip_grad_norm_fp32,
    conditional_context,
    copy_tensor_parallel_attributes,
    count_zeros_fp32,
    disposable,
    ensure_path_exists,
    free_port,
    is_ddp_ignored,
    is_d...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import functools
import os
import random
import socket
from collections import defaultdict
from contextlib import contextmanager
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union

import torch
import torch.distributed as dist
from torch._si...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch


def set_to_cuda(models):
    """Send a model or a list of models to the GPU.

    :param models: nn.Module or a list of modules
    """
    if isinstance(models, list) and len(models) > 1:
        ret = []
        for model in models:
            ret.append(model.to(get_current_devi...
import torch.nn as nn
import torch.distributed as dist
from colossalai.core import global_context as gpc
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.context import ParallelMode
from .common import is_using_ddp
from typing import Dict, List


def get_moe_epsize_param_dict(model: nn.Module) -> ...
from collections import OrderedDict
from itertools import chain

import torch
import torch.distributed as dist

from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.constants import IS_TENSOR_PARALLEL

try:
    from torch.nn.modules.module import _EXT...
import torch
import torch.distributed as dist

from colossalai.tensor import ColoTensor
from colossalai.nn.optimizer import ColossalaiOptimizer
from colossalai.utils.checkpoint.utils import gather_tensor, scatter_tensor
from typing import Optional, Dict


def save_checkpoint(path: str,
                    epoch: int,
                    ...
from .module_checkpoint import save_checkpoint, load_checkpoint

__all__ = ['save_checkpoint', 'load_checkpoint']
import torch
import torch.distributed as dist

from colossalai.tensor import ColoTensor, ColoTensorSpec
from colossalai.tensor.distspec import _DistSpec, DistPlacementPattern


def robust_broadcast(tensor):
    with torch.no_grad():
        is_cpu_ten = tensor.device.type == 'cpu'
        if is_cpu_ten:
            b_da...
from colossalai.utils.rank_recorder.rank_recorder import recorder

__all__ = ["recorder"]
import time
from typing import List, Dict
import json
import os
import shutil
import atexit

import torch
import torch.distributed as dist

import matplotlib.pyplot as plt
import matplotlib.colors as mcolors

cmap = list(mcolors.TABLEAU_COLORS.values())

LOG_FOLDER = "record.log"
MAX_WAIT_TIME =...
from .multi_tensor_apply import MultiTensorApply

multi_tensor_applier = MultiTensorApply(2048 * 32)
# modified from https://github.com/NVIDIA/apex/blob/master/apex/multi_tensor_apply/multi_tensor_apply.py
class MultiTensorApply(object):
    """
    Apply an operation to a list of tensors efficiently.

    Args:
        chunk_size (int): Size of a chunk.
    """

    available = False
    warned = False

    def __i...
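# A sketch of the apex-style calling convention this class follows (the
# fused amp_C kernel and the tensor lists are illustrative and require a
# CUDA build of apex):
#
# import amp_C
# overflow_buf = torch.zeros(1, dtype=torch.int, device='cuda')
# multi_tensor_applier(
#     amp_C.multi_tensor_scale,    # fused CUDA op applied chunk by chunk
#     overflow_buf,                # flag buffer the kernel sets on inf/nan
#     [fp16_grads, fp32_grads],    # lists of tensors processed together
#     1.0 / loss_scale)            # extra scalar args forwarded to the op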
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from abc import ABC, abstractmethod


class BaseSampler(ABC):

    def __init__(self, dataset, batch_size):
        self.dataset = dataset
        self.batch_size = batch_size

    @abstractmethod
    def __len__(self):
        pass

    @abstractmethod
    def __iter__(...
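# A minimal sketch of subclassing BaseSampler (this sequential sampler is
# hypothetical, only to show what the two abstract methods are expected to
# provide: a batch count and an iterator over batches of indices).
class SequentialBatchSampler(BaseSampler):

    def __len__(self):
        return len(self.dataset) // self.batch_size

    def __iter__(self):
        for i in range(len(self)):
            yield list(range(i * self.batch_size, (i + 1) * self.batch_size))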
from .base_sampler import BaseSampler
from .data_parallel_sampler import DataParallelSampler, get_dataloader

__all__ = ['BaseSampler', 'DataParallelSampler', 'get_dataloader']
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

# adapted from torch.utils.data.DistributedSampler

import math
import random
import numpy as np
from typing import TypeVar, Iterator

import torch
from torch.utils.data import Sampler, Dataset, DataLoader

from colossalai.context.parallel_mode import ParallelMode
from co...
import gc
import inspect

import torch
import torch.nn as nn
from typing import Optional
from collections import defaultdict

LINE_WIDTH = 108
LINE = '-' * LINE_WIDTH + '\n'


class TensorDetector():

    def __init__(self,
                 show_info: bool = True,
                 log: str = None,
                 inclu...
from .tensor_detector import TensorDetector
#!/usr/bin/env python
# coding: utf-8

import inspect
import types
from typing import Callable, List

import torch
import torch.nn as nn

from colossalai.tensor import ColoParameter, ColoTensor
from colossalai.utils.model.utils import substitute_init_recursively


class LazyInitContext():
    """
    A context to allow...
import contextlib
import copy
import gc
import pprint
from typing import Callable, List, Optional, Union

import torch
import torch.nn as nn
from torch.utils._pytree import tree_map

from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx.profiler import MetaTensor
from colossalai.tensor.shape_consisten...
from typing import Any, Dict, Iterator, Optional, Tuple, Union

import torch
from torch import nn

from colossalai.nn.parallel.layers import ColoEmbedding, ColoLinear, register_colo_module
from colossalai.tensor import ColoParameter, ColoTensor, ProcessGroup

from .utils import InsertPostInitMethodToModuleSubClasses

#...
import torch
import functools
from typing import Optional


def substitute_init_recursively(cls, func, visited: set):
    for subcls in cls.__subclasses__():
        substitute_init_recursively(subcls, func, visited)
        if subcls not in visited:
            func(subcls)
            visited.add(subcls)


def call_t...
import os
import threading
import time

import torch
from enum import Enum
from typing import List

from colossalai.gemini.stateful_tensor import StatefulTensor
from colossalai.gemini.ophooks import BaseOpHook
from colossalai.engine import Engine
from colossalai.utils.profiler.extention import ProfilerExtension


class D...
from .legacy import *
from .profiler import profile
from abc import ABC, abstractmethod


class ProfilerExtension(ABC):

    @abstractmethod
    def prepare_trace(self):
        pass

    @abstractmethod
    def start_trace(self):
        pass

    @abstractmethod
    def stop_trace(self):
        pass

    @abstractmethod
    def extend_chrome_trace(self, trace: dict) ...
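# A minimal sketch of implementing this interface (a hypothetical no-op
# extension; a real one would hook an underlying profiler and splice its
# events into the chrome trace dict in extend_chrome_trace):
class NoopProfilerExtension(ProfilerExtension):

    def prepare_trace(self):
        pass

    def start_trace(self):
        pass

    def stop_trace(self):
        pass

    def extend_chrome_trace(self, trace: dict) -> dict:
        return trace    # hand the trace back unchanged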
import os
from typing import List

from colossalai.engine import Engine
from torch.profiler import profile as torch_profile
from torch.profiler.profiler import ProfilerAction
from typing import Any, Callable, Iterable, Optional
from torch.autograd import ProfilerActivity
import json
import tempfile
import gzip...
from pathlib import Path

from torch.autograd.profiler import profile

from .prof_utils import BaseProfiler, _format_time, _format_memory, _format_bandwidth
from typing import List


def _get_size(dtype: str):
    if dtype == "fp16":
        return 2
    elif dtype == "fp32":
        return 4
    else:
        raise NotI...
import inspect
from pathlib import Path
from functools import partial

import torch
from torch.autograd.profiler import profile
import torch.distributed as dist
from torch.distributed import ReduceOp

from colossalai.utils import get_current_device
from .prof_utils import BaseProfiler, _format_time, _format_memory, _form...
from .comm_profiler import CommProfiler
from .pcie_profiler import PcieProfiler
from .prof_utils import ProfilerContext, BaseProfiler
from .mem_profiler import MemProfiler

__all__ = ['BaseProfiler', 'CommProfiler', 'PcieProfiler', 'MemProfiler', 'ProfilerContext']
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Union, List

from colossalai.core import global_context as gpc


# copied from a newer PyTorch version to support older versions
def _format_time(time_us):
    """Defines how to format time in FunctionEvent"""
    US_IN_SECOND = 1000.0 * 1000.0
    ...
import shutil
import tempfile
from abc import ABC, abstractmethod
from typing import Dict, List, Type

from .reader import CheckpointReader, DiskCheckpointReader
from .writer import CheckpointWriter, DiskCheckpointWriter

_backends: Dict[str, Type['CheckpointIOBackend']] = {}


def register(name: str):
    assert name ...
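# A sketch of the registration pattern the truncated register() appears to
# implement (the decorator body and the 'disk' backend name are assumptions):
#
# @register('disk')
# class DiskCheckpointIOBackend(CheckpointIOBackend):
#     ...
#
# after which _backends['disk'] would resolve to the registered class.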
import warnings
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple

import torch.distributed as dist
from torch.nn import Module
from torch.optim import Optimizer

from .backend import get_backend
from .convertor import (
    CheckpointConvertor,
    ModelCheckpointMerger,
    ModelCheckpointRedistor,
    Optimize...
from .io import load, merge, redist, save
from .meta import (ParamDistMeta, ParamRedistMeta, PipelineRedistMeta, RankRedistMeta, RedistMeta)
import os
from abc import ABC, abstractmethod
from collections import Counter
from typing import Dict, Generator, List, Optional, Tuple

import torch

from .constant import GLOBAL_META_FILE_NAME, OTHER_CKPT_FILE_NAME
from .meta import ParamDistMeta
from .utils import is_duplicated_list


class CheckpointReader(ABC):
    ...
import torch
from numpy import prod
from torch import Tensor
from typing import List, Optional, Tuple
from collections import defaultdict

from .meta import ParamDistMeta, ParamRedistMeta


def unflatten_zero_param(tensors: List[Tensor], dist_metas: List[ParamDistMeta]) -> Tensor:
    assert len(tensors) > 0 and len(dis...
import warnings
from copy import deepcopy
from itertools import chain
from typing import Any, Callable, Dict, List, Optional, Tuple

from torch import Tensor
from torch.nn import Module
from torch.nn.parameter import Parameter
from torch.optim import Optimizer

from .meta import ParamDistMeta


def run_if_not_none(fn: ...
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional

from torch import Tensor

from .distributed import merge_param, unmerge_param
from .meta import ParamDistMeta, RedistMeta
from .utils import (ModelCheckpointSharder, OptimizerCheckpointSharder...
from abc import ABC, abstractmethod
from typing import Optional

from .constant import MODEL_CKPT_FILE_NAME, OPTIM_CKPT_FILE_NAME, META_CKPT_FILE_NAME, OTHER_CKPT_FILE_NAME, GLOBAL_META_FILE_NAME
import torch
import os


class CheckpointWriter(ABC):

    def __init__(self, base_name: str, overwrite: bool = False, rank: ...
import re

GLOBAL_META_FILE_NAME = 'global_meta.bin'
MODEL_CKPT_FILE_NAME = 'model.bin'
OPTIM_CKPT_FILE_NAME = 'optim.bin'
META_CKPT_FILE_NAME = 'meta.bin'
OTHER_CKPT_FILE_NAME = 'other.bin'
CKPT_PAT = re.compile(r'global_meta|model|optim|meta|other')
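# A quick check of what CKPT_PAT accepts (filenames are illustrative):
# re.search finds the keyword anywhere in a name, so sharded variants such
# as 'model-00001.bin' match as well.
assert CKPT_PAT.search('model-00001.bin') is not None
assert CKPT_PAT.search('global_meta.bin') is not None
assert CKPT_PAT.search('random.txt') is None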
from dataclasses import dataclass
from typing import List, Optional, Set, Dict


@dataclass
class ParamDistMeta:
    # parallel info
    dp_rank: int
    dp_world_size: int
    tp_rank: int
    tp_world_size: int
    # tp info
    tp_shard_dims: Optional[List[int]] = None
    tp_num_parts: Optional[List[int]] = None
    ...
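# A minimal sketch of constructing this metadata (values are illustrative):
# a parameter owned by dp rank 0 of 2 and tp rank 1 of 4, shard-split along
# tensor dimension 0 into 4 parts.
meta = ParamDistMeta(dp_rank=0, dp_world_size=2,
                     tp_rank=1, tp_world_size=4,
                     tp_shard_dims=[0], tp_num_parts=[4])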
import copy
from typing import Dict, List, Tuple

from torch.fx.node import Node

from .estimate_memory import EstimateMemory
from .reorder_graph import ReorderGraph
from .select_chunk import SelectChunk
from .trace_flow import TraceFlow
from .trace_indice import TraceIndice
from .utils import NodeMgr, get_logger, get_...
import copy
from typing import Any, Callable, Dict, Iterable, List, Tuple

import torch
from torch.fx.node import Node, map_arg

from colossalai.fx.profiler import activation_size, parameter_size

from .utils import NodeMgr, delete_free_var_from_last_use, get_node_shape, is_non_memory_node


class EstimateMemory(object...
from .trace_indice import TraceIndice
from .utils import NodeMgr


class ReorderGraph(object):
    """
    Reorder the node list and the indice trace list.
    """

    def __init__(self, trace_indice: TraceIndice, node_mgr: NodeMgr) -> None:
        self.trace_indice = trace_indice
        self.node_mgr = node_mgr
        self...