python_code | repo_name | file_path |
|---|---|---|
from setuptools import setup, find_packages
setup(
name = 'Mega-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.0',
license='MIT',
description = 'Mega - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https:... | Mega-pytorch-main | setup.py |
from mega_pytorch.mega_pytorch import Mega
from mega_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# co... | Mega-pytorch-main | train.py |
import math
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.fft import rfft, irfft
from einops import rearrange
from einops.layers.torch import Rearrange
from scipy.fftpack import next_fast_len
# functions
def exists(val):
return val is not Non... | Mega-pytorch-main | mega_pytorch/mega_pytorch.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwar... | Mega-pytorch-main | mega_pytorch/autoregressive_wrapper.py |
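The `autoregressive_wrapper.py` row above is truncated, so here is a hedged sketch of the core loop such a wrapper implements: generate one token at a time, filter the logits (top-k here), sample, and feed the sample back in. The function name, the filtering scheme, and the assumption that `model(out)` returns per-position logits are illustrative, not the repo's exact code.

```python
import torch
import torch.nn.functional as F

# Illustrative sketch of autoregressive decoding with top-k filtering (assumed names).
@torch.no_grad()
def generate_sketch(model, start_tokens, seq_len, temperature = 1.0, top_k = 50):
    model.eval()
    out = start_tokens                                   # (batch, prefix_len)
    for _ in range(seq_len):
        logits = model(out)[:, -1]                       # logits for the next position
        k = min(top_k, logits.shape[-1])
        vals, idx = logits.topk(k, dim = -1)
        filtered = torch.full_like(logits, float('-inf')).scatter_(-1, idx, vals)
        probs = F.softmax(filtered / temperature, dim = -1)
        sample = torch.multinomial(probs, 1)
        out = torch.cat((out, sample), dim = -1)         # append the sampled token
    return out[:, start_tokens.shape[1]:]                # only the newly generated tokens
```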
from mega_pytorch.mega_pytorch import MegaLayer, Mega, MultiHeadedEMA
| Mega-pytorch-main | mega_pytorch/__init__.py |
import sys
from setuptools import setup, find_packages
sys.path[0:0] = ['deep_daze']
from version import __version__
setup(
name = 'deep-daze',
packages = find_packages(),
include_package_data = True,
entry_points={
'console_scripts': [
'imagine = deep_daze.cli:main',
],
},
version = __versi... | deep-daze-main | setup.py |
__version__ = '0.11.1'
| deep-daze-main | deep_daze/version.py |
from deep_daze.deep_daze import DeepDaze, Imagine
| deep-daze-main | deep_daze/__init__.py |
import sys
import fire
from deep_daze import Imagine
def train(
text=None,
img=None,
learning_rate=1e-5,
num_layers=16,
hidden_size=256,
batch_size=4,
gradient_accumulate_every=4,
epochs=20,
iterations=1050,
save_every=100,
imag... | deep-daze-main | deep_daze/cli.py |
import os
import subprocess
import sys
import random
from datetime import datetime
from pathlib import Path
import torch
import torch.nn.functional as F
from siren_pytorch import SirenNet, SirenWrapper
from torch import nn
from torch.cuda.amp import GradScaler, autocast
from torch_optimizer import DiffGrad, AdamP
impo... | deep-daze-main | deep_daze/deep_daze.py |
from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
from pathlib import Path
import hashlib
import os
import urllib
import warnings
from typing import Union, List
from torchvision.transforms import Compose, Normalize
from tqdm import tq... | deep-daze-main | deep_daze/clip.py |
from setuptools import setup, find_packages
setup(
name = 'reformer_pytorch',
packages = find_packages(exclude=['examples', 'pretraining']),
version = '1.4.4',
license='MIT',
description = 'Reformer, the Efficient Transformer, Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url =... | reformer-pytorch-master | setup.py |
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from reformer_pytorch.reformer_pytorch import ReformerLM
from reformer_pytorch.autopadder import Autopadder
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = tor... | reformer-pytorch-master | reformer_pytorch/generative_tools.py |
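The `top_p` row above is cut off. As a hedged reference, this is a minimal sketch of nucleus (top-p) filtering, the technique that function implements: sort the logits, keep the smallest set whose cumulative probability covers the threshold, and mask the rest. The threshold convention (`thres` = kept probability mass) is an assumption and may differ from the repo's.

```python
import torch
import torch.nn.functional as F

# Sketch of nucleus (top-p) logit filtering; names and threshold convention are assumptions.
def top_p_filter(logits, thres = 0.9):
    sorted_logits, sorted_indices = torch.sort(logits, descending = True)
    cum_probs = torch.cumsum(F.softmax(sorted_logits, dim = -1), dim = -1)

    # mark tokens outside the nucleus; shift by one so the token that crosses
    # the threshold is kept, and never remove the top token
    remove = cum_probs > thres
    remove[..., 1:] = remove[..., :-1].clone()
    remove[..., 0] = False

    sorted_logits[remove] = float('-inf')
    # scatter the (masked) sorted values back to their original positions
    return sorted_logits.scatter(-1, sorted_indices, sorted_logits)

# usage: probs = top_p_filter(torch.randn(2, 100)).softmax(dim = -1)
```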
import math
import torch
from torch import nn
import torch.nn.functional as F
from reformer_pytorch.reformer_pytorch import Reformer, ReformerLM, LSHSelfAttention
def pad_to_multiple(tensor, seqlen, multiple, dim=-1):
m = seqlen / multiple
if m.is_integer():
return tensor
remainder = math.ceil(m) ... | reformer-pytorch-master | reformer_pytorch/autopadder.py |
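The `pad_to_multiple` body above is truncated after computing the remainder. A minimal sketch of the usual completion, assuming right-padding with zeros via `F.pad` (an assumption about the full body, not a copy of it):

```python
import math
import torch
import torch.nn.functional as F

# Sketch: right-pad `dim` so the sequence length becomes a multiple of `multiple`.
def pad_to_multiple(tensor, seqlen, multiple, dim = -1):
    m = seqlen / multiple
    if m.is_integer():
        return tensor
    remainder = math.ceil(m) * multiple - seqlen
    # F.pad consumes (left, right) pairs starting from the last dim,
    # so prepend zero pairs until we reach `dim`
    pad_offset = (0, 0) * (-1 - dim)
    return F.pad(tensor, (*pad_offset, 0, remainder), value = 0.)

# usage: pad_to_multiple(torch.randn(2, 10, 64), seqlen = 10, multiple = 8, dim = -2).shape -> (2, 16, 64)
```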
import re
from torch import nn
from reformer_pytorch.reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
ENC_PREFIX = 'enc_'
DEC_PREFIX = 'dec_'
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
... | reformer-pytorch-master | reformer_pytorch/reformer_enc_dec.py |
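The encoder/decoder wrapper row above truncates right after `group_dict_by_key`. As a hedged illustration of the pattern it supports, the sketch below shows how `enc_` / `dec_` prefixed kwargs are typically split and trimmed; the helpers beyond `group_dict_by_key` are assumed, not copied from the repo.

```python
# Illustrative prefix-splitting helpers (assumed completion of the truncated row).
ENC_PREFIX = 'enc_'
DEC_PREFIX = 'dec_'

def group_dict_by_key(cond, d):
    return_val = [dict(), dict()]
    for key in d.keys():
        match = bool(cond(key))
        ind = int(not match)           # matching keys go to slot 0, the rest to slot 1
        return_val[ind][key] = d[key]
    return tuple(return_val)

def group_by_prefix_and_trim(prefix, d):
    with_prefix, without_prefix = group_dict_by_key(lambda k: k.startswith(prefix), d)
    trimmed = {k[len(prefix):]: v for k, v in with_prefix.items()}
    return trimmed, without_prefix

# usage: group_by_prefix_and_trim(ENC_PREFIX, {'enc_depth': 6, 'dec_depth': 6})
#        -> ({'depth': 6}, {'dec_depth': 6})
```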
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init... | reformer-pytorch-master | reformer_pytorch/reversible.py |
from torch import nn
from reformer_pytorch.reformer_pytorch import LSHAttention, LSHSelfAttention
from collections import defaultdict
class Recorder(nn.Module):
def __init__(self, net):
super().__init__()
self.iter = 0
self.recordings = defaultdict(list)
self.net = net
self.... | reformer-pytorch-master | reformer_pytorch/recorder.py |
from reformer_pytorch.reformer_pytorch import LSHAttention, LSHSelfAttention, Reformer, ReformerLM
from reformer_pytorch.reformer_enc_dec import ReformerEncDec
from reformer_pytorch.recorder import Recorder
from reformer_pytorch.autopadder import Autopadder
| reformer-pytorch-master | reformer_pytorch/__init__.py |
import math
import torch
import torch.nn as nn
from torch.nn import Identity
import torch.nn.functional as F
from torch.autograd import Function
from functools import partial, reduce, wraps
from itertools import chain
from operator import mul
from local_attention import LocalAttention
from axial_positional_embedding i... | reformer-pytorch-master | reformer_pytorch/reformer_pytorch.py |
import deepspeed
from reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset... | reformer-pytorch-master | examples/enwik8_deepspeed/train.py |
from reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1... | reformer-pytorch-master | examples/enwik8_simple/train.py |
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split
from tqdm import tqdm
from reformer_pytorch import Reformer, ReformerLM
from transformers import BertTokenizer, PreTrainedTokenizer
from fairseq.optim.adafactor import Adafactor
... | reformer-pytorch-master | pretraining/self-supervised.py |
from setuptools import setup, find_packages
setup(
name = 'tranception-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.8',
license='MIT',
description = 'Tranception - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
... | tranception-pytorch-main | setup.py |
from tranception_pytorch.tranception_pytorch import Tranception
| tranception-pytorch-main | tranception_pytorch/__init__.py |
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
from einops_exts import rearrange_many
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# relat... | tranception-pytorch-main | tranception_pytorch/tranception_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'g-mlp-pytorch',
packages = find_packages(),
version = '0.1.5',
license='MIT',
description = 'gMLP - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/g-mlp-pytorch',
keywords = [
'... | g-mlp-pytorch-main | setup.py |
from g_mlp_pytorch import gMLP
from g_mlp_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1... | g-mlp-pytorch-main | train.py |
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
... | g-mlp-pytorch-main | g_mlp_pytorch/autoregressive_wrapper.py |
from g_mlp_pytorch.g_mlp_pytorch import gMLP, gMLPVision, gMLPBlock, SpatialGatingUnit
| g-mlp-pytorch-main | g_mlp_pytorch/__init__.py |
from random import randrange
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange, Reduce
# functions
def exists(val):
return val is not None
def pair(val):
return (val, val) if not isinstance(val, tuple) els... | g-mlp-pytorch-main | g_mlp_pytorch/g_mlp_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'charformer-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Charformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/charformer-pytorch',
ke... | charformer-pytorch-main | setup.py |
from charformer_pytorch.charformer_pytorch import GBST
| charformer-pytorch-main | charformer_pytorch/__init__.py |
import math
from math import gcd
import functools
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def lcm(*numbers):
return int(functools.reduce(... | charformer-pytorch-main | charformer_pytorch/charformer_pytorch.py |
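The `lcm` helper above is truncated mid-`reduce`. A short sketch of the standard pairwise least-common-multiple fold it presumably performs (assumed, not the repo's exact line):

```python
import functools
from math import gcd

# Sketch: least common multiple of several block sizes, folded pairwise via gcd.
def lcm(*numbers):
    return int(functools.reduce(lambda x, y: x * y // gcd(x, y), numbers, 1))

# usage: lcm(2, 3, 4) -> 12
```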
from setuptools import setup, find_packages
setup(
name = 'retrieval-augmented-ddpm',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Retrieval-Augmented Denoising Diffusion Probabilistic Models',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = ... | retrieval-augmented-ddpm-main | setup.py |
| retrieval-augmented-ddpm-main | retrieval_augmented_ddpm/retrieval_augmented_ddpm.py |
| retrieval-augmented-ddpm-main | retrieval_augmented_ddpm/__init__.py |
import argparse
from pathlib import Path
from tqdm import tqdm
# torch
import torch
from einops import repeat
# vision imports
from PIL import Image
from torchvision.utils import make_grid, save_image
# dalle related classes and utils
from dalle_pytorch import __version__
from dalle_pytorch import DiscreteVAE, O... | DALLE-pytorch-main | generate.py |
import math
from math import sqrt
import argparse
from pathlib import Path
# torch
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
# vision imports
from torchvision import transforms as T
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolde... | DALLE-pytorch-main | train_vae.py |
from setuptools import setup, find_packages
exec(open('dalle_pytorch/version.py').read())
setup(
name = 'dalle-pytorch',
packages = find_packages(),
include_package_data = True,
version = __version__,
license='MIT',
description = 'DALL-E - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmai... | DALLE-pytorch-main | setup.py |
import argparse
from pathlib import Path
import time
from glob import glob
import os
import shutil
import torch
import wandb # Quit early if user doesn't have wandb installed.
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.util... | DALLE-pytorch-main | train_dalle.py |
from inspect import isfunction
from math import ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from rotary_embedding_torch import apply_rotary_emb
# helpers
def exists(val):
return val is not None
def uniq(arr):
return{el: True for el in ... | DALLE-pytorch-main | dalle_pytorch/attention.py |
__version__ = '1.6.6'
| DALLE-pytorch-main | dalle_pytorch/version.py |
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dic... | DALLE-pytorch-main | dalle_pytorch/reversible.py |
from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
import numpy as np
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
from dalle_pytorch import distributed_utils
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
fr... | DALLE-pytorch-main | dalle_pytorch/dalle_pytorch.py |
from dalle_pytorch.dalle_pytorch import DALLE, CLIP, DiscreteVAE
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from pkg_resources import get_distribution
from dalle_pytorch.version import __version__
| DALLE-pytorch-main | dalle_pytorch/__init__.py |
# take from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py
# to give users a quick easy start to training DALL-E without doing BPE
import torch
import youtokentome as yttm
from tokenizers import Tokenizer
from tokenizers.processors import ByteLevel
from transformers import BertTokenizer
import htm... | DALLE-pytorch-main | dalle_pytorch/tokenizer.py |
from pathlib import Path
from random import randint, choice
import PIL
from torch.utils.data import Dataset
from torchvision import transforms as T
class TextImageDataset(Dataset):
def __init__(self,
folder,
text_len=256,
image_size=128,
trunca... | DALLE-pytorch-main | dalle_pytorch/loader.py |
from collections import deque
from collections.abc import Iterable
from functools import partial
from itertools import islice, cycle
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch.reversible import ReversibleSequence, SequentialSequence
from d... | DALLE-pytorch-main | dalle_pytorch/transformer.py |
"""
Utility functions for optional distributed execution.
To use,
1. set the `BACKENDS` to the ones you want to make available,
2. in the script, wrap the argument parser with `wrap_arg_parser`,
3. in the script, set and use the backend by calling
`set_backend_from_args`.
You can check whether a backend is in use ... | DALLE-pytorch-main | dalle_pytorch/distributed_utils.py |
import io
import sys
import os
import requests
import PIL
import warnings
import hashlib
import urllib
import yaml
from pathlib import Path
from tqdm import tqdm
from math import sqrt, log
from packaging import version
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel, GumbelVQ
import importlib
... | DALLE-pytorch-main | dalle_pytorch/vae.py |
"""
An abstract backend for distributed deep learning.
Provides several standard utility methods under a common API.
Please check the documentation of the class `DistributedBackend` for
details to implement a new backend.
"""
from importlib import import_module
class DistributedBackend:
"""An abstract backend c... | DALLE-pytorch-main | dalle_pytorch/distributed_backends/distributed_backend.py |
from .deepspeed_backend import DeepSpeedBackend
from .distributed_backend import DistributedBackend
from .dummy_backend import DummyBackend
from .horovod_backend import HorovodBackend
| DALLE-pytorch-main | dalle_pytorch/distributed_backends/__init__.py |
import torch
from .distributed_backend import DistributedBackend
class HorovodBackend(DistributedBackend):
"""Distributed backend using Horovod."""
BACKEND_MODULE_NAME = 'horovod.torch'
BACKEND_NAME = 'Horovod'
def wrap_arg_parser(self, parser):
return parser
def check_batch_size(self,... | DALLE-pytorch-main | dalle_pytorch/distributed_backends/horovod_backend.py |
import json
import os
import torch
from .distributed_backend import DistributedBackend
class DeepSpeedBackend(DistributedBackend):
"""Distributed backend using the DeepSpeed engine."""
BACKEND_MODULE_NAME = 'deepspeed'
BACKEND_NAME = 'DeepSpeed'
def wrap_arg_parser(self, parser):
if not se... | DALLE-pytorch-main | dalle_pytorch/distributed_backends/deepspeed_backend.py |
from .distributed_backend import DistributedBackend
class DummyBackend(DistributedBackend):
"""Acts like a distributed backend.
Used as a stand-in replacement to obtain a non-distributed program.
"""
# We define this so we can use `super().__init__` but want this to
# throw an error upon import.... | DALLE-pytorch-main | dalle_pytorch/distributed_backends/dummy_backend.py |
from setuptools import setup, find_packages
setup(
name = 'performer-pytorch',
packages = find_packages(exclude=['examples']),
version = '1.1.4',
license='MIT',
description = 'Performer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/perform... | performer-pytorch-main | setup.py |
import deepspeed
from performer_pytorch import PerformerLM
from performer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import Data... | performer-pytorch-main | examples/enwik8_deepspeed/train.py |
import tqdm
import torch
import torch.optim as optim
from performer_pytorch import PerformerEncDec
from apex import amp
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 32
LEARNING_RATE = 1e-4
GENERATE_EVERY = 100
NUM_TOKENS = 16 + 2
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64 + 1
# helpers
def cycle():
while True:
... | performer-pytorch-main | examples/toy_tasks/enc_dec_copy_apex.py |
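The `cycle` generator in the copy-task example above is cut off. Below is a hedged sketch of the kind of batch generator such a toy task uses, mirroring the constants shown in the row (`NUM_TOKENS`, `ENC_SEQ_LEN`, `DEC_SEQ_LEN`, `BATCH_SIZE`); the exact token layout is an assumption.

```python
import torch

# Constants mirrored from the row above.
NUM_TOKENS = 16 + 2
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64 + 1
BATCH_SIZE = 32

# Sketch of an encoder-decoder copy-task generator (assumed layout).
def copy_task_cycle():
    while True:
        prefix = torch.ones((BATCH_SIZE, 1)).long()                          # start token
        src = torch.randint(2, NUM_TOKENS, (BATCH_SIZE, ENC_SEQ_LEN)).long() # random source
        tgt = torch.cat((prefix, src, src), dim = 1)                         # decoder repeats the source
        src_mask = torch.ones(BATCH_SIZE, src.shape[1]).bool()
        tgt_mask = torch.ones(BATCH_SIZE, tgt.shape[1]).bool()
        yield src, tgt, src_mask, tgt_mask
```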
import tqdm
import torch
import torch.optim as optim
from performer_pytorch import PerformerEncDec
from torch.cuda.amp import autocast, GradScaler
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 32
LEARNING_RATE = 1e-4
GENERATE_EVERY = 100
NUM_TOKENS = 16 + 2
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64 + 1
# helpers
def cyc... | performer-pytorch-main | examples/toy_tasks/enc_dec_copy.py |
from performer_pytorch import PerformerLM
from performer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from torch.cuda.am... | performer-pytorch-main | examples/enwik8_simple/train.py |
import re
import torch
from torch import nn
from performer_pytorch.performer_pytorch import PerformerLM
from performer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
ENC_PREFIX = 'enc_'
DEC_PREFIX = 'dec_'
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
m... | performer-pytorch-main | performer_pytorch/performer_enc_dec.py |
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
def exists(val):
return val is not None
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F... | performer-pytorch-main | performer_pytorch/autoregressive_wrapper.py |
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dic... | performer-pytorch-main | performer_pytorch/reversible.py |
from performer_pytorch.performer_pytorch import PerformerLM, Performer, FastAttention, SelfAttention, CrossAttention, ProjectionUpdater
from performer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
from performer_pytorch.performer_enc_dec import PerformerEncDec
| performer-pytorch-main | performer_pytorch/__init__.py |
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.cuda.amp import autocast
from einops import rearrange, repeat
from functools import partial
from contextlib import contextmanager
from local_attention import LocalAttention
from axial_positional_embedding import AxialPositionalEm... | performer-pytorch-main | performer_pytorch/performer_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'PaLM-rlhf-pytorch',
packages = find_packages(exclude=[]),
version = '0.2.1',
license='MIT',
description = 'PaLM + Reinforcement Learning with Human Feedback - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_descripti... | PaLM-rlhf-pytorch-main | setup.py |
import gzip
import random
import tqdm
import numpy as np
import torch
from lion_pytorch import Lion
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from palm_rlhf_pytorch import PaLM
from accelerate import Accelerator
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_A... | PaLM-rlhf-pytorch-main | train.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# ... | PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/attention.py |
import math
import copy
from pathlib import Path
from collections import namedtuple
from functools import wraps
from itertools import zip_longest
from tqdm import tqdm
from beartype import beartype
from beartype.typing import Tuple, Optional
import torch
from torch import einsum, nn
import torch.nn.functional as F
f... | PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/palm.py |
from palm_rlhf_pytorch.palm import PaLM
from palm_rlhf_pytorch.reward import RewardModel
from palm_rlhf_pytorch.ppo import RLHFTrainer, ActorCritic
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/__init__.py |
import math
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange
def exists(val):
return val is not None
# decorators
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *a... | PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/utils.py |
from torch.optim import AdamW, Adam
from lion_pytorch import Lion
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_o... | PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/optimizer.py |
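The `get_o...` function above is truncated. As a hedged sketch (not the repo's exact `get_optimizer`), this shows how the `separate_weight_decayable_params` helper is typically used: apply weight decay only to parameters with `ndim >= 2` (weight matrices) and none to biases or norm gains. The helper is restated so the block is self-contained.

```python
from torch import nn
from torch.optim import AdamW

def separate_weight_decayable_params(params):
    wd_params, no_wd_params = [], []
    for param in params:
        (no_wd_params if param.ndim < 2 else wd_params).append(param)
    return wd_params, no_wd_params

# Hypothetical optimizer factory built on the grouping above.
def get_optimizer_sketch(params, lr = 3e-4, wd = 1e-2):
    params = [p for p in params if p.requires_grad]
    wd_params, no_wd_params = separate_weight_decayable_params(params)
    param_groups = [
        {'params': wd_params},
        {'params': no_wd_params, 'weight_decay': 0.},
    ]
    return AdamW(param_groups, lr = lr, weight_decay = wd)

# usage: opt = get_optimizer_sketch(nn.Linear(8, 8).parameters())
```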
import torch
from torch import nn
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# LoRA - https://arxiv.org/abs/2106.09685
class LoRA(nn.Module):
def __init__(
self,
dim,
dim_out,
r = 8,
alpha = No... | PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/lora.py |
import math
from pathlib import Path
import copy
from tqdm import tqdm
from functools import partial
from collections import deque, namedtuple
from random import randrange
from beartype import beartype
from beartype.typing import List, Optional, Callable, Deque
import torch
from torch import nn
import torch.nn.functi... | PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/ppo.py |
import copy
from pathlib import Path
from tqdm import tqdm
from beartype import beartype
from beartype.typing import Tuple, Optional
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from pal... | PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/reward.py |
from setuptools import setup, find_packages
setup(
name = 'lion-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.2',
license='MIT',
description = 'Lion Optimizer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url... | lion-pytorch-main | setup.py |
import torch
try:
import triton
import triton.language as tl
except ImportError as e:
print('triton is not installed, please install by running `pip install triton -U --pre`')
exit()
# clone param and exp_avg before autotuning takes place
# as those are updated in-place
def clone_inplace_updated_para... | lion-pytorch-main | lion_pytorch/triton.py |
from typing import Tuple, Optional, Callable
import torch
from torch.optim.optimizer import Optimizer
# functions
def exists(val):
return val is not None
# update functions
def update_fn(p, grad, exp_avg, lr, wd, beta1, beta2):
# stepweight decay
p.data.mul_(1 - lr * wd)
# weight update
upda... | lion-pytorch-main | lion_pytorch/lion_pytorch.py |
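The `update_fn` row above breaks off after the decoupled weight decay. For reference, this is a hedged sketch of the Lion update rule the rest of that function implements, per the Lion paper: a sign-of-interpolated-momentum step followed by an EMA update of the momentum buffer. Treat the exact in-place ops as an assumption rather than the repo's code.

```python
import torch

# Sketch of the Lion update step (assumed in-place formulation).
def lion_update_fn(p, grad, exp_avg, lr, wd, beta1, beta2):
    # stepweight (decoupled) weight decay
    p.data.mul_(1 - lr * wd)
    # weight update: sign of the beta1-interpolation between momentum and gradient
    update = exp_avg.clone().mul_(beta1).add(grad, alpha = 1 - beta1).sign_()
    p.add_(update, alpha = -lr)
    # momentum EMA with beta2
    exp_avg.mul_(beta2).add_(grad, alpha = 1 - beta2)

# usage: lion_update_fn(torch.randn(10), torch.randn(10), torch.zeros(10), 1e-4, 1e-2, 0.9, 0.99)
```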
from lion_pytorch.lion_pytorch import Lion
| lion-pytorch-main | lion_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'CoCa-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.12',
license='MIT',
description = 'CoCa, Contrastive Captioners are Image-Text Foundation Models - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_d... | CoCa-pytorch-main | setup.py |
from coca_pytorch.coca_pytorch import CoCa
| CoCa-pytorch-main | coca_pytorch/__init__.py |
import torch
from torch import einsum, nn
import torch.nn.functional as F
from torch.autograd import Function
import torch.distributed as dist
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# distributed
... | CoCa-pytorch-main | coca_pytorch/coca_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'se3-transformer-pytorch',
packages = find_packages(),
include_package_data = True,
version = '0.9.0',
license='MIT',
description = 'SE3 Transformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github... | se3-transformer-pytorch-main | setup.py |
import torch
import torch.nn.functional as F
from torch.optim import Adam
from einops import rearrange, repeat
import sidechainnet as scn
from se3_transformer_pytorch.se3_transformer_pytorch import SE3Transformer
torch.set_default_dtype(torch.float64)
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY = 16
def cycle(loader,... | se3-transformer-pytorch-main | denoise.py |
import time
import torch
import numpy as np
from lie_learn.representations.SO3.spherical_harmonics import sh
from se3_transformer_pytorch.spherical_harmonics import get_spherical_harmonics_element
from se3_transformer_pytorch.utils import benchmark
def test_spherical_harmonics():
dtype = torch.float64
theta... | se3-transformer-pytorch-main | tests/test_spherical_harmonics.py |
import torch
from se3_transformer_pytorch.se3_transformer_pytorch import SE3Transformer
from se3_transformer_pytorch.irr_repr import rot
from se3_transformer_pytorch.utils import torch_default_dtype, fourier_encode
def test_transformer():
model = SE3Transformer(
dim = 64,
depth = 1,
num_deg... | se3-transformer-pytorch-main | tests/test_equivariance.py |
import torch
from se3_transformer_pytorch.spherical_harmonics import clear_spherical_harmonics_cache
from se3_transformer_pytorch.irr_repr import spherical_harmonics, irr_repr, compose
from se3_transformer_pytorch.utils import torch_default_dtype
@torch_default_dtype(torch.float64)
def test_irr_repr():
"""
Thi... | se3-transformer-pytorch-main | tests/test_irrep_repr.py |
import torch
from se3_transformer_pytorch.basis import get_basis, get_R_tensor, basis_transformation_Q_J
from se3_transformer_pytorch.irr_repr import irr_repr
def test_basis():
max_degree = 3
x = torch.randn(2, 1024, 3)
basis = get_basis(x, max_degree)
assert len(basis.keys()) == (max_degree + 1) ** 2,... | se3-transformer-pytorch-main | tests/test_basis.py |
from math import pi, sqrt
from functools import reduce
from operator import mul
import torch
from functools import lru_cache
from se3_transformer_pytorch.utils import cache
# constants
CACHE = {}
def clear_spherical_harmonics_cache():
CACHE.clear()
def lpmv_cache_key_fn(l, m, x):
return (l, m)
# spherical... | se3-transformer-pytorch-main | se3_transformer_pytorch/spherical_harmonics.py |
import os
from math import pi
import torch
from torch import einsum
from einops import rearrange
from itertools import product
from contextlib import contextmanager
from se3_transformer_pytorch.irr_repr import irr_repr, spherical_harmonics
from se3_transformer_pytorch.utils import torch_default_dtype, cache_dir, exist... | se3-transformer-pytorch-main | se3_transformer_pytorch/basis.py |
from math import sqrt
from itertools import product
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn, einsum
from se3_transformer_pytorch.basis import get_basis
from se3_transformer_pytorch.utils import exists, default, uniq, map_values, batched_index_select, masked... | se3-transformer-pytorch-main | se3_transformer_pytorch/se3_transformer_pytorch.py |
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# helpers
def map_values(fn, x):
out = {}
for (k, v) in x.items():
out[k] = fn(v)
return out
def dict_chunk(x, chunks, dim):
out1 = {}
ou... | se3-transformer-pytorch-main | se3_transformer_pytorch/reversible.py |
from se3_transformer_pytorch.se3_transformer_pytorch import SE3Transformer
| se3-transformer-pytorch-main | se3_transformer_pytorch/__init__.py |
import os
import sys
import time
import pickle
import gzip
import torch
import contextlib
from functools import wraps, lru_cache
from filelock import FileLock
from einops import rearrange
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def un... | se3-transformer-pytorch-main | se3_transformer_pytorch/utils.py |
import os
import numpy as np
import torch
from torch import sin, cos, atan2, acos
from math import pi
from pathlib import Path
from functools import wraps
from se3_transformer_pytorch.utils import exists, default, cast_torch_tensor, to_order
from se3_transformer_pytorch.spherical_harmonics import get_spherical_harmoni... | se3-transformer-pytorch-main | se3_transformer_pytorch/irr_repr.py |
import torch
from torch import nn, einsum
from einops import rearrange, repeat
class SinusoidalEmbeddings(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(se... | se3-transformer-pytorch-main | se3_transformer_pytorch/rotary.py |
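The `SinusoidalEmbeddings.forward` above is truncated after registering `inv_freq`. A hedged sketch of how such a rotary/sinusoidal forward usually completes: outer-product the positions with the inverse frequencies, then duplicate so the frequencies cover the full feature dimension. The class name and duplication scheme here are assumptions.

```python
import torch
from torch import nn

# Illustrative sinusoidal frequency embedding (assumed forward pass).
class SinusoidalEmbeddingsSketch(nn.Module):
    def __init__(self, dim):
        super().__init__()
        inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer('inv_freq', inv_freq)

    def forward(self, positions):
        # positions: (seq_len,) tensor of integer positions
        freqs = torch.einsum('i, j -> i j', positions.type_as(self.inv_freq), self.inv_freq)
        return torch.cat((freqs, freqs), dim = -1)   # (seq_len, dim)

# usage: SinusoidalEmbeddingsSketch(64)(torch.arange(128)).shape -> (128, 64)
```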
from setuptools import setup, find_packages
setup(
name = 'halonet-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'HaloNet - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/halonet-pytorch',
keywords = ... | halonet-pytorch-main | setup.py |
from halonet_pytorch.halonet_pytorch import HaloAttention
| halonet-pytorch-main | halonet_pytorch/__init__.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# relative positional embedding
def to(x):
return {'device': x.device, 'dtype': x.dtype}
def pair(x):
return (x, x) if not isinstance(x, tuple) else x
def expand_dim(t, dim, k):
t = t.unsqueez... | halonet-pytorch-main | halonet_pytorch/halonet_pytorch.py |
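The `expand_dim` helper above is cut off mid-`unsqueeze`. A short sketch of the usual completion (assumed, not copied from the repo): insert a new axis at `dim` and broadcast it to size `k` without copying memory.

```python
import torch

# Sketch: insert an axis at `dim` and expand it to size k.
def expand_dim(t, dim, k):
    t = t.unsqueeze(dim)
    expand_shape = [-1] * len(t.shape)
    expand_shape[dim] = k
    return t.expand(*expand_shape)

# usage: expand_dim(torch.randn(4, 8), dim = 1, k = 3).shape -> (4, 3, 8)
```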
from setuptools import setup, find_packages
setup(
name = 'isab-pytorch',
packages = find_packages(),
version = '0.2.3',
license='MIT',
description = 'Induced Set Attention Block - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
... | isab-pytorch-main | setup.py |