python_code stringlengths 0 4.04M | repo_name stringlengths 7 58 | file_path stringlengths 5 147 |
|---|---|---|
from stanfordnlp.models.common.pretrain import Pretrain
from stanfordnlp.models.depparse.data import DataLoader
from stanfordnlp.models.depparse.trainer import Trainer
from stanfordnlp.pipeline.processor import UDProcessor
class DepparseProcessor(UDProcessor):
def __init__(self, config, use_gpu):
# set u... | stanfordnlp-master | stanfordnlp/pipeline/depparse_processor.py |
stanfordnlp-master | stanfordnlp/pipeline/__init__.py | |
"""
Pipeline that runs tokenize,mwt,pos,lemma,depparse
"""
import itertools
import torch
from distutils.util import strtobool
from stanfordnlp.pipeline.doc import Document
from stanfordnlp.pipeline.tokenize_processor import TokenizeProcessor
from stanfordnlp.pipeline.mwt_processor import MWTProcessor
from stanfordnlp... | stanfordnlp-master | stanfordnlp/pipeline/core.py |
"""
base classes for processors
"""
from abc import ABC, abstractmethod
# base class for all processors
class Processor(ABC):
@abstractmethod
def process(self, doc):
pass
# base class for UD processors
class UDProcessor(Processor):
@abstractmethod
def process(self, doc):
pass
... | stanfordnlp-master | stanfordnlp/pipeline/processor.py |
"""
Basic data structures
"""
import io
import re
from stanfordnlp.models.common.conll import FIELD_TO_IDX as CONLLU_FIELD_TO_IDX
multi_word_token_line = re.compile("([0-9]+)\-([0-9]+)")
class Document:
def __init__(self, text):
self._text = text
self._conll_file = None
self._sentences... | stanfordnlp-master | stanfordnlp/pipeline/doc.py |
from stanfordnlp.models.common.conll import FIELD_TO_IDX
from stanfordnlp.models.lemma.data import DataLoader
from stanfordnlp.models.lemma.trainer import Trainer
from stanfordnlp.pipeline.processor import UDProcessor
class LemmaProcessor(UDProcessor):
def __init__(self, config, use_gpu):
# check if in i... | stanfordnlp-master | stanfordnlp/pipeline/lemma_processor.py |
import io
from stanfordnlp.models.common import conll
from stanfordnlp.models.mwt.data import DataLoader
from stanfordnlp.models.mwt.trainer import Trainer
from stanfordnlp.pipeline.processor import UDProcessor
class MWTProcessor(UDProcessor):
def __init__(self, config, use_gpu):
# set up configurations... | stanfordnlp-master | stanfordnlp/pipeline/mwt_processor.py |
r"""
Python CoreNLP: a server based interface to Java CoreNLP.
"""
import io
import os
import logging
import json
import shlex
import subprocess
import time
import sys
from six.moves.urllib.parse import urlparse
import requests
from stanfordnlp.protobuf import Document, parseFromDelimitedString, writeToDelimitedStri... | stanfordnlp-master | stanfordnlp/server/client.py |
from stanfordnlp.protobuf import to_text
from stanfordnlp.protobuf import Document, Sentence, Token, IndexedWord, Span
from stanfordnlp.protobuf import ParseTree, DependencyGraph, CorefChain
from stanfordnlp.protobuf import Mention, NERMention, Entity, Relation, RelationTriple, Timex
from stanfordnlp.protobuf import Qu... | stanfordnlp-master | stanfordnlp/server/__init__.py |
"""
Defines a base class that can be used to annotate.
"""
import io
from multiprocessing import Process
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from six.moves import http_client as HTTPStatus
from stanfordnlp.protobuf import Document, parseFromDelimitedString, writeToDelimitedString
c... | stanfordnlp-master | stanfordnlp/server/annotator.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple shell program to pipe in
"""
import corenlp
import json
import re
import csv
import sys
from collections import namedtuple, OrderedDict
FLOAT_RE = re.compile(r"\d*\.\d+")
INT_RE = re.compile(r"\d+")
def dictstr(arg):
"""
Parse a key=value string as ... | stanfordnlp-master | stanfordnlp/server/main.py |
"""Print the larger of the maximum " ".join(entry[0][1]) string lengths
found in the two JSON files named on the command line."""
import sys
import json

# Seed with 0 so an empty JSON list still yields a valid maximum.
longest = 0
for path in (sys.argv[1], sys.argv[2]):
    with open(path) as handle:
        entries = json.load(handle)
    longest = max([longest] + [len(" ".join(entry[0][1])) for entry in entries])
print(longest)
| stanfordnlp-master | stanfordnlp/utils/max_mwt_length.py |
import sys
input_file = sys.argv[1]
output_file = sys.argv[2]
# fix up multi-word token
lines = []
for line in sys.stdin:
line = line.strip()
lines += [line]
input_lines = []
with open(input_file) as f:
for line in f:
line = line.strip()
input_lines += [line]
with open(output_file, 'w... | stanfordnlp-master | stanfordnlp/utils/post_insert_mwt.py |
import sys
backoff_models = { "UD_Breton-KEB": "ga_idt",
"UD_Czech-PUD": "cs_pdt",
"UD_English-PUD": "en_ewt",
"UD_Faroese-OFT": "no_nynorsk",
"UD_Finnish-PUD": "fi_tdt",
"UD_Japanese-Modern": "ja_gsd",
"U... | stanfordnlp-master | stanfordnlp/utils/select_backoff.py |
import sys
import json
toklabels = sys.argv[1]
if toklabels.endswith('.json'):
with open(toklabels, 'r') as f:
l = json.load(f)
l = [''.join([str(x[1]) for x in para]) for para in l]
else:
with open(toklabels, 'r') as f:
l = ''.join(f.readlines())
l = l.split('\n\n')
sentlen = [len(... | stanfordnlp-master | stanfordnlp/utils/avg_sent_len.py |
stanfordnlp-master | stanfordnlp/utils/__init__.py | |
import argparse
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument('plaintext_file', type=str, help="Plaintext file containing the raw input")
parser.add_argument('conllu_file', type=str, help="CoNLL-U file containing tokens and sentence breaks")
parser.add_argument('-o', '--output', default=... | stanfordnlp-master | stanfordnlp/utils/prepare_tokenizer_data.py |
import argparse
import re
import sys
from collections import Counter
import json
def para_to_chunks(text, char_level_pred):
chunks = []
preds = []
lastchunk = ''
lastpred = ''
for idx in range(len(text)):
if re.match('^\w$', text[idx], flags=re.UNICODE):
lastchunk += text[idx]
... | stanfordnlp-master | stanfordnlp/utils/postprocess_vietnamese_tokenizer_data.py |
import sys
with open(sys.argv[2], 'w') as fout:
with open(sys.argv[1], 'r') as fin:
idx = 0
mwt_begin = 0
mwt_end = -1
for line in fin:
line = line.strip()
if line.startswith('#'):
print(line, file=fout)
continue
e... | stanfordnlp-master | stanfordnlp/utils/contract_mwt.py |
from collections import OrderedDict
from functools import reduce
import json
import numpy as np
from operator import mul
import os
import pickle
from pprint import pprint
import random
import sys
import subprocess
config_file, sweep_progress, command = sys.argv[1], sys.argv[2], sys.argv[3:]
with open(config_file, 'r'... | stanfordnlp-master | stanfordnlp/utils/sweep.py |
import os
import re
import sys
name_map = {'af_afribooms': 'UD_Afrikaans-AfriBooms', 'grc_perseus': 'UD_Ancient_Greek-Perseus', 'grc_proiel': 'UD_Ancient_Greek-PROIEL', 'ar_padt': 'UD_Arabic-PADT', 'hy_armtdp': 'UD_Armenian-ArmTDP', 'eu_bdt': 'UD_Basque-BDT', 'br_keb': 'UD_Breton-KEB', 'bg_btb': 'UD_Bulgarian-BTB', 'b... | stanfordnlp-master | stanfordnlp/utils/generate_ete_report.py |
"""
utilities for getting resources
"""
import os
import requests
import sys
import urllib.request
import zipfile
from tqdm import tqdm
from pathlib import Path
# set home dir for default
HOME_DIR = str(Path.home())
DEFAULT_MODEL_DIR = os.path.join(HOME_DIR,'stanfordnlp_resources')
# list of language shorthands
con... | stanfordnlp-master | stanfordnlp/utils/resources.py |
#!/usr/bin/env python3
# Compatible with Python 2.7 and 3.2+, can be used either as a module
# or a standalone executable.
#
# Copyright 2017, 2018 Institute of Formal and Applied Linguistics (UFAL),
# Faculty of Mathematics and Physics, Charles University, Czech Republic.
#
# This Source Code Form is subject to the t... | stanfordnlp-master | stanfordnlp/utils/conll18_ud_eval.py |
"""
Entry point for training and evaluating a lemmatizer.
This lemmatizer combines a neural sequence-to-sequence architecture with an `edit` classifier
and two dictionaries to produce robust lemmas from word forms.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
import sys
i... | stanfordnlp-master | stanfordnlp/models/lemmatizer.py |
"""
Wrapper functions to run UDPipe modules just as other neural modules. Only one module will be run at each call.
For more information on the UDPipe system, please visit: http://ufal.mff.cuni.cz/udpipe.
"""
import os
import io
import argparse
import subprocess
import time
from stanfordnlp.models.common import conl... | stanfordnlp-master | stanfordnlp/models/udpipe_wrapper.py |
"""
Entry point for training and evaluating a multi-word token (MWT) expander.
This MWT expander combines a neural sequence-to-sequence architecture with a dictionary
to decode the token into multiple words.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
import sys
import o... | stanfordnlp-master | stanfordnlp/models/mwt_expander.py |
stanfordnlp-master | stanfordnlp/models/__init__.py | |
"""
Entry point for training and evaluating a dependency parser.
This implementation combines a deep biaffine graph-based parser with linearization and distance features.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
"""
Training and evaluation for the parser.
"""
import s... | stanfordnlp-master | stanfordnlp/models/parser.py |
"""
Entry point for training and evaluating a neural tokenizer.
This tokenizer treats tokenization and sentence segmentation as a tagging problem, and uses a combination of
recurrent and convolutional architectures.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
import rand... | stanfordnlp-master | stanfordnlp/models/tokenizer.py |
"""
Entry point for training and evaluating a POS/morphological features tagger.
This tagger uses highway BiLSTM layers with character and word-level representations, and biaffine classifiers
to produce consistant POS and UFeats predictions.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018univer... | stanfordnlp-master | stanfordnlp/models/tagger.py |
"""
An indentity lemmatizer that mimics the behavior of a normal lemmatizer but directly uses word as lemma.
"""
import os
import argparse
import random
from stanfordnlp.models.lemma.data import DataLoader
from stanfordnlp.models.lemma import scorer
from stanfordnlp.models.common import utils
def parse_args():
p... | stanfordnlp-master | stanfordnlp/models/identity_lemmatizer.py |
from collections import Counter
from stanfordnlp.models.common.vocab import BaseVocab
import stanfordnlp.models.common.seq2seq_constant as constant
class Vocab(BaseVocab):
def build_vocab(self):
pairs = self.data
allchars = "".join([src + tgt for src, tgt in pairs])
counter = Counter(allch... | stanfordnlp-master | stanfordnlp/models/mwt/vocab.py |
stanfordnlp-master | stanfordnlp/models/mwt/__init__.py | |
"""
A trainer class to handle training and testing of models.
"""
import sys
import numpy as np
from collections import Counter
import torch
from torch import nn
import torch.nn.init as init
import stanfordnlp.models.common.seq2seq_constant as constant
from stanfordnlp.models.common.trainer import Trainer as BaseTrai... | stanfordnlp-master | stanfordnlp/models/mwt/trainer.py |
import random
import numpy as np
import os
from collections import Counter
import torch
import stanfordnlp.models.common.seq2seq_constant as constant
from stanfordnlp.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanfordnlp.models.common import conll
from stanfordnlp.models.mw... | stanfordnlp-master | stanfordnlp/models/mwt/data.py |
"""
Utils and wrappers for scoring lemmatizers.
"""
from stanfordnlp.models.common.utils import ud_scores
def score(system_conllu_file, gold_conllu_file):
""" Wrapper for word segmenter scorer. """
evaluation = ud_scores(gold_conllu_file, system_conllu_file)
el = evaluation["Words"]
p, r, f = el.precis... | stanfordnlp-master | stanfordnlp/models/mwt/scorer.py |
stanfordnlp-master | stanfordnlp/models/depparse/__init__.py | |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import stanfordnlp.models.depparse.mapping_utils as util
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from stanfordnlp.models.common.biaffine import DeepBiaffineScorer
from s... | stanfordnlp-master | stanfordnlp/models/depparse/model.py |
from __future__ import unicode_literals, print_function, division
import os
import numpy as np
import scipy
import scipy.sparse.csgraph as csg
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils... | stanfordnlp-master | stanfordnlp/models/depparse/mapping_utils.py |
"""
A trainer class to handle training and testing of models.
"""
import sys
import torch
from torch import nn
from stanfordnlp.models.common.trainer import Trainer as BaseTrainer
from stanfordnlp.models.common import utils, loss
from stanfordnlp.models.common.chuliu_edmonds import chuliu_edmonds_one_root
from stanfo... | stanfordnlp-master | stanfordnlp/models/depparse/trainer.py |
import random
import torch
from conllu import parse_tree, parse_tree_incr, parse, parse_incr
import networkx as nx
import scipy
import scipy.sparse.csgraph as csg
import numpy as np
import stanfordnlp.models.depparse.mapping_utils as util
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from stanf... | stanfordnlp-master | stanfordnlp/models/depparse/data.py |
"""
Utils and wrappers for scoring parsers.
"""
from stanfordnlp.models.common.utils import ud_scores
def score(system_conllu_file, gold_conllu_file, verbose=True):
""" Wrapper for UD parser scorer. """
evaluation = ud_scores(gold_conllu_file, system_conllu_file)
el = evaluation['UAS']
p = el.precision... | stanfordnlp-master | stanfordnlp/models/depparse/scorer.py |
"""
Supports for pretrained data.
"""
import os
import lzma
import numpy as np
import torch
from .vocab import BaseVocab, VOCAB_PREFIX
class PretrainedWordVocab(BaseVocab):
def build_vocab(self):
self._id2unit = VOCAB_PREFIX + self.data
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
c... | stanfordnlp-master | stanfordnlp/models/common/pretrain.py |
# Adapted from Tim's code here: https://github.com/tdozat/Parser-v3/blob/master/scripts/chuliu_edmonds.py
import numpy as np
def tarjan(tree):
""""""
indices = -np.ones_like(tree)
lowlinks = -np.ones_like(tree)
onstack = np.zeros_like(tree, dtype=bool)
stack = list()
_index = [0]
cycles =... | stanfordnlp-master | stanfordnlp/models/common/chuliu_edmonds.py |
from copy import copy
from collections import Counter, OrderedDict
import os
import pickle
# Special vocabulary symbols with fixed ids 0-3. VOCAB_PREFIX lists them in
# id order so vocabularies can prepend them (e.g. `VOCAB_PREFIX + self.data`
# in subclasses), keeping these ids stable across all vocabularies.
PAD = '<PAD>'
PAD_ID = 0
UNK = '<UNK>'
UNK_ID = 1
EMPTY = '<EMPTY>'
EMPTY_ID = 2
ROOT = '<ROOT>'
ROOT_ID = 3
VOCAB_PREFIX = [PAD, UNK, EMPTY, ROOT]
class BaseVocab:
""" A base class for common vocabulary operations. Each su... | stanfordnlp-master | stanfordnlp/models/common/vocab.py |
"""
The full encoder-decoder model, built on top of the base seq2seq modules.
"""
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import stanfordnlp.models.common.seq2seq_constant as constant
from stanfordnlp.models.common import utils
from stanfordnlp.models.common.seq2seq_module... | stanfordnlp-master | stanfordnlp/models/common/seq2seq_model.py |
"""
A wrapper/loader for the official conll-u format files.
"""
import os
import io
FIELD_NUM = 10
FIELD_TO_IDX = {'id': 0, 'word': 1, 'lemma': 2, 'upos': 3, 'xpos': 4, 'feats': 5, 'head': 6, 'deprel': 7, 'deps': 8, 'misc': 9}
class CoNLLFile():
def __init__(self, filename=None, input_str=None, ignore_gapping=Tr... | stanfordnlp-master | stanfordnlp/models/common/conll.py |
"""
Utils for seq2seq models.
"""
from collections import Counter
import random
import json
import unicodedata
import torch
import stanfordnlp.models.common.seq2seq_constant as constant
# torch utils
def get_optimizer(name, parameters, lr):
if name == 'sgd':
return torch.optim.SGD(parameters, lr=lr)
e... | stanfordnlp-master | stanfordnlp/models/common/seq2seq_utils.py |
stanfordnlp-master | stanfordnlp/models/common/__init__.py | |
"""
Constants for seq2seq models.
"""
PAD = '<PAD>'
PAD_ID = 0
UNK = '<UNK>'
UNK_ID = 1
SOS = '<SOS>'
SOS_ID = 2
EOS = '<EOS>'
EOS_ID = 3
VOCAB_PREFIX = [PAD, UNK, SOS, EOS]
EMB_INIT_RANGE = 1.0
INFINITY_NUMBER = 1e12
| stanfordnlp-master | stanfordnlp/models/common/seq2seq_constant.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class PairwiseBilinear(nn.Module):
''' A bilinear module that deals with broadcasting for efficient memory usage.
Input: tensors of sizes (N x L1 x D1) and (N x L2 x D2)
Output: tensor of size (N x L1 x L2 x O)'''
def __init__(self, inp... | stanfordnlp-master | stanfordnlp/models/common/biaffine.py |
"""
Different loss functions.
"""
import torch
import torch.nn as nn
import stanfordnlp.models.common.seq2seq_constant as constant
def SequenceLoss(vocab_size):
    """Build an NLLLoss criterion over `vocab_size` classes that ignores padding.

    The per-class weight vector is all ones except at PAD_ID, which is zeroed
    so padded positions contribute nothing to the loss.
    """
    class_weights = torch.ones(vocab_size)
    class_weights[constant.PAD_ID] = 0
    return nn.NLLLoss(class_weights)
class MixLoss(nn.Module):
"""
A m... | stanfordnlp-master | stanfordnlp/models/common/loss.py |
"""
Utility functions.
"""
import os
from collections import Counter
import random
import json
import unicodedata
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import Optimizer, required
from stanfordnlp.models.common.constant import lcode2lang
import stanfordnlp.models.common.seq2seq_const... | stanfordnlp-master | stanfordnlp/models/common/utils.py |
"""
Pytorch implementation of basic sequence to Sequence modules.
"""
import torch
import torch.nn as nn
import math
import numpy as np
import stanfordnlp.models.common.seq2seq_constant as constant
class BasicAttention(nn.Module):
"""
A basic MLP attention layer.
"""
def __init__(self, dim):
... | stanfordnlp-master | stanfordnlp/models/common/seq2seq_modules.py |
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_sequence, pad_packed_sequence, pack_padded_sequence, PackedSequence
from stanfordnlp.models.common.packed_lstm import PackedLSTM
from stanfordnlp.models.common.utils import tensor_unsort
class CharacterModel(nn.Module):
def __init__(self, args... | stanfordnlp-master | stanfordnlp/models/common/char_model.py |
import torch
class Trainer:
    def change_lr(self, new_lr):
        """Set the learning rate of every optimizer param group to `new_lr`."""
        # PyTorch optimizers keep hyperparameters per param group, so the
        # 'lr' entry must be overwritten in each group individually.
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = new_lr
def save(self, filename):
savedict = {
'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(... | stanfordnlp-master | stanfordnlp/models/common/trainer.py |
import torch
import torch.nn as nn
class WordDropout(nn.Module):
def __init__(self, dropprob):
super().__init__()
self.dropprob = dropprob
def forward(self, x, replacement=None):
if not self.training or self.dropprob == 0:
return x
masksize = [y for y in x.size()]
... | stanfordnlp-master | stanfordnlp/models/common/dropout.py |
"""
Global constants.
"""
lcode2lang = {
"af": "Afrikaans",
"grc": "Ancient_Greek",
"ar": "Arabic",
"hy": "Armenian",
"eu": "Basque",
"br": "Breton",
"bg": "Bulgarian",
"bxr": "Buryat",
"ca": "Catalan",
"zh": "Chinese",
"hr": "Croatian",
"cs": "Czech",
"da": "Danish"... | stanfordnlp-master | stanfordnlp/models/common/constant.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
class PackedLSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=Fals... | stanfordnlp-master | stanfordnlp/models/common/packed_lstm.py |
from __future__ import division
import torch
import stanfordnlp.models.common.seq2seq_constant as constant
"""
Adapted and modified from the OpenNMT project.
Class for managing the internals of the beam search process.
hyp1-hyp1---hyp1 -hyp1
\ /
hyp2 \-hyp2 /-hyp2hy... | stanfordnlp-master | stanfordnlp/models/common/beam.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from stanfordnlp.models.common.packed_lstm import PackedLSTM
# Highway LSTM Cell (Zhang et al. (2018) Highway Long Short-Term Memory RNNs for Dista... | stanfordnlp-master | stanfordnlp/models/common/hlstm.py |
"""
Utility functions for data transformations.
"""
import torch
import stanfordnlp.models.common.seq2seq_constant as constant
def map_to_ids(tokens, vocab):
    """Look up each token's id in `vocab`; out-of-vocabulary tokens get UNK_ID."""
    result = []
    for token in tokens:
        if token in vocab:
            result.append(vocab[token])
        else:
            result.append(constant.UNK_ID)
    return result
def get_long_tensor(tokens_list, batch_size, pad_id=constant.PAD_ID):
... | stanfordnlp-master | stanfordnlp/models/common/data.py |
from collections import Counter, OrderedDict
from stanfordnlp.models.common.vocab import BaseVocab, BaseMultiVocab
from stanfordnlp.models.common.vocab import CompositeVocab, VOCAB_PREFIX, EMPTY, EMPTY_ID
class CharVocab(BaseVocab):
def build_vocab(self):
counter = Counter([c for sent in self.data for w i... | stanfordnlp-master | stanfordnlp/models/pos/vocab.py |
# This is the XPOS factory method generated automatically from models.pos.build_xpos_factory.
# Please don't edit it!
from stanfordnlp.models.pos.vocab import WordVocab, XPOSVocab
def xpos_vocab_factory(data, shorthand):
if shorthand in ["af_afribooms", "grc_perseus", "ar_padt", "bg_btb", "cs_cac", "cs_fictree", ... | stanfordnlp-master | stanfordnlp/models/pos/xpos_vocab_factory.py |
from collections import defaultdict
import os
import sys
from stanfordnlp.models.common.vocab import VOCAB_PREFIX
from stanfordnlp.models.pos.vocab import XPOSVocab, WordVocab
from stanfordnlp.models.common.conll import CoNLLFile
if len(sys.argv) != 3:
print('Usage: {} short_to_tb_file output_factory_file'.format(... | stanfordnlp-master | stanfordnlp/models/pos/build_xpos_vocab_factory.py |
stanfordnlp-master | stanfordnlp/models/pos/__init__.py | |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from stanfordnlp.models.common.biaffine import BiaffineScorer
from stanfordnlp.models.common.hlstm import HighwayLSTM
from stanfo... | stanfordnlp-master | stanfordnlp/models/pos/model.py |
"""
A trainer class to handle training and testing of models.
"""
import sys
import torch
from torch import nn
from stanfordnlp.models.common.trainer import Trainer as BaseTrainer
from stanfordnlp.models.common import utils, loss
from stanfordnlp.models.pos.model import Tagger
from stanfordnlp.models.pos.vocab import... | stanfordnlp-master | stanfordnlp/models/pos/trainer.py |
import random
import torch
from stanfordnlp.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanfordnlp.models.common import conll
from stanfordnlp.models.common.vocab import PAD_ID, VOCAB_PREFIX
from stanfordnlp.models.pos.vocab import CharVocab, WordVocab, XPOSVocab, FeatureVoc... | stanfordnlp-master | stanfordnlp/models/pos/data.py |
"""
Utils and wrappers for scoring taggers.
"""
from stanfordnlp.models.common.utils import ud_scores
def score(system_conllu_file, gold_conllu_file, verbose=True):
""" Wrapper for tagger scorer. """
evaluation = ud_scores(gold_conllu_file, system_conllu_file)
el = evaluation['AllTags']
p = el.precisio... | stanfordnlp-master | stanfordnlp/models/pos/scorer.py |
from collections import Counter
from stanfordnlp.models.common.vocab import BaseVocab, BaseMultiVocab
from stanfordnlp.models.common.seq2seq_constant import VOCAB_PREFIX
class Vocab(BaseVocab):
def build_vocab(self):
counter = Counter(self.data)
self._id2unit = VOCAB_PREFIX + list(sorted(list(coun... | stanfordnlp-master | stanfordnlp/models/lemma/vocab.py |
stanfordnlp-master | stanfordnlp/models/lemma/__init__.py | |
"""
Utilities for calculating edits between word and lemma forms.
"""
EDIT_TO_ID = {'none': 0, 'identity': 1, 'lower': 2}

def get_edit_type(word, lemma):
    """Classify the edit relating a word form to its lemma.

    Returns 'identity' when the lemma equals the word, 'lower' when it
    equals the lowercased word, and 'none' otherwise.
    """
    if word == lemma:
        return 'identity'
    if word.lower() == lemma:
        return 'lower'
    return 'none'
def edit... | stanfordnlp-master | stanfordnlp/models/lemma/edit.py |
"""
A trainer class to handle training and testing of models.
"""
import sys
import numpy as np
from collections import Counter
import torch
from torch import nn
import torch.nn.init as init
import stanfordnlp.models.common.seq2seq_constant as constant
from stanfordnlp.models.common.seq2seq_model import Seq2SeqModel
... | stanfordnlp-master | stanfordnlp/models/lemma/trainer.py |
import random
import numpy as np
import os
from collections import Counter
import torch
import stanfordnlp.models.common.seq2seq_constant as constant
from stanfordnlp.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanfordnlp.models.common import conll
from stanfordnlp.models.le... | stanfordnlp-master | stanfordnlp/models/lemma/data.py |
"""
Utils and wrappers for scoring lemmatizers.
"""
from stanfordnlp.utils import conll18_ud_eval as ud_eval
def score(system_conllu_file, gold_conllu_file):
""" Wrapper for lemma scorer. """
gold_ud = ud_eval.load_conllu_file(gold_conllu_file)
system_ud = ud_eval.load_conllu_file(system_conllu_file)
e... | stanfordnlp-master | stanfordnlp/models/lemma/scorer.py |
from collections import Counter
import re
from stanfordnlp.models.common.vocab import BaseVocab
from stanfordnlp.models.common.vocab import UNK, PAD
class Vocab(BaseVocab):
def build_vocab(self):
paras = self.data
counter = Counter()
for para in paras:
for unit in para:
... | stanfordnlp-master | stanfordnlp/models/tokenize/vocab.py |
stanfordnlp-master | stanfordnlp/models/tokenize/__init__.py | |
import torch
import torch.nn.functional as F
import torch.nn as nn
class Tokenizer(nn.Module):
def __init__(self, args, nchars, emb_dim, hidden_dim, N_CLASSES=5, dropout=0):
super().__init__()
self.args = args
feat_dim = args['feat_dim']
self.embeddings = nn.Embedding(nchars, emb_... | stanfordnlp-master | stanfordnlp/models/tokenize/model.py |
from collections import Counter
from copy import copy
import json
import numpy as np
from stanfordnlp.models.common.utils import ud_scores, harmonic_mean
def load_mwt_dict(filename):
if filename is not None:
with open(filename, 'r') as f:
mwt_dict0 = json.load(f)
mwt_dict = dict()
... | stanfordnlp-master | stanfordnlp/models/tokenize/utils.py |
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from stanfordnlp.models.common.trainer import Trainer
from .model import Tokenizer
from .vocab import Vocab
class Trainer(Trainer):
def __init__(self, args=None, vocab=None, model_file=None, use_cuda=False):
self.use_cuda = use_cud... | stanfordnlp-master | stanfordnlp/models/tokenize/trainer.py |
from bisect import bisect_right
from copy import copy
import json
import numpy as np
import random
import re
import torch
from .vocab import Vocab
class DataLoader:
def __init__(self, args, input_files={'json': None, 'txt': None, 'label': None}, input_text=None, input_data=None, vocab=None, evaluation=False):
... | stanfordnlp-master | stanfordnlp/models/tokenize/data.py |
from __future__ import absolute_import
from io import BytesIO
from google.protobuf.internal.encoder import _EncodeVarint
from google.protobuf.internal.decoder import _DecodeVarint
from .CoreNLP_pb2 import *
def parseFromDelimitedString(obj, buf, offset=0):
"""
Stanford CoreNLP uses the Java "writeDelimitedTo... | stanfordnlp-master | stanfordnlp/protobuf/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: CoreNLP.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _m... | stanfordnlp-master | stanfordnlp/protobuf/CoreNLP_pb2.py |
"""
Tests for the run_pipeline.py script, also serves as integration test
"""
import re
import subprocess
from datetime import datetime
from tests import *
def test_fr_pipeline():
# check input files present
assert os.path.exists(FR_TEST_IN), f'Missing test input file: {FR_TEST_IN}'
assert os.path.exist... | stanfordnlp-master | tests/test_run_pipeline.py |
"""
Basic testing of the English pipeline
"""
import pytest
import stanfordnlp
from tests import *
def setup_module(module):
"""Set up resources for all tests in this module"""
safe_rm(EN_MODELS_DIR)
stanfordnlp.download('en', resource_dir=TEST_WORKING_DIR, force=True)
def teardown_module(module):
... | stanfordnlp-master | tests/test_english_pipeline.py |
"""
Utilities for testing
"""
import os
# Environment Variables
# set this to specify working directory of tests
TEST_HOME_VAR = 'STANFORDNLP_TEST_HOME'
# Global Variables
# test working directory base name must be stanfordnlp_test
TEST_DIR_BASE_NAME = 'stanfordnlp_test'
# check the working dir is set and compliant... | stanfordnlp-master | tests/__init__.py |
"""
Tests to read a stored protobuf.
Also serves as an example of how to parse sentences, tokens, pos, lemma,
ner, dependencies and mentions.
The test corresponds to annotations for the following sentence:
Chris wrote a simple sentence that he parsed with Stanford CoreNLP.
"""
import os
import pytest
from pytest ... | stanfordnlp-master | tests/test_protobuf.py |
"""
Tests that call a running CoreNLPClient.
"""
import pytest
import stanfordnlp.server as corenlp
# set the marker for this module
pytestmark = pytest.mark.travis
TEXT = "Chris wrote a simple sentence that he parsed with Stanford CoreNLP.\n"
def test_connect():
with corenlp.CoreNLPClient() as client:
... | stanfordnlp-master | tests/test_client.py |
"""
Basic testing of multi-word-token expansion
"""
import stanfordnlp
from tests import *
def setup_module(module):
"""Set up resources for all tests in this module"""
safe_rm(FR_MODELS_DIR)
stanfordnlp.download('fr', resource_dir=TEST_WORKING_DIR, force=True)
def teardown_module(module):
"""Clean... | stanfordnlp-master | tests/test_mwt.py |
# we want to just keep some shorter sentences here
limit = 25
file_out = open('en_ewt.train.in.conllu', 'w')
linearr = []
write_line = True
with open('en_ewt.train.in.conllu.backup') as f:
for line in f:
linearr.append(line)
words = line.split()
if len(words) > 0:
if int(word... | stanfordnlp-master | scripts/get_short_sents.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import find_packages, setup
setup(
name="cotracker",
version="1.0",
install_requires=[],
... | co-tracker-main | setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import torch
import signal
import socket
import sys
import json
import numpy as np
import argpars... | co-tracker-main | train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
dependencies = ["torch", "einops", "timm", "tqdm"]
_COTRACKER_URL = (
"https://dl.fbaipublicfiles.com/c... | co-tracker-main | hubconf.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import cv2
import torch
import argparse
import numpy as np
from PIL import Image
from cotracker.utils.visualize... | co-tracker-main | demo.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from tqdm import tqdm
from cotracker.models.core.cotracker.cotracker import ... | co-tracker-main | cotracker/predictor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| co-tracker-main | cotracker/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import io
import glob
import torch
import pickle
import numpy as np
import mediapy as media
from PIL import Ima... | co-tracker-main | cotracker/datasets/tap_vid_datasets.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import imageio
import numpy as np
from cotracker.datasets.utils import CoTrackerData
from torchvi... | co-tracker-main | cotracker/datasets/kubric_movif_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import os
import json
import imageio
import cv2
from enum import Enum
from cotracker.da... | co-tracker-main | cotracker/datasets/badja_dataset.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.