input: string (lengths 33 to 5k)
output: string (lengths 32 to 5k)

Rows below alternate between the two columns: each input snippet is followed on the next line by its paired output snippet. Long cells are truncated with "..." in this preview.
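A minimal sketch of loading and inspecting data with this schema, assuming it is published as a Hugging Face dataset with string columns named "input" and "output"; the repo id below is a hypothetical placeholder, not the real dataset path.

    from datasets import load_dataset

    # Hypothetical dataset id; substitute the real one.
    ds = load_dataset("your-org/code-edit-pairs", split="train")

    print(ds.column_names)  # expected: ['input', 'output']
    row = ds[0]
    # Each cell is a flattened source snippet; lengths fall in the ranges listed above.
    print(len(row["input"]), len(row["output"]))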
import argparse import os from typing import List from jina.parsers.helper import CastHostAction def api_to_dict(show_all_args: bool = False): """Convert Jina API to a dict :param show_all_args: if set, then hidden args are also exported :return: dict """ if show_all_args: from jina.parse...
import argparse import os from typing import List from jina.parsers.helper import CastHostAction def api_to_dict(show_all_args: bool = False): """Convert Jina API to a dict :param show_all_args: if set, then hidden args are also exported :return: dict """ if show_all_args: from jina.parse...
import inspect import re from typing import Dict, List, Tuple from huggingface_hub.utils import insecure_hashlib from .arrow import arrow from .audiofolder import audiofolder from .cache import cache from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parq...
import inspect import re from typing import Dict, List, Tuple from huggingface_hub.utils import insecure_hashlib from .arrow import arrow from .audiofolder import audiofolder from .cache import cache from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parq...
_base_ = './mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_6.4gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dic...
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_6.4gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dic...
import argparse import copy import os import re import sys import boto3 import botocore from metadata import AMI_ID, COMMON_STACK_PARAMS, STACK_PARAMS current_dir = os.path.dirname(__file__) sys.path.append(os.path.join(current_dir, "..")) from common_blocks.utils import create_or_update_stack, wait TEMPLATE_URL = ...
import argparse import copy import os import re import sys import boto3 import botocore from metadata import AMI_ID, COMMON_STACK_PARAMS, STACK_PARAMS current_dir = os.path.dirname(__file__) sys.path.append(os.path.join(current_dir, "..")) from common_blocks.utils import create_or_update_stack, wait TEMPLATE_URL = ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.backend.config import backend as backend from keras.src.backend.config import ( disable_flash_attention as disable_flash_attention, ) from keras.src.backend.config import ( en...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.backend.config import backend from keras.src.backend.config import disable_flash_attention from keras.src.backend.config import enable_flash_attention from keras.src.backend.config im...
import logging from fastapi import Request from backend.data import integrations from backend.data.model import APIKeyCredentials, Credentials from backend.integrations.providers import ProviderName from backend.integrations.webhooks._base import BaseWebhooksManager from backend.util.request import Requests logger =...
import logging import requests from fastapi import Request from backend.data import integrations from backend.data.model import APIKeyCredentials, Credentials from backend.integrations.providers import ProviderName from backend.integrations.webhooks._base import BaseWebhooksManager logger = logging.getLogger(__name_...
""" Feature agglomeration. Base classes and functions for performing feature agglomeration. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from scipy.sparse import issparse from ..base import TransformerMixin from ..utils.validation import check_is_fitted, vali...
""" Feature agglomeration. Base classes and functions for performing feature agglomeration. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from scipy.sparse import issparse from ..base import TransformerMixin from ..utils import metadata_routing from ..utils.de...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
import pytest import datasets import datasets.config # Import fixture modules as plugins pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def pytest_collection_modifyitems(config, items): # Mark tests as "unit" by default if not marked as "integration" (or already marked...
import pytest import datasets import datasets.config # Import fixture modules as plugins pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def pytest_collection_modifyitems(config, items): # Mark tests as "unit" by default if not marked as "integration" (or already marked...
from __future__ import annotations from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator from sentence_transformers.sparse_encoder.evaluation import ( SparseBinaryClassificationEvaluator, SparseEmbeddingSimilarityEvaluator, SparseInformationRetrievalEvaluator, SparseM...
from __future__ import annotations from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator from sentence_transformers.sparse_encoder.evaluation import ( SparseBinaryClassificationEvaluator, SparseEmbeddingSimilarityEvaluator, SparseInformationRetrievalEvaluator, SparseM...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' image_size = (1024, 1024) file_client_args = dict(backend='disk') # comment out the code below to use different file client # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # ...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' image_size = (1024, 1024) file_client_args = dict(backend='disk') # comment out the code below to use different file client # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # ...
from typing import Tuple, Iterator import pytest import requests import itertools from docarray import DocumentArray, Document def test_weaviate_hnsw(start_storage): da = DocumentArray( storage='weaviate', config={ 'n_dim': 100, 'ef': 100, 'ef_construction': 1...
import requests from docarray import DocumentArray def test_weaviate_hnsw(start_storage): da = DocumentArray( storage='weaviate', config={ 'n_dim': 100, 'ef': 100, 'ef_construction': 100, 'max_connections': 16, 'dynamic_ef_min': 50, ...
import os from functools import lru_cache from subprocess import CalledProcessError, run from typing import Optional, Union import numpy as np import torch import torch.nn.functional as F from .utils import exact_div # hard-coded audio hyperparameters SAMPLE_RATE = 16000 N_FFT = 400 N_MELS = 80 HOP_LENGTH = 160 CHUN...
import os from functools import lru_cache from subprocess import CalledProcessError, run from typing import Optional, Union import numpy as np import torch import torch.nn.functional as F from .utils import exact_div # hard-coded audio hyperparameters SAMPLE_RATE = 16000 N_FFT = 400 N_MELS = 80 HOP_LENGTH = 160 CHUN...
"""**Load** module helps with serialization and deserialization.""" from importlib import import_module from typing import TYPE_CHECKING if TYPE_CHECKING: from langchain_core.load.dump import dumpd, dumps from langchain_core.load.load import loads from langchain_core.load.serializable import Serializable ...
"""**Load** module helps with serialization and deserialization.""" from importlib import import_module from typing import TYPE_CHECKING if TYPE_CHECKING: from langchain_core.load.dump import dumpd, dumps from langchain_core.load.load import load, loads from langchain_core.load.serializable import Seriali...
__version__ = '0.18.1' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
__version__ = '0.18.0' import os from docarray.document import Document from docarray.array import DocumentArray from docarray.dataclasses import dataclass, field if 'DA_RICH_HANDLER' in os.environ: from rich.traceback import install install()
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Initialize the SPLADE model model = SparseEncoder("naver/sp...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Initialize the SPLADE model model = SparseEncoder("naver/sp...
import os import numpy as np import pytest from pydantic.tools import parse_obj_as, schema_json_of from docarray.base_doc.io.json import orjson_dumps from docarray.typing import Mesh3DUrl, NdArray from docarray.typing.url.mimetypes import ( OBJ_MIMETYPE, AUDIO_MIMETYPE, VIDEO_MIMETYPE, IMAGE_MIMETYPE,...
import numpy as np import pytest from pydantic.tools import parse_obj_as, schema_json_of from docarray.base_doc.io.json import orjson_dumps from docarray.typing import Mesh3DUrl, NdArray from tests import TOYDATA_DIR MESH_FILES = { 'obj': str(TOYDATA_DIR / 'tetrahedron.obj'), 'glb': str(TOYDATA_DIR / 'test.gl...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.datasets.fashion_mnist import load_data as load_data
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.datasets.fashion_mnist import load_data
import csv from contextlib import nullcontext from typing import Union, TextIO, Optional, Dict, TYPE_CHECKING, Type, Sequence import numpy as np if TYPE_CHECKING: # pragma: no cover from docarray.typing import T class CsvIOMixin: """CSV IO helper. can be applied to DA & DAM """ def save_embed...
import csv from contextlib import nullcontext from typing import Union, TextIO, Optional, Dict, TYPE_CHECKING, Type, Sequence import numpy as np if TYPE_CHECKING: from docarray.typing import T class CsvIOMixin: """CSV IO helper. can be applied to DA & DAM """ def save_embeddings_csv( s...
from __future__ import annotations from pathlib import Path from unittest.mock import Mock, PropertyMock import pytest import torch from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import InformationRetrievalEvaluator from sentence_transformers.util import cos_sim @pytest...
from __future__ import annotations from pathlib import Path from unittest.mock import Mock, PropertyMock import pytest import torch from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import InformationRetrievalEvaluator from sentence_transformers.util import cos_sim @pytest...
""" This script contains an example how to perform semantic search with OpenSearch. You need OpenSearch up and running locally: https://docs.opensearch.org/docs/latest/getting-started/quickstart/ Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level...
""" This script contains an example how to perform semantic search with OpenSearch. You need OpenSearch up and running locally: https://docs.opensearch.org/docs/latest/getting-started/quickstart/ Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level...
_base_ = [ '../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ]
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ]
import numpy as np import pytest from keras.src import testing from keras.src.layers.activations import softmax class SoftmaxTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_softmax(self): self.run_layer_test( softmax.Softmax, init_kwargs={}, ...
import numpy as np import pytest from keras.src import testing from keras.src.layers.activations import softmax class SoftmaxTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_softmax(self): self.run_layer_test( softmax.Softmax, init_kwargs={}, ...
import pytest from pydantic import Field from docarray import BaseDoc from docarray.index import ElasticV7DocIndex from tests.index.elastic.fixture import start_storage_v7 # noqa: F401 pytestmark = [pytest.mark.slow, pytest.mark.index] def test_column_config(): class MyDoc(BaseDoc): text: str c...
import pytest from pydantic import Field from docarray import BaseDoc from docarray.index import ElasticV7DocIndex from tests.index.elastic.fixture import start_storage_v7 # noqa: F401 pytestmark = [pytest.mark.slow, pytest.mark.index] def test_column_config(): class MyDoc(BaseDoc): text: str c...
import logging import random from datasets import load_dataset from sentence_transformers.sparse_encoder import ( MLMTransformer, SparseEncoder, SparseInformationRetrievalEvaluator, SpladePooling, ) logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INF...
import random from datasets import load_dataset from sentence_transformers.sparse_encoder import ( MLMTransformer, SparseEncoder, SparseInformationRetrievalEvaluator, SpladePooling, ) # Initialize the SPLADE model model_name = "naver/splade-cocondenser-ensembledistil" model = SparseEncoder( modul...
import warnings from typing import Optional, Tuple, TypeVar from docarray.typing import AudioNdArray from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.utils._internal.misc import is_notebook ...
import warnings from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union from docarray.typing import AudioNdArray from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.proto_register import _register_proto from docarray.typing.url.any_url import AnyUrl from docarray.typing.u...
from datetime import datetime, timedelta from langchain_core.exceptions import OutputParserException from langchain_core.output_parsers import BaseOutputParser from langchain_core.utils import comma_list class DatetimeOutputParser(BaseOutputParser[datetime]): """Parse the output of an LLM call to a datetime.""" ...
from datetime import datetime, timedelta from langchain_core.exceptions import OutputParserException from langchain_core.output_parsers import BaseOutputParser from langchain_core.utils import comma_list class DatetimeOutputParser(BaseOutputParser[datetime]): """Parse the output of an LLM call to a datetime.""" ...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig from .single_stage import SingleStageDetector @MODELS.register_module() class FSAF(SingleStageDetector): """Implementation of `FSAF <https://arxiv.org/abs/1903.00621>...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class FSAF(SingleStageDetector): """Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`...
# coding: utf-8 """Script for generating files with NuGet package metadata.""" import datetime import sys from pathlib import Path from shutil import copyfile if __name__ == "__main__": source = Path(sys.argv[1]) current_dir = Path(__file__).absolute().parent linux_folder_path = current_dir / "runtimes" / ...
# coding: utf-8 """Script for generating files with NuGet package metadata.""" import datetime import sys from pathlib import Path from shutil import copyfile if __name__ == "__main__": source = Path(sys.argv[1]) current_dir = Path(__file__).absolute().parent linux_folder_path = current_dir / "runtimes" / ...
"""Module to test base parser implementations.""" from typing_extensions import override from langchain_core.exceptions import OutputParserException from langchain_core.language_models import GenericFakeChatModel from langchain_core.messages import AIMessage from langchain_core.output_parsers import ( BaseGenerat...
"""Module to test base parser implementations.""" from typing_extensions import override from langchain_core.exceptions import OutputParserException from langchain_core.language_models import GenericFakeChatModel from langchain_core.messages import AIMessage from langchain_core.output_parsers import ( BaseGenerat...
"""Interface for tools.""" from typing import Optional from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain_core.tools import BaseTool, tool class InvalidTool(BaseTool): """Tool that is run when invalid tool name is encountered by agent.""" ...
"""Interface for tools.""" from typing import Optional from langchain_core.callbacks import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain_core.tools import BaseTool, tool class InvalidTool(BaseTool): # type: ignore[override] """Tool that is run when invalid tool name is ...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, List, Optional, Tuple, Type, Union import cv2 import matplotlib import numpy as np import torch def tensor2ndarray(value: Union[np.ndarray, torch.Tensor]) -> np.ndarray: """If the type of value is torch.Tensor, convert the value to np.ndarr...
# Copyright (c) OpenMMLab. All rights reserved. from typing import Any, List, Tuple, Type, Union import numpy as np import torch def tensor2ndarray(value: Union[np.ndarray, torch.Tensor]) -> np.ndarray: """If the type of value is torch.Tensor, convert the value to np.ndarray. Args: value (np.ndarray...
_base_ = './fast-rcnn_r50-caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
_base_ = './fast_rcnn_r50_caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
from typing import Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.messages import BaseMessage from langchain_core.outputs import ChatGeneration, Generation from langchain.agents.agent import MultiActionAgentOutputParser from langchain.agents.output_parsers.tools import ( Tool...
from typing import List, Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.messages import BaseMessage from langchain_core.outputs import ChatGeneration, Generation from langchain.agents.agent import MultiActionAgentOutputParser from langchain.agents.output_parsers.tools import ( ...
__version__ = '0.33.1' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.utils._internal.misc import _get_path_from_docarray_root_level __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler()...
__version__ = '0.33.0' import logging from docarray.array import DocList, DocVec from docarray.base_doc.doc import BaseDoc from docarray.utils._internal.misc import _get_path_from_docarray_root_level __all__ = ['BaseDoc', 'DocList', 'DocVec'] logger = logging.getLogger('docarray') handler = logging.StreamHandler()...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_document import BaseDocument from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding T = TypeVar('T', bound='Text') class Text(BaseDocument): """ Document for handling text. It can contain a T...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_document import BaseDocument from docarray.typing import TextUrl from docarray.typing.tensor.embedding import AnyEmbedding T = TypeVar('T', bound='Text') class Text(BaseDocument): """ Document for handling text. It can contain a T...
""" Example of training with Dask on CPU ==================================== """ from dask import array as da from dask.distributed import Client, LocalCluster from xgboost import dask as dxgb from xgboost.dask import DaskDMatrix def main(client): # generate some random data for demonstration m = 100000 ...
""" Example of training with Dask on CPU ==================================== """ from dask import array as da from dask.distributed import Client, LocalCluster from xgboost import dask as dxgb from xgboost.dask import DaskDMatrix def main(client): # generate some random data for demonstration m = 100000 ...
import unittest import torch from transformers import AutoTokenizer, Gemma2Config, Gemma2Model from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, Lumina2Pipeline, Lumina2Transformer2DModel, ) from ..test_pipelines_common import PipelineTesterMixin class Lumina2PipelineFastTests...
import unittest import torch from transformers import AutoTokenizer, Gemma2Config, Gemma2Model from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, Lumina2Pipeline, Lumina2Text2ImgPipeline, Lumina2Transformer2DModel, ) from diffusers.utils.testing_utils import torch_device from...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.utils.dl_utils import TORCH_VERSION from mmengine.utils.version_utils import digit_version from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage, MomentumAnnealingEMA, StochasticWeightAverage) from .base_model ...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.utils.dl_utils import TORCH_VERSION from mmengine.utils.version_utils import digit_version from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage, MomentumAnnealingEMA, StochasticWeightAverage) from .base_model ...
import itertools from parameterized import parameterized from torchaudio.backend import sox_io_backend from torchaudio_unittest.common_utils import get_wav_data, PytorchTestCase, skipIfNoExec, skipIfNoSox, TempDirMixin from .common import get_enc_params, name_func @skipIfNoExec("sox") @skipIfNoSox class TestRoundTr...
import itertools from parameterized import parameterized from torchaudio.backend import sox_io_backend from torchaudio_unittest.common_utils import ( get_wav_data, PytorchTestCase, skipIfNoExec, skipIfNoSox, TempDirMixin, ) from .common import get_enc_params, name_func @skipIfNoExec("sox") @skip...
""" This application demonstrates how to find duplicate questions (paraphrases) in a long list of sentences. """ from sentence_transformers import SentenceTransformer, util # Questions can be a long list of sentences up to 100k sentences or more. # For demonstration purposes, we limit it to a few questions which all ...
""" This application demonstrates how to find duplicate questions (paraphrases) in a long list of sentences. """ from sentence_transformers import SentenceTransformer, util # Questions can be a long list of sentences up to 100k sentences or more. # For demonstration purposes, we limit it to a few questions which all ...
""" Use scikit-learn regressor interface with CPU histogram tree method =================================================================== """ from dask import array as da from dask.distributed import Client, LocalCluster from xgboost import dask as dxgb def main(client: Client) -> dxgb.Booster: # generate som...
""" Use scikit-learn regressor interface with CPU histogram tree method =================================================================== """ from dask import array as da from dask.distributed import Client, LocalCluster from xgboost import dask as dxgb def main(client): # generate some random data for demons...
from unittest.mock import mock_open, patch from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import serialization from llama_index.llms.cortex.utils import ( generate_sf_jwt, is_spcs_environment, get_spcs_base_url, get_default_spcs_token, SPCS_TOKEN_PATH...
from unittest.mock import mock_open, patch from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import serialization from llama_index.llms.cortex.utils import generate_sf_jwt def test_generate_sf_jwt(): sf_account = "MY_SNOWFLAKE_ORG-MY_SNOWFLAKE_ACCOUNT" sf_user = "...
from keras.src.backend.config import backend if backend() == "torch": # When using the torch backend, # torch needs to be imported first, otherwise it will segfault # upon import. import torch from keras.src.api_export import keras_export from keras.src.backend.common.dtypes import result_type from ke...
from keras.src.backend.config import backend if backend() == "torch": # When using the torch backend, # torch needs to be imported first, otherwise it will segfault # upon import. import torch from keras.src.backend.common.dtypes import result_type from keras.src.backend.common.keras_tensor import Ker...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp from mmcv import Config def parse_args(): parser = argparse.ArgumentParser( description='Convert benchmark model list to script') parser.add_argument('config', help='test config file path') parser.add_...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp from mmcv import Config def parse_args(): parser = argparse.ArgumentParser( description='Convert benchmark model list to script') parser.add_argument('config', help='test config file path') parser.add_...
import os import sys import cognee import pytest from llama_index.core import Document from llama_index.graph_rag.cognee import CogneeGraphRAG @pytest.mark.skipif( sys.version_info < (3, 10), reason="mock strategy requires python3.10 or higher" ) @pytest.mark.skipif( os.getenv("OPENAI_API_KEY") is None, ...
import os import asyncio import cognee import pytest from llama_index.core import Document from llama_index.graph_rag.cognee import CogneeGraphRAG @pytest.mark.skipif( os.getenv("OPENAI_API_KEY") is None, reason="OPENAI_API_KEY not available to test Cognee integration", ) @pytest.mark.asyncio() async def tes...
from typing import Dict, Iterable, Sequence from docarray import Document from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin from docarray.array.storage.base.helper import Offset2ID class GetSetDelMixin(BaseGetSetDelMixin): """Provide concrete implementation for ``__getitem__``, ``__setitem__``...
from typing import Dict, Iterable, Sequence from docarray import Document from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin from docarray.array.storage.base.helper import Offset2ID class GetSetDelMixin(BaseGetSetDelMixin): """Provide concrete implementation for ``__getitem__``, ``__setitem__``...
_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals))) # augmentation strategy originates from DETR. train_pipeline = [ dict(type='LoadImageFr...
_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals))) # augmentation strategy originates from DETR. train_pipeline = [ dict( type='Lo...
from pathlib import Path from typing import Callable import numpy as np import pytest import torchaudio from jina import Document, DocumentArray from ..vad_speech_segmenter import VADSpeechSegmenter @pytest.fixture(scope='module') def segmenter(tmpdir_factory) -> 'VADSpeechSegmenter': workspace = tmpdir_factory...
import pytest import os from typing import Callable from pathlib import Path from jina import Document, DocumentArray import numpy as np import torchaudio from ..vad_speech_segmenter import VADSpeechSegmenter @pytest.fixture(scope='module') def segmenter(tmpdir_factory) -> 'VADSpeechSegmenter': workspace = tmpd...
from typing import TYPE_CHECKING, Any, Dict, Type from docarray.proto import DocumentProto, NdArrayProto, NodeProto from docarray.typing import Tensor from ..abstract_document import AbstractDocument from ..base_node import BaseNode class ProtoMixin(AbstractDocument, BaseNode): @classmethod def _get_nested_...
from typing import TYPE_CHECKING, Any, Dict, Type from docarray.proto import DocumentProto, NdArrayProto, NodeProto from docarray.proto.io import flush_ndarray, read_ndarray from docarray.typing import Tensor from ..abstract_document import AbstractDocument from ..base_node import BaseNode class ProtoMixin(Abstract...
"""Run smoke tests""" import sys from pathlib import Path import torch import torchvision from torchvision.io import decode_jpeg, read_file, read_image from torchvision.models import resnet50, ResNet50_Weights SCRIPT_DIR = Path(__file__).parent def smoke_test_torchvision() -> None: print( "Is torchvisi...
"""Run smoke tests""" import sys from pathlib import Path import torch import torchvision from torchvision.io import decode_jpeg, read_file, read_image from torchvision.models import resnet50, ResNet50_Weights SCRIPT_DIR = Path(__file__).parent def smoke_test_torchvision() -> None: print( "Is torchvisi...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_document import BaseDocument from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces from docarray.typing.tensor.embedding import AnyEmbedding from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl T = TypeVar('T', bound='Mes...
from typing import Any, Optional, Type, TypeVar, Union from docarray.base_document import BaseDocument from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces from docarray.typing.tensor.embedding import AnyEmbedding from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl T = TypeVar('T', bound='Mes...
""" =================================== Examples of Using `FrozenEstimator` =================================== This example showcases some use cases of :class:`~sklearn.frozen.FrozenEstimator`. :class:`~sklearn.frozen.FrozenEstimator` is a utility class that allows to freeze a fitted estimator. This is useful, for i...
""" =================================== Examples of Using `FrozenEstimator` =================================== This examples showcases some use cases of :class:`~sklearn.frozen.FrozenEstimator`. :class:`~sklearn.frozen.FrozenEstimator` is a utility class that allows to freeze a fitted estimator. This is useful, for ...
""" This module provides dynamic access to deprecated JSON tools in LangChain. It ensures backward compatibility by forwarding references such as `JsonGetValueTool`, `JsonListKeysTool`, and `JsonSpec` to their updated locations within the `langchain_community.tools` namespace. This setup allows legacy code to continu...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.tools import JsonGetValueTool, JsonListKeysTool from langchain_community.tools.json.tool import JsonSpec # Create a way to dynamically look up deprecated imports. # Used to consolidate ...
# ruff: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICE...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# Copyright (c) OpenMMLab. All rights reserved. import argparse from mmengine import Config, DictAction from mmdet.utils import replace_cfg_vals, update_data_root def parse_args(): parser = argparse.ArgumentParser(description='Print the whole config') parser.add_argument('config', help='config file path') ...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import warnings from mmcv import Config, DictAction from mmdet.utils import replace_cfg_vals, update_data_root def parse_args(): parser = argparse.ArgumentParser(description='Print the whole config') parser.add_argument('config', help='config f...
from dataclasses import dataclass, field from typing import Any, Callable, Dict, List import torch @dataclass class SentenceTransformerDataCollator: """Collator for a SentenceTransformers model. This encodes the text columns to {column}_input_ids and {column}_attention_mask columns. This works with the t...
from dataclasses import dataclass, field from typing import Any, Callable, Dict, List import torch @dataclass class SentenceTransformerDataCollator: """Collator for a SentenceTransformers model. This encodes the text columns to {column}_input_ids and {column}_attention_mask columns. This works with the t...
# Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmdet.models.plugins import DropBlock def test_dropblock(): feat = torch.rand(1, 1, 11, 11) drop_prob = 1.0 dropblock = DropBlock(drop_prob, block_size=11, warmup_iters=0) out_feat = dropblock(feat) assert (out_feat =...
import pytest import torch from mmdet.models.plugins import DropBlock def test_dropblock(): feat = torch.rand(1, 1, 11, 11) drop_prob = 1.0 dropblock = DropBlock(drop_prob, block_size=11, warmup_iters=0) out_feat = dropblock(feat) assert (out_feat == 0).all() and out_feat.shape == feat.shape ...
import pickle from dataclasses import dataclass from io import BufferedIOBase from typing import Any import torch import torch._weights_only_unpickler as _weights_only_unpickler from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION __all__: list[str] = [] @dataclass class _Entry: key: st...
import pickle from dataclasses import dataclass from io import BufferedIOBase from typing import Any import torch import torch._weights_only_unpickler as _weights_only_unpickler from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION __all__: list[str] = [] @dataclass class _Entry: key: st...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
from abc import abstractmethod from typing import Iterator, Iterable, MutableSequence from docarray import Document class BaseSequenceLikeMixin(MutableSequence[Document]): """Implement sequence-like methods""" def insert(self, index: int, value: 'Document'): """Insert `doc` at `index`. :par...
from abc import abstractmethod from typing import Iterator, Iterable, MutableSequence from .... import Document class BaseSequenceLikeMixin(MutableSequence[Document]): """Implement sequence-like methods""" def insert(self, index: int, value: 'Document'): """Insert `doc` at `index`. :param i...
from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class AbstractDatasetReader(ABC): def __init__( self, path_or_paths: ...
from abc import ABC, abstractmethod from typing import Optional, Union from .. import DatasetDict, Features, NamedSplit from ..arrow_dataset import Dataset from ..utils.typing import NestedDataStructureLike, PathLike class AbstractDatasetReader(ABC): def __init__( self, path_or_paths: NestedDataS...
from parameterized import parameterized from torchaudio import sox_effects from torchaudio_unittest.common_utils import ( get_sinusoid, get_wav_data, save_wav, skipIfNoSox, TempDirMixin, TorchaudioTestCase, ) from .common import load_params @skipIfNoSox class SmokeTest(TempDirMixin, Torchaudi...
from parameterized import parameterized from torchaudio import sox_effects from torchaudio_unittest.common_utils import ( TempDirMixin, TorchaudioTestCase, skipIfNoSox, get_wav_data, get_sinusoid, save_wav, ) from .common import ( load_params, ) @skipIfNoSox class SmokeTest(TempDirMixin, ...
import types from keras.src.activations.activations import celu from keras.src.activations.activations import elu from keras.src.activations.activations import exponential from keras.src.activations.activations import gelu from keras.src.activations.activations import glu from keras.src.activations.activations import ...
import types from keras.src.activations.activations import celu from keras.src.activations.activations import elu from keras.src.activations.activations import exponential from keras.src.activations.activations import gelu from keras.src.activations.activations import glu from keras.src.activations.activations import ...
"""Base interface class for storing chat history per user.""" import asyncio from abc import abstractmethod from typing import List, Optional from llama_index.core.llms import ChatMessage from llama_index.core.schema import BaseComponent class BaseChatStore(BaseComponent): @classmethod def class_name(cls) -...
"""Base interface class for storing chat history per user.""" import asyncio from abc import abstractmethod from typing import List, Optional from llama_index.core.llms import ChatMessage from llama_index.core.schema import BaseComponent class BaseChatStore(BaseComponent): @classmethod def class_name(cls) ->...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import mmcv from mmcv import Config, DictAction from mmdet.datasets import build_dataset def parse_args(): parser = argparse.ArgumentParser(description='Evaluate metric of the ' 'results saved in pkl format') ...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import mmcv from mmcv import Config, DictAction from mmdet.datasets import build_dataset def parse_args(): parser = argparse.ArgumentParser(description='Evaluate metric of the ' 'results saved in pkl format') ...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
import dataclasses from typing import Any, Dict, Optional, Type from jina.jaml.parsers.base import BaseLegacyParser from jina.serve.executors import BaseExecutor from jina.serve.executors.metas import get_default_metas class ExecutorLegacyParser(BaseLegacyParser): """Legacy parser for executor.""" def parse...
import dataclasses from typing import Any, Dict, Optional, Type from jina.jaml.parsers.base import BaseLegacyParser from jina.serve.executors import BaseExecutor from jina.serve.executors.metas import get_default_metas class ExecutorLegacyParser(BaseLegacyParser): """Legacy parser for executor.""" def parse...
import pytest from docarray import DocumentArray from docarray.array.opensearch import DocumentArrayOpenSearch, OpenSearchConfig from docarray.array.qdrant import DocumentArrayQdrant from docarray.array.sqlite import DocumentArraySqlite from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig from docarr...
import pytest from docarray import DocumentArray from docarray.array.qdrant import DocumentArrayQdrant from docarray.array.sqlite import DocumentArraySqlite from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig from docarray.array.storage.qdrant import QdrantConfig from docarray.array.storage.weaviate...
"""Test EmbaasEmbeddings embeddings""" import pytest from pydantic import SecretStr from pytest import CaptureFixture from langchain_community.embeddings import PremAIEmbeddings @pytest.mark.requires("premai") def test_api_key_is_string() -> None: llm = PremAIEmbeddings( # type: ignore[call-arg] premai...
"""Test EmbaasEmbeddings embeddings""" import pytest from pydantic import SecretStr from pytest import CaptureFixture from langchain_community.embeddings import PremAIEmbeddings @pytest.mark.requires("premai") def test_api_key_is_string() -> None: llm = PremAIEmbeddings( # type: ignore[call-arg] premai...
class MissingConfigError(Exception): """The attempted operation requires configuration which is not available""" class NotFoundError(ValueError): """The requested record was not found, resulting in an error condition""" class NeedConfirmation(Exception): """The user must explicitly confirm that they wan...
class MissingConfigError(Exception): """The attempted operation requires configuration which is not available""" class NeedConfirmation(Exception): """The user must explicitly confirm that they want to proceed""" class InsufficientBalanceError(ValueError): user_id: str message: str balance: floa...
from google.protobuf import __version__ as __pb__version__ from jina._docarray import docarray_v2 as is_docarray_v2 if __pb__version__.startswith('4'): if is_docarray_v2: from jina.proto.docarray_v2.pb.jina_pb2 import * else: from jina.proto.docarray_v1.pb.jina_pb2 import * else: if is_do...
from google.protobuf import __version__ as __pb__version__ from jina._docarray import docarray_v2 as is_docarray_v2 if __pb__version__.startswith('4'): if is_docarray_v2: from .docarray_v2.pb.jina_pb2 import * else: from .docarray_v1.pb.jina_pb2 import * else: if is_docarray_v2: f...
import types from typing import TYPE_CHECKING from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.typing.tensor.audio.audio_tensor import AudioTensor from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: from docar...
import types from typing import TYPE_CHECKING from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray from docarray.typing.tensor.audio.audio_tensor import AudioTensor from docarray.utils._internal.misc import ( _get_path_from_docarray_root_level, import_library, ) if TYPE_CHECKING: from docar...
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Dict, Iterable, Optional import torch from jina import DocumentArray, Executor, requests from jina_commons.batching import get_docs_batch_generator from sentence_transformers import SentenceTr...
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Optional, Dict, List, Tuple from jina import Executor, DocumentArray, requests from sentence_transformers import SentenceTransformer from jina_commons.batching import get_docs_batch_generator ...
# flake8: noqa import os # Set backend env to torch os.environ["KERAS_BACKEND"] = "torch" import torch import torch.nn as nn import torch.optim as optim from keras import layers import keras import numpy as np # Model / data parameters num_classes = 10 input_shape = (28, 28, 1) learning_rate = 0.01 batch_size = 64 n...
# flake8: noqa import os # Set backend env to torch os.environ["KERAS_BACKEND"] = "torch" import torch import torch.nn as nn import torch.optim as optim from keras import layers import keras import numpy as np # Model / data parameters num_classes = 10 input_shape = (28, 28, 1) learning_rate = 0.01 batch_size = 64 n...
def _python_type_to_schema_type(p): if p == 'str': dtype = 'string' elif p == 'int' or p == 'float': dtype = 'number' elif p in {'typing.List[str]', 'typing.Tuple[str]', 'list', 'tuple'}: dtype = 'array' elif p == 'bool': dtype = 'boolean' elif p == 'dict': dt...
def _python_type_to_schema_type(p): if p == 'str': dtype = 'string' elif p == 'int' or p == 'float': dtype = 'number' elif p in {'typing.List[str]', 'typing.Tuple[str]', 'list', 'tuple'}: dtype = 'array' elif p == 'bool': dtype = 'boolean' elif p == 'dict': dt...
import logging from backend.data import db from backend.data.credit import UsageTransactionMetadata, get_user_credit_model from backend.data.execution import ( create_graph_execution, get_graph_execution, get_incomplete_node_executions, get_latest_node_execution, get_node_execution_results, upd...
import logging from backend.data import db from backend.data.credit import UsageTransactionMetadata, get_user_credit_model from backend.data.execution import ( create_graph_execution, get_graph_execution, get_incomplete_node_executions, get_latest_node_execution, get_node_execution_results, upd...
import os from typing import Any, Optional, Dict from llama_index.llms.openai_like import OpenAILike class Databricks(OpenAILike): """ Databricks LLM. Examples: `pip install llama-index-llms-databricks` ```python from llama_index.llms.databricks import Databricks # Set ...
import os from typing import Any, Optional, Dict from llama_index.llms.openai_like import OpenAILike class Databricks(OpenAILike): """Databricks LLM. Examples: `pip install llama-index-llms-databricks` ```python from llama_index.llms.databricks import Databricks # Set up th...
from typing import Optional import numpy as np import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray import BaseDoc from docarray.base_doc.io.json import orjson_dumps from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl from docarray.utils._internal.misc import...
from typing import Optional import numpy as np import pytest import torch from pydantic.tools import parse_obj_as, schema_json_of from docarray import BaseDoc from docarray.base_doc.io.json import orjson_dumps from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl from docarray.utils._internal.misc import...
_base_ = './rtmdet_l_8xb32-300e_coco.py' model = dict( bbox_head=dict( _delete_=True, type='RTMDetInsSepBNHead', num_classes=80, in_channels=256, stacked_convs=2, share_conv=True, pred_kernel_size=1, feat_channels=256, act_cfg=dict(type='SiLU',...
_base_ = './rtmdet_l_8xb32-300e_coco.py' model = dict( bbox_head=dict( _delete_=True, type='RTMDetInsSepBNHead', num_classes=80, in_channels=256, stacked_convs=2, share_conv=True, pred_kernel_size=1, feat_channels=256, act_cfg=dict(type='SiLU',...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.chat_models.pai_eas_endpoint import PaiEasChatEndpoint # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling ...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.chat_models.pai_eas_endpoint import PaiEasChatEndpoint # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling ...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
_base_ = './fast-rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './fast_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
# Copyright (c) OpenMMLab. All rights reserved. import pytest from mmdet.models.backbones import DetectoRS_ResNet def test_detectorrs_resnet_backbone(): detectorrs_cfg = dict( depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requi...
import pytest from mmdet.models.backbones import DetectoRS_ResNet def test_detectorrs_resnet_backbone(): detectorrs_cfg = dict( depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, ...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser...
import logging from datasets import load_dataset from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator logging.basicConfig(format="%(message)s", level=logging.INFO) # Load a model model = SparseEncoder("naver/splade-cocondenser...
from __future__ import annotations import logging from datasets import load_dataset from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction from sentence_transformers.models import Pooling, Transformer from sentence_transformers.sparse_encoder import SparseEncoder from sentence_transform...
from __future__ import annotations import logging from datasets import load_dataset from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction from sentence_transformers.models import Pooling, Transformer from sentence_transformers.sparse_encoder import SparseEncoder from sentence_transform...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '3.0.0' short_version = __version__ def parse_version_info(version_str): """Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is parsed...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '3.0.0rc6' short_version = __version__ def parse_version_info(version_str): """Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is par...
from __future__ import annotations __version__ = "4.1.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from sentence_transformers.backend import ( export_dynamic_quantized_onnx_model, export_optimized_onnx_model, export_static_quantized_openvino_model, ) from senten...
from __future__ import annotations __version__ = "4.1.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" import importlib import os from sentence_transformers.backend import ( export_dynamic_quantized_onnx_model, export_optimized_onnx_model, export_static_quantized_openvino_model, ) from senten...
from enum import Enum from typing import Iterable, Dict import torch.nn.functional as F from torch import nn, Tensor from sentence_transformers.SentenceTransformer import SentenceTransformer class SiameseDistanceMetric(Enum): """ The metric for the contrastive loss """ EUCLIDEAN = lambda x, y: F.pair...
from enum import Enum from typing import Iterable, Dict import torch.nn.functional as F from torch import nn, Tensor from sentence_transformers.SentenceTransformer import SentenceTransformer class SiameseDistanceMetric(Enum): """ The metric for the contrastive loss """ EUCLIDEAN = lambda x, y: F.pair...
""" Example of using callbacks with Dask ==================================== """ import numpy as np from dask.distributed import Client, LocalCluster from dask_ml.datasets import make_regression from dask_ml.model_selection import train_test_split import xgboost as xgb import xgboost.dask as dxgb from xgboost.dask im...
""" Example of using callbacks with Dask ==================================== """ import numpy as np from dask.distributed import Client, LocalCluster from dask_ml.datasets import make_regression from dask_ml.model_selection import train_test_split import xgboost as xgb from xgboost.dask import DaskDMatrix def proba...
import torch from torchvision import tv_tensors from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_kernel_internal def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor: """[BETA] See :class:`~torchvision.transforms.v2.UniformTemporalSubs...
import torch from torchvision import datapoints from torchvision.utils import _log_api_usage_once from ._utils import _get_kernel, _register_kernel_internal def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor: """[BETA] See :class:`~torchvision.transforms.v2.UniformTemporalSubs...
""" Tests the correct computation of evaluation scores from TripletEvaluator """ from __future__ import annotations from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import TripletEvaluator def test_TripletEvaluator(stsb_bert_tiny_model: SentenceTransformer) -> None: ""...
""" Tests the correct computation of evaluation scores from TripletEvaluator """ from __future__ import annotations from sentence_transformers import SentenceTransformer from sentence_transformers.evaluation import TripletEvaluator def test_TripletEvaluator(stsb_bert_tiny_model_reused: SentenceTransformer) -> None:...
"""Tests related to the `DataIter` interface.""" import numpy as np import xgboost from xgboost import testing as tm def run_mixed_sparsity(device: str) -> None: """Check QDM with mixed batches.""" X_0, y_0, _ = tm.make_regression(128, 16, False) if device.startswith("cuda"): X_1, y_1 = tm.make_...
"""Tests related to the `DataIter` interface.""" import numpy as np import xgboost from xgboost import testing as tm def run_mixed_sparsity(device: str) -> None: """Check QDM with mixed batches.""" X_0, y_0, _ = tm.make_regression(128, 16, False) if device.startswith("cuda"): X_1, y_1 = tm.make_...
import numpy as np from pydantic.tools import parse_obj_as, schema_json_of from docarray.document.io.json import orjson_dumps from docarray.typing import Embedding def test_proto_embedding(): embedding = parse_obj_as(Embedding, np.zeros((3, 224, 224))) embedding._to_node_protobuf() def test_json_schema()...
import numpy as np from pydantic.tools import parse_obj_as from docarray.typing import Embedding def test_proto_embedding(): uri = parse_obj_as(Embedding, np.zeros((3, 224, 224))) uri._to_node_protobuf()
import pathlib from argparse import ArgumentParser def main(args): wheel_path = pathlib.Path(args.wheel_path).expanduser().resolve() if not wheel_path.exists(): raise ValueError(f"Wheel cannot be found at path {wheel_path}") if not wheel_path.is_file(): raise ValueError(f"Path {wheel_path}...
import os import sys from test_utils import DirectoryExcursion if len(sys.argv) != 4: print("Usage: {} [wheel to rename] [commit id] [platform tag]".format(sys.argv[0])) sys.exit(1) whl_path = sys.argv[1] commit_id = sys.argv[2] platform_tag = sys.argv[3] dirname, basename = os.path.dirname(whl_path), os.p...
from typing import Type from .document import BaseDocument class AnyDocument(BaseDocument): """ AnyDocument is a Document that is not tied to any schema """ def __init__(self, **kwargs): super().__init__() self.__dict__.update(kwargs) @classmethod def _get_field_type(cls, fi...
from typing import Type from .document import BaseDocument class AnyDocument(BaseDocument): """ AnyDocument is a Document that is not tied to any schema """ def __init__(self, **kwargs): super().__init__() self.__dict__.update(kwargs) @classmethod def _get_nested_document_cl...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from mmdet.datasets import CrowdHumanDataset class TestCrowdHumanDataset(unittest.TestCase): def test_crowdhuman_init(self): dataset = CrowdHumanDataset( data_root='tests/data/crowdhuman_dataset/', ann_file='test_ann...
# Copyright (c) OpenMMLab. All rights reserved. import unittest from mmdet.datasets import CrowdHumanDataset class TestCrowdHumanDataset(unittest.TestCase): def test_crowdhuman_init(self): dataset = CrowdHumanDataset( data_root='tests/data/crowdhuman_dataset/', ann_file='test_ann...
from llama_index.core.base.llms.base import BaseLLM from llama_index.llms.cleanlab import CleanlabTLM from llama_index.llms.cleanlab.base import DEFAULT_MODEL, DEFAULT_MAX_TOKENS def test_llms_cleanlab(): names_of_base_classes = [b.__name__ for b in CleanlabTLM.__mro__] assert BaseLLM.__name__ in names_of_bas...
from llama_index.core.base.llms.base import BaseLLM from llama_index.llms.cleanlab import CleanlabTLM def test_llms_cleanlab(): names_of_base_classes = [b.__name__ for b in CleanlabTLM.__mro__] assert BaseLLM.__name__ in names_of_base_classes
from typing import Any, Optional, Sequence from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType from tonic_validate.metrics.answer_consistency_binary_metric import ( AnswerConsistencyBinaryMetric, ) from tonic_valid...
from typing import Any, Optional, Sequence from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType from tonic_validate.metrics.answer_consistency_binary_metric import ( AnswerConsistencyBinaryMetric, ) from tonic_valid...
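Because each pair reads as two revisions of the same file, a unified diff makes the edit easy to see. A standard-library sketch: the toy strings below stand in for one row's full cells (the real cells come from the dataset loaded above, not this truncated preview), and the version strings merely echo one of the pairs shown earlier.

    import difflib

    # Toy stand-ins for one row's "input" and "output" cells.
    input_code = "__version__ = '0.18.1'\nimport os\n"
    output_code = "__version__ = '0.18.0'\nimport os\n"

    # Compare the paired snippets line by line and print a unified diff.
    diff = difflib.unified_diff(
        input_code.splitlines(),
        output_code.splitlines(),
        fromfile="input",
        tofile="output",
        lineterm="",
    )
    print("\n".join(diff))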