Inject SDK-side flattens while handling input/output coder mismatch in flattens. #28043

GitHub Actions / Test Results failed Apr 16, 2025 in 0s

1 fail, 74 skipped, 411 pass in 4m 27s

  2 files ±0    2 suites ±0    4m 27s ⏱️ (-1h 19m 16s)
486 tests (-7,593):  411 ✅ (-6,514)   74 💤 (-1,024)   1 ❌ (-55)
490 runs  (-7,609):  411 ✅ (-6,514)   78 💤 (-1,040)   1 ❌ (-55)

Results for commit 52827b7. ± Comparison against earlier commit 369859b.
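
For context on the change under test: the PR injects SDK-side flattens when a Flatten's input and output coders disagree. The snippet below is a minimal, illustrative Beam Python pipeline (not code from the PR) showing the kind of Flatten where the input branches have different element types, so the runner may have to reconcile the flatten's input and output coders.

    import apache_beam as beam

    # Illustrative only: two PCollections with different element types are
    # merged with Flatten. Depending on how coders are inferred for each
    # branch, the Flatten's inputs and its output may end up with different
    # coders, which is the mismatch the PR title refers to.
    with beam.Pipeline() as p:
      ints = p | 'CreateInts' >> beam.Create([1, 2, 3])
      strs = p | 'CreateStrs' >> beam.Create(['a', 'b', 'c'])
      merged = (ints, strs) | 'MergeBranches' >> beam.Flatten()
      merged | 'Print' >> beam.Map(print)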

Annotations

Check warning on line 0 in apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest

test_sentence_transformer_image_embeddings (apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest) failed

sdks/python/test-suites/tox/py310/build/srcs/sdks/python/pytest_py310-ml_no_xdist.xml [took 11s]
Raw output
OSError: sentence-transformers/clip-ViT-B-32 does not appear to have a file named pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt or flax_model.msgpack. [while running 'MLTransform/RunInference/BeamML_RunInference']
>   lifecycle_method()

apache_beam/runners/common.py:1559: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/runners/common.py:601: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/sentence_transformers/SentenceTransformer.py:321: in __init__
    modules = self._load_auto_model(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/sentence_transformers/SentenceTransformer.py:1606: in _load_auto_model
    transformer_model = Transformer(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/transformers/modeling_utils.py:4260: in from_pretrained
    checkpoint_files, sharded_metadata = _get_resolved_checkpoint_files(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

pretrained_model_name_or_path = 'sentence-transformers/clip-ViT-B-32'
subfolder = '', variant = None, gguf_file = None, from_tf = False
from_flax = False, use_safetensors = None, cache_dir = None
force_download = False, proxies = None, local_files_only = False, token = None
user_agent = {'file_type': 'model', 'framework': 'pytorch', 'from_auto_class': True}
revision = None, commit_hash = None

    def _get_resolved_checkpoint_files(
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
        subfolder: str,
        variant: Optional[str],
        gguf_file: Optional[str],
        from_tf: bool,
        from_flax: bool,
        use_safetensors: bool,
        cache_dir: str,
        force_download: bool,
        proxies: Optional[Dict[str, str]],
        local_files_only: bool,
        token: Optional[Union[str, bool]],
        user_agent: dict,
        revision: str,
        commit_hash: Optional[str],
    ) -> Tuple[Optional[List[str]], Optional[Dict]]:
        """Get all the checkpoint filenames based on `pretrained_model_name_or_path`, and optional metadata if the
        checkpoints are sharded.
        This function will download the data if necesary.
        """
        is_sharded = False
    
        if pretrained_model_name_or_path is not None and gguf_file is None:
            pretrained_model_name_or_path = str(pretrained_model_name_or_path)
            is_local = os.path.isdir(pretrained_model_name_or_path)
            if is_local:
                if from_tf and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index")
                ):
                    # Load from a TF 1.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index")
                elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)
                elif from_flax and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
                ):
                    # Load from a Flax checkpoint in priority if from_flax
                    archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
                elif use_safetensors is not False and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant))
                ):
                    # Load from a safetensors checkpoint
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)
                    )
                elif use_safetensors is not False and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant))
                ):
                    # Load from a sharded safetensors checkpoint
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)
                    )
                    is_sharded = True
                elif not use_safetensors and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant))
                ):
                    # Load from a PyTorch checkpoint
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)
                    )
                elif not use_safetensors and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant))
                ):
                    # Load from a sharded PyTorch checkpoint
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)
                    )
                    is_sharded = True
                # At this stage we don't have a weight file so we will raise an error.
                elif not use_safetensors and (
                    os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index"))
                    or os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME))
                ):
                    raise EnvironmentError(
                        f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory"
                        f" {pretrained_model_name_or_path} but there is a file for TensorFlow weights. Use"
                        " `from_tf=True` to load this model from those weights."
                    )
                elif not use_safetensors and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
                ):
                    raise EnvironmentError(
                        f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory"
                        f" {pretrained_model_name_or_path} but there is a file for Flax weights. Use `from_flax=True`"
                        " to load this model from those weights."
                    )
                elif use_safetensors:
                    raise EnvironmentError(
                        f"Error no file named {_add_variant(SAFE_WEIGHTS_NAME, variant)} found in directory"
                        f" {pretrained_model_name_or_path}."
                    )
                else:
                    raise EnvironmentError(
                        f"Error no file named {_add_variant(WEIGHTS_NAME, variant)}, {_add_variant(SAFE_WEIGHTS_NAME, variant)},"
                        f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME + '.index'} or {FLAX_WEIGHTS_NAME} found in directory"
                        f" {pretrained_model_name_or_path}."
                    )
            elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
                archive_file = pretrained_model_name_or_path
                is_local = True
            elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path + ".index")):
                if not from_tf:
                    raise ValueError(
                        f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set "
                        "from_tf to True to load from this checkpoint."
                    )
                archive_file = os.path.join(subfolder, pretrained_model_name_or_path + ".index")
                is_local = True
            elif is_remote_url(pretrained_model_name_or_path):
                filename = pretrained_model_name_or_path
                resolved_archive_file = download_url(pretrained_model_name_or_path)
            else:
                # set correct filename
                if from_tf:
                    filename = TF2_WEIGHTS_NAME
                elif from_flax:
                    filename = FLAX_WEIGHTS_NAME
                elif use_safetensors is not False:
                    filename = _add_variant(SAFE_WEIGHTS_NAME, variant)
                else:
                    filename = _add_variant(WEIGHTS_NAME, variant)
    
                try:
                    # Load from URL or cache if already cached
                    cached_file_kwargs = {
                        "cache_dir": cache_dir,
                        "force_download": force_download,
                        "proxies": proxies,
                        "local_files_only": local_files_only,
                        "token": token,
                        "user_agent": user_agent,
                        "revision": revision,
                        "subfolder": subfolder,
                        "_raise_exceptions_for_gated_repo": False,
                        "_raise_exceptions_for_missing_entries": False,
                        "_commit_hash": commit_hash,
                    }
                    resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
    
                    # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None
                    # result when internet is up, the repo and revision exist, but the file does not.
                    if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant):
                        # Maybe the checkpoint is sharded, we try to grab the index name in this case.
                        resolved_archive_file = cached_file(
                            pretrained_model_name_or_path,
                            _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant),
                            **cached_file_kwargs,
                        )
                        if resolved_archive_file is not None:
                            is_sharded = True
                        elif use_safetensors:
                            if revision == "main":
                                resolved_archive_file, revision, is_sharded = auto_conversion(
                                    pretrained_model_name_or_path, **cached_file_kwargs
                                )
                            cached_file_kwargs["revision"] = revision
                            if resolved_archive_file is None:
                                raise EnvironmentError(
                                    f"{pretrained_model_name_or_path} does not appear to have a file named"
                                    f" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} "
                                    "and thus cannot be loaded with `safetensors`. Please make sure that the model has "
                                    "been saved with `safe_serialization=True` or do not set `use_safetensors=True`."
                                )
                        else:
                            # This repo has no safetensors file of any kind, we switch to PyTorch.
                            filename = _add_variant(WEIGHTS_NAME, variant)
                            resolved_archive_file = cached_file(
                                pretrained_model_name_or_path, filename, **cached_file_kwargs
                            )
                    if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant):
                        # Maybe the checkpoint is sharded, we try to grab the index name in this case.
                        resolved_archive_file = cached_file(
                            pretrained_model_name_or_path,
                            _add_variant(WEIGHTS_INDEX_NAME, variant),
                            **cached_file_kwargs,
                        )
                        if resolved_archive_file is not None:
                            is_sharded = True
                    if not local_files_only and not is_offline_mode():
                        if resolved_archive_file is not None:
                            if filename in [WEIGHTS_NAME, WEIGHTS_INDEX_NAME]:
                                # If the PyTorch file was found, check if there is a safetensors file on the repository
                                # If there is no safetensors file on the repositories, start an auto conversion
                                safe_weights_name = SAFE_WEIGHTS_INDEX_NAME if is_sharded else SAFE_WEIGHTS_NAME
                                has_file_kwargs = {
                                    "revision": revision,
                                    "proxies": proxies,
                                    "token": token,
                                    "cache_dir": cache_dir,
                                    "local_files_only": local_files_only,
                                }
                                cached_file_kwargs = {
                                    "cache_dir": cache_dir,
                                    "force_download": force_download,
                                    "local_files_only": local_files_only,
                                    "user_agent": user_agent,
                                    "subfolder": subfolder,
                                    "_raise_exceptions_for_gated_repo": False,
                                    "_raise_exceptions_for_missing_entries": False,
                                    "_commit_hash": commit_hash,
                                    **has_file_kwargs,
                                }
                                if not has_file(pretrained_model_name_or_path, safe_weights_name, **has_file_kwargs):
                                    Thread(
                                        target=auto_conversion,
                                        args=(pretrained_model_name_or_path,),
                                        kwargs={"ignore_errors_during_conversion": True, **cached_file_kwargs},
                                        name="Thread-auto_conversion",
                                    ).start()
                        else:
                            # Otherwise, no PyTorch file was found, maybe there is a TF or Flax model file.
                            # We try those to give a helpful error message.
                            has_file_kwargs = {
                                "revision": revision,
                                "proxies": proxies,
                                "token": token,
                                "cache_dir": cache_dir,
                                "local_files_only": local_files_only,
                            }
                            if has_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **has_file_kwargs):
                                raise EnvironmentError(
                                    f"{pretrained_model_name_or_path} does not appear to have a file named"
                                    f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for TensorFlow weights."
                                    " Use `from_tf=True` to load this model from those weights."
                                )
                            elif has_file(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **has_file_kwargs):
                                raise EnvironmentError(
                                    f"{pretrained_model_name_or_path} does not appear to have a file named"
                                    f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for Flax weights. Use"
                                    " `from_flax=True` to load this model from those weights."
                                )
                            elif variant is not None and has_file(
                                pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs
                            ):
                                raise EnvironmentError(
                                    f"{pretrained_model_name_or_path} does not appear to have a file named"
                                    f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant"
                                    f" {variant}. Use `variant=None` to load this model from those weights."
                                )
                            else:
>                               raise EnvironmentError(
                                    f"{pretrained_model_name_or_path} does not appear to have a file named"
                                    f" {_add_variant(WEIGHTS_NAME, variant)}, {_add_variant(SAFE_WEIGHTS_NAME, variant)},"
                                    f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}."
                                )
E                               OSError: sentence-transformers/clip-ViT-B-32 does not appear to have a file named pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt or flax_model.msgpack.

target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/transformers/modeling_utils.py:1100: OSError

During handling of the above exception, another exception occurred:

self = <apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest testMethod=test_sentence_transformer_image_embeddings>

    @unittest.skipIf(Image is None, 'Pillow is not installed.')
    def test_sentence_transformer_image_embeddings(self):
      embedding_config = SentenceTransformerEmbeddings(
          model_name=IMAGE_MODEL_NAME,
          columns=[test_query_column],
          image_model=True)
      img = self.generateRandomImage(256)
>     with beam.Pipeline() as pipeline:

apache_beam/ml/transforms/embeddings/huggingface_test.py:299: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/pipeline.py:644: in __exit__
    self.result = self.run()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/pipeline.py:618: in run
    return self.runner.run_pipeline(self, self._options)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/direct/direct_runner.py:184: in run_pipeline
    return runner.run_pipeline(pipeline, options)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:196: in run_pipeline
    self._latest_run_result = self.run_via_runner_api(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:223: in run_via_runner_api
    return self.run_stages(stage_context, stages)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:470: in run_stages
    bundle_results = self._execute_bundle(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:795: in _execute_bundle
    self._run_bundle(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1034: in _run_bundle
    result, splits = bundle_manager.process_bundle(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/portability/fn_api_runner/fn_runner.py:1360: in process_bundle
    result_future = self._worker_handler.control_conn.push(process_bundle_req)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/portability/fn_api_runner/worker_handlers.py:386: in push
    response = self.worker.do_instruction(request)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/worker/sdk_worker.py:658: in do_instruction
    return getattr(self, request_type)(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/worker/sdk_worker.py:689: in process_bundle
    bundle_processor = self.bundle_processor_cache.get(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/worker/sdk_worker.py:512: in get
    processor = bundle_processor.BundleProcessor(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/runners/worker/bundle_processor.py:1135: in __init__
    op.setup(self.data_sampler)
apache_beam/runners/worker/operations.py:873: in apache_beam.runners.worker.operations.DoOperation.setup
    with self.scoped_start_state:
apache_beam/runners/worker/operations.py:923: in apache_beam.runners.worker.operations.DoOperation.setup
    self.dofn_runner.setup()
apache_beam/runners/common.py:1565: in apache_beam.runners.common.DoFnRunner.setup
    self._invoke_lifecycle_method(self.do_fn_invoker.invoke_setup)
apache_beam/runners/common.py:1561: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    self._reraise_augmented(exn)
apache_beam/runners/common.py:1606: in apache_beam.runners.common.DoFnRunner._reraise_augmented
    raise new_exn
apache_beam/runners/common.py:1559: in apache_beam.runners.common.DoFnRunner._invoke_lifecycle_method
    lifecycle_method()
apache_beam/runners/common.py:601: in apache_beam.runners.common.DoFnInvoker.invoke_setup
    self.signature.setup_lifecycle_method.method_value()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/ml/inference/base.py:1886: in setup
    self._model = self._load_model()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/ml/inference/base.py:1852: in _load_model
    model = self._shared_model_handle.acquire(load, tag=self._cur_tag)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/utils/shared.py:289: in acquire
    return _shared_map.acquire(self._key, constructor_fn, tag)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/utils/shared.py:236: in acquire
    result = control_block.acquire(constructor_fn, tag)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/utils/shared.py:133: in acquire
    result = constructor_fn()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/ml/inference/base.py:1826: in load
    model = self._model_handler.load_model()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/ml/transforms/base.py:681: in load_model
    model = self._underlying.load_model()
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/apache_beam/ml/transforms/embeddings/huggingface.py:87: in load_model
    model = self._model_class(self.model_name, **self._load_model_args)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/sentence_transformers/SentenceTransformer.py:321: in __init__
    modules = self._load_auto_model(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/sentence_transformers/SentenceTransformer.py:1606: in _load_auto_model
    transformer_model = Transformer(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/sentence_transformers/models/Transformer.py:81: in __init__
    self._load_model(model_name_or_path, config, cache_dir, backend, is_peft_model, **model_args)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/sentence_transformers/models/Transformer.py:181: in _load_model
    self.auto_model = AutoModel.from_pretrained(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py:571: in from_pretrained
    return model_class.from_pretrained(
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/transformers/modeling_utils.py:279: in _wrapper
    return func(*args, **kwargs)
target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/transformers/modeling_utils.py:4260: in from_pretrained
    checkpoint_files, sharded_metadata = _get_resolved_checkpoint_files(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

pretrained_model_name_or_path = 'sentence-transformers/clip-ViT-B-32'
subfolder = '', variant = None, gguf_file = None, from_tf = False
from_flax = False, use_safetensors = None, cache_dir = None
force_download = False, proxies = None, local_files_only = False, token = None
user_agent = {'file_type': 'model', 'framework': 'pytorch', 'from_auto_class': True}
revision = None, commit_hash = None

    def _get_resolved_checkpoint_files(
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
        subfolder: str,
        variant: Optional[str],
        gguf_file: Optional[str],
        from_tf: bool,
        from_flax: bool,
        use_safetensors: bool,
        cache_dir: str,
        force_download: bool,
        proxies: Optional[Dict[str, str]],
        local_files_only: bool,
        token: Optional[Union[str, bool]],
        user_agent: dict,
        revision: str,
        commit_hash: Optional[str],
    ) -> Tuple[Optional[List[str]], Optional[Dict]]:
        """Get all the checkpoint filenames based on `pretrained_model_name_or_path`, and optional metadata if the
        checkpoints are sharded.
        This function will download the data if necesary.
        """
        is_sharded = False
    
        if pretrained_model_name_or_path is not None and gguf_file is None:
            pretrained_model_name_or_path = str(pretrained_model_name_or_path)
            is_local = os.path.isdir(pretrained_model_name_or_path)
            if is_local:
                if from_tf and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index")
                ):
                    # Load from a TF 1.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index")
                elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)
                elif from_flax and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
                ):
                    # Load from a Flax checkpoint in priority if from_flax
                    archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
                elif use_safetensors is not False and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant))
                ):
                    # Load from a safetensors checkpoint
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)
                    )
                elif use_safetensors is not False and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant))
                ):
                    # Load from a sharded safetensors checkpoint
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)
                    )
                    is_sharded = True
                elif not use_safetensors and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant))
                ):
                    # Load from a PyTorch checkpoint
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)
                    )
                elif not use_safetensors and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant))
                ):
                    # Load from a sharded PyTorch checkpoint
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)
                    )
                    is_sharded = True
                # At this stage we don't have a weight file so we will raise an error.
                elif not use_safetensors and (
                    os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index"))
                    or os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME))
                ):
                    raise EnvironmentError(
                        f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory"
                        f" {pretrained_model_name_or_path} but there is a file for TensorFlow weights. Use"
                        " `from_tf=True` to load this model from those weights."
                    )
                elif not use_safetensors and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
                ):
                    raise EnvironmentError(
                        f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory"
                        f" {pretrained_model_name_or_path} but there is a file for Flax weights. Use `from_flax=True`"
                        " to load this model from those weights."
                    )
                elif use_safetensors:
                    raise EnvironmentError(
                        f"Error no file named {_add_variant(SAFE_WEIGHTS_NAME, variant)} found in directory"
                        f" {pretrained_model_name_or_path}."
                    )
                else:
                    raise EnvironmentError(
                        f"Error no file named {_add_variant(WEIGHTS_NAME, variant)}, {_add_variant(SAFE_WEIGHTS_NAME, variant)},"
                        f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME + '.index'} or {FLAX_WEIGHTS_NAME} found in directory"
                        f" {pretrained_model_name_or_path}."
                    )
            elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
                archive_file = pretrained_model_name_or_path
                is_local = True
            elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path + ".index")):
                if not from_tf:
                    raise ValueError(
                        f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set "
                        "from_tf to True to load from this checkpoint."
                    )
                archive_file = os.path.join(subfolder, pretrained_model_name_or_path + ".index")
                is_local = True
            elif is_remote_url(pretrained_model_name_or_path):
                filename = pretrained_model_name_or_path
                resolved_archive_file = download_url(pretrained_model_name_or_path)
            else:
                # set correct filename
                if from_tf:
                    filename = TF2_WEIGHTS_NAME
                elif from_flax:
                    filename = FLAX_WEIGHTS_NAME
                elif use_safetensors is not False:
                    filename = _add_variant(SAFE_WEIGHTS_NAME, variant)
                else:
                    filename = _add_variant(WEIGHTS_NAME, variant)
    
                try:
                    # Load from URL or cache if already cached
                    cached_file_kwargs = {
                        "cache_dir": cache_dir,
                        "force_download": force_download,
                        "proxies": proxies,
                        "local_files_only": local_files_only,
                        "token": token,
                        "user_agent": user_agent,
                        "revision": revision,
                        "subfolder": subfolder,
                        "_raise_exceptions_for_gated_repo": False,
                        "_raise_exceptions_for_missing_entries": False,
                        "_commit_hash": commit_hash,
                    }
                    resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
    
                    # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None
                    # result when internet is up, the repo and revision exist, but the file does not.
                    if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant):
                        # Maybe the checkpoint is sharded, we try to grab the index name in this case.
                        resolved_archive_file = cached_file(
                            pretrained_model_name_or_path,
                            _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant),
                            **cached_file_kwargs,
                        )
                        if resolved_archive_file is not None:
                            is_sharded = True
                        elif use_safetensors:
                            if revision == "main":
                                resolved_archive_file, revision, is_sharded = auto_conversion(
                                    pretrained_model_name_or_path, **cached_file_kwargs
                                )
                            cached_file_kwargs["revision"] = revision
                            if resolved_archive_file is None:
                                raise EnvironmentError(
                                    f"{pretrained_model_name_or_path} does not appear to have a file named"
                                    f" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} "
                                    "and thus cannot be loaded with `safetensors`. Please make sure that the model has "
                                    "been saved with `safe_serialization=True` or do not set `use_safetensors=True`."
                                )
                        else:
                            # This repo has no safetensors file of any kind, we switch to PyTorch.
                            filename = _add_variant(WEIGHTS_NAME, variant)
                            resolved_archive_file = cached_file(
                                pretrained_model_name_or_path, filename, **cached_file_kwargs
                            )
                    if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant):
                        # Maybe the checkpoint is sharded, we try to grab the index name in this case.
                        resolved_archive_file = cached_file(
                            pretrained_model_name_or_path,
                            _add_variant(WEIGHTS_INDEX_NAME, variant),
                            **cached_file_kwargs,
                        )
                        if resolved_archive_file is not None:
                            is_sharded = True
                    if not local_files_only and not is_offline_mode():
                        if resolved_archive_file is not None:
                            if filename in [WEIGHTS_NAME, WEIGHTS_INDEX_NAME]:
                                # If the PyTorch file was found, check if there is a safetensors file on the repository
                                # If there is no safetensors file on the repositories, start an auto conversion
                                safe_weights_name = SAFE_WEIGHTS_INDEX_NAME if is_sharded else SAFE_WEIGHTS_NAME
                                has_file_kwargs = {
                                    "revision": revision,
                                    "proxies": proxies,
                                    "token": token,
                                    "cache_dir": cache_dir,
                                    "local_files_only": local_files_only,
                                }
                                cached_file_kwargs = {
                                    "cache_dir": cache_dir,
                                    "force_download": force_download,
                                    "local_files_only": local_files_only,
                                    "user_agent": user_agent,
                                    "subfolder": subfolder,
                                    "_raise_exceptions_for_gated_repo": False,
                                    "_raise_exceptions_for_missing_entries": False,
                                    "_commit_hash": commit_hash,
                                    **has_file_kwargs,
                                }
                                if not has_file(pretrained_model_name_or_path, safe_weights_name, **has_file_kwargs):
                                    Thread(
                                        target=auto_conversion,
                                        args=(pretrained_model_name_or_path,),
                                        kwargs={"ignore_errors_during_conversion": True, **cached_file_kwargs},
                                        name="Thread-auto_conversion",
                                    ).start()
                        else:
                            # Otherwise, no PyTorch file was found, maybe there is a TF or Flax model file.
                            # We try those to give a helpful error message.
                            has_file_kwargs = {
                                "revision": revision,
                                "proxies": proxies,
                                "token": token,
                                "cache_dir": cache_dir,
                                "local_files_only": local_files_only,
                            }
                            if has_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **has_file_kwargs):
                                raise EnvironmentError(
                                    f"{pretrained_model_name_or_path} does not appear to have a file named"
                                    f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for TensorFlow weights."
                                    " Use `from_tf=True` to load this model from those weights."
                                )
                            elif has_file(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **has_file_kwargs):
                                raise EnvironmentError(
                                    f"{pretrained_model_name_or_path} does not appear to have a file named"
                                    f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for Flax weights. Use"
                                    " `from_flax=True` to load this model from those weights."
                                )
                            elif variant is not None and has_file(
                                pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs
                            ):
                                raise EnvironmentError(
                                    f"{pretrained_model_name_or_path} does not appear to have a file named"
                                    f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant"
                                    f" {variant}. Use `variant=None` to load this model from those weights."
                                )
                            else:
>                               raise EnvironmentError(
                                    f"{pretrained_model_name_or_path} does not appear to have a file named"
                                    f" {_add_variant(WEIGHTS_NAME, variant)}, {_add_variant(SAFE_WEIGHTS_NAME, variant)},"
                                    f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}."
                                )
E                               OSError: sentence-transformers/clip-ViT-B-32 does not appear to have a file named pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt or flax_model.msgpack. [while running 'MLTransform/RunInference/BeamML_RunInference']

target/.tox-py310-ml/py310-ml/lib/python3.10/site-packages/transformers/modeling_utils.py:1100: OSError
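
The OSError above is raised while transformers resolves checkpoint files for sentence-transformers/clip-ViT-B-32 (no pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt or flax_model.msgpack was found), i.e. during model download rather than in the Beam pipeline code. A quick way to check whether this is a Hugging Face hub/environment issue rather than a regression from this PR is to load the same model outside of Beam. The snippet below is a hedged reproduction sketch: the model name is taken from the traceback, and the sentence-transformers version is assumed to match the one in the py310-ml tox environment.

    # Hedged reproduction sketch: load the model the failing test uses,
    # outside of Beam, to see whether the missing-weights OSError reproduces
    # on its own.
    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer('sentence-transformers/clip-ViT-B-32')
    print(model)  # If this raises the same OSError, the failure is environmental.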

Check notice on line 0 in .github

74 skipped tests found

There are 74 skipped tests, see "Raw output" for the full list of skipped tests.
Raw output
apache_beam.ml.gcp.cloud_dlp_it_test.CloudDLPIT ‑ test_deidentification
apache_beam.ml.gcp.cloud_dlp_it_test.CloudDLPIT ‑ test_inspection
apache_beam.ml.gcp.naturallanguageml_test_it.NaturalLanguageMlTestIT ‑ test_analyzing_syntax
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_create_catalog_item
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_create_user_event
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_predict
apache_beam.ml.gcp.videointelligenceml_test_it.VideoIntelligenceMlTestIT ‑ test_label_detection_with_video_context
apache_beam.ml.gcp.visionml_test_it.VisionMlTestIT ‑ test_text_detection_with_language_hint
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_timeout_does_garbage_collection
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_with_side_inputin_streaming
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_language_modeling
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_language_modeling_large_model
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_pipeline
apache_beam.ml.inference.onnx_inference_it_test.OnnxInference ‑ test_onnx_run_inference_roberta_sentiment_classification
apache_beam.ml.inference.onnx_inference_test
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_bert_for_masked_lm
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_bert_for_masked_lm_large_model
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_coco_maskrcnn_resnet50_fpn
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_coco_maskrcnn_resnet50_fpn_v1_and_v2
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_imagenet_mobilenetv2
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_mnist_classification
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_mnist_classification_large_model
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_regression
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_imagenet_image_segmentation
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_classification
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_classification_large_model
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_with_weights_classification
apache_beam.ml.inference.tensorrt_inference_test
apache_beam.ml.inference.vertex_ai_inference_it_test.VertexAIInference ‑ test_vertex_ai_run_flower_image_classification
apache_beam.ml.inference.vertex_ai_inference_it_test.VertexAIInference ‑ test_vertex_ai_run_llm_text_classification
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_datatable_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_datatable_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_single_batch_large_model
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_pandas_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_pandas_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_scipy_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_scipy_single_batch
apache_beam.ml.inference.xgboost_inference_test
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_basic_vector_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_batched_metadata_filter_vector_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_condition_batching
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_empty_input
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_euclidean_distance_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_metadata_filter_leakage
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_no_metadata_restriction
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_default_ignore
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_default_update_fields
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_update
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_custom_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_default_id_embedding_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_default_schema
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_defaults_with_args_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_language_connector
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_metadata_spec_and_conflicts
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_custom_schema
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_default_schema
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_default_schema_missing_embedding
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_streaming_default_schema
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPIGCSLocationTest ‑ test_embeddings_with_inference_api
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPIGCSLocationTest ‑ test_get_api_url_and_when_model_name_not_provided
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPITest ‑ test_embeddings_with_inference_api
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPITest ‑ test_get_api_url_and_when_model_name_not_provided
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_embeddings_with_read_artifact_location
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_tfhub_text_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_with_int_data_types
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_embeddings_with_read_artifact_location
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_tfhub_text_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_with_int_data_types
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubImageEmbeddingsTest ‑ test_sentence_transformer_image_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubImageEmbeddingsTest ‑ test_with_str_data_types

Check notice on line 0 in .github

486 tests found

There are 486 tests, see "Raw output" for the full list of tests.
Raw output
apache_beam.ml.anomaly.aggregations_test.AllVoteTest ‑ test_default
apache_beam.ml.anomaly.aggregations_test.AnyVoteTest ‑ test_default
apache_beam.ml.anomaly.aggregations_test.AverageScoreTest ‑ test_default
apache_beam.ml.anomaly.aggregations_test.LabelAggTestWithMissingOrError ‑ test_default
apache_beam.ml.anomaly.aggregations_test.MajorityVoteTest ‑ test_default
apache_beam.ml.anomaly.aggregations_test.MajorityVoteTest ‑ test_tie_breaker
apache_beam.ml.anomaly.aggregations_test.MaxScoreTest ‑ test_default
apache_beam.ml.anomaly.aggregations_test.ScoreAggTestWithMissingOrError ‑ test_default
apache_beam.ml.anomaly.base_test.TestAnomalyDetector ‑ test_model_id_and_spec_0
apache_beam.ml.anomaly.base_test.TestAnomalyDetector ‑ test_model_id_and_spec_1
apache_beam.ml.anomaly.base_test.TestAnomalyDetector ‑ test_model_id_and_spec_2
apache_beam.ml.anomaly.base_test.TestAnomalyDetector ‑ test_model_id_and_spec_3
apache_beam.ml.anomaly.base_test.TestEnsembleAnomalyDetector ‑ test_model_id_and_spec_0
apache_beam.ml.anomaly.base_test.TestEnsembleAnomalyDetector ‑ test_model_id_and_spec_1
apache_beam.ml.anomaly.base_test.TestEnsembleAnomalyDetector ‑ test_model_id_and_spec_2
apache_beam.ml.anomaly.base_test.TestEnsembleAnomalyDetector ‑ test_model_id_and_spec_3
apache_beam.ml.anomaly.detectors.iqr_test.IQRTest ‑ test_with_default_trackers
apache_beam.ml.anomaly.detectors.robust_zscore_test.RobustZScoreTest ‑ test_with_default_trackers
apache_beam.ml.anomaly.detectors.zscore_test.ZScoreTest ‑ test_with_custom_mean_tracker
apache_beam.ml.anomaly.detectors.zscore_test.ZScoreTest ‑ test_with_default_trackers
apache_beam.ml.anomaly.specifiable_test.TestClassAsArgument ‑ test_normal_class
apache_beam.ml.anomaly.specifiable_test.TestFunctionAsArgument ‑ test_lambda_function
apache_beam.ml.anomaly.specifiable_test.TestFunctionAsArgument ‑ test_normal_function
apache_beam.ml.anomaly.specifiable_test.TestInitCallCount ‑ test_just_in_time_init
apache_beam.ml.anomaly.specifiable_test.TestInitCallCount ‑ test_on_demand_and_just_in_time_init
apache_beam.ml.anomaly.specifiable_test.TestInitCallCount ‑ test_on_demand_init
apache_beam.ml.anomaly.specifiable_test.TestInitCallCount ‑ test_on_pickle
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_error_in_child
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_0
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_1
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_2
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_3
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_4
apache_beam.ml.anomaly.specifiable_test.TestNestedSpecifiable ‑ test_nested_specifiable_5
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_decorator_in_function_form
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_decorator_in_syntactic_sugar_form
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_from_spec_and_to_spec_0
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_from_spec_and_to_spec_1
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_from_spec_and_to_spec_2
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_from_spec_and_to_spec_3
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_from_spec_on_unknown_spec_type
apache_beam.ml.anomaly.specifiable_test.TestSpecifiable ‑ test_init_params_in_specifiable
apache_beam.ml.anomaly.specifiable_test.TestUncommonUsages ‑ test_double_specifiable
apache_beam.ml.anomaly.specifiable_test.TestUncommonUsages ‑ test_unspecifiable
apache_beam.ml.anomaly.thresholds_test.TestFixedThreshold ‑ test_apply_only
apache_beam.ml.anomaly.thresholds_test.TestQuantileThreshold ‑ test_apply_only
apache_beam.ml.anomaly.thresholds_test.TestQuantileThreshold ‑ test_quantile_tracker
apache_beam.ml.anomaly.transforms_test.TestAnomalyDetection ‑ test_multiple_detectors_without_aggregation
apache_beam.ml.anomaly.transforms_test.TestAnomalyDetection ‑ test_multiple_sub_detectors_with_aggregation
apache_beam.ml.anomaly.transforms_test.TestAnomalyDetection ‑ test_one_detector
apache_beam.ml.anomaly.transforms_test.TestOfflineDetector ‑ test_default_inference_fn
apache_beam.ml.anomaly.transforms_test.TestOfflineDetector ‑ test_run_inference_args
apache_beam.ml.anomaly.transforms_test.TestStatefulThresholdDoFn ‑ test_dofn_on_single_prediction
apache_beam.ml.anomaly.transforms_test.TestStatelessThresholdDoFn ‑ test_dofn_on_multiple_predictions
apache_beam.ml.anomaly.transforms_test.TestStatelessThresholdDoFn ‑ test_dofn_on_single_prediction
apache_beam.ml.anomaly.univariate.mad_test.MadTest ‑ test_default_tracker
apache_beam.ml.anomaly.univariate.mean_test.LandmarkMeanTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.mean_test.LandmarkMeanTest ‑ test_with_float64_max
apache_beam.ml.anomaly.univariate.mean_test.LandmarkMeanTest ‑ test_with_nan
apache_beam.ml.anomaly.univariate.mean_test.LandmarkMeanTest ‑ test_without_nan
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_with_float64_max_0
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_with_float64_max_1
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_with_nan_0
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_with_nan_1
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_without_nan_0
apache_beam.ml.anomaly.univariate.mean_test.SlidingMeanTest ‑ test_without_nan_1
apache_beam.ml.anomaly.univariate.median_test.MedianTest ‑ test_custom_tracker
apache_beam.ml.anomaly.univariate.median_test.MedianTest ‑ test_default_tracker
apache_beam.ml.anomaly.univariate.median_test.MedianTest ‑ test_wrong_tracker
apache_beam.ml.anomaly.univariate.perf_test.PerfTest ‑ test_mean_perf
apache_beam.ml.anomaly.univariate.perf_test.PerfTest ‑ test_quantile_perf
apache_beam.ml.anomaly.univariate.perf_test.PerfTest ‑ test_stdev_perf
apache_beam.ml.anomaly.univariate.quantile_test.LandmarkQuantileTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.quantile_test.LandmarkQuantileTest ‑ test_with_nan
apache_beam.ml.anomaly.univariate.quantile_test.LandmarkQuantileTest ‑ test_without_nan
apache_beam.ml.anomaly.univariate.quantile_test.SlidingQuantileTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.quantile_test.SlidingQuantileTest ‑ test_with_nan_0
apache_beam.ml.anomaly.univariate.quantile_test.SlidingQuantileTest ‑ test_with_nan_1
apache_beam.ml.anomaly.univariate.quantile_test.SlidingQuantileTest ‑ test_without_nan_0
apache_beam.ml.anomaly.univariate.stdev_test.LandmarkStdevTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.stdev_test.LandmarkStdevTest ‑ test_with_nan
apache_beam.ml.anomaly.univariate.stdev_test.LandmarkStdevTest ‑ test_without_nan
apache_beam.ml.anomaly.univariate.stdev_test.SlidingStdevTest ‑ test_accuracy_fuzz
apache_beam.ml.anomaly.univariate.stdev_test.SlidingStdevTest ‑ test_stdev_with_nan_0
apache_beam.ml.anomaly.univariate.stdev_test.SlidingStdevTest ‑ test_stdev_with_nan_1
apache_beam.ml.anomaly.univariate.stdev_test.SlidingStdevTest ‑ test_without_nan_0
apache_beam.ml.anomaly.univariate.stdev_test.SlidingStdevTest ‑ test_without_nan_1
apache_beam.ml.gcp.cloud_dlp_it_test.CloudDLPIT ‑ test_deidentification
apache_beam.ml.gcp.cloud_dlp_it_test.CloudDLPIT ‑ test_inspection
apache_beam.ml.gcp.cloud_dlp_test.TestDeidentifyFn ‑ test_deidentify_called
apache_beam.ml.gcp.cloud_dlp_test.TestDeidentifyText ‑ test_exception_raised_when_no_config_is_provided
apache_beam.ml.gcp.cloud_dlp_test.TestInspectFn ‑ test_inspect_called
apache_beam.ml.gcp.cloud_dlp_test.TestInspectText ‑ test_exception_raised_then_no_config_provided
apache_beam.ml.gcp.naturallanguageml_test.NaturalLanguageMlTest ‑ test_document_source
apache_beam.ml.gcp.naturallanguageml_test_it.NaturalLanguageMlTestIT ‑ test_analyzing_syntax
apache_beam.ml.gcp.recommendations_ai_test.RecommendationsAICatalogItemTest ‑ test_CreateCatalogItem
apache_beam.ml.gcp.recommendations_ai_test.RecommendationsAICatalogItemTest ‑ test_ImportCatalogItems
apache_beam.ml.gcp.recommendations_ai_test.RecommendationsAIPredictTest ‑ test_Predict
apache_beam.ml.gcp.recommendations_ai_test.RecommendationsAIUserEventTest ‑ test_CreateUserEvent
apache_beam.ml.gcp.recommendations_ai_test.RecommendationsAIUserEventTest ‑ test_ImportUserEvents
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_create_catalog_item
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_create_user_event
apache_beam.ml.gcp.recommendations_ai_test_it.RecommendationAIIT ‑ test_predict
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideoWithContext_b64_content
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideoWithContext_bad_input
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideo_URIs
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideo_b64_content
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideo_bad_input
apache_beam.ml.gcp.videointelligenceml_test.VideoIntelligenceTest ‑ test_AnnotateVideo_with_side_input_context
apache_beam.ml.gcp.videointelligenceml_test_it.VideoIntelligenceMlTestIT ‑ test_label_detection_with_video_context
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImageWithContext_URIs
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImageWithContext_bad_input
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImage_URI_with_side_input_context
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImage_URIs
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImage_URIs_large_batch
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImage_b64_content
apache_beam.ml.gcp.visionml_test.VisionTest ‑ test_AnnotateImage_bad_input
apache_beam.ml.gcp.visionml_test_it.VisionMlTestIT ‑ test_text_detection_with_language_hint
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_child_class_without_env_vars
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_counted_metrics
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_env_vars_set_correctly
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_failed_batches_counter_no_failures
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_forwards_batch_args
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_increment_failed_batches_counter
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_keyed_many_model_handlers_validation
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_keyed_model_handler_get_num_bytes
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_keyed_model_handler_multiple_models_get_num_bytes
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_handler_compatibility
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_manager_evicts_correct_num_of_models_after_being_incremented
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_manager_evicts_models
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_manager_evicts_models_after_update
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_manager_loads_shared_model
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_status_provides_valid_garbage_collection
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_model_status_provides_valid_tags
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_empty_side_input
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_inference_args
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_keyed_mps_incrementing_multi_copy
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_mps_nobatch_incrementing_multi_copy
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_multi_process_shared_incrementing_multi_copy
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_simple_examples
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_simple_examples_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_simple_examples_multi_process_shared_multi_copy
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_keyed_examples
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_keyed_examples_many_mhs_max_models_hint
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_keyed_examples_many_model_handlers
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_keyed_examples_many_model_handlers_metrics
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_keyed_examples_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_maybe_keyed_examples
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_impl_with_maybe_keyed_examples_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_keyed_examples_with_unkeyed_model_handler
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_keyed_pre_and_post_processing
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_loads_different_models
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_loads_different_models_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_maybe_keyed_pre_and_post_processing
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_metrics_with_custom_namespace
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_postprocessing
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_postprocessing_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_postprocessing_multiple_fns
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_pre_and_post_processing_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_prebatched
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_prediction_result_with_model_id
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_preprocessing
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_preprocessing_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_preprocessing_multiple_fns
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_side_input_in_batch
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_side_input_in_batch_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_side_input_in_batch_per_key_models
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_side_input_in_batch_per_key_models_split_cohort
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_timeout_does_garbage_collection
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_timeout_not_hit
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_timeout_on_inference_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_timeout_on_load_dlq
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_unkeyed_examples_with_keyed_model_handler
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_watch_file_pattern_keyword_arg_side_input_label
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_watch_file_pattern_side_input_label
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_with_iterable_side_input
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_with_iterable_side_input_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_run_inference_with_side_inputin_streaming
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_runinference_loads_same_model_with_identifier_multi_process_shared
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_timing_metrics
apache_beam.ml.inference.base_test.RunInferenceBaseTest ‑ test_unexpected_inference_args_passed
apache_beam.ml.inference.base_test.RunInferenceRemoteTest ‑ test_exception_on_load_model_override
apache_beam.ml.inference.base_test.RunInferenceRemoteTest ‑ test_exception_on_run_inference_override
apache_beam.ml.inference.base_test.RunInferenceRemoteTest ‑ test_normal_model_execution
apache_beam.ml.inference.base_test.RunInferenceRemoteTest ‑ test_repeated_requests_fail
apache_beam.ml.inference.base_test.RunInferenceRemoteTest ‑ test_works_on_retry
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_language_modeling
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_language_modeling_large_model
apache_beam.ml.inference.huggingface_inference_it_test.HuggingFaceInference ‑ test_hf_pipeline
apache_beam.ml.inference.huggingface_inference_test.HuggingFaceInferenceTest ‑ test_framework_detection_tensorflow
apache_beam.ml.inference.huggingface_inference_test.HuggingFaceInferenceTest ‑ test_framework_detection_torch
apache_beam.ml.inference.huggingface_inference_test.HuggingFaceInferenceTest ‑ test_predict_tensor
apache_beam.ml.inference.huggingface_inference_test.HuggingFaceInferenceTest ‑ test_predict_tensor_with_inference_args
apache_beam.ml.inference.onnx_inference_it_test.OnnxInference ‑ test_onnx_run_inference_roberta_sentiment_classification
apache_beam.ml.inference.onnx_inference_test
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_bert_for_masked_lm
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_bert_for_masked_lm_large_model
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_coco_maskrcnn_resnet50_fpn
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_coco_maskrcnn_resnet50_fpn_v1_and_v2
apache_beam.ml.inference.pytorch_inference_it_test.PyTorchInference ‑ test_torch_run_inference_imagenet_mobilenetv2
apache_beam.ml.inference.pytorch_inference_test.PytorchInferenceTestWithMocks ‑ test_load_model_args_keyed_tensor
apache_beam.ml.inference.pytorch_inference_test.PytorchInferenceTestWithMocks ‑ test_load_model_args_tensor
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_env_vars_set_correctly_keyed_tensor_handler
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_env_vars_set_correctly_tensor_handler
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_gpu_auto_convert_to_cpu
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_inference_torch_script_model
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_invalid_input_type
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_load_torch_script_model
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_gcs_model
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_gcs_model_control_batching
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_local_model_extra_inference_args
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_local_model_extra_inference_args_batching_args
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_local_model_extra_inference_args_large
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_local_model_large
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_pipeline_local_model_simple
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_prediction_result_model_id_with_torch_model
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_prediction_result_model_id_with_torch_script_model
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_specify_torch_script_path_and_state_dict_path
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_torch_model_class_none
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferencePipelineTest ‑ test_torch_model_state_dict_none
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_inference_runner_inference_args
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_namespace
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_num_bytes
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_custom
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_helper
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_keyed
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_keyed_dict_output
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_keyed_helper
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_multiple_tensor_features
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_multiple_tensor_features_dict_output
apache_beam.ml.inference.pytorch_inference_test.PytorchRunInferenceTest ‑ test_run_inference_single_tensor_feature
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_mnist_classification
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_mnist_classification_large_model
apache_beam.ml.inference.sklearn_inference_it_test.SklearnInference ‑ test_sklearn_regression
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_bad_file_raises
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_bad_input_type_raises
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_custom_inference_fn
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_data_vectorized
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_env_vars_set_correctly_numpy
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_infer_too_many_rows_in_dataframe
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_num_bytes_numpy
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_joblib
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_custom_batching
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_custom_inference
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_dict_out
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_env_vars_set_correctly
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_joblib
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_large_model
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pandas_with_keys
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pickled
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pickled_custom_batching
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_pipeline_pickled_large_model
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_predict_output
apache_beam.ml.inference.sklearn_inference_test.SkLearnRunInferenceTest ‑ test_predict_output_dict
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_imagenet_image_segmentation
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_classification
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_classification_large_model
apache_beam.ml.inference.tensorflow_inference_it_test.TensorflowInference ‑ test_tf_mnist_with_weights_classification
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_load_model_exception
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_keyed_numpy
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_keyed_tensor
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_numpy
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_numpy_with_batch_size
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_numpy_with_large_model
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_tensor
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_tensor_with_args
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_tensor_with_batch_size
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTest ‑ test_predict_tensor_with_large_model
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTestWithMocks ‑ test_env_vars_set_correctly_numpy
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTestWithMocks ‑ test_env_vars_set_correctly_tensor
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTestWithMocks ‑ test_load_model_args
apache_beam.ml.inference.tensorflow_inference_test.TFRunInferenceTestWithMocks ‑ test_load_model_with_args_and_custom_weights
apache_beam.ml.inference.tensorrt_inference_test
apache_beam.ml.inference.utils_test.WatchFilePatternTest ‑ test_emitting_singleton_output
apache_beam.ml.inference.utils_test.WatchFilePatternTest ‑ test_latest_file_by_timestamp_default_value
apache_beam.ml.inference.utils_test.WatchFilePatternTest ‑ test_latest_file_with_timestamp_after_pipeline_construction_time
apache_beam.ml.inference.vertex_ai_inference_it_test.VertexAIInference ‑ test_vertex_ai_run_flower_image_classification
apache_beam.ml.inference.vertex_ai_inference_it_test.VertexAIInference ‑ test_vertex_ai_run_llm_text_classification
apache_beam.ml.inference.vertex_ai_inference_test.ModelHandlerArgConditions ‑ test_exception_on_private_without_network
apache_beam.ml.inference.vertex_ai_inference_test.RetryOnClientErrorTest ‑ test_retry_on_client_error_negative
apache_beam.ml.inference.vertex_ai_inference_test.RetryOnClientErrorTest ‑ test_retry_on_client_error_positive
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_datatable_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_datatable_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_numpy_single_batch_large_model
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_pandas_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_pandas_single_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_scipy_multi_batch
apache_beam.ml.inference.xgboost_inference_it_test.XGBoostInference ‑ test_iris_classification_scipy_single_batch
apache_beam.ml.inference.xgboost_inference_test
apache_beam.ml.rag.chunking.base_test.ChunkingTransformProviderTest ‑ test_chunking_transform
apache_beam.ml.rag.chunking.base_test.ChunkingTransformProviderTest ‑ test_custom_chunk_id_fn
apache_beam.ml.rag.chunking.base_test.ChunkingTransformProviderTest ‑ test_doesnt_override_get_text_splitter_transform
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_empty_document_field
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_empty_text
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_huggingface_tokenizer_splitter
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_invalid_document_field
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_invalid_text_splitter
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_multiple_metadata_fields
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_no_metadata_fields
apache_beam.ml.rag.chunking.langchain_test.LangChainChunkingTest ‑ test_recursive_splitter_no_overlap
apache_beam.ml.rag.embeddings.base_test.RAGBaseEmbeddingsTest ‑ test_adapter_input_conversion
apache_beam.ml.rag.embeddings.base_test.RAGBaseEmbeddingsTest ‑ test_adapter_input_conversion_missing_text_content
apache_beam.ml.rag.embeddings.base_test.RAGBaseEmbeddingsTest ‑ test_adapter_output_conversion
apache_beam.ml.rag.embeddings.huggingface_test.HuggingfaceTextEmbeddingsTest ‑ test_embedding_pipeline
apache_beam.ml.rag.embeddings.vertex_ai_test.VertexAITextEmbeddingsTest ‑ test_embedding_pipeline
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_basic_vector_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_batched_metadata_filter_vector_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_condition_batching
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_empty_input
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_euclidean_distance_search
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_invalid_query
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_metadata_filter_leakage
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_missing_embedding
apache_beam.ml.rag.enrichment.bigquery_vector_search_it_test.TestBigQueryVectorSearchIT ‑ test_no_metadata_restriction
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_default_ignore
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_default_update_fields
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_conflict_resolution_update
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_custom_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_default_id_embedding_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_default_schema
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_defaults_with_args_specs
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_language_connector
apache_beam.ml.rag.ingestion.alloydb_it_test.AlloyDBVectorWriterConfigTest ‑ test_metadata_spec_and_conflicts
apache_beam.ml.rag.ingestion.base_test.VectorDatabaseBaseTest ‑ test_invalid_config
apache_beam.ml.rag.ingestion.base_test.VectorDatabaseBaseTest ‑ test_pipeline_integration
apache_beam.ml.rag.ingestion.base_test.VectorDatabaseBaseTest ‑ test_write_transform_creation
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_custom_schema
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_default_schema
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_default_schema_missing_embedding
apache_beam.ml.rag.ingestion.bigquery_it_test.BigQueryVectorWriterConfigTest ‑ test_streaming_default_schema
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_appends_transforms_to_process_handler_correctly
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_dict_output_pcoll_schema_0
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_dict_output_pcoll_schema_1
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_dict_output_pcoll_schema_2
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_dict_output_pcoll_schema_3
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_fail_for_non_global_windows_in_produce_mode
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_no_read_or_write_artifact_lcoation
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_on_dict
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_on_list_dict
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transform_on_multiple_columns_single_transform
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_ml_transforms_on_multiple_columns_multiple_transforms
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_mltransform_with_counter
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_non_ptransfrom_provider_class_to_mltransform
apache_beam.ml.transforms.base_test.BaseMLTransformTest ‑ test_read_mode_with_transforms
apache_beam.ml.transforms.base_test.MLTransformDLQTest ‑ test_dlq_with_embeddings
apache_beam.ml.transforms.base_test.MLTransformDLQTest ‑ test_mltransform_with_dlq_and_extract_tranform_name
apache_beam.ml.transforms.base_test.TestImageEmbeddingHandler ‑ test_handler_with_dict_inputs
apache_beam.ml.transforms.base_test.TestImageEmbeddingHandler ‑ test_handler_with_non_dict_datatype
apache_beam.ml.transforms.base_test.TestImageEmbeddingHandler ‑ test_handler_with_non_image_datatype
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_mltransform_to_ptransform_wrapper
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_save_and_load_run_inference
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_save_run_inference
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_save_tft_process_handler
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_with_gcs_location_with_none_options
apache_beam.ml.transforms.base_test.TestJsonPickleTransformAttributeManager ‑ test_with_same_local_artifact_location
apache_beam.ml.transforms.base_test.TestUtilFunctions ‑ test_dict_input_fn_normal
apache_beam.ml.transforms.base_test.TestUtilFunctions ‑ test_dict_input_fn_on_list_inputs
apache_beam.ml.transforms.base_test.TestUtilFunctions ‑ test_dict_output_fn_normal
apache_beam.ml.transforms.base_test.TestUtilFunctions ‑ test_dict_output_fn_on_list_inputs
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_on_columns_not_exist_in_input_data
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_on_multiple_columns
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_with_batch_sizes
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_with_dict_inputs
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_with_incompatible_datatype
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_with_inconsistent_keys
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_handler_with_list_data
apache_beam.ml.transforms.base_test.TextEmbeddingHandlerTest ‑ test_no_columns_or_type_adapter
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPIGCSLocationTest ‑ test_embeddings_with_inference_api
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPIGCSLocationTest ‑ test_get_api_url_and_when_model_name_not_provided
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPITest ‑ test_embeddings_with_inference_api
apache_beam.ml.transforms.embeddings.huggingface_test.HuggingfaceInferenceAPITest ‑ test_get_api_url_and_when_model_name_not_provided
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_embeddings_with_inference_args
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_embeddings_with_read_artifact_location_0
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_embeddings_with_read_artifact_location_1
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_embeddings_with_read_artifact_location_2
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_mltransform_to_ptransform_with_sentence_transformer
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_sentence_transformer_embeddings
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_sentence_transformer_image_embeddings
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_sentence_transformer_images_with_str_data_types
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_sentence_transformer_with_int_data_types
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_with_gcs_artifact_location_0
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_with_gcs_artifact_location_1
apache_beam.ml.transforms.embeddings.huggingface_test.SentenceTransformerEmbeddingsTest ‑ test_with_gcs_artifact_location_2
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_embeddings_with_read_artifact_location
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_tfhub_text_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsGCSArtifactLocationTest ‑ test_with_int_data_types
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_embeddings_with_read_artifact_location
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_tfhub_text_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubEmbeddingsTest ‑ test_with_int_data_types
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubImageEmbeddingsTest ‑ test_sentence_transformer_image_embeddings
apache_beam.ml.transforms.embeddings.tensorflow_hub_test.TFHubImageEmbeddingsTest ‑ test_with_str_data_types
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_embeddings_with_read_artifact_location
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_embeddings_with_scale_to_0_1
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_mltransform_to_ptransform_with_vertex
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_vertex_ai_text_embeddings
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_with_gcs_artifact_location
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIEmbeddingsTest ‑ test_with_int_data_types
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIImageEmbeddingsTest ‑ test_improper_dimension
apache_beam.ml.transforms.embeddings.vertex_ai_test.VertexAIImageEmbeddingsTest ‑ test_vertex_ai_image_embedding
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_consume_mode_with_extra_columns_in_the_input
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_handler_with_same_input_elements
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_input_type_from_named_tuple_pcoll_numpy
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_input_type_from_row_type_pcoll
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_input_type_from_row_type_pcoll_list
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_input_type_from_schema_named_tuple_pcoll
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_input_type_from_schema_named_tuple_pcoll_list
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_dtypes
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_numpy_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_primitive_native_container_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_primitive_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_primitive_types_in_containers
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tensorflow_raw_data_metadata_union_type_in_single_column
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_operation_preprocessing_fn_0
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_operation_preprocessing_fn_1
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_process_handler_default_transform_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_process_handler_transformed_data_schema
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_process_handler_unused_column
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTest ‑ test_tft_process_handler_verify_artifacts
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_consume_mode_with_extra_columns_in_the_input
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_handler_with_same_input_elements
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_input_type_from_named_tuple_pcoll_numpy
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_input_type_from_row_type_pcoll
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_input_type_from_row_type_pcoll_list
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_input_type_from_schema_named_tuple_pcoll
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_input_type_from_schema_named_tuple_pcoll_list
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_dtypes
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_numpy_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_primitive_native_container_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_primitive_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_primitive_types_in_containers
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tensorflow_raw_data_metadata_union_type_in_single_column
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_operation_preprocessing_fn_0
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_operation_preprocessing_fn_1
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_process_handler_default_transform_types
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_process_handler_transformed_data_schema
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_process_handler_unused_column
apache_beam.ml.transforms.handlers_test.TFTProcessHandlerTestWithGCSLocation ‑ test_tft_process_handler_verify_artifacts
apache_beam.ml.transforms.tft_test.ApplyBucketsTest ‑ test_apply_buckets_0
apache_beam.ml.transforms.tft_test.ApplyBucketsTest ‑ test_apply_buckets_1
apache_beam.ml.transforms.tft_test.ApplyBucketsWithInterpolationTest ‑ test_apply_buckets_0
apache_beam.ml.transforms.tft_test.ApplyBucketsWithInterpolationTest ‑ test_apply_buckets_1
apache_beam.ml.transforms.tft_test.BagOfWordsTest ‑ test_bag_of_words_on_by_splitting_input_text
apache_beam.ml.transforms.tft_test.BagOfWordsTest ‑ test_bag_of_words_on_list_seperated_words_custom_ngrams
apache_beam.ml.transforms.tft_test.BagOfWordsTest ‑ test_bag_of_words_on_list_seperated_words_default_ngrams
apache_beam.ml.transforms.tft_test.BagOfWordsTest ‑ test_bag_of_words_on_numpy_data
apache_beam.ml.transforms.tft_test.BagOfWordsTest ‑ test_count_per_key_on_list
apache_beam.ml.transforms.tft_test.BucketizeTest ‑ test_bucketize
apache_beam.ml.transforms.tft_test.BucketizeTest ‑ test_bucketize_list
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_compute_and_apply_vocabulary
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_compute_and_apply_vocabulary_inputs
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_multiple_columns_with_default_vocab_name
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_multiple_columns_with_vocab_name
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_string_split_with_multiple_delimiters
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_string_split_with_single_delimiter
apache_beam.ml.transforms.tft_test.ComputeAndApplyVocabTest ‑ test_with_basic_example_list
apache_beam.ml.transforms.tft_test.DeduplicateTensorPerRowTest ‑ test_deduplicate
apache_beam.ml.transforms.tft_test.DeduplicateTensorPerRowTest ‑ test_deduplicate_different_output_sizes
apache_beam.ml.transforms.tft_test.DeduplicateTensorPerRowTest ‑ test_deduplicate_no_op
apache_beam.ml.transforms.tft_test.HashStringsTest ‑ test_multi_bucket_one_string
apache_beam.ml.transforms.tft_test.HashStringsTest ‑ test_multi_buckets_multi_string
apache_beam.ml.transforms.tft_test.HashStringsTest ‑ test_one_bucket_multi_string
apache_beam.ml.transforms.tft_test.HashStringsTest ‑ test_single_bucket
apache_beam.ml.transforms.tft_test.HashStringsTest ‑ test_two_bucket_multi_string
apache_beam.ml.transforms.tft_test.NGramsTest ‑ test_ngrams_on_list_separated_words
apache_beam.ml.transforms.tft_test.NGramsTest ‑ test_ngrams_on_list_separated_words_default_args
apache_beam.ml.transforms.tft_test.NGramsTest ‑ test_with_multiple_string_delimiters
apache_beam.ml.transforms.tft_test.NGramsTest ‑ test_with_string_split_delimiter
apache_beam.ml.transforms.tft_test.ScaleTo01Test ‑ test_ScaleTo01
apache_beam.ml.transforms.tft_test.ScaleTo01Test ‑ test_ScaleTo01_list
apache_beam.ml.transforms.tft_test.ScaleToGaussianTest ‑ test_gaussian_list_skewed_distribution
apache_beam.ml.transforms.tft_test.ScaleToGaussianTest ‑ test_gaussian_list_uniform_distribution
apache_beam.ml.transforms.tft_test.ScaleToGaussianTest ‑ test_gaussian_skewed
apache_beam.ml.transforms.tft_test.ScaleToGaussianTest ‑ test_gaussian_uniform
apache_beam.ml.transforms.tft_test.ScaleToMinMaxTest ‑ test_fail_max_value_less_than_min
apache_beam.ml.transforms.tft_test.ScaleToMinMaxTest ‑ test_scale_to_min_max
apache_beam.ml.transforms.tft_test.ScaleZScoreTest ‑ test_z_score
apache_beam.ml.transforms.tft_test.ScaleZScoreTest ‑ test_z_score_list_data
apache_beam.ml.transforms.tft_test.TFIDIFTest ‑ test_tfidf_compute_vocab_size_during_runtime