Compare commits

..

No commits in common. "main" and "v9.0.1" have entirely different histories.
main ... v9.0.1

6 changed files with 8 additions and 29 deletions

View File

@@ -16,4 +16,4 @@ jobs:
deployments: write deployments: write
id-token: write id-token: write
contents: read contents: read
pull-requests: write pull-requests: read

View File

@@ -53,8 +53,7 @@ $ conda install -c conda-forge eland
### Compatibility ### Compatibility
- Supports Python 3.9, 3.10, 3.11 and 3.12. - Supports Python 3.9, 3.10, 3.11, 3.12 and Pandas 1.5
- Supports Pandas 1.5 and 2.
- Supports Elasticsearch 8+ clusters, recommended 8.16 or later for all features to work. - Supports Elasticsearch 8+ clusters, recommended 8.16 or later for all features to work.
If you are using the NLP with PyTorch feature make sure your Eland minor version matches the minor If you are using the NLP with PyTorch feature make sure your Eland minor version matches the minor
version of your Elasticsearch cluster. For all other features it is sufficient for the major versions version of your Elasticsearch cluster. For all other features it is sufficient for the major versions

View File

@@ -1,6 +1,4 @@
project: 'Eland Python client' project: 'Eland Python client'
products:
- id: elasticsearch-client
cross_links: cross_links:
- docs-content - docs-content
toc: toc:

View File

@@ -50,7 +50,10 @@ class Index:
# index_field.setter # index_field.setter
self._is_source_field = False self._is_source_field = False
self.es_index_field = es_index_field # The type:ignore is due to mypy not being smart enough
# to recognize the property.setter has a different type
# than the property.getter.
self.es_index_field = es_index_field # type: ignore
@property @property
def sort_field(self) -> str: def sort_field(self) -> str:

View File

@@ -62,10 +62,10 @@ extras = {
"requests<3", "requests<3",
"torch==2.5.1", "torch==2.5.1",
"tqdm", "tqdm",
"sentence-transformers>=5.0.0,<6.0.0", "sentence-transformers>=2.1.0,<=2.7.0",
# sentencepiece is a required dependency for the slow tokenizers # sentencepiece is a required dependency for the slow tokenizers
# https://huggingface.co/transformers/v4.4.2/migration.html#sentencepiece-is-removed-from-the-required-dependencies # https://huggingface.co/transformers/v4.4.2/migration.html#sentencepiece-is-removed-from-the-required-dependencies
"transformers[sentencepiece]>=4.47.0,<4.50.3", "transformers[sentencepiece]>=4.47.0",
], ],
} }
extras["all"] = list({dep for deps in extras.values() for dep in deps}) extras["all"] = list({dep for deps in extras.values() for dep in deps})

View File

@@ -65,8 +65,6 @@ TEXT_EMBEDDING_MODELS = [
TEXT_SIMILARITY_MODELS = ["mixedbread-ai/mxbai-rerank-xsmall-v1"] TEXT_SIMILARITY_MODELS = ["mixedbread-ai/mxbai-rerank-xsmall-v1"]
TEXT_EXPANSION_MODELS = ["naver/splade-v3-distilbert"]
@pytest.fixture(scope="function", autouse=True) @pytest.fixture(scope="function", autouse=True)
def setup_and_tear_down(): def setup_and_tear_down():
@@ -157,22 +155,3 @@ class TestPytorchModel:
assert result.body["inference_results"][0]["predicted_value"] < 0 assert result.body["inference_results"][0]["predicted_value"] < 0
assert result.body["inference_results"][1]["predicted_value"] > 0 assert result.body["inference_results"][1]["predicted_value"] > 0
@pytest.mark.skipif(ES_VERSION < (9, 0, 0), reason="requires current major version")
@pytest.mark.parametrize("model_id", TEXT_EXPANSION_MODELS)
def test_text_expansion(self, model_id):
with tempfile.TemporaryDirectory() as tmp_dir:
ptm = download_model_and_start_deployment(
tmp_dir, False, model_id, "text_expansion"
)
result = ptm.infer(
docs=[
{
"text_field": "The Amazon rainforest covers most of the Amazon basin in South America"
},
{"text_field": "Paris is the capital of France"},
]
)
assert len(result.body["inference_results"][0]["predicted_value"]) > 0
assert len(result.body["inference_results"][1]["predicted_value"]) > 0