Compare commits

..

No commits in common. "main" and "v8.2.0" have entirely different histories.
main ... v8.2.0

248 changed files with 7575 additions and 14714 deletions

View File

@@ -1,11 +0,0 @@
#!/usr/bin/env bash
set -eo pipefail
export LC_ALL=en_US.UTF-8
echo "--- Building the Wolfi image"
# Building the linux/arm64 image takes about one hour on Buildkite, which is too slow
docker build --file Dockerfile.wolfi .
echo "--- Building the public image"
docker build .

View File

@@ -1,8 +0,0 @@
#!/usr/bin/env bash
docker build --file .buildkite/Dockerfile --tag elastic/eland --build-arg PYTHON_VERSION=${PYTHON_VERSION} .
docker run \
--name doc_build \
--rm \
elastic/eland \
bash -c "apt-get update && apt-get install --yes pandoc && nox -s docs"

View File

@@ -1,7 +0,0 @@
#!/usr/bin/env bash
docker build --file .buildkite/Dockerfile --tag elastic/eland --build-arg PYTHON_VERSION=${PYTHON_VERSION} .
docker run \
--name linter \
--rm \
elastic/eland \
nox -s lint

View File

@@ -1,50 +0,0 @@
steps:
- label: ":terminal: Lint code"
env:
PYTHON_VERSION: 3
agents:
provider: "gcp"
machineType: "n2-standard-2"
commands:
- ./.buildkite/lint-code.sh
- label: ":books: Build documentation"
env:
PYTHON_VERSION: 3.9-bookworm
agents:
provider: "gcp"
machineType: "n2-standard-2"
commands:
- ./.buildkite/build-documentation.sh
- label: ":docker: Build Wolfi image"
env:
PYTHON_VERSION: 3.11-bookworm
agents:
provider: "gcp"
machineType: "n2-standard-2"
commands:
- ./.buildkite/build-docker-images.sh
- label: ":python: {{ matrix.python }} :elasticsearch: {{ matrix.stack }} :pandas: {{ matrix.pandas }}"
agents:
provider: "gcp"
machineType: "n2-standard-4"
env:
PYTHON_VERSION: "{{ matrix.python }}"
PANDAS_VERSION: "{{ matrix.pandas }}"
TEST_SUITE: "xpack"
ELASTICSEARCH_VERSION: "{{ matrix.stack }}"
matrix:
setup:
# Python and pandas versions need to be added to the nox configuration too
# (in the decorators of the test method in noxfile.py)
pandas:
- '1.5.0'
- '2.2.3'
python:
- '3.12'
- '3.11'
- '3.10'
- '3.9'
stack:
- '9.0.0'
- '9.1.0-SNAPSHOT'
command: ./.buildkite/run-tests

View File

@@ -1,28 +0,0 @@
{
"jobs": [
{
"enabled": true,
"pipeline_slug": "eland",
"allow_org_users": true,
"allowed_repo_permissions": ["admin", "write"],
"build_on_commit": true,
"build_on_comment": true,
"trigger_comment_regex": "^(?:(?:buildkite\\W+)?(?:build|test)\\W+(?:this|it))",
"always_trigger_comment_regex": "^(?:(?:buildkite\\W+)?(?:build|test)\\W+(?:this|it))",
"skip_ci_labels": ["skip-ci"],
"skip_ci_on_only_changed": ["\\.md$"]
},
{
"enabled": true,
"pipeline_slug": "docs-build-pr",
"allow_org_users": true,
"allowed_repo_permissions": ["admin", "write"],
"build_on_commit": true,
"build_on_comment": true,
"trigger_comment_regex": "^(?:(?:buildkite\\W+)?(?:build|test)\\W+(?:this|it))",
"always_trigger_comment_regex": "^(?:(?:buildkite\\W+)?(?:build|test)\\W+(?:this|it))",
"skip_ci_labels": ["skip-ci"],
"skip_ci_on_only_changed": ["\\.md$"]
}
]
}

View File

@@ -1,28 +0,0 @@
steps:
- input: "Build parameters"
fields:
- text: "Release version"
key: "RELEASE_VERSION"
default: ""
format: "\\d{1,}.\\d{1,}.\\d{1,}"
hint: "The version to release e.g. '8.10.0' (without the v prefix)."
- select: "Environment"
key: "ENVIRONMENT"
options:
- label: "Staging"
value: "staging"
- label: "Production"
value: "production"
- wait
- label: "Release Docker Artifacts for Eland"
command: |
set -eo pipefail
export RELEASE_VERSION=$(buildkite-agent meta-data get RELEASE_VERSION)
export ENVIRONMENT=$(buildkite-agent meta-data get ENVIRONMENT)
export BUILDKIT_PROGRESS=plain
bash .buildkite/release-docker/run.sh
# Run on GCP to use `docker`
agents:
provider: gcp

View File

@@ -1,37 +0,0 @@
#!/usr/bin/env bash
set -eo pipefail
export LC_ALL=en_US.UTF-8
echo "Publishing Eland $RELEASE_VERSION Docker image to $ENVIRONMENT"
set +x
# login to docker registry
docker_registry=$(vault read -field registry "secret/ci/elastic-eland/container-library/eland-$ENVIRONMENT")
docker_username=$(vault read -field username "secret/ci/elastic-eland/container-library/eland-$ENVIRONMENT")
docker_password=$(vault read -field password "secret/ci/elastic-eland/container-library/eland-$ENVIRONMENT")
echo "$docker_password" | docker login "$docker_registry" --username "$docker_username" --password-stdin
unset docker_username docker_password
set -x
tmp_dir=$(mktemp --directory)
pushd "$tmp_dir"
git clone https://github.com/elastic/eland
pushd eland
git checkout "v${RELEASE_VERSION}"
git --no-pager show
# Create builder that supports QEMU emulation (needed for linux/arm64)
docker buildx rm --force eland-multiarch-builder || true
docker buildx create --name eland-multiarch-builder --bootstrap --use
docker buildx build --push \
--file Dockerfile.wolfi \
--tag "$docker_registry/eland/eland:$RELEASE_VERSION" \
--tag "$docker_registry/eland/eland:latest" \
--platform linux/amd64,linux/arm64 \
"$PWD"
popd
popd
rm -rf "$tmp_dir"

View File

@@ -1,8 +1,6 @@
ARG PYTHON_VERSION=3.9
FROM python:${PYTHON_VERSION}
ENV FORCE_COLOR=1
WORKDIR /code/eland
RUN python -m pip install nox

82
.ci/jobs/defaults.yml Executable file
View File

@@ -0,0 +1,82 @@
---
##### GLOBAL METADATA
- meta:
cluster: clients-ci
##### JOB DEFAULTS
- job:
project-type: matrix
logrotate:
daysToKeep: 30
numToKeep: 100
parameters:
- string:
name: branch_specifier
default: refs/heads/main
description: the Git branch specifier to build (<branchName>, <tagName>,
<commitId>, etc.)
properties:
- github:
url: https://github.com/elastic/eland
- inject:
properties-content: HOME=$JENKINS_HOME
concurrent: true
node: flyweight
scm:
- git:
name: origin
credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba
reference-repo: /var/lib/jenkins/.git-references/eland.git
branches:
- ${branch_specifier}
url: git@github.com:elastic/eland.git
basedir: ''
wipe-workspace: 'True'
triggers:
- github
axes:
- axis:
type: slave
name: label
values:
- linux
- axis:
type: yaml
filename: .ci/test-matrix.yml
name: ELASTICSEARCH_VERSION
- axis:
type: yaml
filename: .ci/test-matrix.yml
name: PYTHON_VERSION
- axis:
type: yaml
filename: .ci/test-matrix.yml
name: PANDAS_VERSION
- axis:
type: yaml
filename: .ci/test-matrix.yml
name: TEST_SUITE
yaml-strategy:
exclude-key: exclude
filename: .ci/test-matrix.yml
wrappers:
- ansicolor
- timeout:
type: absolute
timeout: 120
fail: true
- timestamps
- workspace-cleanup
builders:
- shell: |-
#!/usr/local/bin/runbld
.ci/run-tests
publishers:
- email:
recipients: build-lang-clients@elastic.co
- junit:
results: "build/output/*-junit.xml"
allow-empty-results: true

14
.ci/jobs/elastic+eland+7.x.yml Executable file
View File

@@ -0,0 +1,14 @@
---
- job:
name: elastic+eland+7.x
display-name: 'elastic / eland # 7.x'
description: Eland is a data science client with a Pandas-like interface
junit_results: "*-junit.xml"
parameters:
- string:
name: branch_specifier
default: refs/heads/7.x
description: The Git branch specifier to build
triggers:
- github
- timed: '@daily'

14
.ci/jobs/elastic+eland+main.yml Executable file
View File

@@ -0,0 +1,14 @@
---
- job:
name: elastic+eland+main
display-name: 'elastic / eland # main'
description: Eland is a data science client with a Pandas-like interface
junit_results: "*-junit.xml"
parameters:
- string:
name: branch_specifier
default: refs/heads/main
description: The Git branch specifier to build
triggers:
- github
- timed: '@daily'

View File

@@ -0,0 +1,19 @@
---
- job:
name: elastic+eland+pull-request
display-name: 'elastic / eland # pull-request'
description: Testing of eland pull requests.
scm:
- git:
branches:
- ${ghprbActualCommit}
refspec: +refs/pull/*:refs/remotes/origin/pr/*
triggers:
- github-pull-request:
org-list:
- elastic
allow-whitelist-orgs-as-admins: true
github-hooks: true
status-context: clients-ci
cancel-builds-on-update: true
publishers: []

View File

@@ -16,12 +16,7 @@ fi
set -euxo pipefail
# realpath on MacOS use different flags than on Linux
if [[ "$OSTYPE" == "darwin"* ]]; then
SCRIPT_PATH=$(dirname $(realpath $0))
else
SCRIPT_PATH=$(dirname $(realpath -s $0))
fi
moniker=$(echo "$ELASTICSEARCH_VERSION" | tr -C "[:alnum:]" '-')
suffix=rest-test
@@ -42,11 +37,6 @@ NETWORK_NAME=${NETWORK_NAME-"$network_default"}
set +x
# Set vm.max_map_count kernel setting to 262144 if we're in CI
if [[ "$BUILDKITE" == "true" ]]; then
sudo sysctl -w vm.max_map_count=262144
fi
function cleanup_volume {
if [[ "$(docker volume ls -q -f name=$1)" ]]; then
echo -e "\033[34;1mINFO:\033[0m Removing volume $1\033[0m"
@@ -116,12 +106,6 @@ environment=($(cat <<-END
--env node.attr.testattr=test
--env path.repo=/tmp
--env repositories.url.allowed_urls=http://snapshot.test*
--env ELASTIC_PASSWORD=$ELASTIC_PASSWORD
--env xpack.license.self_generated.type=trial
--env xpack.security.enabled=false
--env xpack.security.http.ssl.enabled=false
--env xpack.security.transport.ssl.enabled=false
--env xpack.ml.max_machine_memory_percent=90
END
))
@@ -130,14 +114,29 @@ volumes=($(cat <<-END
END
))
if [[ "$ELASTICSEARCH_VERSION" != *oss* ]]; then
environment+=($(cat <<-END
--env ELASTIC_PASSWORD=$ELASTIC_PASSWORD
--env xpack.license.self_generated.type=trial
--env xpack.security.enabled=false
--env xpack.security.http.ssl.enabled=false
--env xpack.security.transport.ssl.enabled=false
--env xpack.ml.max_machine_memory_percent=90
END
))
fi
url="http://$NODE_NAME"
if [[ "$ELASTICSEARCH_VERSION" != *oss* ]]; then
url="http://elastic:$ELASTIC_PASSWORD@$NODE_NAME"
fi
# Pull the container, retry on failures up to 5 times with
# short delays between each attempt. Fixes most transient network errors.
docker_pull_attempts=0
until [ "$docker_pull_attempts" -ge 5 ]
do
docker pull docker.elastic.co/elasticsearch/$ELASTICSEARCH_VERSION && break
docker pull docker.elastic.co/elasticsearch/"$ELASTICSEARCH_VERSION" && break
docker_pull_attempts=$((docker_pull_attempts+1))
sleep 10
done
@@ -147,7 +146,7 @@ set -x
docker run \
--name "$NODE_NAME" \
--network "$NETWORK_NAME" \
--env ES_JAVA_OPTS=-"Xms2g -Xmx2g" \
--env ES_JAVA_OPTS=-"Xms1g -Xmx1g" \
"${environment[@]}" \
"${volumes[@]}" \
--publish "$HTTP_PORT":9200 \

View File

@@ -12,7 +12,7 @@
# When run in CI the test-matrix is used to define additional variables
# TEST_SUITE -- `xpack`
# TEST_SUITE -- either `oss` or `xpack`, defaults to `oss` in `run-tests`
#
PYTHON_VERSION=${PYTHON_VERSION-3.8}
@@ -25,7 +25,7 @@ echo -e "\033[34;1mINFO:\033[0m PANDAS_VERSION ${PANDAS_VERSION}\033[0m"
echo -e "\033[1m>>>>> Build [elastic/eland container] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m"
docker build --file .buildkite/Dockerfile --tag elastic/eland --build-arg PYTHON_VERSION=${PYTHON_VERSION} .
docker build --file .ci/Dockerfile --tag elastic/eland --build-arg PYTHON_VERSION=${PYTHON_VERSION} .
echo -e "\033[1m>>>>> Run [elastic/eland container] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m"

View File

@@ -9,9 +9,11 @@ if [[ -z $ELASTICSEARCH_VERSION ]]; then
fi
set -euxo pipefail
TEST_SUITE=${TEST_SUITE-xpack}
NODE_NAME=localhost
PANDAS_VERSION=${PANDAS_VERSION-1.5.0}
PANDAS_VERSION=${PANDAS_VERSION-1.3.0}
elasticsearch_image=elasticsearch
elasticsearch_url=http://elastic:changeme@${NODE_NAME}:9200
@@ -27,7 +29,7 @@ function cleanup {
NODE_NAME=${NODE_NAME} \
NETWORK_NAME=elasticsearch \
CLEANUP=true \
bash ./.buildkite/run-elasticsearch.sh
bash ./.ci/run-elasticsearch.sh
# Report status and exit
if [[ "$status" == "0" ]]; then
echo -e "\n\033[32;1mSUCCESS run-tests\033[0m"
@@ -39,15 +41,15 @@ function cleanup {
}
trap cleanup EXIT
echo "--- :elasticsearch: Starting Elasticsearch"
echo -e "\033[1m>>>>> Start [$ELASTICSEARCH_VERSION container] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m"
ELASTICSEARCH_VERSION=${elasticsearch_image}:${ELASTICSEARCH_VERSION} \
NODE_NAME=${NODE_NAME} \
NETWORK_NAME=host \
DETACH=true \
bash .buildkite/run-elasticsearch.sh
bash .ci/run-elasticsearch.sh
echo "+++ :python: Run tests"
echo -e "\033[1m>>>>> Repository specific tests >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m"
ELASTICSEARCH_CONTAINER=${elasticsearch_image}:${ELASTICSEARCH_VERSION} \
NETWORK_NAME=host \
@@ -55,4 +57,5 @@ ELASTICSEARCH_CONTAINER=${elasticsearch_image}:${ELASTICSEARCH_VERSION} \
ELASTICSEARCH_URL=${elasticsearch_url} \
TEST_SUITE=${TEST_SUITE} \
PANDAS_VERSION=${PANDAS_VERSION} \
bash .buildkite/run-repository.sh
bash .ci/run-repository.sh

20
.ci/test-matrix.yml Executable file
View File

@@ -0,0 +1,20 @@
---
ELASTICSEARCH_VERSION:
- '8.1.0-SNAPSHOT'
- '8.0.0-SNAPSHOT'
PANDAS_VERSION:
- '1.2.0'
- '1.3.0'
PYTHON_VERSION:
- '3.10'
- '3.9'
- '3.8'
- '3.7'
TEST_SUITE:
- xpack
exclude: ~

View File

@@ -1,62 +1,4 @@
# docs and example
docs/*
example/*
# Git
.git
# Nox
.nox
# Compiled python modules.
*.pyc
__pycache__/
# Setuptools distribution folder.
dist/
# Build folder
build/
# pytest results
tests/dataframe/results/*csv
result_images/
# Python egg metadata, regenerated from source files by setuptools.
/*.egg-info
eland.egg-info/
# PyCharm files
.idea/
# vscode files
.vscode/
# pytest files
.pytest_cache/
# Ignore MacOSX files
.DS_Store
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# Environments
.env
.venv
.nox
env/
venv/
ENV/
env.bak/
venv.bak/
.mypy_cache
# Coverage
.coverage

View File

@@ -1,26 +0,0 @@
name: Backport
on:
pull_request_target:
types:
- closed
- labeled
jobs:
backport:
name: Backport
runs-on: ubuntu-latest
# Only react to merged PRs for security reasons.
# See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target.
if: >
github.event.pull_request.merged
&& (
github.event.action == 'closed'
|| (
github.event.action == 'labeled'
&& contains(github.event.label.name, 'backport')
)
)
steps:
- uses: tibdex/backport@9565281eda0731b1d20c4025c43339fb0a23812e # v2.0.4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}

38
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,38 @@
name: CI
on: [push, pull_request]
defaults:
run:
shell: bash
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
uses: actions/checkout@v2
- name: Set up Python 3
uses: actions/setup-python@v2
with:
python-version: 3
- name: Install dependencies
run: python3 -m pip install nox
- name: Lint the code
run: nox -s lint
docs:
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
uses: actions/checkout@v2
- name: Set up Python 3
uses: actions/setup-python@v2
with:
python-version: 3
- name: Install dependencies
run: |
sudo apt-get install --yes pandoc
python3 -m pip install nox
- name: Build documentation
run: nox -s docs

View File

@@ -1,19 +0,0 @@
name: docs-build
on:
push:
branches:
- main
pull_request_target: ~
merge_group: ~
jobs:
docs-preview:
uses: elastic/docs-builder/.github/workflows/preview-build.yml@main
with:
path-pattern: docs/**
permissions:
deployments: write
id-token: write
contents: read
pull-requests: write

View File

@@ -1,14 +0,0 @@
name: docs-cleanup
on:
pull_request_target:
types:
- closed
jobs:
docs-preview:
uses: elastic/docs-builder/.github/workflows/preview-cleanup.yml@main
permissions:
contents: none
id-token: write
deployments: write

View File

@@ -1,14 +0,0 @@
version: 2
build:
os: ubuntu-22.04
tools:
python: "3.11"
python:
install:
- path: .
- requirements: docs/requirements-docs.txt
sphinx:
configuration: docs/sphinx/conf.py

View File

@@ -2,331 +2,6 @@
Changelog
=========
9.0.1 (2025-04-30)
------------------
* Forbid Elasticsearch 8 client or server (`#780 <https://github.com/elastic/eland/pull/780>`_)
* Fix DeBERTa tokenization (`#769 <https://github.com/elastic/eland/pull/769>`_)
* Upgrade PyTorch to 2.5.1 (`#785 <https://github.com/elastic/eland/pull/785>`_)
* Upgrade LightGBM to 4.6.0 (`#782 <https://github.com/elastic/eland/pull/782>`_)
9.0.0 (2025-04-15)
------------------
* Drop Python 3.8, Support Python 3.12 (`#743 <https://github.com/elastic/eland/pull/743>`_)
* Support Pandas 2 (`#742 <https://github.com/elastic/eland/pull/742>`_)
* Upgrade transformers to 4.47 (`#752 <https://github.com/elastic/eland/pull/752>`_)
* Remove ML model export as sklearn Pipeline (`#744 <https://github.com/elastic/eland/pull/744>`_)
* Allow scikit-learn 1.5 (`#729 <https://github.com/elastic/eland/pull/729>`_)
* Migrate docs from AsciiDoc to Markdown (`#762 <https://github.com/elastic/eland/pull/762>`_)
8.17.0 (2025-01-07)
-------------------
* Support sparse embedding models such as SPLADE-v3-DistilBERT (`#740 <https://github.com/elastic/eland/pull/740>`_)
8.16.0 (2024-11-13)
-------------------
* Add deprecation warning for ESGradientBoostingModel subclasses (`#738 <https://github.com/elastic/eland/pull/738>`_)
8.15.4 (2024-10-17)
-------------------
* Revert "Allow reading Elasticsearch certs in Wolfi image" (`#734 <https://github.com/elastic/eland/pull/734>`_)
8.15.3 (2024-10-09)
-------------------
* Added support for DeBERTa-V2 tokenizer (`#717 <https://github.com/elastic/eland/pull/717>`_)
* Fixed ``--ca-cert`` with a shared Elasticsearch Docker volume (`#732 <https://github.com/elastic/eland/pull/732>`_)
8.15.2 (2024-10-02)
-------------------
* Fixed Docker image build (`#728 <https://github.com/elastic/eland/pull/728>`_)
8.15.1 (2024-10-01)
-------------------
* Upgraded PyTorch to version 2.3.1, which is compatible with Elasticsearch 8.15.2 or above (`#718 <https://github.com/elastic/eland/pull/718>`_)
* Migrated to distroless Wolfi base Docker image (`#720 <https://github.com/elastic/eland/pull/720>`_)
8.15.0 (2024-08-12)
-------------------
* Added a default truncation of ``second`` for text similarity (`#713 <https://github.com/elastic/eland/pull/713>`_)
* Added note about using text_similarity for rerank in the CLI (`#716 <https://github.com/elastic/eland/pull/716>`_)
* Added support for lists in result hits (`#707 <https://github.com/elastic/eland/pull/707>`_)
* Removed input fields from exported LTR models (`#708 <https://github.com/elastic/eland/pull/708>`_)
8.14.0 (2024-06-10)
-------------------
Added
^^^^^
* Added Elasticsearch Serverless support in DataFrames (`#690`_, contributed by `@AshokChoudhary11`_) and eland_import_hub_model (`#698`_)
Fixed
^^^^^
* Fixed Python 3.8 support (`#695`_, contributed by `@bartbroere`_)
* Fixed non _source fields missing from the results hits (`#693`_, contributed by `@bartbroere`_)
.. _@AshokChoudhary11: https://github.com/AshokChoudhary11
.. _#690: https://github.com/elastic/eland/pull/690
.. _#693: https://github.com/elastic/eland/pull/693
.. _#695: https://github.com/elastic/eland/pull/695
.. _#698: https://github.com/elastic/eland/pull/698
8.13.1 (2024-05-03)
-------------------
Added
^^^^^
* Added support for HTTP proxies in eland_import_hub_model (`#688`_)
.. _#688: https://github.com/elastic/eland/pull/688
8.13.0 (2024-03-27)
-------------------
Added
^^^^^
* Added support for Python 3.11 (`#681`_)
* Added ``eland.DataFrame.to_json`` function (`#661`_, contributed by `@bartbroere`_)
* Added override option to specify the model's max input size (`#674`_)
Changed
^^^^^^^
* Upgraded torch to 2.1.2 (`#671`_)
* Mirrored pandas' ``lineterminator`` instead of ``line_terminator`` in ``to_csv`` (`#595`_, contributed by `@bartbroere`_)
.. _#595: https://github.com/elastic/eland/pull/595
.. _#661: https://github.com/elastic/eland/pull/661
.. _#671: https://github.com/elastic/eland/pull/671
.. _#674: https://github.com/elastic/eland/pull/674
.. _#681: https://github.com/elastic/eland/pull/681
8.12.1 (2024-01-30)
-------------------
Fixed
^^^^^
* Fix missing value support for XGBRanker (`#654`_)
.. _#654: https://github.com/elastic/eland/pull/654
8.12.0 (2024-01-18)
-------------------
Added
^^^^^
* Supported XGBRanker model (`#649`_)
* Accepted LTR (Learning to rank) model config when importing model (`#645`_, `#651`_)
* Added LTR feature logger (`#648`_)
* Added ``prefix_string`` config option to the import model hub script (`#642`_)
* Made online retail analysis notebook runnable in Colab (`#641`_)
* Added new movie dataset to the tests (`#646`_)
.. _#641: https://github.com/elastic/eland/pull/641
.. _#642: https://github.com/elastic/eland/pull/642
.. _#645: https://github.com/elastic/eland/pull/645
.. _#646: https://github.com/elastic/eland/pull/646
.. _#648: https://github.com/elastic/eland/pull/648
.. _#649: https://github.com/elastic/eland/pull/649
.. _#651: https://github.com/elastic/eland/pull/651
8.11.1 (2023-11-22)
-------------------
Added
^^^^^
* Make demo notebook runnable in Colab (`#630`_)
Changed
^^^^^^^
* Bump Shap version to 0.43 (`#636`_)
Fixed
^^^^^
* Fix failed import of Sentence Transformer RoBERTa models (`#637`_)
.. _#630: https://github.com/elastic/eland/pull/630
.. _#636: https://github.com/elastic/eland/pull/636
.. _#637: https://github.com/elastic/eland/pull/637
8.11.0 (2023-11-08)
-------------------
Added
^^^^^
* Support E5 small multilingual model (`#625`_)
Changed
^^^^^^^
* Stream writes in ``ed.DataFrame.to_csv()`` (`#579`_)
* Improve memory estimation for NLP models (`#568`_)
Fixed
^^^^^
* Fixed deprecations in preparation of Pandas 2.0 support (`#602`_, `#603`_, contributed by `@bartbroere`_)
.. _#568: https://github.com/elastic/eland/pull/568
.. _#579: https://github.com/elastic/eland/pull/579
.. _#602: https://github.com/elastic/eland/pull/602
.. _#603: https://github.com/elastic/eland/pull/603
.. _#625: https://github.com/elastic/eland/pull/625
8.10.1 (2023-10-11)
-------------------
Fixed
^^^^^
* Fixed direct usage of TransformerModel (`#619`_)
.. _#619: https://github.com/elastic/eland/pull/619
8.10.0 (2023-10-09)
-------------------
Added
^^^^^
* Published pre-built Docker images to docker.elastic.co/eland/eland (`#613`_)
* Allowed importing private HuggingFace models (`#608`_)
* Added Apple Silicon (arm64) support to Docker image (`#615`_)
* Allowed importing some DPR models like ance-dpr-context-multi (`#573`_)
* Allowed using the Pandas API without monitoring/main permissions (`#581`_)
Changed
^^^^^^^
* Updated Docker image to Debian 12 Bookworm (`#613`_)
* Reduced Docker image size by not installing unused PyTorch GPU support on amd64 (`#615`_)
* Reduced model chunk size to 1MB (`#605`_)
Fixed
^^^^^
* Fixed deprecations in preparation of Pandas 2.0 support (`#593`_, `#596`_, contributed by `@bartbroere`_)
.. _@bartbroere: https://github.com/bartbroere
.. _#613: https://github.com/elastic/eland/pull/613
.. _#608: https://github.com/elastic/eland/pull/608
.. _#615: https://github.com/elastic/eland/pull/615
.. _#573: https://github.com/elastic/eland/pull/573
.. _#581: https://github.com/elastic/eland/pull/581
.. _#605: https://github.com/elastic/eland/pull/605
.. _#593: https://github.com/elastic/eland/pull/593
.. _#596: https://github.com/elastic/eland/pull/596
8.9.0 (2023-08-24)
------------------
Added
^^^^^
* Simplify embedding model support and loading (`#569`_)
* Make eland_import_hub_model easier to find on Windows (`#559`_)
* Update trained model inference endpoint (`#556`_)
* Add BertJapaneseTokenizer support with bert_ja tokenization configuration (`#534`_)
* Add ability to upload xlm-roberta tokenized models (`#518`_)
* Tolerate different model output formats when measuring embedding size (`#535`_)
* Generate valid NLP model id from file path (`#541`_)
* Upgrade torch to 1.13.1 and check the cluster version before uploading a NLP model (`#522`_)
* Set embedding_size config parameter for Text Embedding models (`#532`_)
* Add support for the pass_through task (`#526`_)
Fixed
^^^^^
* Fixed black to comply with the code style (`#557`_)
* Fixed No module named 'torch' (`#553`_)
* Fix autosummary directive by removing hack autosummaries (`#548`_)
* Prevent TypeError with None check (`#525`_)
.. _#518: https://github.com/elastic/eland/pull/518
.. _#522: https://github.com/elastic/eland/pull/522
.. _#525: https://github.com/elastic/eland/pull/525
.. _#526: https://github.com/elastic/eland/pull/526
.. _#532: https://github.com/elastic/eland/pull/532
.. _#534: https://github.com/elastic/eland/pull/534
.. _#535: https://github.com/elastic/eland/pull/535
.. _#541: https://github.com/elastic/eland/pull/541
.. _#548: https://github.com/elastic/eland/pull/548
.. _#553: https://github.com/elastic/eland/pull/553
.. _#556: https://github.com/elastic/eland/pull/556
.. _#557: https://github.com/elastic/eland/pull/557
.. _#559: https://github.com/elastic/eland/pull/559
.. _#569: https://github.com/elastic/eland/pull/569
8.7.0 (2023-03-30)
------------------
Added
^^^^^
* Added a new NLP model task type "text_similarity" (`#486`_)
* Added a new NLP model task type "text_expansion" (`#520`_)
* Added support for exporting an Elastic ML model as a scikit-learn pipeline via ``MLModel.export_model()`` (`#509`_)
Fixed
^^^^^
* Fixed an issue that occurred when LightGBM was installed but libomp wasn't installed on the system. (`#499`_)
.. _#486: https://github.com/elastic/eland/pull/486
.. _#499: https://github.com/elastic/eland/pull/499
.. _#509: https://github.com/elastic/eland/pull/509
.. _#520: https://github.com/elastic/eland/pull/520
8.3.0 (2022-07-11)
------------------
Added
^^^^^
* Added a new NLP model task type "auto" which infers the task type based on model configuration and architecture (`#475`_)
Changed
^^^^^^^
* Changed required version of 'torch' package to `>=1.11.0,<1.12` to match required PyTorch version for Elasticsearch 8.3 (was `>=1.9.0,<2`) (`#479`_)
* Changed the default value of the `--task-type` parameter for the `eland_import_hub_model` CLI to be "auto" (`#475`_)
Fixed
^^^^^
* Fixed decision tree classifier serialization to account for probabilities (`#465`_)
* Fixed PyTorch model quantization (`#472`_)
.. _#465: https://github.com/elastic/eland/pull/465
.. _#472: https://github.com/elastic/eland/pull/472
.. _#475: https://github.com/elastic/eland/pull/475
.. _#479: https://github.com/elastic/eland/pull/479
8.2.0 (2022-05-09)
------------------

View File

@@ -78,15 +78,9 @@ Once your changes and tests are ready to submit for review:
# Run Auto-format, lint, mypy type checker for your changes
$ nox -s format
# Launch Elasticsearch with a trial licence and ML enabled
$ docker run --name elasticsearch -p 9200:9200 -e "discovery.type=single-node" -e "xpack.security.enabled=false" -e "xpack.license.self_generated.type=trial" docker.elastic.co/elasticsearch/elasticsearch:9.0.0
# See all test suites
$ nox -l
# Run a specific test suite
$ nox -rs "test-3.12(pandas_version='2.2.3')"
# Run a specific test
$ nox -rs "test-3.12(pandas_version='2.2.3')" -- -k test_learning_to_rank
# Run the test suite
$ pytest --doctest-modules eland/ tests/
$ pytest --nbval tests/notebook/
```
@@ -175,7 +169,7 @@ currently using a minimum version of PyCharm 2019.2.4.
* Setup Elasticsearch instance with docker
``` bash
> ELASTICSEARCH_VERSION=elasticsearch:8.17.0 BUILDKITE=false .buildkite/run-elasticsearch.sh
> ELASTICSEARCH_VERSION=elasticsearch:7.x-SNAPSHOT .ci/run-elasticsearch.sh
```
* Now check `http://localhost:9200`
@@ -197,7 +191,7 @@ currently using a minimum version of PyCharm 2019.2.4.
``` bash
> import eland as ed
> ed_df = ed.DataFrame('http://localhost:9200', 'flights')
> ed_df = ed.DataFrame('localhost', 'flights')
```
* To run the automatic formatter and check for lint issues run
@@ -209,7 +203,7 @@ currently using a minimum version of PyCharm 2019.2.4.
* To test specific versions of Python run
``` bash
> nox -s test-3.12
> nox -s test-3.8
```
### Documentation

View File

@@ -1,28 +1,14 @@
# syntax=docker/dockerfile:1
FROM python:3.10-slim
FROM debian:11.1
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y \
build-essential \
pkg-config \
cmake \
libzip-dev \
libjpeg-dev
RUN apt-get update && \
apt-get install -y build-essential pkg-config cmake \
python3-dev python3-pip python3-venv \
libzip-dev libjpeg-dev && \
apt-get clean
ADD . /eland
WORKDIR /eland
ARG TARGETPLATFORM
RUN --mount=type=cache,target=/root/.cache/pip \
if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
python3 -m pip install \
--no-cache-dir --disable-pip-version-check --extra-index-url https://download.pytorch.org/whl/cpu \
torch==2.5.1+cpu .[all]; \
else \
python3 -m pip install \
--no-cache-dir --disable-pip-version-check \
.[all]; \
fi
RUN python3 -m pip install --no-cache-dir --disable-pip-version-check .[all]
CMD ["/bin/sh"]

View File

@@ -1,42 +0,0 @@
# syntax=docker/dockerfile:1
FROM docker.elastic.co/wolfi/python:3.10-dev AS builder
WORKDIR /eland
ENV VIRTUAL_ENV=/eland/venv
RUN python3 -m venv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ADD . /eland
ARG TARGETPLATFORM
RUN --mount=type=cache,target=/root/.cache/pip \
if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
python3 -m pip install \
--no-cache-dir --disable-pip-version-check --extra-index-url https://download.pytorch.org/whl/cpu \
torch==2.5.1+cpu .[all]; \
else \
python3 -m pip install \
--no-cache-dir --disable-pip-version-check \
.[all]; \
fi
FROM docker.elastic.co/wolfi/python:3.10
WORKDIR /eland
ENV VIRTUAL_ENV=/eland/venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
COPY --from=builder /eland /eland
# The eland_import_hub_model script is intended to be executed by a shell,
# which will see its shebang line and then execute it with the Python
# interpreter of the virtual environment. We want to keep this behavior even
# with Wolfi so that users can use the image as before. To do that, we use two
# tricks:
#
# * copy /bin/sh (that is, busybox's ash) from the builder image
# * revert to Docker's the default entrypoint, which is the only way to pass
# parameters to `eland_import_hub_model` without needing quotes.
#
COPY --from=builder /bin/sh /bin/sh
ENTRYPOINT []

View File

@@ -50,6 +50,3 @@ Permission is hereby granted, free of charge, to any person obtaining a copy of
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--
This product contains an adapted version of the "us-national-parks" dataset, https://data.world/kevinnayar/us-national-parks, by Kevin Nayar, https://data.world/kevinnayar, is licensed under CC BY, https://creativecommons.org/licenses/by/4.0/legalcode

View File

@@ -9,10 +9,11 @@
<a href="https://pypi.org/project/eland"><img src="https://img.shields.io/pypi/v/eland.svg" alt="PyPI Version"></a>
<a href="https://anaconda.org/conda-forge/eland"><img src="https://img.shields.io/conda/vn/conda-forge/eland"
alt="Conda Version"></a>
<a href="https://pepy.tech/project/eland"><img src="https://static.pepy.tech/badge/eland" alt="Downloads"></a>
<a href="https://pepy.tech/project/eland"><img src="https://pepy.tech/badge/eland" alt="Downloads"></a>
<a href="https://pypi.org/project/eland"><img src="https://img.shields.io/pypi/status/eland.svg"
alt="Package Status"></a>
<a href="https://buildkite.com/elastic/eland"><img src="https://badge.buildkite.com/d92340e800bc06a7c7c02a71b8d42fcb958bd18c25f99fe2d9.svg" alt="Build Status"></a>
<a href="https://clients-ci.elastic.co/job/elastic+eland+main"><img
src="https://clients-ci.elastic.co/buildStatus/icon?job=elastic%2Beland%2Bmain" alt="Build Status"></a>
<a href="https://github.com/elastic/eland/blob/main/LICENSE.txt"><img src="https://img.shields.io/pypi/l/eland.svg"
alt="License"></a>
<a href="https://eland.readthedocs.io"><img
@@ -40,11 +41,6 @@ Eland can be installed from [PyPI](https://pypi.org/project/eland) with Pip:
$ python -m pip install eland
```
If using Eland to upload NLP models to Elasticsearch install the PyTorch extras:
```bash
$ python -m pip install 'eland[pytorch]'
```
Eland can also be installed from [Conda Forge](https://anaconda.org/conda-forge/eland) with Conda:
```bash
@ -53,15 +49,9 @@ $ conda install -c conda-forge eland
### Compatibility
- Supports Python 3.9, 3.10, 3.11 and 3.12.
- Supports Pandas 1.5 and 2.
- Supports Elasticsearch 8+ clusters, recommended 8.16 or later for all features to work.
If you are using the NLP with PyTorch feature make sure your Eland minor version matches the minor
version of your Elasticsearch cluster. For all other features it is sufficient for the major versions
to match.
- You need to install the appropriate version of PyTorch to import an NLP model. Run `python -m pip
install 'eland[pytorch]'` to install that version.
- Supports Python 3.7+ and Pandas 1.3
- Supports Elasticsearch clusters that are 7.11+, recommended 7.14 or later for all features to work.
Make sure your Eland major version matches the major version of your Elasticsearch cluster.
### Prerequisites
@ -79,23 +69,29 @@ specifying different package names.
### Docker
If you want to use Eland without installing it just to run the available scripts, use the Docker
image.
It can be used interactively:
Users wishing to use Eland without installing it, in order to just run the available scripts, can build the Docker
container:
```bash
$ docker run -it --rm --network host docker.elastic.co/eland/eland
$ docker build -t elastic/eland .
```
The container can now be used interactively:
```bash
$ docker run -it --rm --network host elastic/eland
```
Running installed scripts is also possible without an interactive shell, e.g.:
```bash
$ docker run -it --rm --network host \
docker.elastic.co/eland/eland \
elastic/eland \
eland_import_hub_model \
--url http://host.docker.internal:9200/ \
--hub-model-id elastic/distilbert-base-cased-finetuned-conll03-english \
--task-type ner
--task-type ner \
--start
```
### Connecting to Elasticsearch
@ -109,15 +105,15 @@ or a string containing the host to connect to:
```python
import eland as ed
# Connecting to an Elasticsearch instance running on 'http://localhost:9200'
df = ed.DataFrame("http://localhost:9200", es_index_pattern="flights")
# Connecting to an Elasticsearch instance running on 'localhost:9200'
df = ed.DataFrame("localhost:9200", es_index_pattern="flights")
# Connecting to an Elastic Cloud instance
from elasticsearch import Elasticsearch
es = Elasticsearch(
cloud_id="cluster-name:...",
basic_auth=("elastic", "<password>")
http_auth=("elastic", "<password>")
)
df = ed.DataFrame(es, es_index_pattern="flights")
```
@ -138,7 +134,7 @@ without overloading your machine.
>>> import eland as ed
>>> # Connect to 'flights' index via localhost Elasticsearch node
>>> df = ed.DataFrame('http://localhost:9200', 'flights')
>>> df = ed.DataFrame('localhost:9200', 'flights')
# eland.DataFrame instance has the same API as pandas.DataFrame
# except all data is in Elasticsearch. See .info() memory usage.
@ -200,12 +196,10 @@ libraries to be serialized and used as an inference model in Elasticsearch.
➤ [Read more about Machine Learning in Elasticsearch](https://www.elastic.co/guide/en/machine-learning/current/ml-getting-started.html)
```python
>>> from sklearn import datasets
>>> from xgboost import XGBClassifier
>>> from eland.ml import MLModel
# Train and exercise an XGBoost ML model locally
>>> training_data = datasets.make_classification(n_features=5)
>>> xgb_model = XGBClassifier(booster="gbtree")
>>> xgb_model.fit(training_data[0], training_data[1])
@ -214,7 +208,7 @@ libraries to be serialized and used as an inference model in Elasticsearch.
# Import the model into Elasticsearch
>>> es_model = MLModel.import_model(
es_client="http://localhost:9200",
es_client="localhost:9200",
model_id="xgb-classifier",
model=xgb_model,
feature_names=["f0", "f1", "f2", "f3", "f4"],
@ -239,29 +233,14 @@ $ eland_import_hub_model \
--start
```
The example above will automatically start a model deployment. This is a
good shortcut for initial experimentation, but for anything that needs
good throughput you should omit the `--start` argument from the Eland
command line and instead start the model using the ML UI in Kibana.
The `--start` argument will deploy the model with one allocation and one
thread per allocation, which will not offer good performance. When starting
the model deployment using the ML UI in Kibana or the Elasticsearch
[API](https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trained-model-deployment.html)
you will be able to set the threading options to make the best use of your
hardware.
```python
>>> import elasticsearch
>>> from pathlib import Path
>>> from eland.common import es_version
>>> from eland.ml.pytorch import PyTorchModel
>>> from eland.ml.pytorch.transformers import TransformerModel
>>> es = elasticsearch.Elasticsearch("http://elastic:mlqa_admin@localhost:9200")
>>> es_cluster_version = es_version(es)
# Load a Hugging Face transformers model directly from the model hub
>>> tm = TransformerModel(model_id="elastic/distilbert-base-cased-finetuned-conll03-english", task_type="ner", es_version=es_cluster_version)
>>> tm = TransformerModel("elastic/distilbert-base-cased-finetuned-conll03-english", "ner")
Downloading: 100%|██████████| 257/257 [00:00<00:00, 108kB/s]
Downloading: 100%|██████████| 954/954 [00:00<00:00, 372kB/s]
Downloading: 100%|██████████| 208k/208k [00:00<00:00, 668kB/s]
@ -274,6 +253,7 @@ Downloading: 100%|██████████| 249M/249M [00:23<00:00, 11.2MB
>>> model_path, config, vocab_path = tm.save(tmp_path)
# Import model into Elasticsearch
>>> es = elasticsearch.Elasticsearch("http://elastic:mlqa_admin@localhost:9200", timeout=300) # 5 minute timeout
>>> ptm = PyTorchModel(es, tm.elasticsearch_model_id())
>>> ptm.import_model(model_path=model_path, config_path=None, vocab_path=vocab_path, config=config)
100%|██████████| 63/63 [00:12<00:00, 5.02it/s]

224
bin/eland_import_hub_model Executable file
View File

@ -0,0 +1,224 @@
#!/usr/bin/env python
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Copies a model from the Hugging Face model hub into an Elasticsearch cluster.
This will create local cached copies that will be traced (necessary) before
uploading to Elasticsearch. This will also check that the task type is supported
as well as the model and tokenizer types. All necessary configuration is
uploaded along with the model.
"""
import argparse
import logging
import os
import sys
import tempfile
import textwrap
from elastic_transport.client_utils import DEFAULT
from elasticsearch import AuthenticationException, Elasticsearch
MODEL_HUB_URL = "https://huggingface.co"
def get_arg_parser():
    """Build the command-line argument parser for the model import script.

    The deployment location (``--url`` or ``--cloud-id``) is a required,
    mutually exclusive pair. Several options fall back to environment
    variables (``ES_URL``, ``CLOUD_ID``, ``ES_USERNAME``, ``ES_PASSWORD``,
    ``ES_API_KEY``) when not supplied on the command line.

    Returns:
        argparse.ArgumentParser: the fully configured parser.
    """
    parser = argparse.ArgumentParser()

    # Exactly one way of locating the cluster must be given.
    location_args = parser.add_mutually_exclusive_group(required=True)
    location_args.add_argument(
        "--url",
        default=os.environ.get("ES_URL"),
        help="An Elasticsearch connection URL, e.g. http://localhost:9200",
    )
    location_args.add_argument(
        "--cloud-id",
        default=os.environ.get("CLOUD_ID"),
        help="Cloud ID as found in the 'Manage Deployment' page of an Elastic Cloud deployment",
    )
    parser.add_argument(
        "--hub-model-id",
        required=True,
        help="The model ID in the Hugging Face model hub, "
        "e.g. dbmdz/bert-large-cased-finetuned-conll03-english",
    )
    parser.add_argument(
        "--es-model-id",
        required=False,
        default=None,
        # Fixed: the concatenated literals previously lacked a separating
        # space, producing "...conll03-english.When left unspecified..."
        # in the --help output.
        help="The model ID to use in Elasticsearch, "
        "e.g. bert-large-cased-finetuned-conll03-english. "
        "When left unspecified, this will be auto-created from the `hub-id`",
    )
    parser.add_argument(
        "-u", "--es-username",
        required=False,
        default=os.environ.get("ES_USERNAME"),
        help="Username for Elasticsearch"
    )
    parser.add_argument(
        "-p", "--es-password",
        required=False,
        default=os.environ.get("ES_PASSWORD"),
        help="Password for the Elasticsearch user specified with -u/--username"
    )
    parser.add_argument(
        "--es-api-key",
        required=False,
        default=os.environ.get("ES_API_KEY"),
        help="API key for Elasticsearch"
    )
    parser.add_argument(
        "--task-type",
        required=True,
        # SUPPORTED_TASK_TYPES is imported in the __main__ block before
        # this function is called.
        choices=SUPPORTED_TASK_TYPES,
        help="The task type for the model usage.",
    )
    parser.add_argument(
        "--quantize",
        action="store_true",
        default=False,
        help="Quantize the model before uploading. Default: False",
    )
    parser.add_argument(
        "--start",
        action="store_true",
        default=False,
        help="Start the model deployment after uploading. Default: False",
    )
    parser.add_argument(
        "--clear-previous",
        action="store_true",
        default=False,
        help="Should the model previously stored with `es-model-id` be deleted"
    )
    parser.add_argument(
        "--insecure",
        # store_false: passing --insecure sets args.insecure to False,
        # which is fed to verify_certs (i.e. certificates NOT verified).
        action="store_false",
        default=True,
        help="Do not verify SSL certificates"
    )
    parser.add_argument(
        "--ca-certs",
        required=False,
        # DEFAULT is elastic_transport's sentinel for "use the default CAs".
        default=DEFAULT,
        help="Path to CA bundle"
    )

    return parser
def get_es_client(cli_args):
    """Create and verify an Elasticsearch client from parsed CLI arguments.

    Connection settings (URL or Cloud ID) and credentials (API key, or
    username/password) are read from ``cli_args`` as produced by
    ``get_arg_parser()``. The connection is probed immediately via the
    cluster info endpoint so problems surface before any model upload.
    On authentication failure, or when a username is given without a
    password, the process exits with status 1.

    NOTE(review): relies on the module-level ``logger`` configured in the
    ``__main__`` block; this function is only intended to be called from there.
    """
    try:
        es_args = {
            # Model uploads can be slow; use a generous request timeout.
            'request_timeout': 300,
            # `--insecure` is a store_false flag, so `insecure` is True
            # (verify certificates) unless the flag was passed.
            'verify_certs': cli_args.insecure,
            'ca_certs': cli_args.ca_certs
        }

        # Deployment location: --url and --cloud-id are mutually exclusive
        # (enforced by the argument parser), so at most one branch applies.
        if cli_args.url:
            es_args['hosts'] = cli_args.url
        if cli_args.cloud_id:
            es_args['cloud_id'] = cli_args.cloud_id

        # Authentication: an API key takes precedence over basic auth.
        if cli_args.es_api_key:
            es_args['api_key'] = cli_args.es_api_key
        elif cli_args.es_username:
            if not cli_args.es_password:
                # Use the module logger (previously the root logger via
                # logging.error) for consistent formatting, and sys.exit
                # instead of the site-injected exit().
                logger.error(f"Password for user {cli_args.es_username} was not specified.")
                sys.exit(1)
            es_args['basic_auth'] = (cli_args.es_username, cli_args.es_password)

        es_client = Elasticsearch(**es_args)
        # Probe the cluster so connection/auth errors are reported here.
        es_info = es_client.info()
        logger.info(f"Connected to cluster named '{es_info['cluster_name']}' (version: {es_info['version']['number']})")
        return es_client
    except AuthenticationException as e:
        logger.error(e)
        sys.exit(1)
if __name__ == "__main__":
    # Configure logging
    logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s')
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    # The PyTorch extras are imported lazily so that a missing optional
    # dependency produces a friendly, actionable message instead of a
    # traceback at module import time.
    try:
        from eland.ml.pytorch import PyTorchModel
        from eland.ml.pytorch.transformers import SUPPORTED_TASK_TYPES, TransformerModel
    except ModuleNotFoundError as e:
        logger.error(textwrap.dedent(f"""\
            \033[31mFailed to run because module '{e.name}' is not available.\033[0m
            This script requires PyTorch extras to run. You can install these by running:
            \033[1m{sys.executable} -m pip install 'eland[pytorch]'
            \033[0m"""))
        exit(1)

    # Parse arguments
    args = get_arg_parser().parse_args()

    # Connect to ES
    logger.info("Establishing connection to Elasticsearch")
    es = get_es_client(args)

    # Trace and save model, then upload it from temp file.
    # The temporary directory (and the traced model inside it) is removed
    # automatically when the `with` block exits.
    with tempfile.TemporaryDirectory() as tmp_dir:
        logger.info(f"Loading HuggingFace transformer tokenizer and model '{args.hub_model_id}'")
        tm = TransformerModel(args.hub_model_id, args.task_type, args.quantize)
        model_path, config, vocab_path = tm.save(tmp_dir)
        # Use the explicit --es-model-id when given, otherwise derive an ID
        # from the hub model ID.
        ptm = PyTorchModel(es, args.es_model_id if args.es_model_id else tm.elasticsearch_model_id())

        # Existence is detected from the HTTP status of the get-trained-models
        # call (404 is tolerated via ignore_status rather than raised).
        model_exists = es.options(ignore_status=404).ml.get_trained_models(model_id=ptm.model_id).meta.status == 200

        if model_exists:
            if args.clear_previous:
                # A deployed model must be stopped before it can be deleted.
                logger.info(f"Stopping deployment for model with id '{ptm.model_id}'")
                ptm.stop()

                logger.info(f"Deleting model with id '{ptm.model_id}'")
                ptm.delete()
            else:
                logger.error(f"Trained model with id '{ptm.model_id}' already exists")
                logger.info("Run the script with the '--clear-previous' flag if you want to overwrite the existing model.")
                exit(1)

        # Upload in three steps: config first, then the model definition
        # chunks, then the vocabulary.
        logger.info(f"Creating model with id '{ptm.model_id}'")
        ptm.put_config(config=config)

        logger.info(f"Uploading model definition")
        ptm.put_model(model_path)

        logger.info(f"Uploading model vocabulary")
        ptm.put_vocab(vocab_path)

        # Start the deployed model
        if args.start:
            logger.info(f"Starting model deployment")
            ptm.start()

        logger.info(f"Model successfully imported with id '{ptm.model_id}'")

View File

@ -1,94 +0,0 @@
# Declare a Backstage Component that represents the Eland application.
---
# yaml-language-server: $schema=https://json.schemastore.org/catalog-info.json
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: eland
description: Python Client and Toolkit for DataFrames, Big Data, Machine Learning and ETL in Elasticsearch
annotations:
backstage.io/source-location: url:https://github.com/elastic/eland/
github.com/project-slug: elastic/eland
github.com/team-slug: elastic/ml-core
buildkite.com/project-slug: elastic/eland
tags:
- elasticsearch
- python
- machine-learning
- big-data
- etl
links:
- title: Eland docs
url: https://eland.readthedocs.io/
spec:
type: application
owner: group:ml-core
lifecycle: production
dependsOn:
- resource:eland-pipeline
- resource:eland-release-docker-pipeline
# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json
---
apiVersion: backstage.io/v1alpha1
kind: Resource
metadata:
name: eland-pipeline
description: Run Eland tests
links:
- title: Pipeline
url: https://buildkite.com/elastic/eland
spec:
type: buildkite-pipeline
owner: group:ml-core
system: buildkite
implementation:
apiVersion: buildkite.elastic.dev/v1
kind: Pipeline
metadata:
name: Eland
description: Eland Python
spec:
pipeline_file: .buildkite/pipeline.yml
repository: elastic/eland
teams:
ml-core: {}
devtools-team: {}
es-docs: {}
everyone:
access_level: READ_ONLY
# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json
---
apiVersion: backstage.io/v1alpha1
kind: Resource
metadata:
name: eland-release-docker-pipeline
description: Release Docker Artifacts for Eland
links:
- title: Pipeline
url: https://buildkite.com/elastic/eland-release-docker
spec:
type: buildkite-pipeline
owner: group:ml-core
system: buildkite
implementation:
apiVersion: buildkite.elastic.dev/v1
kind: Pipeline
metadata:
name: Eland - Release Docker
description: Release Docker Artifacts for Eland
spec:
pipeline_file: .buildkite/release-docker/pipeline.yml
provider_settings:
trigger_mode: none
repository: elastic/eland
teams:
ml-core: {}
devtools-team: {}
everyone:
access_level: READ_ONLY

View File

@ -1,10 +0,0 @@
project: 'Eland Python client'
products:
- id: elasticsearch-client
cross_links:
- docs-content
toc:
- toc: reference
subs:
es: "Elasticsearch"
ml: "machine learning"

View File

@ -1,16 +1,16 @@
---
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/client/eland/current/dataframes.html
---
[[dataframes]]
== Data Frames
# Data Frames [dataframes]
`eland.DataFrame` wraps an Elasticsearch index in a Pandas-like API
and defers all processing and filtering of data to Elasticsearch
instead of your local machine. This means you can process large
amounts of data within Elasticsearch from a Jupyter Notebook
without overloading your machine.
`eland.DataFrame` wraps an Elasticsearch index in a Pandas-like API and defers all processing and filtering of data to Elasticsearch instead of your local machine. This means you can process large amounts of data within Elasticsearch from a Jupyter Notebook without overloading your machine.
```python
[source,python]
-------------------------------------
>>> import eland as ed
>>>
# Connect to 'flights' index via localhost Elasticsearch node
>>> # Connect to 'flights' index via localhost Elasticsearch node
>>> df = ed.DataFrame('http://localhost:9200', 'flights')
# eland.DataFrame instance has the same API as pandas.DataFrame
@ -59,5 +59,4 @@ Elasticsearch storage usage: 5.043 MB
sum 9.261629e+07 8.204365e+06
min 0.000000e+00 1.000205e+02
std 4.578263e+03 2.663867e+02
```
-------------------------------------

13
docs/guide/index.asciidoc Normal file
View File

@ -0,0 +1,13 @@
= Eland Python Client
:doctype: book
include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
include::overview.asciidoc[]
include::installation.asciidoc[]
include::dataframes.asciidoc[]
include::machine-learning.asciidoc[]

View File

@ -0,0 +1,16 @@
[[installation]]
== Installation
Eland can be installed with https://pip.pypa.io[pip] from https://pypi.org/project/eland[PyPI]:
[source,sh]
-----------------------------
$ python -m pip install eland
-----------------------------
and can also be installed with https://docs.conda.io[Conda] from https://anaconda.org/conda-forge/eland[Conda Forge]:
[source,sh]
------------------------------------
$ conda install -c conda-forge eland
------------------------------------

View File

@ -0,0 +1,80 @@
[[machine-learning]]
== Machine Learning
[discrete]
[[ml-trained-models]]
=== Trained models
Eland allows transforming trained models from scikit-learn, XGBoost,
and LightGBM libraries to be serialized and used as an inference
model in {es}.
[source,python]
------------------------
>>> from xgboost import XGBClassifier
>>> from eland.ml import MLModel
# Train and exercise an XGBoost ML model locally
>>> xgb_model = XGBClassifier(booster="gbtree")
>>> xgb_model.fit(training_data[0], training_data[1])
>>> xgb_model.predict(training_data[0])
[0 1 1 0 1 0 0 0 1 0]
# Import the model into Elasticsearch
>>> es_model = MLModel.import_model(
es_client="http://localhost:9200",
model_id="xgb-classifier",
model=xgb_model,
feature_names=["f0", "f1", "f2", "f3", "f4"],
)
# Exercise the ML model in Elasticsearch with the training data
>>> es_model.predict(training_data[0])
[0 1 1 0 1 0 0 0 1 0]
------------------------
[discrete]
[[ml-nlp-pytorch]]
=== Natural language processing (NLP) with PyTorch
For NLP tasks, Eland enables you to import PyTorch trained BERT models into {es}.
Models can be either plain PyTorch models, or supported
https://huggingface.co/transformers[transformers] models from the
https://huggingface.co/models[Hugging Face model hub].
[source,bash]
------------------------
$ eland_import_hub_model \
--url http://localhost:9200/ \
--hub-model-id elastic/distilbert-base-cased-finetuned-conll03-english \
--task-type ner \
--start
------------------------
[source,python]
------------------------
>>> import elasticsearch
>>> from pathlib import Path
>>> from eland.ml.pytorch import PyTorchModel
>>> from eland.ml.pytorch.transformers import TransformerModel
# Load a Hugging Face transformers model directly from the model hub
>>> tm = TransformerModel("elastic/distilbert-base-cased-finetuned-conll03-english", "ner")
Downloading: 100%|██████████| 257/257 [00:00<00:00, 108kB/s]
Downloading: 100%|██████████| 954/954 [00:00<00:00, 372kB/s]
Downloading: 100%|██████████| 208k/208k [00:00<00:00, 668kB/s]
Downloading: 100%|██████████| 112/112 [00:00<00:00, 43.9kB/s]
Downloading: 100%|██████████| 249M/249M [00:23<00:00, 11.2MB/s]
# Export the model in a TorchScrpt representation which Elasticsearch uses
>>> tmp_path = "models"
>>> Path(tmp_path).mkdir(parents=True, exist_ok=True)
>>> model_path, config_path, vocab_path = tm.save(tmp_path)
# Import model into Elasticsearch
>>> es = elasticsearch.Elasticsearch("http://elastic:mlqa_admin@localhost:9200", timeout=300) # 5 minute timeout
>>> ptm = PyTorchModel(es, tm.elasticsearch_model_id())
>>> ptm.import_model(model_path, config_path, vocab_path)
100%|██████████| 63/63 [00:12<00:00, 5.02it/s]
------------------------

View File

@ -1,36 +1,33 @@
---
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/client/eland/current/index.html
- https://www.elastic.co/guide/en/elasticsearch/client/eland/current/overview.html
navigation_title: Eland
---
[[overview]]
== Overview
# Eland Python client [overview]
Eland is a Python client and toolkit for DataFrames and {ml} in {es}.
Full documentation is available on https://eland.readthedocs.io[Read the Docs].
Source code is available on https://github.com/elastic/eland[GitHub].
Eland is a Python client and toolkit for DataFrames and {{ml}} in {{es}}. Full documentation is available on [Read the Docs](https://eland.readthedocs.io). Source code is available on [GitHub](https://github.com/elastic/eland).
[discrete]
=== Compatibility
- Supports Python 3.7+ and Pandas 1.3
- Supports {es} clusters that are 7.11+, recommended 7.14 or later for all features to work.
Make sure your Eland major version matches the major version of your Elasticsearch cluster.
## Compatibility [_compatibility]
The recommended way to set your requirements in your `setup.py` or
`requirements.txt` is::
* Supports Python 3.9+ and Pandas 1.5
* Supports {{es}} 8+ clusters, recommended 8.16 or later for all features to work. Make sure your Eland major version matches the major version of your Elasticsearch cluster.
The recommended way to set your requirements in your `setup.py` or `requirements.txt` is::
```
# Elasticsearch 8.x
eland>=8,<9
```
```
# Elasticsearch 7.x
eland>=7,<8
```
## Getting Started [_getting_started]
[discrete]
=== Getting Started
Create a `DataFrame` object connected to an {{es}} cluster running on `http://localhost:9200`:
Create a `DataFrame` object connected to an {es} cluster running on `http://localhost:9200`:
```python
[source,python]
------------------------------------
>>> import eland as ed
>>> df = ed.DataFrame(
... es_client="http://localhost:9200",
@ -51,19 +48,20 @@ Create a `DataFrame` object connected to an {{es}} cluster running on `http://lo
13058 858.144337 False ... 6 2018-02-11 14:54:34
[13059 rows x 27 columns]
```
------------------------------------
### Elastic Cloud [_elastic_cloud]
[discrete]
==== Elastic Cloud
You can also connect Eland to an Elasticsearch instance in Elastic Cloud:
```python
[source,python]
------------------------------------
>>> import eland as ed
>>> from elasticsearch import Elasticsearch
# First instantiate an 'Elasticsearch' instance connected to Elastic Cloud
>>> es = Elasticsearch(cloud_id="...", api_key="...")
>>> es = Elasticsearch(cloud_id="...", api_key=("...", "..."))
# then wrap the client in an Eland DataFrame:
>>> df = ed.DataFrame(es, es_index_pattern="flights")
@ -75,16 +73,16 @@ You can also connect Eland to an Elasticsearch instance in Elastic Cloud:
3 181.694216 True ... 0 2018-01-01 10:33:28
4 730.041778 False ... 0 2018-01-01 05:13:00
[5 rows x 27 columns]
```
------------------------------------
Eland can be used for complex queries and aggregations:
```python
[source,python]
------------------------------------
>>> df[df.Carrier != "Kibana Airlines"].groupby("Carrier").mean(numeric_only=False)
AvgTicketPrice Cancelled timestamp
Carrier
ES-Air 630.235816 0.129814 2018-01-21 20:45:00.200000000
JetBeats 627.457373 0.134698 2018-01-21 14:43:18.112400635
Logstash Airways 624.581974 0.125188 2018-01-21 16:14:50.711798340
```
------------------------------------

View File

@ -1,19 +0,0 @@
---
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/client/eland/current/installation.html
---
# Installation [installation]
Eland can be installed with [pip](https://pip.pypa.io) from [PyPI](https://pypi.org/project/eland). We recommend [using a virtual environment](https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/) when installing with pip:
```sh
$ python -m pip install eland
```
Alternatively, Eland can be installed with [Conda](https://docs.conda.io) from [Conda Forge](https://anaconda.org/conda-forge/eland):
```sh
$ conda install -c conda-forge eland
```

View File

@ -1,199 +0,0 @@
---
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/client/eland/current/machine-learning.html
---
# Machine Learning [machine-learning]
## Trained models [ml-trained-models]
Eland allows transforming *some*
[trained models](https://eland.readthedocs.io/en/latest/reference/api/eland.ml.MLModel.import_model.html#parameters) from scikit-learn, XGBoost,
and LightGBM libraries to be serialized and used as an inference model in {{es}}.
```python
>>> from xgboost import XGBClassifier
>>> from eland.ml import MLModel
# Train and exercise an XGBoost ML model locally
>>> xgb_model = XGBClassifier(booster="gbtree")
>>> xgb_model.fit(training_data[0], training_data[1])
>>> xgb_model.predict(training_data[0])
[0 1 1 0 1 0 0 0 1 0]
# Import the model into Elasticsearch
>>> es_model = MLModel.import_model(
es_client="http://localhost:9200",
model_id="xgb-classifier",
model=xgb_model,
feature_names=["f0", "f1", "f2", "f3", "f4"],
)
# Exercise the ML model in Elasticsearch with the training data
>>> es_model.predict(training_data[0])
[0 1 1 0 1 0 0 0 1 0]
```
## Natural language processing (NLP) with PyTorch [ml-nlp-pytorch]
::::{important}
You need to install the appropriate version of PyTorch to import an NLP model. Run `python -m pip install 'eland[pytorch]'` to install that version.
::::
For NLP tasks, Eland enables you to import PyTorch models into {{es}}. Use the `eland_import_hub_model` script to download and install supported [transformer models](https://huggingface.co/transformers) from the [Hugging Face model hub](https://huggingface.co/models). For example:
```bash
eland_import_hub_model <authentication> \ <1>
--url http://localhost:9200/ \ <2>
--hub-model-id elastic/distilbert-base-cased-finetuned-conll03-english \ <3>
--task-type ner \ <4>
--start
```
1. Use an authentication method to access your cluster. Refer to [Authentication methods](machine-learning.md#ml-nlp-pytorch-auth).
2. The cluster URL. Alternatively, use `--cloud-id`.
3. Specify the identifier for the model in the Hugging Face model hub.
4. Specify the type of NLP task. Supported values are `fill_mask`, `ner`, `question_answering`, `text_classification`, `text_embedding`, `text_expansion`, `text_similarity` and `zero_shot_classification`.
For more information about the available options, run `eland_import_hub_model` with the `--help` option.
```bash
eland_import_hub_model --help
```
### Import model with Docker [ml-nlp-pytorch-docker]
::::{important}
To use the Docker container, you need to clone the Eland repository: [https://github.com/elastic/eland](https://github.com/elastic/eland)
::::
If you want to use Eland without installing it, you can use the Docker image:
You can use the container interactively:
```bash
docker run -it --rm --network host docker.elastic.co/eland/eland
```
Running installed scripts is also possible without an interactive shell, for example:
```bash
docker run -it --rm docker.elastic.co/eland/eland \
eland_import_hub_model \
--url $ELASTICSEARCH_URL \
--hub-model-id elastic/distilbert-base-uncased-finetuned-conll03-english \
--start
```
Replace the `$ELASTICSEARCH_URL` with the URL for your Elasticsearch cluster. For authentication purposes, include an administrator username and password in the URL in the following format: `https://username:password@host:port`.
### Install models in an air-gapped environment [ml-nlp-pytorch-air-gapped]
You can install models in a restricted or closed network by pointing the `eland_import_hub_model` script to local files.
For an offline install of a Hugging Face model, the model first needs to be cloned locally, Git and [Git Large File Storage](https://git-lfs.com/) are required to be installed in your system.
1. Select a model you want to use from Hugging Face. Refer to the [compatible third party model](docs-content://explore-analyze/machine-learning/nlp/ml-nlp-model-ref.md) list for more information on the supported architectures.
2. Clone the selected model from Hugging Face by using the model URL. For example:
```bash
git clone https://huggingface.co/dslim/bert-base-NER
```
This command results in a local copy of the model in the directory `bert-base-NER`.
3. Use the `eland_import_hub_model` script with the `--hub-model-id` set to the directory of the cloned model to install it:
```bash
eland_import_hub_model \
--url 'XXXX' \
--hub-model-id /PATH/TO/MODEL \
--task-type ner \
--es-username elastic --es-password XXX \
--es-model-id bert-base-ner
```
If you use the Docker image to run `eland_import_hub_model` you must bind mount the model directory, so the container can read the files:
```bash
docker run --mount type=bind,source=/PATH/TO/MODEL,destination=/model,readonly -it --rm docker.elastic.co/eland/eland \
eland_import_hub_model \
--url 'XXXX' \
--hub-model-id /model \
--task-type ner \
--es-username elastic --es-password XXX \
--es-model-id bert-base-ner
```
Once it's uploaded to {{es}}, the model will have the ID specified by `--es-model-id`. If it is not set, the model ID is derived from `--hub-model-id`; spaces and path delimiters are converted to double underscores `__`.
### Connect to Elasticsearch through a proxy [ml-nlp-pytorch-proxy]
Behind the scenes, Eland uses the `requests` Python library, which [allows configuring proxies through an environment variable](https://requests.readthedocs.io/en/latest/user/advanced/#proxies). For example, to use an HTTP proxy to connect to an HTTPS Elasticsearch cluster, you need to set the `HTTPS_PROXY` environment variable when invoking Eland:
```bash
HTTPS_PROXY=http://proxy-host:proxy-port eland_import_hub_model ...
```
If you disabled security on your Elasticsearch cluster, you should use `HTTP_PROXY` instead.
### Authentication methods [ml-nlp-pytorch-auth]
The following authentication options are available when using the import script:
* Elasticsearch username and password authentication (specified with the `-u` and `-p` options):
```bash
eland_import_hub_model -u <username> -p <password> --cloud-id <cloud-id> ...
```
These `-u` and `-p` options also work when you use `--url`.
* Elasticsearch username and password authentication (embedded in the URL):
```bash
eland_import_hub_model --url https://<user>:<password>@<hostname>:<port> ...
```
* Elasticsearch API key authentication:
```bash
eland_import_hub_model --es-api-key <api-key> --url https://<hostname>:<port> ...
```
* HuggingFace Hub access token (for private models):
```bash
eland_import_hub_model --hub-access-token <access-token> ...
```
### TLS/SSL [ml-nlp-pytorch-tls]
The following TLS/SSL options for Elasticsearch are available when using the import script:
* Specify alternate CA bundle to verify the cluster certificate:
```bash
eland_import_hub_model --ca-certs CA_CERTS ...
```
* Disable TLS/SSL verification altogether (strongly discouraged):
```bash
eland_import_hub_model --insecure ...
```

View File

@ -1,6 +0,0 @@
project: 'Eland reference'
toc:
- file: index.md
- file: installation.md
- file: dataframes.md
- file: machine-learning.md

View File

@ -1,5 +1,11 @@
elasticsearch>=7.7
pandas>=1.2.0
matplotlib
nbval
sphinx==5.3.0
numpydoc>=0.9.0
scikit-learn>=0.22.1
xgboost>=1
lightgbm
nbsphinx
furo
numpydoc>=0.9.0
git+https://github.com/pandas-dev/pydata-sphinx-theme.git@master

View File

@ -58,9 +58,9 @@ release = version
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"numpydoc",
"matplotlib.sphinxext.plot_directive",
"sphinx.ext.todo",
"nbsphinx",
@ -116,7 +116,12 @@ exclude_patterns = ["**.ipynb_checkpoints"]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"
html_theme = "pydata_sphinx_theme"
html_theme_options = {
"external_links": [],
"github_url": "https://github.com/elastic/eland",
"twitter_url": "https://twitter.com/elastic",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,

View File

@ -167,7 +167,7 @@ Configuring PyCharm And Running Tests
- Install development requirements. Open terminal in virtual environment and run
.. code-block:: bash
pip install -r requirements-dev.txt
`pip install -r requirements-dev.txt`
- Setup Elasticsearch instance with docker
.. code-block:: bash
@ -200,7 +200,7 @@ Configuring PyCharm And Running Tests
- To test specific versions of Python run
.. code-block:: bash
nox -s test-3.12
nox -s test-3.8
Documentation

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,4 +1,4 @@
eland.DataFrame.agg
eland.DataFrame.agg
===================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.aggregate
eland.DataFrame.aggregate
=========================
.. currentmodule:: eland

View File

@ -1,6 +1,6 @@
eland.DataFrame.columns
eland.DataFrame.columns
=======================
.. currentmodule:: eland
.. autoproperty:: DataFrame.columns
.. autoattribute:: DataFrame.columns

View File

@ -1,4 +1,4 @@
eland.DataFrame.count
eland.DataFrame.count
=====================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.describe
eland.DataFrame.describe
========================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.drop
eland.DataFrame.drop
====================
.. currentmodule:: eland

View File

@ -1,6 +1,6 @@
eland.DataFrame.dtypes
eland.DataFrame.dtypes
======================
.. currentmodule:: eland
.. autoproperty:: DataFrame.dtypes
.. autoattribute:: DataFrame.dtypes

View File

@ -1,6 +1,6 @@
eland.DataFrame.empty
eland.DataFrame.empty
=====================
.. currentmodule:: eland
.. autoproperty:: DataFrame.empty
.. autoattribute:: DataFrame.empty

View File

@ -1,6 +1,6 @@
eland.DataFrame.es\_dtypes
==========================
eland.DataFrame.es_dtypes
=========================
.. currentmodule:: eland
.. autoproperty:: DataFrame.es_dtypes
.. autoattribute:: DataFrame.es_dtypes

View File

@ -1,5 +1,5 @@
eland.DataFrame.es\_info
========================
eland.DataFrame.es_info
=======================
.. currentmodule:: eland

View File

@ -1,5 +1,5 @@
eland.DataFrame.es\_match
=========================
eland.DataFrame.es_match
========================
.. currentmodule:: eland

View File

@ -1,5 +1,5 @@
eland.DataFrame.es\_query
=========================
eland.DataFrame.es_query
========================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.filter
eland.DataFrame.filter
======================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.get
eland.DataFrame.get
===================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.groupby
eland.DataFrame.groupby
=======================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.head
eland.DataFrame.head
====================
.. currentmodule:: eland

View File

@ -1,6 +1,8 @@
eland.DataFrame.hist
eland.DataFrame.hist
====================
.. currentmodule:: eland
.. automethod:: DataFrame.hist
.. image:: eland-DataFrame-hist-1.png

View File

@ -1,5 +1,5 @@
eland.DataFrame.idxmax
======================
eland.DataFrame.idxmax
========================
.. currentmodule:: eland

View File

@ -1,5 +1,5 @@
eland.DataFrame.idxmin
======================
eland.DataFrame.idxmin
========================
.. currentmodule:: eland

View File

@ -1,6 +1,6 @@
eland.DataFrame.index
eland.DataFrame.index
=====================
.. currentmodule:: eland
.. autoproperty:: DataFrame.index
.. autoattribute:: DataFrame.index

View File

@ -1,4 +1,4 @@
eland.DataFrame.info
eland.DataFrame.info
====================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.iterrows
eland.DataFrame.iterrows
========================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.itertuples
eland.DataFrame.itertuples
==========================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.keys
eland.DataFrame.keys
====================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.mad
eland.DataFrame.mad
===================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.max
eland.DataFrame.max
===================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.mean
eland.DataFrame.mean
====================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.median
eland.DataFrame.median
======================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.min
eland.DataFrame.min
===================
.. currentmodule:: eland

View File

@ -1,6 +1,6 @@
eland.DataFrame.ndim
eland.DataFrame.ndim
====================
.. currentmodule:: eland
.. autoproperty:: DataFrame.ndim
.. autoattribute:: DataFrame.ndim

View File

@ -1,4 +1,4 @@
eland.DataFrame.nunique
eland.DataFrame.nunique
=======================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.query
eland.DataFrame.query
=====================
.. currentmodule:: eland

View File

@ -1,76 +1,18 @@
eland.DataFrame
===============
eland.DataFrame
================
.. currentmodule:: eland
.. autoclass:: DataFrame
.. automethod:: __init__
.. rubric:: Methods
..
HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages.
.. autosummary::
:toctree:
~DataFrame.__init__
~DataFrame.agg
~DataFrame.aggregate
~DataFrame.count
~DataFrame.describe
~DataFrame.drop
~DataFrame.es_info
~DataFrame.es_match
~DataFrame.es_query
~DataFrame.filter
~DataFrame.get
~DataFrame.groupby
~DataFrame.head
~DataFrame.hist
~DataFrame.idxmax
~DataFrame.idxmin
~DataFrame.info
~DataFrame.iterrows
~DataFrame.itertuples
~DataFrame.keys
~DataFrame.mad
~DataFrame.max
~DataFrame.mean
~DataFrame.median
~DataFrame.min
~DataFrame.mode
~DataFrame.nunique
~DataFrame.quantile
~DataFrame.query
~DataFrame.sample
~DataFrame.select_dtypes
~DataFrame.std
~DataFrame.sum
~DataFrame.tail
~DataFrame.to_csv
~DataFrame.to_html
~DataFrame.to_json
~DataFrame.to_numpy
~DataFrame.to_pandas
~DataFrame.to_string
~DataFrame.var
.. rubric:: Attributes
.. autosummary::
~DataFrame.columns
~DataFrame.dtypes
~DataFrame.empty
~DataFrame.es_dtypes
~DataFrame.index
~DataFrame.ndim
~DataFrame.shape
~DataFrame.size
~DataFrame.values
DataFrame.abs
DataFrame.add

View File

@ -1,4 +1,4 @@
eland.DataFrame.sample
eland.DataFrame.sample
======================
.. currentmodule:: eland

View File

@ -1,5 +1,5 @@
eland.DataFrame.select\_dtypes
==============================
eland.DataFrame.select_dtypes
=============================
.. currentmodule:: eland

View File

@ -1,6 +1,6 @@
eland.DataFrame.shape
eland.DataFrame.shape
=====================
.. currentmodule:: eland
.. autoproperty:: DataFrame.shape
.. autoattribute:: DataFrame.shape

View File

@ -1,6 +1,6 @@
eland.DataFrame.size
eland.DataFrame.size
====================
.. currentmodule:: eland
.. autoproperty:: DataFrame.size
.. autoattribute:: DataFrame.size

View File

@ -1,4 +1,4 @@
eland.DataFrame.std
eland.DataFrame.std
===================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.sum
eland.DataFrame.sum
===================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.DataFrame.tail
eland.DataFrame.tail
====================
.. currentmodule:: eland

View File

@ -1,5 +1,5 @@
eland.DataFrame.to\_csv
=======================
eland.DataFrame.to_csv
======================
.. currentmodule:: eland

View File

@ -1,5 +1,5 @@
eland.DataFrame.to\_html
========================
eland.DataFrame.to_html
=======================
.. currentmodule:: eland

View File

@ -1,6 +0,0 @@
eland.DataFrame.to\_json
========================
.. currentmodule:: eland
.. automethod:: DataFrame.to_json

View File

@ -1,5 +1,5 @@
eland.DataFrame.to\_numpy
=========================
eland.DataFrame.to_numpy
========================
.. currentmodule:: eland

View File

@ -1,5 +1,5 @@
eland.DataFrame.to\_pandas
==========================
eland.DataFrame.to_pandas
=========================
.. currentmodule:: eland

View File

@ -1,5 +1,5 @@
eland.DataFrame.to\_string
==========================
eland.DataFrame.to_string
=========================
.. currentmodule:: eland

View File

@ -1,6 +1,6 @@
eland.DataFrame.values
eland.DataFrame.values
======================
.. currentmodule:: eland
.. autoproperty:: DataFrame.values
.. autoattribute:: DataFrame.values

View File

@ -1,4 +1,4 @@
eland.DataFrame.var
eland.DataFrame.var
===================
.. currentmodule:: eland

View File

@ -1,33 +1,6 @@
eland.Index
eland.Index
===========
.. currentmodule:: eland
.. autoclass:: Index
.. automethod:: __init__
.. rubric:: Methods
.. autosummary::
~Index.__init__
~Index.es_info
.. rubric:: Attributes
.. autosummary::
~Index.ID_INDEX_FIELD
~Index.ID_SORT_FIELD
~Index.es_index_field
~Index.is_source_field
~Index.sort_field

View File

@ -1,4 +1,4 @@
eland.Series.add
eland.Series.add
================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.Series.describe
eland.Series.describe
=====================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.Series.div
eland.Series.div
================
.. currentmodule:: eland

View File

@ -1,4 +1,4 @@
eland.Series.divide
eland.Series.divide
===================
.. currentmodule:: eland

View File

@ -1,6 +1,6 @@
eland.Series.dtype
eland.Series.dtype
==================
.. currentmodule:: eland
.. autoproperty:: Series.dtype
.. autoattribute:: Series.dtype

View File

@ -1,6 +1,6 @@
eland.Series.dtypes
eland.Series.dtypes
===================
.. currentmodule:: eland
.. autoproperty:: Series.dtypes
.. autoattribute:: Series.dtypes

View File

@ -1,6 +1,6 @@
eland.Series.empty
eland.Series.empty
==================
.. currentmodule:: eland
.. autoproperty:: Series.empty
.. autoattribute:: Series.empty

Some files were not shown because too many files have changed in this diff Show More