diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 000000000..4507429ec
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,134 @@
+name: CI
+
+on: [push, pull_request]
+
+jobs:
+ ci:
+ strategy:
+ # Allows for matrix sub-jobs to fail without canceling the rest
+ fail-fast: false
+
+ # MATRIX:
+ # =======
+ # Required parameters:
+ # os the os to run on
+ # python-version the python version to use
+ # backend the backend to use
+ # env any additional env variables. Set to '{}' for none
+ # Optional parameters:
+ # allowed_failure whether the job is allowed to fail
+ #  extra_hash       extra hash string to differentiate from other caches with a similar name (must always start with '-')
+ matrix:
+ # Tests [amd64]
+ #
+ os: [ubuntu-18.04, macos-10.15]
+ python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9, "3.10"] # quotes to avoid being interpreted as the number 3.1
+ env: [{ STATIC_DEPS: true }, { STATIC_DEPS: false }]
+
+ include:
+ # Coverage setup
+ - os: ubuntu-18.04
+ python-version: 3.9
+ env: { COVERAGE: true }
+ extra_hash: "-coverage"
+ allowed_failure: true # shouldn't fail but currently does...
+ - os: ubuntu-18.04
+ python-version: 3.9
+ env: { STATIC_DEPS: false, EXTRA_DEPS: "docutils pygments sphinx sphinx-rtd-theme" }
+ extra_hash: "-docs"
+ allowed_failure: true # shouldn't fail but currently does...
+ # Old library setup with minimum version requirements
+ - os: ubuntu-18.04
+ python-version: 3.9
+ env: {
+ STATIC_DEPS: true,
+ LIBXML2_VERSION: 2.9.2,
+ LIBXSLT_VERSION: 1.1.27,
+ }
+ extra_hash: "-oldlibs"
+ allowed_failure: true # shouldn't fail but currently does...
+ # Ubuntu sub-jobs:
+ # ================
+ # Pypy
+ - os: ubuntu-18.04
+ python-version: pypy-2.7
+ env: { STATIC_DEPS: false }
+ allowed_failure: true
+ - os: ubuntu-18.04
+ python-version: pypy-3.7
+ env: { STATIC_DEPS: false }
+ allowed_failure: true
+
+ # MacOS sub-jobs
+ # ==============
+ - os: macos-10.15
+ allowed_failure: true # Unicode parsing fails in Py3
+
+ # This defaults to 360 minutes (6h), which is far too long; if a test gets stuck, it can block other pipelines.
+ # From testing, the runs tend to take ~3 minutes, so a limit of 20 minutes should be enough. This can always be
+ # changed in the future if needed.
+ timeout-minutes: 20
+ runs-on: ${{ matrix.os }}
+
+ env:
+ OS_NAME: ${{ matrix.os }}
+ PYTHON_VERSION: ${{ matrix.python-version }}
+ MACOSX_DEPLOYMENT_TARGET: 10.14
+ LIBXML2_VERSION: 2.9.10
+ LIBXSLT_VERSION: 1.1.34
+ COVERAGE: false
+ GCC_VERSION: 8
+ USE_CCACHE: 1
+ CCACHE_SLOPPINESS: "pch_defines,time_macros"
+ CCACHE_COMPRESS: 1
+ CCACHE_MAXSIZE: "100M"
+
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 1
+
+ - name: Setup python
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Cache [ccache]
+ uses: pat-s/always-upload-cache@v2.1.3
+ if: startsWith(runner.os, 'Linux')
+ with:
+ path: ~/.ccache
+ key: ${{ runner.os }}-ccache${{ matrix.extra_hash }}-${{ matrix.python-version }}-${{ hashFiles('.github/workflows/ci.yml', 'tools/ci-run.sh') }}
+
+ - name: Run CI
+ continue-on-error: ${{ matrix.allowed_failure || false }}
+ env: ${{ matrix.env }}
+ run: bash ./tools/ci-run.sh
+
+ - name: Build docs
+ if: contains( env.EXTRA_DEPS, 'sphinx')
+ run: make html
+
+ - name: Upload docs
+ uses: actions/upload-artifact@v2
+ if: ${{ matrix.extra_hash == '-docs' }}
+ with:
+ name: website_html
+ path: doc/html
+ if-no-files-found: ignore
+
+ - name: Upload Coverage Report
+ uses: actions/upload-artifact@v2
+ with:
+ name: pycoverage_html
+ path: coverage*
+ if-no-files-found: ignore
+
+ - name: Upload Wheel
+ uses: actions/upload-artifact@v2
+ if: ${{ matrix.env.STATIC_DEPS == 'true' && env.COVERAGE == 'false' }}
+ with:
+ name: wheels-${{ runner.os }}
+ path: dist/*.whl
+ if-no-files-found: ignore
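The quoted "3.10" in the Python version matrix above matters because YAML 1.1 resolves a bare 3.10 to the float 3.1. A minimal way to see the effect locally, assuming PyYAML (which follows the same resolution rule) is available::

    import yaml  # PyYAML, used here only to illustrate YAML 1.1 number resolution

    print(yaml.safe_load("python-version: [3.9, 3.10]"))
    # {'python-version': [3.9, 3.1]}  -- the bare 3.10 collapses to the float 3.1

    print(yaml.safe_load('python-version: [3.9, "3.10"]'))
    # {'python-version': [3.9, '3.10']}  -- quoting keeps it a string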
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
new file mode 100644
index 000000000..3c5775c6f
--- /dev/null
+++ b/.github/workflows/wheels.yml
@@ -0,0 +1,155 @@
+name: Wheel build
+
+on:
+ release:
+ types: [created]
+
+jobs:
+ sdist:
+ runs-on: ubuntu-20.04
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v1
+ with:
+ python-version: 3.9
+
+ - name: Install lib dependencies
+ run: sudo apt-get update -y -q && sudo apt-get install -y -q "libxml2=2.9.10*" "libxml2-dev=2.9.10*" libxslt1.1 libxslt1-dev
+
+ - name: Install Python dependencies
+ run: python -m pip install -U pip setuptools && python -m pip install -U docutils pygments sphinx sphinx-rtd-theme -r requirements.txt
+
+ - name: Build docs and sdist
+ run: make html sdist
+ env: { STATIC_DEPS: false }
+
+ - name: Release
+ uses: softprops/action-gh-release@v1
+ if: startsWith(github.ref, 'refs/tags/')
+ with:
+ files: dist/*.tar.gz
+
+ - name: Upload sdist
+ uses: actions/upload-artifact@v2
+ with:
+ name: sdist
+ path: dist/*.tar.gz
+
+ - name: Upload website
+ uses: actions/upload-artifact@v2
+ with:
+ name: website
+ path: doc/html
+
+ Linux:
+ runs-on: ubuntu-latest
+
+ strategy:
+ # Allows for matrix sub-jobs to fail without canceling the rest
+ fail-fast: false
+
+ matrix:
+ image:
+ - manylinux1_x86_64
+ - manylinux1_i686
+ #- manylinux2010_x86_64
+ #- manylinux2010_i686
+ - manylinux_2_24_x86_64
+ - manylinux_2_24_i686
+ - manylinux_2_24_aarch64
+ - musllinux_1_1_x86_64
+ #- manylinux_2_24_ppc64le
+ #- manylinux_2_24_ppc64le
+ #- manylinux_2_24_s390x
+ pyversion: ["*"]
+
+ exclude:
+ - image: manylinux_2_24_aarch64
+ pyversion: "*"
+ include:
+ - image: manylinux_2_24_aarch64
+ pyversion: "cp37*"
+ - image: manylinux_2_24_aarch64
+ pyversion: "cp38*"
+ - image: manylinux_2_24_aarch64
+ pyversion: "cp39*"
+ - image: manylinux_2_24_aarch64
+ pyversion: "cp310*"
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+
+ - name: Install dependencies
+ run: python -m pip install -r requirements.txt
+
+ - name: Build Linux wheels
+ run: make sdist wheel_${{ matrix.image }}
+ env: { STATIC_DEPS: true, PYTHON_BUILD_VERSION: "${{ matrix.pyversion }}" }
+
+ - name: Release
+ uses: softprops/action-gh-release@v1
+ if: startsWith(github.ref, 'refs/tags/')
+ with:
+ files: wheelhouse/*/*-m*linux*.whl # manylinux / musllinux
+
+ - name: Upload wheels
+ uses: actions/upload-artifact@v2
+ with:
+ name: wheels-${{ matrix.image }}
+ path: wheelhouse/*/*-m*linux*.whl # manylinux / musllinux
+ if-no-files-found: ignore
+
+ non-Linux:
+ strategy:
+ # Allows for matrix sub-jobs to fail without canceling the rest
+ fail-fast: false
+
+ matrix:
+ #os: [macos-10.15, windows-latest]
+ os: [macos-10.15, macOS-M1]
+ python_version: ["2.7", "3.6", "3.7", "3.8", "3.9", "3.10", "pypy-3.7-v7.3.3", "pypy-3.8-v7.3.7"]
+
+ runs-on: ${{ matrix.os }}
+ env: { LIBXML2_VERSION: 2.9.10, LIBXSLT_VERSION: 1.1.34, MACOSX_DEPLOYMENT_TARGET: 10.14 }
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python_version }}
+
+ - name: Install MacOS dependencies
+ if: startsWith(matrix.os, 'mac')
+ run: |
+ brew install automake libtool
+ ln -s /usr/local/bin/glibtoolize /usr/local/bin/libtoolize
+
+ - name: Install dependencies
+ run: python -m pip install setuptools wheel -r requirements.txt
+
+ - name: Build wheels
+ run: make sdist wheel
+ env: { STATIC_DEPS: true, RUN_TESTS: true }
+
+ - name: Release
+ uses: softprops/action-gh-release@v1
+ if: startsWith(github.ref, 'refs/tags/')
+ with:
+ files: dist/lxml-*.whl
+
+ - name: Upload wheels
+ uses: actions/upload-artifact@v2
+ with:
+ name: wheels-${{ matrix.os }}
+ path: dist/lxml-*.whl
+ if-no-files-found: ignore
diff --git a/.travis.yml b/.travis.yml
index 13ec41be7..9d8a9f424 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,8 +8,10 @@ cache:
- libs
python:
- - 3.9
+ - nightly
+ - 3.10
- 2.7
+ - 3.9
- 3.8
- 3.7
- 3.6
@@ -61,6 +63,7 @@ matrix:
env: STATIC_DEPS=true
arch: ppc64le
allow_failures:
+ - python: nightly
- python: pypy
- python: pypy3
@@ -70,7 +73,7 @@ install:
then pip install --install-option=--no-cython-compile https://github.com/cython/cython/archive/master.zip;
else pip install -r requirements.txt;
fi
- - pip install -U beautifulsoup4 cssselect html5lib rnc2rng ${EXTRA_DEPS}
+ - pip install -U beautifulsoup4 cssselect html5lib rnc2rng==2.6.5 ${EXTRA_DEPS}
script:
- CFLAGS="-O0 -g -fPIC" python -u setup.py build_ext --inplace
@@ -79,3 +82,5 @@ script:
- ccache -s || true
- CFLAGS="-O0 -g -fPIC" PYTHONUNBUFFERED=x make test
- ccache -s || true
+ - python setup.py install
+ - python -c "from lxml import etree"
diff --git a/CHANGES.txt b/CHANGES.txt
index 22f4d450b..2a0e1e22e 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -2,6 +2,32 @@
lxml changelog
==============
+4.6.5 (2021-12-12)
+==================
+
+Bugs fixed
+----------
+
+* A vulnerability (GHSL-2021-1038) in the HTML cleaner allowed sneaking script
+ content through SVG images.
+
+* A vulnerability (GHSL-2021-1037) in the HTML cleaner allowed sneaking script
+ content through CSS imports and other crafted constructs.
+
+
+4.6.4 (2021-11-01)
+==================
+
+Features added
+--------------
+
+* GH#317: A new property ``system_url`` was added to DTD entities.
+ Patch by Thirdegree.
+
+* GH#314: The ``STATIC_*`` variables in ``setup.py`` can now be passed via env vars.
+ Patch by Isaac Jurado.
+
+
4.6.3 (2021-03-21)
==================
diff --git a/Makefile b/Makefile
index a8c9de829..555d851e8 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@ PYTHON3?=python3
TESTFLAGS=-p -v
TESTOPTS=
SETUPFLAGS=
-LXMLVERSION:=$(shell sed -ne '/__version__/s|.*__version__\s*=\s*"\([^"]*\)".*|\1|p' src/lxml/__init__.py)
+LXMLVERSION:=$(shell $(PYTHON3) -c 'import re; print(re.findall(r"__version__\s*=\s*\"([^\"]+)\"", open("src/lxml/__init__.py").read())[0])' )
PARALLEL?=$(shell $(PYTHON) -c 'import sys; print("-j7" if sys.version_info >= (3, 5) else "")' )
PARALLEL3?=$(shell $(PYTHON3) -c 'import sys; print("-j7" if sys.version_info >= (3, 5) else "")' )
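The LXMLVERSION change above replaces the sed call with a small Python one-liner. Expanded for readability and run against hypothetical file content rather than the real src/lxml/__init__.py, it does roughly this::

    import re

    init_py = '__version__ = "4.6.5"\n'  # sample content only; the real file defines more
    version = re.findall(r'__version__\s*=\s*"([^"]+)"', init_py)[0]
    print(version)  # 4.6.5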
@@ -12,17 +12,21 @@ PY3_WITH_CYTHON?=$(shell $(PYTHON3) -c 'import Cython.Build.Dependencies' >/dev/
CYTHON_WITH_COVERAGE?=$(shell $(PYTHON) -c 'import Cython.Coverage; import sys; assert not hasattr(sys, "pypy_version_info")' >/dev/null 2>/dev/null && echo " --coverage" || true)
CYTHON3_WITH_COVERAGE?=$(shell $(PYTHON3) -c 'import Cython.Coverage; import sys; assert not hasattr(sys, "pypy_version_info")' >/dev/null 2>/dev/null && echo " --coverage" || true)
+PYTHON_BUILD_VERSION ?= *
MANYLINUX_LIBXML2_VERSION=2.9.10
MANYLINUX_LIBXSLT_VERSION=1.1.34
MANYLINUX_CFLAGS=-O3 -g1 -pipe -fPIC -flto
MANYLINUX_LDFLAGS=-flto
-MANYLINUX_IMAGE_X86_64=quay.io/pypa/manylinux1_x86_64
-MANYLINUX_IMAGE_686=quay.io/pypa/manylinux1_i686
-MANYLINUX_IMAGE_AARCH64=quay.io/pypa/manylinux2014_aarch64
-AARCH64_ENV=-e AR="/opt/rh/devtoolset-9/root/usr/bin/gcc-ar" \
- -e NM="/opt/rh/devtoolset-9/root/usr/bin/gcc-nm" \
- -e RANLIB="/opt/rh/devtoolset-9/root/usr/bin/gcc-ranlib"
+MANYLINUX_IMAGES= \
+ manylinux1_x86_64 \
+ manylinux1_i686 \
+ manylinux_2_24_x86_64 \
+ manylinux_2_24_i686 \
+ manylinux_2_24_aarch64 \
+ manylinux_2_24_ppc64le \
+ manylinux_2_24_s390x \
+ musllinux_1_1_x86_64
.PHONY: all inplace inplace3 rebuild-sdist sdist build require-cython wheel_manylinux wheel
@@ -55,19 +59,22 @@ require-cython:
qemu-user-static:
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
-wheel_manylinux: wheel_manylinux64 wheel_manylinux32 wheel_manylinuxaarch64
-wheel_manylinuxaarch64: qemu-user-static
+wheel_manylinux: $(addprefix wheel_,$(MANYLINUX_IMAGES))
+$(addprefix wheel_,$(filter-out %_x86_64, $(filter-out %_i686, $(MANYLINUX_IMAGES)))): qemu-user-static
-wheel_manylinux32 wheel_manylinux64 wheel_manylinuxaarch64: dist/lxml-$(LXMLVERSION).tar.gz
+wheel_%: dist/lxml-$(LXMLVERSION).tar.gz
time docker run --rm -t \
-v $(shell pwd):/io \
- $(if $(patsubst %aarch64,,$@),,$(AARCH64_ENV)) \
+ -e AR=gcc-ar \
+ -e NM=gcc-nm \
+ -e RANLIB=gcc-ranlib \
-e CFLAGS="$(MANYLINUX_CFLAGS) $(if $(patsubst %aarch64,,$@),-march=core2,-march=armv8-a -mtune=cortex-a72)" \
-e LDFLAGS="$(MANYLINUX_LDFLAGS)" \
-e LIBXML2_VERSION="$(MANYLINUX_LIBXML2_VERSION)" \
-e LIBXSLT_VERSION="$(MANYLINUX_LIBXSLT_VERSION)" \
- -e WHEELHOUSE=wheelhouse_$(subst wheel_,,$@) \
- $(if $(filter $@,wheel_manylinuxaarch64),$(MANYLINUX_IMAGE_AARCH64),$(if $(patsubst %32,,$@),$(MANYLINUX_IMAGE_X86_64),$(MANYLINUX_IMAGE_686))) \
+ -e PYTHON_BUILD_VERSION="$(PYTHON_BUILD_VERSION)" \
+ -e WHEELHOUSE=$(subst wheel_,wheelhouse/,$@) \
+ quay.io/pypa/$(subst wheel_,,$@) \
bash /io/tools/manylinux/build-wheels.sh /io/$<
wheel:
@@ -89,6 +96,15 @@ valgrind_test_inplace: inplace
valgrind --tool=memcheck --leak-check=full --num-callers=30 --suppressions=valgrind-python.supp \
$(PYTHON) test.py
+fuzz: clean
+ $(MAKE) \
+ CC="/usr/bin/clang" \
+ CFLAGS="$$CFLAGS -fsanitize=fuzzer-no-link -g2" \
+ CXX="/usr/bin/clang++" \
+ CXXFLAGS="-fsanitize=fuzzer-no-link" \
+ inplace3
+ $(PYTHON3) src/lxml/tests/fuzz_xml_parse.py
+
gdb_test_inplace: inplace
@echo "file $(PYTHON)\nrun test.py" > .gdb.command
gdb -x .gdb.command -d src -d src/lxml
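The rewritten wheel targets above generate one ``wheel_<image>`` rule per entry in ``MANYLINUX_IMAGES`` and attach the ``qemu-user-static`` prerequisite only to the non-x86 images. A small Python sketch of that selection logic, restating the ``addprefix``/``filter-out`` calls rather than adding anything to the build::

    MANYLINUX_IMAGES = [
        "manylinux1_x86_64", "manylinux1_i686",
        "manylinux_2_24_x86_64", "manylinux_2_24_i686",
        "manylinux_2_24_aarch64", "manylinux_2_24_ppc64le",
        "manylinux_2_24_s390x", "musllinux_1_1_x86_64",
    ]

    wheel_targets = ["wheel_" + image for image in MANYLINUX_IMAGES]

    # $(filter-out %_x86_64, $(filter-out %_i686, ...)) keeps only the emulated architectures
    needs_qemu = [
        target for target in wheel_targets
        if not target.endswith("_x86_64") and not target.endswith("_i686")
    ]
    print(needs_qemu)
    # ['wheel_manylinux_2_24_aarch64', 'wheel_manylinux_2_24_ppc64le', 'wheel_manylinux_2_24_s390x']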
diff --git a/README.rst b/README.rst
index 3ad1ba177..01962c359 100644
--- a/README.rst
+++ b/README.rst
@@ -50,6 +50,11 @@ for other ways to support the lxml project,
as well as commercial consulting, customisations and trainings on lxml and
fast Python XML processing.
+Note that we are not accepting donations in crypto currencies.
+Much of the development and hosting for lxml is done in a carbon-neutral way
+or with compensated and very low emissions.
+Crypto currencies do not fit into that ambition.
+
.. |Donate| image:: https://lxml.de/paypal_btn_donateCC_LG.png
:width: 160
:height: 47
@@ -69,6 +74,12 @@ Another supporter of the lxml project is
Project income report
---------------------
+* Total project income in 2020: EUR 6065.86 (505.49 € / month)
+
+ - Tidelift: EUR 4064.77
+ - Paypal: EUR 1401.09
+ - other: EUR 600.00
+
* Total project income in 2019: EUR 717.52 (59.79 € / month)
- Tidelift: EUR 360.30
diff --git a/appveyor.yml b/appveyor.yml
index b8d7a72db..42eecd57b 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -2,6 +2,8 @@ version: 1.0.{build}
environment:
matrix:
+ - python: 310
+ - python: 310-x64
- python: 39
- python: 39-x64
- python: 27
@@ -14,6 +16,9 @@ environment:
- python: 36-x64
- python: 35
- python: 35-x64
+ - python: 310
+ arch: arm64
+ env: STATIC_DEPS=true
- python: 39
arch: arm64
env: STATIC_DEPS=true
diff --git a/benchmark/bench_etree.py b/benchmark/bench_etree.py
index 0f66db8e9..69ac5208e 100644
--- a/benchmark/bench_etree.py
+++ b/benchmark/bench_etree.py
@@ -1,9 +1,10 @@
import copy
+from io import BytesIO
from itertools import *
import benchbase
from benchbase import (with_attributes, with_text, onlylib,
- serialized, children, nochange, BytesIO)
+ serialized, children, nochange)
TEXT = "some ASCII text"
 UTEXT = u"some klingon: \uF8D2"
diff --git a/benchmark/benchbase.py b/benchmark/benchbase.py
index e34e61036..a9f9ad857 100644
--- a/benchmark/benchbase.py
+++ b/benchmark/benchbase.py
@@ -1,4 +1,4 @@
-import sys, re, string, time, copy, gc
+import sys, re, string, copy, gc
from itertools import *
import time
@@ -474,6 +474,8 @@ def main(benchmark_class):
if import_lxml:
from lxml import etree
_etrees.append(etree)
+ print("Using lxml %s (with libxml2 %s)" % (
+ etree.__version__, '.'.join(map(str, etree.LIBXML_VERSION))))
try:
sys.argv.remove('-fel')
@@ -521,6 +523,8 @@ def main(benchmark_class):
print("No library to test. Exiting.")
sys.exit(1)
+ print("Running benchmarks in Python %s" % (sys.version_info,))
+
print("Preparing test suites and trees ...")
selected = set( sys.argv[1:] )
benchmark_suites, benchmarks = \
diff --git a/buildlibxml.py b/buildlibxml.py
index f45c86086..086d9115d 100644
--- a/buildlibxml.py
+++ b/buildlibxml.py
@@ -1,4 +1,4 @@
-import os, re, sys, subprocess
+import os, re, sys, subprocess, platform
import tarfile
from distutils import log, version
from contextlib import closing
@@ -38,7 +38,13 @@ def download_and_extract_windows_binaries(destdir):
if release_path in filename
]
- arch = "win64" if sys.maxsize > 2**32 else "win32"
+ if platform.machine() == 'ARM64':
+ arch = "win-arm64"
+ elif sys.maxsize > 2**32:
+ arch = "win64"
+ else:
+ arch = "win32"
+
if sys.version_info < (3, 5):
arch = 'vs2008.' + arch
@@ -437,6 +443,15 @@ def has_current_lib(name, build_dir, _build_all_following=[False]):
if not has_current_lib("libxml2", libxml2_dir):
cmmi(libxml2_configure_cmd, libxml2_dir, multicore, **call_setup)
+ # Fix up libxslt configure script (needed up to and including 1.1.34)
+ # https://gitlab.gnome.org/GNOME/libxslt/-/commit/90c34c8bb90e095a8a8fe8b2ce368bd9ff1837cc
+ with open(os.path.join(libxslt_dir, "configure"), 'rb') as f:
+ config_script = f.read()
+ if b' --libs print ' in config_script:
+ config_script = config_script.replace(b' --libs print ', b' --libs ')
+ with open(os.path.join(libxslt_dir, "configure"), 'wb') as f:
+ f.write(config_script)
+
# build libxslt
libxslt_configure_cmd = configure_cmd + [
'--without-python',
diff --git a/doc/FAQ.txt b/doc/FAQ.txt
index 24ec8c42e..48f69a6ad 100644
--- a/doc/FAQ.txt
+++ b/doc/FAQ.txt
@@ -117,11 +117,11 @@ wrote a nice article about high-performance aspects when `parsing
large files with lxml`_.
.. _`lxml.etree Tutorial`: tutorial.html
-.. _`tutorial for ElementTree`: https://effbot.org/zone/element.htm
+.. _`tutorial for ElementTree`: https://web.archive.org/web/20200720191942/https://effbot.org/zone/element.htm
.. _`extended etree API`: api.html
.. _`objectify documentation`: objectify.html
-.. _`Python XML processing with lxml`: http://www.nmt.edu/tcc/help/pubs/pylxml/
-.. _`element library`: https://effbot.org/zone/element-lib.htm
+.. _`Python XML processing with lxml`: https://web.archive.org/web/20190522191656/http://infohost.nmt.edu/tcc/help/pubs/pylxml/web/index.html
+.. _`element library`: https://web.archive.org/web/20200703234431/http://www.effbot.org/zone/element-lib.htm
.. _`parsing large files with lxml`: http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
@@ -143,7 +143,7 @@ web page`_.
The `generated API documentation`_ is a comprehensive API reference
for the lxml package.
-.. _`ElementTree API`: https://effbot.org/zone/element-index.htm
+.. _`ElementTree API`: https://web.archive.org/web/20200703191710/http://www.effbot.org/zone/element-index.htm
.. _`the web page`: https://lxml.de/#documentation
.. _`generated API documentation`: api/index.html
@@ -1239,8 +1239,8 @@ Element. Its children will then inherit this prefix for serialization.
How can I specify a default namespace for XPath expressions?
------------------------------------------------------------
-You can't. In XPath, there is no such thing as a default namespace. Just use
-an arbitrary prefix and let the namespace dictionary of the XPath evaluators
+You can't. In XPath 1.0, there is no such thing as a default namespace. Just
+use an arbitrary prefix and let the namespace dictionary of the XPath evaluators
map it to your namespace. See also the question above.
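For illustration, a minimal sketch of the prefix-mapping approach described above (the namespace URI and element names are made up)::

    from lxml import etree

    root = etree.XML('<root xmlns="http://example.com/ns"><child/></root>')

    # 'x' is an arbitrary prefix chosen by the caller; it appears nowhere in the document
    result = root.xpath('//x:child', namespaces={'x': 'http://example.com/ns'})
    print([el.tag for el in result])  # ['{http://example.com/ns}child']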
diff --git a/doc/capi.txt b/doc/capi.txt
index 0167a5a4e..0471d811e 100644
--- a/doc/capi.txt
+++ b/doc/capi.txt
@@ -7,11 +7,10 @@ C extensions to efficiently access public functions and classes of lxml,
without going through the Python API.
The API is described in the file `etreepublic.pxd`_, which is directly
-c-importable by extension modules implemented in Pyrex_ or Cython_.
+c-importable by extension modules implemented in Cython_.
.. _`etreepublic.pxd`: https://github.com/lxml/lxml/blob/master/src/lxml/includes/etreepublic.pxd
-.. _Cython: http://cython.org
-.. _Pyrex: http://www.cosc.canterbury.ac.nz/~greg/python/Pyrex/
+.. _Cython: https://cython.org
.. contents::
..
@@ -45,7 +44,7 @@ Writing external modules in Cython
----------------------------------
This is the easiest way of extending lxml at the C level. A Cython_
-(or Pyrex_) module should start like this::
+module should start like this::
# My Cython extension
diff --git a/doc/compatibility.txt b/doc/compatibility.txt
index e23d18171..654cb7c4e 100644
--- a/doc/compatibility.txt
+++ b/doc/compatibility.txt
@@ -146,11 +146,11 @@ ElementTree. Nonetheless, some differences and incompatibilities exist:
not. This means that a comment text "text" that ElementTree serializes as
"" will become "" in lxml.
-* When the string '*' is used as tag filter in the ``Element.getiterator()``
- method, ElementTree returns all elements in the tree, including comments and
- processing instructions. lxml.etree only returns real Elements, i.e. tree
- nodes that have a string tag name. Without a filter, both libraries iterate
- over all nodes.
+* When the string ``'*'`` is used as tag filter in the ``Element.iter()`` and
+ ``.find*()`` methods, ElementTree returns all elements in the tree, including
+ comments and processing instructions. lxml.etree only returns real Elements,
+ i.e. tree nodes that have a string tag name. Without a filter, both libraries
+ iterate over all nodes.
Note that currently only lxml.etree supports passing the ``Element`` factory
function as filter to select only Elements. Both libraries support passing
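A short sketch of the difference described above, assuming default parser options (the sample document is made up)::

    from lxml import etree

    root = etree.XML('<root><!-- note --><a/><?pi data?></root>')

    # lxml.etree: '*' matches only real Elements, i.e. nodes with a string tag name
    print([el.tag for el in root.iter('*')])   # ['root', 'a']

    # without a filter, comments and processing instructions are included as well
    print(len(list(root.iter())))              # 4 nodes: root, the comment, <a/>, the PI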
diff --git a/doc/html/flattr-badge-large.png b/doc/html/flattr-badge-large.png
deleted file mode 100644
index 110530585..000000000
Binary files a/doc/html/flattr-badge-large.png and /dev/null differ
diff --git a/doc/html/style.css b/doc/html/style.css
index 4cc454aac..7d1b0e675 100644
--- a/doc/html/style.css
+++ b/doc/html/style.css
@@ -164,7 +164,7 @@ div.banner {
border: 2px solid darkred;
color: darkgreen;
line-height: 1em;
- margin: 1ex;
+ margin: 3ex 1ex 1ex;
padding: 3pt;
}
@@ -321,6 +321,18 @@ html > .pagequote {
position: fixed;
}
+div.admonition {
+ border: solid 1px;
+ border-radius: 1ex;
+ margin: 0.5ex;
+ padding: 0.5ex 1.5ex 0.5ex 1.5ex;
+ background: lightyellow;
+}
+
+div.admonition > .admonition-title {
+ background: yellow;
+}
+
code {
color: Black;
background-color: #f0f0f0;
diff --git a/doc/licenses/ZopePublicLicense.txt b/doc/licenses/ZopePublicLicense.txt
deleted file mode 100644
index 44e0648b3..000000000
--- a/doc/licenses/ZopePublicLicense.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-Zope Public License (ZPL) Version 2.0
------------------------------------------------
-
-This software is Copyright (c) Zope Corporation (tm) and
-Contributors. All rights reserved.
-
-This license has been certified as open source. It has also
-been designated as GPL compatible by the Free Software
-Foundation (FSF).
-
-Redistribution and use in source and binary forms, with or
-without modification, are permitted provided that the
-following conditions are met:
-
-1. Redistributions in source code must retain the above
- copyright notice, this list of conditions, and the following
- disclaimer.
-
-2. Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions, and the following
- disclaimer in the documentation and/or other materials
- provided with the distribution.
-
-3. The name Zope Corporation (tm) must not be used to
- endorse or promote products derived from this software
- without prior written permission from Zope Corporation.
-
-4. The right to distribute this software or to use it for
- any purpose does not give you the right to use Servicemarks
- (sm) or Trademarks (tm) of Zope Corporation. Use of them is
- covered in a separate agreement (see
- http://www.zope.com/Marks).
-
-5. If any files are modified, you must cause the modified
- files to carry prominent notices stating that you changed
- the files and the date of any change.
-
-Disclaimer
-
- THIS SOFTWARE IS PROVIDED BY ZOPE CORPORATION ``AS IS''
- AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
- NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- NO EVENT SHALL ZOPE CORPORATION OR ITS CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- DAMAGE.
-
-
-This software consists of contributions made by Zope
-Corporation and many individuals on behalf of Zope
-Corporation. Specific attributions are listed in the
-accompanying credits file.
diff --git a/doc/main.txt b/doc/main.txt
index ead457d6f..55e32d545 100644
--- a/doc/main.txt
+++ b/doc/main.txt
@@ -159,8 +159,8 @@ Index `_ (PyPI). It has the source
that compiles on various platforms. The source distribution is signed
with `this key `_.
-The latest version is `lxml 4.6.3`_, released 2021-03-21
-(`changes for 4.6.3`_). `Older versions <#old-versions>`_
+The latest version is `lxml 4.6.5`_, released 2021-12-12
+(`changes for 4.6.5`_). `Older versions <#old-versions>`_
are listed below.
Please take a look at the
@@ -256,7 +256,11 @@ See the websites of lxml
..
and the `latest in-development version `_.
-.. _`PDF documentation`: lxmldoc-4.6.3.pdf
+.. _`PDF documentation`: lxmldoc-4.6.5.pdf
+
+* `lxml 4.6.5`_, released 2021-12-12 (`changes for 4.6.5`_)
+
+* `lxml 4.6.4`_, released 2021-11-01 (`changes for 4.6.4`_)
* `lxml 4.6.3`_, released 2021-03-21 (`changes for 4.6.3`_)
@@ -282,6 +286,8 @@ See the websites of lxml
* `older releases `_
+.. _`lxml 4.6.5`: /files/lxml-4.6.5.tgz
+.. _`lxml 4.6.4`: /files/lxml-4.6.4.tgz
.. _`lxml 4.6.3`: /files/lxml-4.6.3.tgz
.. _`lxml 4.6.2`: /files/lxml-4.6.2.tgz
.. _`lxml 4.6.1`: /files/lxml-4.6.1.tgz
@@ -294,6 +300,8 @@ See the websites of lxml
.. _`lxml 4.4.1`: /files/lxml-4.4.1.tgz
.. _`lxml 4.4.0`: /files/lxml-4.4.0.tgz
+.. _`changes for 4.6.5`: /changes-4.6.5.html
+.. _`changes for 4.6.4`: /changes-4.6.4.html
.. _`changes for 4.6.3`: /changes-4.6.3.html
.. _`changes for 4.6.2`: /changes-4.6.2.html
.. _`changes for 4.6.1`: /changes-4.6.1.html
diff --git a/doc/mkhtml.py b/doc/mkhtml.py
index c65233563..36da5de99 100644
--- a/doc/mkhtml.py
+++ b/doc/mkhtml.py
@@ -194,7 +194,7 @@ def insert_link(match):
out_file.close()
-def publish(dirname, lxml_path, release):
+def publish(dirname, lxml_path, release, with_donations=True):
if not os.path.exists(dirname):
os.mkdir(dirname)
@@ -245,7 +245,8 @@ def publish(dirname, lxml_path, release):
menu = Element("div", {'class': 'sidemenu', 'id': 'sidemenu'})
SubElement(menu, 'div', {'class': 'menutrigger', 'onclick': 'trigger_menu(event)'}).text = "Menu"
menu_div = SubElement(menu, 'div', {'class': 'menu'})
- inject_banner(menu_div)
+ if with_donations:
+ inject_banner(menu_div)
# build HTML pages and parse them back
for section, text_files in SITE_STRUCTURE:
@@ -266,13 +267,14 @@ def publish(dirname, lxml_path, release):
rest2html(script, path, outpath, stylesheet_url)
tree = parse(outpath)
- page_div = tree.getroot()[1][0] # html->body->div[class=document]
- inject_banner(page_div)
+ if with_donations:
+ page_div = tree.getroot()[1][0] # html->body->div[class=document]
+ inject_banner(page_div)
- if filename == 'main.txt':
- # inject donation buttons
- #inject_flatter_button(tree)
- inject_donate_buttons(lxml_path, script, tree)
+ if filename == 'main.txt':
+ # inject donation buttons
+ #inject_flatter_button(tree)
+ inject_donate_buttons(lxml_path, script, tree)
trees[filename] = (tree, basename, outpath)
build_menu(tree, basename, section_head)
@@ -324,4 +326,7 @@ def publish(dirname, lxml_path, release):
if __name__ == '__main__':
- publish(sys.argv[1], sys.argv[2], sys.argv[3])
+ no_donations = '--no-donations' in sys.argv[1:]
+ if no_donations:
+ sys.argv.remove('--no-donations')
+ publish(sys.argv[1], sys.argv[2], sys.argv[3], with_donations=not no_donations)
diff --git a/doc/performance.txt b/doc/performance.txt
index 1a0c9ad6b..c6f2edb42 100644
--- a/doc/performance.txt
+++ b/doc/performance.txt
@@ -88,18 +88,11 @@ very easy to add as tiny test methods, so if you write a performance test for
a specific part of the API yourself, please consider sending it to the lxml
mailing list.
-The timings presented below compare lxml 3.1.1 (with libxml2 2.9.0) to the
+The timings presented below compare lxml 4.6.3 (with libxml2 2.9.10) to the
latest released versions of ElementTree (with cElementTree as accelerator
-module) in the standard library of CPython 3.3.0. They were run
-single-threaded on a 2.9GHz 64bit double core Intel i7 machine under
-Ubuntu Linux 12.10 (Quantal). The C libraries were compiled with the
-same platform specific optimisation flags. The Python interpreter was
-also manually compiled for the platform. Note that many of the following
-ElementTree timings are therefore better than what a normal Python
-installation with the standard library (c)ElementTree modules would yield.
-Note also that CPython 2.7 and 3.2+ come with a newer ElementTree version,
-so older Python installations will not perform as good for (c)ElementTree,
-and sometimes substantially worse.
+module) in the standard library of CPython 3.8.10. They were run
+single-threaded on a 2.3GHz 64bit double core Intel i5 machine under
+Ubuntu Linux 20.04 (Focal).
.. _`bench_etree.py`: https://github.com/lxml/lxml/blob/master/benchmark/bench_etree.py
.. _`bench_xpath.py`: https://github.com/lxml/lxml/blob/master/benchmark/bench_xpath.py
@@ -141,50 +134,50 @@ is native to libxml2. While 20 to 40 times faster than (c)ElementTree
lxml is still more than 10 times as fast as the much improved
ElementTree 1.3 in recent Python versions::
- lxe: tostring_utf16 (S-TR T1) 7.9958 msec/pass
- cET: tostring_utf16 (S-TR T1) 83.1358 msec/pass
+ lxe: tostring_utf16 (S-TR T1) 5.9340 msec/pass
+ cET: tostring_utf16 (S-TR T1) 38.3270 msec/pass
- lxe: tostring_utf16 (UATR T1) 8.3222 msec/pass
- cET: tostring_utf16 (UATR T1) 84.4688 msec/pass
+ lxe: tostring_utf16 (UATR T1) 6.2032 msec/pass
+ cET: tostring_utf16 (UATR T1) 37.7944 msec/pass
- lxe: tostring_utf16 (S-TR T2) 8.2297 msec/pass
- cET: tostring_utf16 (S-TR T2) 87.3415 msec/pass
+ lxe: tostring_utf16 (S-TR T2) 6.1841 msec/pass
+ cET: tostring_utf16 (S-TR T2) 40.2577 msec/pass
- lxe: tostring_utf8 (S-TR T2) 6.5677 msec/pass
- cET: tostring_utf8 (S-TR T2) 76.2064 msec/pass
+ lxe: tostring_utf8 (S-TR T2) 4.6697 msec/pass
+ cET: tostring_utf8 (S-TR T2) 30.5173 msec/pass
- lxe: tostring_utf8 (U-TR T3) 1.1952 msec/pass
- cET: tostring_utf8 (U-TR T3) 22.0058 msec/pass
+ lxe: tostring_utf8 (U-TR T3) 1.2085 msec/pass
+ cET: tostring_utf8 (U-TR T3) 9.0246 msec/pass
The difference is somewhat smaller for plain text serialisation::
- lxe: tostring_text_ascii (S-TR T1) 2.7738 msec/pass
- cET: tostring_text_ascii (S-TR T1) 4.7629 msec/pass
+ lxe: tostring_text_ascii (S-TR T1) 2.6727 msec/pass
+ cET: tostring_text_ascii (S-TR T1) 2.9683 msec/pass
- lxe: tostring_text_ascii (S-TR T3) 0.8273 msec/pass
- cET: tostring_text_ascii (S-TR T3) 1.5273 msec/pass
+ lxe: tostring_text_ascii (S-TR T3) 0.6952 msec/pass
+ cET: tostring_text_ascii (S-TR T3) 1.0073 msec/pass
- lxe: tostring_text_utf16 (S-TR T1) 2.7659 msec/pass
- cET: tostring_text_utf16 (S-TR T1) 10.5038 msec/pass
+ lxe: tostring_text_utf16 (S-TR T1) 2.7366 msec/pass
+ cET: tostring_text_utf16 (S-TR T1) 7.3647 msec/pass
- lxe: tostring_text_utf16 (U-TR T1) 2.8017 msec/pass
- cET: tostring_text_utf16 (U-TR T1) 10.5207 msec/pass
+ lxe: tostring_text_utf16 (U-TR T1) 3.0322 msec/pass
+ cET: tostring_text_utf16 (U-TR T1) 7.5922 msec/pass
The ``tostring()`` function also supports serialisation to a Python
unicode string object, which is currently faster in ElementTree
-under CPython 3.3::
+under CPython 3.8::
- lxe: tostring_text_unicode (S-TR T1) 2.6896 msec/pass
- cET: tostring_text_unicode (S-TR T1) 1.0056 msec/pass
+ lxe: tostring_text_unicode (S-TR T1) 2.7645 msec/pass
+ cET: tostring_text_unicode (S-TR T1) 1.1806 msec/pass
- lxe: tostring_text_unicode (U-TR T1) 2.7366 msec/pass
- cET: tostring_text_unicode (U-TR T1) 1.0154 msec/pass
+ lxe: tostring_text_unicode (U-TR T1) 2.9871 msec/pass
+ cET: tostring_text_unicode (U-TR T1) 1.1659 msec/pass
- lxe: tostring_text_unicode (S-TR T3) 0.7997 msec/pass
- cET: tostring_text_unicode (S-TR T3) 0.3154 msec/pass
+ lxe: tostring_text_unicode (S-TR T3) 0.7446 msec/pass
+ cET: tostring_text_unicode (S-TR T3) 0.4532 msec/pass
lxe: tostring_text_unicode (U-TR T4) 0.0048 msec/pass
- cET: tostring_text_unicode (U-TR T4) 0.0160 msec/pass
+ cET: tostring_text_unicode (U-TR T4) 0.0134 msec/pass
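The two serialisation targets compared above look like this in code (a minimal sketch with a made-up document)::

    from lxml import etree

    root = etree.XML('<root><child>text</child></root>')

    data_bytes = etree.tostring(root, encoding='UTF-8')    # bytes output
    data_text  = etree.tostring(root, encoding='unicode')  # Python str output
    print(type(data_bytes).__name__, type(data_text).__name__)  # bytes str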
For parsing, lxml.etree and cElementTree compete for the medal.
Depending on the input, either of the two can be faster. The (c)ET
@@ -192,37 +185,37 @@ libraries use a very thin layer on top of the expat parser, which is
known to be very fast. Here are some timings from the benchmarking
suite::
- lxe: parse_bytesIO (SAXR T1) 13.0246 msec/pass
- cET: parse_bytesIO (SAXR T1) 8.2929 msec/pass
+ lxe: parse_bytesIO (SAXR T1) 14.2074 msec/pass
+ cET: parse_bytesIO (SAXR T1) 7.9336 msec/pass
- lxe: parse_bytesIO (S-XR T3) 1.3542 msec/pass
- cET: parse_bytesIO (S-XR T3) 2.4023 msec/pass
+ lxe: parse_bytesIO (S-XR T3) 1.4477 msec/pass
+ cET: parse_bytesIO (S-XR T3) 2.1925 msec/pass
- lxe: parse_bytesIO (UAXR T3) 7.5610 msec/pass
- cET: parse_bytesIO (UAXR T3) 11.2455 msec/pass
+ lxe: parse_bytesIO (UAXR T3) 8.4128 msec/pass
+ cET: parse_bytesIO (UAXR T3) 12.2926 msec/pass
And another couple of timings `from a benchmark`_ that Fredrik Lundh
`used to promote cElementTree`_, comparing a number of different
parsers. First, parsing a 274KB XML file containing Shakespeare's
Hamlet::
- xml.etree.ElementTree.parse done in 0.017 seconds
+ xml.etree.ElementTree.parse done in 0.006 seconds
xml.etree.cElementTree.parse done in 0.007 seconds
- xml.etree.cElementTree.XMLParser.feed(): 6636 nodes read in 0.007 seconds
- lxml.etree.parse done in 0.003 seconds
- drop_whitespace.parse done in 0.003 seconds
+ xml.etree.cElementTree.XMLParser.feed(): 6636 nodes read in 0.006 seconds
+ lxml.etree.parse done in 0.004 seconds
+ drop_whitespace.parse done in 0.004 seconds
lxml.etree.XMLParser.feed(): 6636 nodes read in 0.004 seconds
- minidom tree read in 0.080 seconds
+ minidom tree read in 0.066 seconds
And a 3.4MB XML file containing the Old Testament::
- xml.etree.ElementTree.parse done in 0.038 seconds
- xml.etree.cElementTree.parse done in 0.030 seconds
- xml.etree.cElementTree.XMLParser.feed(): 25317 nodes read in 0.030 seconds
- lxml.etree.parse done in 0.016 seconds
- drop_whitespace.parse done in 0.015 seconds
- lxml.etree.XMLParser.feed(): 25317 nodes read in 0.022 seconds
- minidom tree read in 0.288 seconds
+ xml.etree.ElementTree.parse done in 0.037 seconds
+ xml.etree.cElementTree.parse done in 0.036 seconds
+ xml.etree.cElementTree.XMLParser.feed(): 25317 nodes read in 0.036 seconds
+ lxml.etree.parse done in 0.025 seconds
+ drop_whitespace.parse done in 0.022 seconds
+ lxml.etree.XMLParser.feed(): 25317 nodes read in 0.026 seconds
+ minidom tree read in 0.194 seconds
.. _`from a benchmark`: http://svn.effbot.org/public/elementtree-1.3/benchmark.py
.. _`used to promote cElementTree`: http://effbot.org/zone/celementtree.htm#benchmarks
@@ -232,43 +225,42 @@ of the process in KB before and after parsing (using os.fork() to
make sure we start from a clean state each time). For the 274KB
hamlet.xml file::
- Memory usage: 7284
- xml.etree.ElementTree.parse done in 0.017 seconds
- Memory usage: 9432 (+2148)
+ Memory usage: 9256
+ xml.etree.ElementTree.parse done in 0.006 seconds
+ Memory usage: 12764 (+3508)
xml.etree.cElementTree.parse done in 0.007 seconds
- Memory usage: 9432 (+2152)
- xml.etree.cElementTree.XMLParser.feed(): 6636 nodes read in 0.007 seconds
- Memory usage: 9448 (+2164)
- lxml.etree.parse done in 0.003 seconds
- Memory usage: 11032 (+3748)
- drop_whitespace.parse done in 0.003 seconds
- Memory usage: 10224 (+2940)
+ Memory usage: 12764 (+3508)
+ xml.etree.cElementTree.XMLParser.feed(): 6636 nodes read in 0.006 seconds
+ Memory usage: 12720 (+3464)
+ lxml.etree.parse done in 0.004 seconds
+ Memory usage: 15052 (+5796)
+ drop_whitespace.parse done in 0.004 seconds
+ Memory usage: 14040 (+4784)
lxml.etree.XMLParser.feed(): 6636 nodes read in 0.004 seconds
- Memory usage: 11804 (+4520)
- minidom tree read in 0.080 seconds
- Memory usage: 12324 (+5040)
+ Memory usage: 15812 (+6556)
+ minidom tree read in 0.066 seconds
+ Memory usage: 15332 (+6076)
And for the 3.4MB Old Testament XML file::
- Memory usage: 10420
- xml.etree.ElementTree.parse done in 0.038 seconds
- Memory usage: 20660 (+10240)
- xml.etree.cElementTree.parse done in 0.030 seconds
- Memory usage: 20660 (+10240)
- xml.etree.cElementTree.XMLParser.feed(): 25317 nodes read in 0.030 seconds
- Memory usage: 20844 (+10424)
- lxml.etree.parse done in 0.016 seconds
- Memory usage: 27624 (+17204)
- drop_whitespace.parse done in 0.015 seconds
- Memory usage: 24468 (+14052)
- lxml.etree.XMLParser.feed(): 25317 nodes read in 0.022 seconds
- Memory usage: 29844 (+19424)
- minidom tree read in 0.288 seconds
- Memory usage: 28788 (+18368)
+ Memory usage: 12456
+ xml.etree.ElementTree.parse done in 0.037 seconds
+ Memory usage: 23288 (+10832)
+ xml.etree.cElementTree.parse done in 0.036 seconds
+ Memory usage: 23288 (+10832)
+ xml.etree.cElementTree.XMLParser.feed(): 25317 nodes read in 0.036 seconds
+ Memory usage: 23644 (+11220)
+ lxml.etree.parse done in 0.025 seconds
+ Memory usage: 31404 (+18948)
+ drop_whitespace.parse done in 0.022 seconds
+ Memory usage: 28752 (+16296)
+ lxml.etree.XMLParser.feed(): 25317 nodes read in 0.026 seconds
+ Memory usage: 33924 (+21500)
+ minidom tree read in 0.194 seconds
+ Memory usage: 31284 (+18828)
As can be seen from the sizes, both lxml.etree and cElementTree are
-rather memory friendly compared to the pure Python libraries
-ElementTree and (especially) minidom. Comparing to older CPython
+rather memory friendly and fast. Compared to older CPython
versions, the memory footprint of the minidom library was considerably
reduced in CPython 3.3, by about a factor of 4 in this case.
@@ -277,26 +269,26 @@ rather close to each other, usually within a factor of two, with
winners well distributed over both sides. Similar timings can be
observed for the ``iterparse()`` function::
- lxe: iterparse_bytesIO (SAXR T1) 17.9198 msec/pass
- cET: iterparse_bytesIO (SAXR T1) 14.4982 msec/pass
+ lxe: iterparse_bytesIO (SAXR T1) 20.3598 msec/pass
+ cET: iterparse_bytesIO (SAXR T1) 10.8948 msec/pass
- lxe: iterparse_bytesIO (UAXR T3) 8.8522 msec/pass
- cET: iterparse_bytesIO (UAXR T3) 12.9857 msec/pass
+ lxe: iterparse_bytesIO (UAXR T3) 10.1640 msec/pass
+ cET: iterparse_bytesIO (UAXR T3) 12.9926 msec/pass
However, if you benchmark the complete round-trip of a serialise-parse
cycle, the numbers will look similar to these::
- lxe: write_utf8_parse_bytesIO (S-TR T1) 19.8867 msec/pass
- cET: write_utf8_parse_bytesIO (S-TR T1) 80.7259 msec/pass
+ lxe: write_utf8_parse_bytesIO (S-TR T1) 18.9857 msec/pass
+ cET: write_utf8_parse_bytesIO (S-TR T1) 35.7475 msec/pass
- lxe: write_utf8_parse_bytesIO (UATR T2) 23.7896 msec/pass
- cET: write_utf8_parse_bytesIO (UATR T2) 98.0766 msec/pass
+ lxe: write_utf8_parse_bytesIO (UATR T2) 22.4853 msec/pass
+ cET: write_utf8_parse_bytesIO (UATR T2) 42.6254 msec/pass
- lxe: write_utf8_parse_bytesIO (S-TR T3) 3.0684 msec/pass
- cET: write_utf8_parse_bytesIO (S-TR T3) 24.6122 msec/pass
+ lxe: write_utf8_parse_bytesIO (S-TR T3) 3.3801 msec/pass
+ cET: write_utf8_parse_bytesIO (S-TR T3) 11.2493 msec/pass
- lxe: write_utf8_parse_bytesIO (SATR T4) 0.3495 msec/pass
- cET: write_utf8_parse_bytesIO (SATR T4) 1.9610 msec/pass
+ lxe: write_utf8_parse_bytesIO (SATR T4) 0.4263 msec/pass
+ cET: write_utf8_parse_bytesIO (SATR T4) 1.0326 msec/pass
For applications that require a high parser throughput of large files,
and that do little to no serialization, both cET and lxml.etree are a
@@ -352,14 +344,14 @@ restructuring. This can be seen from the tree setup times of the
benchmark (given in seconds)::
lxe: -- S- U- -A SA UA
- T1: 0.0299 0.0343 0.0344 0.0293 0.0345 0.0342
- T2: 0.0368 0.0423 0.0418 0.0427 0.0474 0.0459
- T3: 0.0088 0.0084 0.0086 0.0251 0.0258 0.0261
- T4: 0.0002 0.0002 0.0002 0.0005 0.0006 0.0006
+ T1: 0.0219 0.0254 0.0257 0.0216 0.0259 0.0259
+ T2: 0.0234 0.0279 0.0283 0.0271 0.0318 0.0307
+ T3: 0.0051 0.0050 0.0058 0.0218 0.0233 0.0231
+ T4: 0.0001 0.0001 0.0001 0.0004 0.0004 0.0004
cET: -- S- U- -A SA UA
- T1: 0.0050 0.0045 0.0093 0.0044 0.0043 0.0043
- T2: 0.0073 0.0075 0.0074 0.0201 0.0075 0.0074
- T3: 0.0033 0.0213 0.0032 0.0034 0.0033 0.0035
+ T1: 0.0035 0.0029 0.0078 0.0031 0.0031 0.0029
+ T2: 0.0047 0.0051 0.0053 0.0046 0.0055 0.0048
+ T3: 0.0016 0.0216 0.0027 0.0021 0.0023 0.0026
T4: 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
The timings are somewhat close to each other, although cET can be
@@ -379,30 +371,30 @@ The same tree overhead makes operations like collecting children as in
a shallow copy of their list of children, lxml has to create a Python
object for each child and collect them in a list::
- lxe: root_list_children (--TR T1) 0.0038 msec/pass
- cET: root_list_children (--TR T1) 0.0010 msec/pass
+ lxe: root_list_children (--TR T1) 0.0036 msec/pass
+ cET: root_list_children (--TR T1) 0.0005 msec/pass
- lxe: root_list_children (--TR T2) 0.0455 msec/pass
- cET: root_list_children (--TR T2) 0.0050 msec/pass
+ lxe: root_list_children (--TR T2) 0.0634 msec/pass
+ cET: root_list_children (--TR T2) 0.0086 msec/pass
This handicap is also visible when accessing single children::
- lxe: first_child (--TR T2) 0.0424 msec/pass
- cET: first_child (--TR T2) 0.0384 msec/pass
+ lxe: first_child (--TR T2) 0.0601 msec/pass
+ cET: first_child (--TR T2) 0.0548 msec/pass
- lxe: last_child (--TR T1) 0.0477 msec/pass
- cET: last_child (--TR T1) 0.0467 msec/pass
+ lxe: last_child (--TR T1) 0.0570 msec/pass
+ cET: last_child (--TR T1) 0.0534 msec/pass
... unless you also add the time to find a child index in a bigger
list. ET and cET use Python lists here, which are based on arrays.
The data structure used by libxml2 is a linked tree, and thus, a
linked list of children::
- lxe: middle_child (--TR T1) 0.0710 msec/pass
- cET: middle_child (--TR T1) 0.0420 msec/pass
+ lxe: middle_child (--TR T1) 0.0892 msec/pass
+ cET: middle_child (--TR T1) 0.0510 msec/pass
- lxe: middle_child (--TR T2) 1.7393 msec/pass
- cET: middle_child (--TR T2) 0.0396 msec/pass
+ lxe: middle_child (--TR T2) 2.3038 msec/pass
+ cET: middle_child (--TR T2) 0.0508 msec/pass
Element creation
@@ -412,18 +404,18 @@ As opposed to ET, libxml2 has a notion of documents that each element must be
in. This results in a major performance difference for creating independent
Elements that end up in independently created documents::
- lxe: create_elements (--TC T2) 1.0045 msec/pass
- cET: create_elements (--TC T2) 0.0753 msec/pass
+ lxe: create_elements (--TC T2) 0.8032 msec/pass
+ cET: create_elements (--TC T2) 0.0675 msec/pass
Therefore, it is always preferable to create Elements for the document they
are supposed to end up in, either as SubElements of an Element or using the
explicit ``Element.makeelement()`` call::
- lxe: makeelement (--TC T2) 1.0586 msec/pass
- cET: makeelement (--TC T2) 0.1483 msec/pass
+ lxe: makeelement (--TC T2) 0.8030 msec/pass
+ cET: makeelement (--TC T2) 0.0625 msec/pass
- lxe: create_subelements (--TC T2) 0.8826 msec/pass
- cET: create_subelements (--TC T2) 0.0827 msec/pass
+ lxe: create_subelements (--TC T2) 0.8621 msec/pass
+ cET: create_subelements (--TC T2) 0.0923 msec/pass
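In code, the creation patterns benchmarked above look roughly like this (a sketch, not taken from the benchmark suite)::

    from lxml import etree

    root = etree.Element('root')

    # created directly inside the target document
    child1 = etree.SubElement(root, 'child')

    # created for root's document via the factory, then appended
    child2 = root.makeelement('child', {})
    root.append(child2)

    # created as an independent Element (with its own document behind it),
    # which makes the later append() more expensive in lxml
    child3 = etree.Element('child')
    root.append(child3)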
So, if the main performance bottleneck of an application is creating large XML
trees in memory through calls to Element and SubElement, cET is the best
@@ -440,11 +432,11 @@ requires lxml to do recursive adaptations throughout the moved tree structure.
The following benchmark appends all root children of the second tree to the
root of the first tree::
- lxe: append_from_document (--TR T1,T2) 1.0812 msec/pass
- cET: append_from_document (--TR T1,T2) 0.1104 msec/pass
+ lxe: append_from_document (--TR T1,T2) 1.3800 msec/pass
+ cET: append_from_document (--TR T1,T2) 0.0513 msec/pass
- lxe: append_from_document (--TR T3,T4) 0.0155 msec/pass
- cET: append_from_document (--TR T3,T4) 0.0060 msec/pass
+ lxe: append_from_document (--TR T3,T4) 0.0150 msec/pass
+ cET: append_from_document (--TR T3,T4) 0.0026 msec/pass
Although these are fairly small numbers compared to parsing, this easily shows
the different performance classes for lxml and (c)ET. Where the latter do not
@@ -455,19 +447,19 @@ with the size of the tree that is moved.
This difference is not always as visible, but applies to most parts of the
API, like inserting newly created elements::
- lxe: insert_from_document (--TR T1,T2) 3.9763 msec/pass
- cET: insert_from_document (--TR T1,T2) 0.1459 msec/pass
+ lxe: insert_from_document (--TR T1,T2) 5.2345 msec/pass
+ cET: insert_from_document (--TR T1,T2) 0.0732 msec/pass
or replacing the child slice by a newly created element::
- lxe: replace_children_element (--TC T1) 0.0749 msec/pass
- cET: replace_children_element (--TC T1) 0.0081 msec/pass
+ lxe: replace_children_element (--TC T1) 0.0720 msec/pass
+ cET: replace_children_element (--TC T1) 0.0105 msec/pass
as opposed to replacing the slice with an existing element from the
same document::
- lxe: replace_children (--TC T1) 0.0052 msec/pass
- cET: replace_children (--TC T1) 0.0036 msec/pass
+ lxe: replace_children (--TC T1) 0.0060 msec/pass
+ cET: replace_children (--TC T1) 0.0050 msec/pass
While these numbers are too small to provide a major performance
impact in practice, you should keep this difference in mind when you
@@ -481,14 +473,14 @@ deepcopy
Deep copying a tree is fast in lxml::
- lxe: deepcopy_all (--TR T1) 3.1650 msec/pass
- cET: deepcopy_all (--TR T1) 53.9973 msec/pass
+ lxe: deepcopy_all (--TR T1) 4.1246 msec/pass
+ cET: deepcopy_all (--TR T1) 2.5451 msec/pass
- lxe: deepcopy_all (-ATR T2) 3.7365 msec/pass
- cET: deepcopy_all (-ATR T2) 61.6267 msec/pass
+ lxe: deepcopy_all (-ATR T2) 4.7867 msec/pass
+ cET: deepcopy_all (-ATR T2) 2.7504 msec/pass
- lxe: deepcopy_all (S-TR T3) 0.7913 msec/pass
- cET: deepcopy_all (S-TR T3) 13.6220 msec/pass
+ lxe: deepcopy_all (S-TR T3) 1.0097 msec/pass
+ cET: deepcopy_all (S-TR T3) 0.6278 msec/pass
So, for example, if you have a database-like scenario where you parse in a
large tree and then search and copy independent subtrees from it for further
@@ -504,31 +496,31 @@ traversal of the XML tree and especially if few elements are of
interest or the target element tag name is known, the ``.iter()``
method is a good choice::
- lxe: iter_all (--TR T1) 1.0529 msec/pass
- cET: iter_all (--TR T1) 0.2635 msec/pass
+ lxe: iter_all (--TR T1) 1.3661 msec/pass
+ cET: iter_all (--TR T1) 0.2670 msec/pass
- lxe: iter_islice (--TR T2) 0.0110 msec/pass
- cET: iter_islice (--TR T2) 0.0050 msec/pass
+ lxe: iter_islice (--TR T2) 0.0122 msec/pass
+ cET: iter_islice (--TR T2) 0.0033 msec/pass
- lxe: iter_tag (--TR T2) 0.0079 msec/pass
- cET: iter_tag (--TR T2) 0.0112 msec/pass
+ lxe: iter_tag (--TR T2) 0.0098 msec/pass
+ cET: iter_tag (--TR T2) 0.0086 msec/pass
- lxe: iter_tag_all (--TR T2) 0.1822 msec/pass
- cET: iter_tag_all (--TR T2) 0.5343 msec/pass
+ lxe: iter_tag_all (--TR T2) 0.6840 msec/pass
+ cET: iter_tag_all (--TR T2) 0.4323 msec/pass
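A minimal sketch of the tag-filtered traversal measured above, including the early-exit variant via ``itertools.islice()`` (the document is made up)::

    from itertools import islice
    from lxml import etree

    root = etree.XML('<root>' + '<item/>' * 100 + '<other/></root>')

    all_items   = list(root.iter('item'))             # full tag-filtered traversal
    first_three = list(islice(root.iter('item'), 3))  # stops after three matches
    print(len(all_items), len(first_three))           # 100 3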
This translates directly into similar timings for ``Element.findall()``::
- lxe: findall (--TR T2) 1.7176 msec/pass
- cET: findall (--TR T2) 0.9973 msec/pass
+ lxe: findall (--TR T2) 3.9611 msec/pass
+ cET: findall (--TR T2) 0.9227 msec/pass
- lxe: findall (--TR T3) 0.3967 msec/pass
- cET: findall (--TR T3) 0.2525 msec/pass
+ lxe: findall (--TR T3) 0.3989 msec/pass
+ cET: findall (--TR T3) 0.2670 msec/pass
- lxe: findall_tag (--TR T2) 0.2258 msec/pass
- cET: findall_tag (--TR T2) 0.5770 msec/pass
+ lxe: findall_tag (--TR T2) 0.7420 msec/pass
+ cET: findall_tag (--TR T2) 0.4942 msec/pass
- lxe: findall_tag (--TR T3) 0.1085 msec/pass
- cET: findall_tag (--TR T3) 0.1919 msec/pass
+ lxe: findall_tag (--TR T3) 0.1099 msec/pass
+ cET: findall_tag (--TR T3) 0.1748 msec/pass
Note that all three libraries currently use the same Python
implementation for ``.findall()``, except for their native tree
@@ -548,38 +540,38 @@ provides more than one way of accessing it and you should take care which part
of the lxml API you use. The most straight forward way is to call the
``xpath()`` method on an Element or ElementTree::
- lxe: xpath_method (--TC T1) 0.3982 msec/pass
- lxe: xpath_method (--TC T2) 7.8895 msec/pass
- lxe: xpath_method (--TC T3) 0.0477 msec/pass
- lxe: xpath_method (--TC T4) 0.3982 msec/pass
+ lxe: xpath_method (--TC T1) 0.2828 msec/pass
+ lxe: xpath_method (--TC T2) 5.4705 msec/pass
+ lxe: xpath_method (--TC T3) 0.0324 msec/pass
+ lxe: xpath_method (--TC T4) 0.2804 msec/pass
This is well suited for testing and when the XPath expressions are as diverse
as the trees they are called on. However, if you have a single XPath
expression that you want to apply to a larger number of different elements,
the ``XPath`` class is the most efficient way to do it::
- lxe: xpath_class (--TC T1) 0.0713 msec/pass
- lxe: xpath_class (--TC T2) 1.1325 msec/pass
- lxe: xpath_class (--TC T3) 0.0215 msec/pass
- lxe: xpath_class (--TC T4) 0.0722 msec/pass
+ lxe: xpath_class (--TC T1) 0.0570 msec/pass
+ lxe: xpath_class (--TC T2) 0.6924 msec/pass
+ lxe: xpath_class (--TC T3) 0.0148 msec/pass
+ lxe: xpath_class (--TC T4) 0.0446 msec/pass
Note that this still allows you to use variables in the expression, so you can
parse it once and then adapt it through variables at call time. In other
cases, where you have a fixed Element or ElementTree and want to run different
expressions on it, you should consider the ``XPathEvaluator``::
- lxe: xpath_element (--TR T1) 0.1101 msec/pass
- lxe: xpath_element (--TR T2) 2.0473 msec/pass
- lxe: xpath_element (--TR T3) 0.0267 msec/pass
- lxe: xpath_element (--TR T4) 0.1087 msec/pass
+ lxe: xpath_element (--TR T1) 0.0684 msec/pass
+ lxe: xpath_element (--TR T2) 1.0865 msec/pass
+ lxe: xpath_element (--TR T3) 0.0174 msec/pass
+ lxe: xpath_element (--TR T4) 0.0665 msec/pass
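For reference, the three evaluation styles compared above, sketched with a made-up document (``$n`` shows the variable support mentioned above)::

    from lxml import etree

    root = etree.XML('<root><a n="1"/><a n="2"/></root>')

    # 1) ad-hoc evaluation through the xpath() method
    print(root.xpath('count(//a)'))                 # 2.0

    # 2) compile once with the XPath class, reuse on many elements;
    #    $n is bound at call time
    find_a = etree.XPath('//a[@n = $n]')
    print(find_a(root, n='2'))

    # 3) bind one tree to an XPathEvaluator and run different expressions on it
    evaluate = etree.XPathEvaluator(root)
    print(evaluate('//a[1]/@n'))                    # ['1']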
While it looks slightly slower, creating an XPath object for each of the
expressions generates a much higher overhead here::
- lxe: xpath_class_repeat (--TC T1 ) 0.3884 msec/pass
- lxe: xpath_class_repeat (--TC T2 ) 7.6182 msec/pass
- lxe: xpath_class_repeat (--TC T3 ) 0.0465 msec/pass
- lxe: xpath_class_repeat (--TC T4 ) 0.3877 msec/pass
+ lxe: xpath_class_repeat (--TC T1 ) 0.2813 msec/pass
+ lxe: xpath_class_repeat (--TC T2 ) 5.4042 msec/pass
+ lxe: xpath_class_repeat (--TC T3 ) 0.0339 msec/pass
+ lxe: xpath_class_repeat (--TC T4 ) 0.2706 msec/pass
Note that tree iteration can be substantially faster than XPath if
your code short-circuits after the first couple of elements were
@@ -589,25 +581,25 @@ regardless of how much of it will actually be used.
Here is an example where only the first matching element is being
searched, a case for which XPath has syntax support as well::
- lxe: find_single (--TR T2) 0.0184 msec/pass
- cET: find_single (--TR T2) 0.0052 msec/pass
+ lxe: find_single (--TR T2) 0.0031 msec/pass
+ cET: find_single (--TR T2) 0.0026 msec/pass
- lxe: iter_single (--TR T2) 0.0024 msec/pass
- cET: iter_single (--TR T2) 0.0007 msec/pass
+ lxe: iter_single (--TR T2) 0.0019 msec/pass
+ cET: iter_single (--TR T2) 0.0002 msec/pass
- lxe: xpath_single (--TR T2) 0.0033 msec/pass
+ lxe: xpath_single (--TR T2) 0.0861 msec/pass
When looking for the first two elements out of many, the numbers
explode for XPath, as restricting the result subset requires a
more complex expression::
- lxe: iterfind_two (--TR T2) 0.0184 msec/pass
- cET: iterfind_two (--TR T2) 0.0062 msec/pass
+ lxe: iterfind_two (--TR T2) 0.0050 msec/pass
+ cET: iterfind_two (--TR T2) 0.0036 msec/pass
- lxe: iter_two (--TR T2) 0.0029 msec/pass
- cET: iter_two (--TR T2) 0.0017 msec/pass
+ lxe: iter_two (--TR T2) 0.0021 msec/pass
+ cET: iter_two (--TR T2) 0.0014 msec/pass
- lxe: xpath_two (--TR T2) 0.2768 msec/pass
+ lxe: xpath_two (--TR T2) 0.0916 msec/pass
A longer example
@@ -774,21 +766,21 @@ ObjectPath can be used to speed up the access to elements that are deep in the
tree. It avoids step-by-step Python element instantiations along the path,
which can substantially improve the access time::
- lxe: attribute (--TR T1) 4.1828 msec/pass
- lxe: attribute (--TR T2) 17.3802 msec/pass
- lxe: attribute (--TR T4) 3.8657 msec/pass
+ lxe: attribute (--TR T1) 2.4018 msec/pass
+ lxe: attribute (--TR T2) 16.3755 msec/pass
+ lxe: attribute (--TR T4) 2.3725 msec/pass
- lxe: objectpath (--TR T1) 0.9289 msec/pass
- lxe: objectpath (--TR T2) 13.3109 msec/pass
- lxe: objectpath (--TR T4) 0.9289 msec/pass
+ lxe: objectpath (--TR T1) 1.1816 msec/pass
+ lxe: objectpath (--TR T2) 14.4675 msec/pass
+ lxe: objectpath (--TR T4) 1.2276 msec/pass
- lxe: attributes_deep (--TR T1) 6.2900 msec/pass
- lxe: attributes_deep (--TR T2) 20.4713 msec/pass
- lxe: attributes_deep (--TR T4) 6.1679 msec/pass
+ lxe: attributes_deep (--TR T1) 3.7086 msec/pass
+ lxe: attributes_deep (--TR T2) 17.5436 msec/pass
+ lxe: attributes_deep (--TR T4) 3.8407 msec/pass
- lxe: objectpath_deep (--TR T1) 1.3049 msec/pass
- lxe: objectpath_deep (--TR T2) 14.0815 msec/pass
- lxe: objectpath_deep (--TR T4) 1.3051 msec/pass
+ lxe: objectpath_deep (--TR T1) 1.4980 msec/pass
+ lxe: objectpath_deep (--TR T2) 14.7266 msec/pass
+ lxe: objectpath_deep (--TR T4) 1.4834 msec/pass
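A small sketch of the ObjectPath usage measured above, with a made-up document and plain attribute access shown for comparison::

    from lxml import objectify

    root = objectify.fromstring(
        '<root><child><grandchild>42</grandchild></child></root>')

    # step-by-step attribute access instantiates a Python object per path step
    print(root.child.grandchild)         # 42

    # a pre-parsed ObjectPath avoids the intermediate instantiations
    path = objectify.ObjectPath('root.child.grandchild')
    print(path.find(root))               # 42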
Note, however, that parsing ObjectPath expressions is not for free either, so
this is most effective for frequently accessing the same element.
@@ -818,17 +810,17 @@ expressions to be more selective. By choosing the right trees (or even
subtrees and elements) to cache, you can trade memory usage against access
speed::
- lxe: attribute_cached (--TR T1) 3.1357 msec/pass
- lxe: attribute_cached (--TR T2) 15.8911 msec/pass
- lxe: attribute_cached (--TR T4) 2.9194 msec/pass
+ lxe: attribute_cached (--TR T1) 1.9207 msec/pass
+ lxe: attribute_cached (--TR T2) 15.6903 msec/pass
+ lxe: attribute_cached (--TR T4) 1.8718 msec/pass
- lxe: attributes_deep_cached (--TR T1) 3.8984 msec/pass
- lxe: attributes_deep_cached (--TR T2) 16.8300 msec/pass
- lxe: attributes_deep_cached (--TR T4) 3.6936 msec/pass
+ lxe: attributes_deep_cached (--TR T1) 2.6512 msec/pass
+ lxe: attributes_deep_cached (--TR T2) 16.7937 msec/pass
+ lxe: attributes_deep_cached (--TR T4) 2.5539 msec/pass
- lxe: objectpath_deep_cached (--TR T1) 0.7496 msec/pass
- lxe: objectpath_deep_cached (--TR T2) 12.3763 msec/pass
- lxe: objectpath_deep_cached (--TR T4) 0.7427 msec/pass
+ lxe: objectpath_deep_cached (--TR T1) 0.8519 msec/pass
+ lxe: objectpath_deep_cached (--TR T2) 13.9337 msec/pass
+ lxe: objectpath_deep_cached (--TR T4) 0.8645 msec/pass
Things to note: you cannot currently use ``weakref.WeakKeyDictionary`` objects
for this as lxml's element objects do not support weak references (which are
diff --git a/doc/update_performance_results.py b/doc/update_performance_results.py
new file mode 100644
index 000000000..cf0f45bbc
--- /dev/null
+++ b/doc/update_performance_results.py
@@ -0,0 +1,58 @@
+import operator
+import re
+
+_parse_result_line = re.compile(
+ "\s*(?P\w+):\s*(?P\w+)\s+\((?P[-\w]+\s[\w,]+)\s*\)\s+(?P