diff --git a/.github/workflows/cve_scan_runner.yml b/.github/workflows/cve_scan_runner.yml new file mode 100644 index 00000000000..09c779d9c1a --- /dev/null +++ b/.github/workflows/cve_scan_runner.yml @@ -0,0 +1,36 @@ +name: cve-scan-runner + +on: + workflow_dispatch: + + schedule: + - cron: "17 23 * * *" + +env: + # To see the script output in real time + PYTHONUNBUFFERED: 1 + +jobs: + scan-and-open-issues: + runs-on: ubuntu-20.04 + + steps: + - name: Clone the osquery repository + uses: actions/checkout@v3 + + - name: Install python pre-requisites + run: | + pip3 install -r ./tools/ci/scripts/cve/requirements.txt + + - name: Scan CVEs and open issues + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NIST_API_KEY: ${{ secrets.NVD_API_KEY }} + + run: | + ./tools/ci/scripts/cve/validate_manifest_libraries_versions.py --manifest libraries/third_party_libraries_manifest.json \ + --repository . + + ./tools/ci/scripts/cve/third_party_libraries_cves_scanner.py --manifest libraries/third_party_libraries_manifest.json \ + --create_issues \ + --debug diff --git a/.github/workflows/hosted_runners.yml b/.github/workflows/hosted_runners.yml index 18deb8f2ed1..b6839f63e18 100644 --- a/.github/workflows/hosted_runners.yml +++ b/.github/workflows/hosted_runners.yml @@ -75,8 +75,8 @@ jobs: mkdir -p "${rel_build_path}" ln -sf "$(pwd)" "${rel_source_path}" - echo ::set-output name=SOURCE::$(realpath ${rel_source_path}) - echo ::set-output name=BINARY::$(realpath ${rel_build_path}) + echo "SOURCE=$(realpath ${rel_source_path})" >> $GITHUB_OUTPUT + echo "BINARY=$(realpath ${rel_build_path})" >> $GITHUB_OUTPUT - name: Configure the project working-directory: ${{ steps.build_paths.outputs.BINARY }} @@ -96,10 +96,30 @@ jobs: + # This job checks that the third party libraries manifest has the correct format + # and that it is up to date compared to the current state of the repository + check_libraries_manifest: + runs-on: ubuntu-20.04 + + steps: + - name: Clone the osquery repository + uses: actions/checkout@v3 + + - name: Install python pre-requisites + run: | + pip3 install -r ./tools/ci/scripts/cve/requirements.txt + + - name: Verify the third party libraries manifest + run: | + ./tools/ci/scripts/cve/validate_manifest_libraries_versions.py --manifest libraries/third_party_libraries_manifest.json \ + --repository .
+ + + # This job runs source code analysis tools (currently, just cppcheck) check_source_code: - needs: check_code_style + needs: [check_code_style, check_libraries_manifest] runs-on: ${{ matrix.os }} @@ -130,9 +150,9 @@ jobs: mv .git "${rel_source_path}" ( cd "${rel_source_path}" && git reset --hard ) - echo ::set-output name=SOURCE::$(realpath ${rel_source_path}) - echo ::set-output name=BINARY::$(realpath ${rel_build_path}) - echo ::set-output name=REL_BINARY::${rel_build_path} + echo "SOURCE=$(realpath ${rel_source_path})" >> $GITHUB_OUTPUT + echo "BINARY=$(realpath ${rel_build_path})" >> $GITHUB_OUTPUT + echo "REL_BINARY=${rel_build_path}" >> $GITHUB_OUTPUT - name: Update the cache (git submodules) uses: actions/cache@v2 @@ -235,16 +255,16 @@ jobs: shell: bash id: build_job_count run: | - echo ::set-output name=VALUE::$(($(nproc) + 1)) + echo "VALUE=$(($(nproc) + 1))" >> $GITHUB_OUTPUT - name: Select the build options for the tests shell: bash id: tests_build_settings run: | if [[ "${{ matrix.build_type }}" == "RelWithDebInfo" ]] ; then - echo ::set-output name=VALUE::OFF + echo "VALUE=OFF" >> $GITHUB_OUTPUT else - echo ::set-output name=VALUE::ON + echo "VALUE=ON" >> $GITHUB_OUTPUT fi # We don't have enough space on the worker to actually generate all @@ -255,9 +275,9 @@ jobs: id: debug_symbols_settings run: | if [[ "${{ matrix.build_type }}" == "Debug" ]] ; then - echo ::set-output name=VALUE::ON + echo "VALUE=ON" >> $GITHUB_OUTPUT else - echo ::set-output name=VALUE::OFF + echo "VALUE=OFF" >> $GITHUB_OUTPUT fi # When we spawn in the container, we are root; create an unprivileged @@ -267,7 +287,7 @@ jobs: id: unprivileged_user run: | useradd -m -s /bin/bash unprivileged_user - echo ::set-output name=NAME::unprivileged_user + echo "NAME=unprivileged_user" >> $GITHUB_OUTPUT # Due to how the RPM packaging tools work, we have to adhere to some # character count requirements in the build path vs source path.
@@ -295,13 +315,13 @@ jobs: mv .git "${rel_src_path}" ( cd "${rel_src_path}" && git reset --hard ) - echo ::set-output name=SOURCE::$(realpath ${rel_src_path}) - echo ::set-output name=BINARY::$(realpath ${rel_build_path}) - echo ::set-output name=CCACHE::$(realpath ${rel_ccache_path}) - echo ::set-output name=PACKAGING::$(realpath ${rel_packaging_path}) - echo ::set-output name=PACKAGE_DATA::$(realpath ${rel_package_data_path}) - echo ::set-output name=REL_PACKAGE_BUILD::${rel_package_build_path} - echo ::set-output name=PACKAGE_BUILD::$(realpath ${rel_package_build_path}) + echo "SOURCE=$(realpath ${rel_src_path})" >> $GITHUB_OUTPUT + echo "BINARY=$(realpath ${rel_build_path})" >> $GITHUB_OUTPUT + echo "CCACHE=$(realpath ${rel_ccache_path})" >> $GITHUB_OUTPUT + echo "PACKAGING=$(realpath ${rel_packaging_path})" >> $GITHUB_OUTPUT + echo "PACKAGE_DATA=$(realpath ${rel_package_data_path})" >> $GITHUB_OUTPUT + echo "REL_PACKAGE_BUILD=${rel_package_build_path}" >> $GITHUB_OUTPUT + echo "PACKAGE_BUILD=$(realpath ${rel_package_build_path})" >> $GITHUB_OUTPUT - name: Clone the osquery-packaging repository run: | @@ -435,12 +455,12 @@ jobs: id: packages shell: bash run: | - echo ::set-output name=REL_UNSIGNED_RELEASE_PACKAGE_DATA_PATH::${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/package_data.tar.gz - echo ::set-output name=REL_UNSIGNED_RELEASE_DEB_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*.deb) - echo ::set-output name=REL_UNSIGNED_DEBUG_DEB_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*.ddeb) - echo ::set-output name=REL_UNSIGNED_RELEASE_RPM_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/osquery-?.*.rpm) - echo ::set-output name=REL_UNSIGNED_DEBUG_RPM_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/osquery-debuginfo-*.rpm) - echo ::set-output name=REL_UNSIGNED_RELEASE_TGZ_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*linux_x86_64.tar.gz) + echo "REL_UNSIGNED_RELEASE_PACKAGE_DATA_PATH=${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/package_data.tar.gz" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_RELEASE_DEB_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*.deb)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_DEBUG_DEB_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*.ddeb)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_RELEASE_RPM_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/osquery-?.*.rpm)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_DEBUG_RPM_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/osquery-debuginfo-*.rpm)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_RELEASE_TGZ_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*linux_x86_64.tar.gz)" >> $GITHUB_OUTPUT - name: Store the unsigned release package data artifact if: matrix.build_type == 'RelWithDebInfo' @@ -515,7 +535,7 @@ jobs: shell: bash id: build_job_count run: | - echo ::set-output name=VALUE::$(($(sysctl -n hw.logicalcpu) + 1)) + echo "VALUE=$(($(sysctl -n hw.logicalcpu) + 1))" >> $GITHUB_OUTPUT - name: Setup the build paths shell: bash @@ -537,16 +557,16 @@ jobs: ${rel_package_data_path} \ ${rel_package_build_path} - echo ::set-output name=SOURCE::$(pwd)/${rel_src_path} - echo ::set-output name=REL_SOURCE::${rel_src_path} - echo ::set-output name=BINARY::$(pwd)/${rel_build_path} - echo ::set-output name=CCACHE::$(pwd)/${rel_ccache_path} - echo ::set-output name=DOWNLOADS::$(pwd)/${rel_downloads_path} - echo ::set-output name=INSTALL::$(pwd)/${rel_install_path} - echo ::set-output 
name=PACKAGING::$(pwd)/${rel_packaging_path} - echo ::set-output name=PACKAGE_DATA::$(pwd)/${rel_package_data_path} - echo ::set-output name=REL_PACKAGE_BUILD::${rel_package_build_path} - echo ::set-output name=PACKAGE_BUILD::$(pwd)/${rel_package_build_path} + echo "SOURCE=$(pwd)/${rel_src_path}" >> $GITHUB_OUTPUT + echo "REL_SOURCE=${rel_src_path}" >> $GITHUB_OUTPUT + echo "BINARY=$(pwd)/${rel_build_path}" >> $GITHUB_OUTPUT + echo "CCACHE=$(pwd)/${rel_ccache_path}" >> $GITHUB_OUTPUT + echo "DOWNLOADS=$(pwd)/${rel_downloads_path}" >> $GITHUB_OUTPUT + echo "INSTALL=$(pwd)/${rel_install_path}" >> $GITHUB_OUTPUT + echo "PACKAGING=$(pwd)/${rel_packaging_path}" >> $GITHUB_OUTPUT + echo "PACKAGE_DATA=$(pwd)/${rel_package_data_path}" >> $GITHUB_OUTPUT + echo "REL_PACKAGE_BUILD=${rel_package_build_path}" >> $GITHUB_OUTPUT + echo "PACKAGE_BUILD=$(pwd)/${rel_package_build_path}" >> $GITHUB_OUTPUT - name: Clone the osquery repository uses: actions/checkout@v2 @@ -602,8 +622,11 @@ jobs: gnu-sed - name: Install tests dependencies + id: install_test_deps run: | - pip3 install setuptools \ + python_root="/usr/local/Frameworks/Python.framework/Versions/Current" + + ${python_root}/bin/pip3 install setuptools \ pexpect==3.3 \ psutil \ timeout_decorator \ @@ -611,6 +634,8 @@ jobs: thrift==0.11.0 \ osquery + echo "PYTHON_ROOT=${python_root}" >> $GITHUB_OUTPUT + - name: Install CMake shell: bash run: | @@ -624,14 +649,14 @@ jobs: id: xcode_selector run: | xcode_path="/Applications/Xcode_13.0.app/Contents/Developer" - echo ::set-output name=PATH::${path} + echo "PATH=${xcode_path}" >> $GITHUB_OUTPUT sudo xcode-select -s "${xcode_path}" if [[ "${{ matrix.architecture }}" == "x86_64" ]] ; then - echo ::set-output name=DEPLOYMENT_TARGET::10.14 + echo "DEPLOYMENT_TARGET=10.14" >> $GITHUB_OUTPUT else - echo ::set-output name=DEPLOYMENT_TARGET::10.15 + echo "DEPLOYMENT_TARGET=10.15" >> $GITHUB_OUTPUT fi # We don't have enough space on the worker to actually generate all @@ -642,9 +667,9 @@ jobs: id: debug_symbols_settings run: | if [[ "${{ matrix.build_type }}" == "Debug" ]] ; then - echo ::set-output name=VALUE::ON + echo "VALUE=ON" >> $GITHUB_OUTPUT else - echo ::set-output name=VALUE::OFF + echo "VALUE=OFF" >> $GITHUB_OUTPUT fi - name: Configure the project @@ -663,6 +688,7 @@ jobs: -DCMAKE_BUILD_TYPE:STRING="${{ matrix.build_type }}" \ -DOSQUERY_BUILD_TESTS=ON \ -DOSQUERY_NO_DEBUG_SYMBOLS=${{ steps.debug_symbols_settings.outputs.VALUE }} \ + -DPython3_ROOT_DIR=${{ steps.install_test_deps.outputs.PYTHON_ROOT }} \ ${{ steps.build_paths.outputs.SOURCE }} - name: Build the project @@ -705,7 +731,7 @@ jobs: id: packages shell: bash run: | - echo ::set-output name=REL_UNSIGNED_RELEASE_PACKAGE_DATA_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/package_data.tar.gz) + echo "REL_UNSIGNED_RELEASE_PACKAGE_DATA_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/package_data.tar.gz)" >> $GITHUB_OUTPUT - name: Store the ${{ matrix.architecture }} unsigned release package data artifact if: matrix.build_type == 'Release' @@ -761,7 +787,8 @@ jobs: - name: Install tests dependencies run: | - pip3 install setuptools \ + python_root="/usr/local/Frameworks/Python.framework/Versions/Current" + ${python_root}/bin/pip3 install setuptools \ pexpect==3.3 \ psutil \ timeout_decorator \ @@ -870,8 +897,8 @@ jobs: id: packages shell: bash run: | - echo ::set-output name=REL_UNSIGNED_RELEASE_PKG_PATH::$(ls package_build/*.pkg) - echo ::set-output name=REL_UNSIGNED_RELEASE_TGZ_PATH::$(ls package_build/*.tar.gz) + echo
"REL_UNSIGNED_RELEASE_PKG_PATH=$(ls package_build/*.pkg)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_RELEASE_TGZ_PATH=$(ls package_build/*.tar.gz)" >> $GITHUB_OUTPUT - name: Store the PKG unsigned release packages uses: actions/upload-artifact@v1 @@ -911,8 +938,8 @@ jobs: $rel_sccache_path = "w\sccache" $rel_downloads_path = "w\downloads" $rel_install_path = "w\install" - $rel_package_data_path="w\package_data" - $rel_packaging_path="w\osquery-packaging" + $rel_package_data_path = "w\package_data" + $rel_packaging_path = "w\osquery-packaging" New-Item -ItemType Directory -Force -Path $rel_build_path New-Item -ItemType Directory -Force -Path $rel_sccache_path @@ -922,14 +949,14 @@ jobs: $base_dir = (Get-Item .).FullName - echo "::set-output name=SOURCE::$base_dir\$rel_src_path" - echo "::set-output name=REL_SOURCE::$rel_src_path" - echo "::set-output name=BINARY::$base_dir\$rel_build_path" - echo "::set-output name=SCCACHE::$base_dir\$rel_sccache_path" - echo "::set-output name=DOWNLOADS::$base_dir\$rel_downloads_path" - echo "::set-output name=INSTALL::$base_dir\$rel_install_path" - echo "::set-output name=PACKAGING::$base_dir\$rel_packaging_path" - echo "::set-output name=PACKAGE_DATA::$base_dir\$rel_package_data_path" + echo "SOURCE=$base_dir\$rel_src_path" >> $env:GITHUB_OUTPUT + echo "REL_SOURCE=$rel_src_path" >> $env:GITHUB_OUTPUT + echo "BINARY=$base_dir\$rel_build_path" >> $env:GITHUB_OUTPUT + echo "SCCACHE=$base_dir\$rel_sccache_path" >> $env:GITHUB_OUTPUT + echo "DOWNLOADS=$base_dir\$rel_downloads_path" >> $env:GITHUB_OUTPUT + echo "INSTALL=$base_dir\$rel_install_path" >> $env:GITHUB_OUTPUT + echo "PACKAGING=$base_dir\$rel_packaging_path" >> $env:GITHUB_OUTPUT + echo "PACKAGE_DATA=$base_dir\$rel_package_data_path" >> $env:GITHUB_OUTPUT # Symbolic links are supported by default on Linux and macOS. On # Windows, we have to enable them explicitly. 
They are used to @@ -953,7 +980,7 @@ jobs: cd ${{ steps.build_paths.outputs.SOURCE }} $osquery_version=$(git describe --tags --abbrev=0) - echo "::set-output name=VALUE::$osquery_version" + echo "VALUE=$osquery_version" >> $env:GITHUB_OUTPUT - name: Clone the osquery-packaging repository run: | @@ -1005,7 +1032,7 @@ jobs: $python_executable_path = $(Get-Command python.exe | Select-Object -ExpandProperty Definition) $python_root_directory = (Get-Item $python_executable_path).Directory.FullName - echo "::set-output name=VALUE::$python_root_directory" + echo "VALUE=$python_root_directory" >> $env:GITHUB_OUTPUT # Install the Python dependencies needed for our testing framework - name: Install tests prerequisites @@ -1130,7 +1157,7 @@ jobs: echo "Found compiler version $version" - echo "::set-output name=COMPILER_VERSION::$version" + echo "COMPILER_VERSION=$version" >> $env:GITHUB_OUTPUT - name: Update the cache (sccache) uses: actions/cache@v2 @@ -1225,9 +1252,9 @@ jobs: id: packages shell: bash run: | - echo ::set-output name=REL_UNSIGNED_RELEASE_PACKAGE_DATA_PATH::$(ls *.zip) - echo ::set-output name=REL_UNSIGNED_RELEASE_MSI_PATH::$(ls *.msi) - echo ::set-output name=REL_UNSIGNED_RELEASE_NUPKG_PATH::$(ls *.nupkg) + echo "REL_UNSIGNED_RELEASE_PACKAGE_DATA_PATH=$(ls *.zip)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_RELEASE_MSI_PATH=$(ls *.msi)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_RELEASE_NUPKG_PATH=$(ls *.nupkg)" >> $GITHUB_OUTPUT - name: Store the unsigned release package data artifact uses: actions/upload-artifact@v1 diff --git a/.github/workflows/self_hosted_runners.yml b/.github/workflows/self_hosted_runners.yml index beaf29147d1..4ce11997af7 100644 --- a/.github/workflows/self_hosted_runners.yml +++ b/.github/workflows/self_hosted_runners.yml @@ -71,8 +71,8 @@ jobs: mkdir -p "${rel_build_path}" ln -sf "$(pwd)" "${rel_source_path}" - echo ::set-output name=SOURCE::$(realpath ${rel_source_path}) - echo ::set-output name=BINARY::$(realpath ${rel_build_path}) + echo "SOURCE=$(realpath ${rel_source_path})" >> $GITHUB_OUTPUT + echo "BINARY=$(realpath ${rel_build_path})" >> $GITHUB_OUTPUT - name: Configure the project working-directory: ${{ steps.build_paths.outputs.BINARY }} @@ -179,7 +179,7 @@ jobs: shell: bash id: build_job_count run: | - echo ::set-output name=VALUE::$(($(nproc) + 1)) + echo "VALUE=$(($(nproc) + 1))" >> $GITHUB_OUTPUT # We don't have enough space on the worker to actually generate all # the debug symbols (osquery + dependencies), so we have a flag to @@ -189,9 +189,9 @@ jobs: id: debug_symbols_settings run: | if [[ "${{ matrix.build_type }}" == "Debug" ]] ; then - echo ::set-output name=VALUE::ON + echo "VALUE=ON" >> $GITHUB_OUTPUT else - echo ::set-output name=VALUE::OFF + echo "VALUE=OFF" >> $GITHUB_OUTPUT fi # When we spawn in the container, we are root; create an unprivileged @@ -200,7 +200,7 @@ jobs: id: unprivileged_user run: | useradd -m -s /bin/bash unprivileged_user - echo ::set-output name=NAME::unprivileged_user + echo "NAME=unprivileged_user" >> $GITHUB_OUTPUT # Due to how the RPM packaging tools work, we have to adhere to some # character count requirements in the build path vs source path. 
@@ -228,13 +228,13 @@ jobs: mv .git "${rel_src_path}" ( cd "${rel_src_path}" && git reset --hard ) - echo ::set-output name=SOURCE::$(realpath ${rel_src_path}) - echo ::set-output name=BINARY::$(realpath ${rel_build_path}) - echo ::set-output name=CCACHE::$(realpath ${rel_ccache_path}) - echo ::set-output name=PACKAGING::$(realpath ${rel_packaging_path}) - echo ::set-output name=PACKAGE_DATA::$(realpath ${rel_package_data_path}) - echo ::set-output name=REL_PACKAGE_BUILD::${rel_package_build_path} - echo ::set-output name=PACKAGE_BUILD::$(realpath ${rel_package_build_path}) + echo "SOURCE=$(realpath ${rel_src_path})" >> $GITHUB_OUTPUT + echo "BINARY=$(realpath ${rel_build_path})" >> $GITHUB_OUTPUT + echo "CCACHE=$(realpath ${rel_ccache_path})" >> $GITHUB_OUTPUT + echo "PACKAGING=$(realpath ${rel_packaging_path})" >> $GITHUB_OUTPUT + echo "PACKAGE_DATA=$(realpath ${rel_package_data_path})" >> $GITHUB_OUTPUT + echo "REL_PACKAGE_BUILD=${rel_package_build_path}" >> $GITHUB_OUTPUT + echo "PACKAGE_BUILD=$(realpath ${rel_package_build_path})" >> $GITHUB_OUTPUT - name: Clone the osquery-packaging repository run: | @@ -364,12 +364,12 @@ jobs: id: packages shell: bash run: | - echo ::set-output name=REL_UNSIGNED_RELEASE_PACKAGE_DATA_PATH::${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/package_data.tar.gz - echo ::set-output name=REL_UNSIGNED_RELEASE_DEB_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*.deb) - echo ::set-output name=REL_UNSIGNED_DEBUG_DEB_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*.ddeb) - echo ::set-output name=REL_UNSIGNED_RELEASE_RPM_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/osquery-?.*.rpm) - echo ::set-output name=REL_UNSIGNED_DEBUG_RPM_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/osquery-debuginfo-*.rpm) - echo ::set-output name=REL_UNSIGNED_RELEASE_TGZ_PATH::$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*linux_aarch64.tar.gz) + echo "REL_UNSIGNED_RELEASE_PACKAGE_DATA_PATH=${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/package_data.tar.gz" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_RELEASE_DEB_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*.deb)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_DEBUG_DEB_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*.ddeb)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_RELEASE_RPM_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/osquery-?.*.rpm)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_DEBUG_RPM_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/osquery-debuginfo-*.rpm)" >> $GITHUB_OUTPUT + echo "REL_UNSIGNED_RELEASE_TGZ_PATH=$(ls ${{ steps.build_paths.outputs.REL_PACKAGE_BUILD }}/*linux_aarch64.tar.gz)" >> $GITHUB_OUTPUT - name: Store the unsigned release package data artifact if: matrix.build_type == 'RelWithDebInfo' diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cc9e7cf6fa..558378b76ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,48 @@ # osquery Changelog + +## [5.6.0](https://github.com/osquery/osquery/releases/tag/5.6.0) + +[Git Commits](https://github.com/osquery/osquery/compare/5.5.1...5.6.0) + +Representing commits from 10 contributors! Thank you all. 
+ +### Table Changes + +- Add `firmware_type` column to `platform_info` on macOS ([#7727](https://github.com/osquery/osquery/pull/7727)) +- Add additional vendor support for the windows `wmi_bios_info` table ([#7631](https://github.com/osquery/osquery/pull/7631)) +- Fix `docker_container_processes` on macOS ([#7746](https://github.com/osquery/osquery/pull/7746)) +- Fix `process_file_events` subscriber being incorrectly initialized ([#7759](https://github.com/osquery/osquery/pull/7759)) +- Fix `secureboot` on windows by acquiring the necessary process privileges ([#7743](https://github.com/osquery/osquery/pull/7743)) +- Improve macOS `mdfind` -- Reduce table overhead and support interruption ([#7738](https://github.com/osquery/osquery/pull/7738)) +- Remove `binary` column from `firefox_addons` table ([#7735](https://github.com/osquery/osquery/pull/7735)) +- Remove `is_running` column from macOS `running_apps` table ([#7774](https://github.com/osquery/osquery/pull/7774)) + +### Under the Hood improvements + +- Add `notes` field to the schema and associated json ([#7747](https://github.com/osquery/osquery/pull/7747)) +- Add extended platforms to the schema and associated json ([#7760](https://github.com/osquery/osquery/pull/7760)) +- Fix a leak and improve users and groups APIs on Windows ([#7755](https://github.com/osquery/osquery/pull/7755)) +- Have `--tls_dump` output body to `stderr` ([#7715](https://github.com/osquery/osquery/pull/7715)) +- Improvements to osquery AWS logic ([#7714](https://github.com/osquery/osquery/pull/7714)) +- Remove leftover FreeBSD related code and documentation ([#7739](https://github.com/osquery/osquery/pull/7739)) + +### Documentation + +- CHANGELOG 5.5.1 ([#7737](https://github.com/osquery/osquery/pull/7737)) +- Correct the description on how to configure and use Yara signature urls ([#7769](https://github.com/osquery/osquery/pull/7769)) +- Document difference between `yara` and `yara_events` ([#7744](https://github.com/osquery/osquery/pull/7744)) +- Link to the slack archives ([#7786](https://github.com/osquery/osquery/pull/7786)) +- Update docs: `_changes` tables are not evented ([#7762](https://github.com/osquery/osquery/pull/7762)) + +### Build + +- Delete temporary CTest files ([#7782](https://github.com/osquery/osquery/pull/7782)) +- Fix table tests for macOS `running_apps` ([#7775](https://github.com/osquery/osquery/pull/7775)) +- Fix table tests for windows `platform_info` ([#7742](https://github.com/osquery/osquery/pull/7742)) +- Migrate jobs from ubuntu-18.04 to ubuntu-20.04 ([#7745](https://github.com/osquery/osquery/pull/7745)) +- Remove unused find_packages modules and submodule ([#7771](https://github.com/osquery/osquery/pull/7771)) ## [5.5.1](https://github.com/osquery/osquery/releases/tag/5.5.1) diff --git a/docs/wiki/deployment/yara.md b/docs/wiki/deployment/yara.md index 683917ae00a..c6e3f55b43a 100644 --- a/docs/wiki/deployment/yara.md +++ b/docs/wiki/deployment/yara.md @@ -1,5 +1,7 @@ # YARA-based scanning with osquery +YARA is a tool that allows you to find textual or binary patterns inside files. + There are two YARA-related tables in osquery, which serve very different purposes. The first table, called `yara_events`, uses osquery's [Events framework](../development/pubsub-framework.md) to monitor for filesystem changes and will execute YARA when a file change event fires. The second table, just called `yara`, is a table for performing an @@ -8,6 +10,8 @@ on-demand YARA scan.
In this document, "signature file" is intended to be synonymous with "YARA rule file" (plain-text files commonly distributed with a `.yar` or `.yara` filename extension, although any extension is allowed). +For more information about YARA, check out the [documentation](https://yara.readthedocs.io/en/stable/). + ## YARA Configuration The configuration for osquery is simple. Here is an example config, grouping some YARA rule files from the local diff --git a/docs/wiki/development/cve-scan.md b/docs/wiki/development/cve-scan.md new file mode 100644 index 00000000000..a5c2cf1af67 --- /dev/null +++ b/docs/wiki/development/cve-scan.md @@ -0,0 +1,97 @@ +# CVE Scan + +The osquery project has a CI job that, once a day, scans for CVEs that are present and not yet addressed in its third party libraries. +The scan is done by a python script at `tools/ci/scripts/cve/third_party_libraries_cves_scanner.py`, which queries the NIST database via the NVD APIs; a manifest file at `libraries/third_party_libraries_manifest.json` contains the list of third party libraries and the metadata necessary to correctly download their CVEs. + +The manifest file format is validated every time the CVE download happens, and on every PR, by the script `tools/ci/scripts/cve/validate_manifest_libraries_versions.py`; additionally the third party library versions in the manifest are verified, to ensure that they are up to date with their state in the repository. + +After downloading the list of CVEs, the script opens issues in the osquery repository for all the unresolved CVEs, checking against the already opened ones to prevent duplicates; it goes back in time up to 6 months. +The issues can be recognized because they will be opened by the `github-bot` author and will have the `security`, `libraries`, `cve` and `severity-` labels on them. + +NOTE: This product uses the NVD API but is not endorsed or certified by the NVD. + +# Updating a third party library to resolve a CVE + +The process of updating a third party library is the usual one, but in the PR updating the library the contributor MUST: + + 1. Link the CVE(s) issue(s) the PR is going to close, so that when it's merged, they are automatically closed. + 2. Update the manifest and specifically the `version` and `commit` fields with the information of the new library. + Remember that the `commit` has to be the one of the submodule in the osquery repository, which might not always match the commit of the library's original repository. + +Failing to do step 1. only leads to having to manually close those issues and link them back to the PR for tracking purposes. + +Failing to do step 2. will lead to the PR not being mergeable because the CI checks the `commit` field against the actual git submodule commit. +Note that if the `commit` is updated but not the `version`, this will not be detected by the CI and the periodic scan will use the incorrect +version to download CVEs, finding the fixed CVE again and reopening the issue. +If this happens, one just needs to open another PR that updates the `version` correctly. + +Any other situation where the `version` is incorrect (older than it previously was, or newer than what is actually in the repository) is likewise not detected, +and will either cause the script to open issues for already fixed CVEs or to miss CVEs, so it's very important that the PR review process +double checks the new `version`.
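+ +For illustration, here is a sketch of such a manifest update for `libdpkg`; the new `version` and the all-zero `commit` below are hypothetical placeholders, only meant to show which two fields change: + +```json +"libdpkg": { + "product": "dpkg", + "vendor": "debian", + "version": "1.21.8", + "commit": "0000000000000000000000000000000000000000", + "ignored-cves": [] +}, +```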
+ +Important: Do not merge this kind of PR if the CI CVE scan job is running (which happens only once a day between 23:00 and 00:00 UTC), otherwise the job could start with an old view of the repository and open new issues on already fixed CVEs. +If this happens, we just need to close or even delete those issues, but it's mostly to avoid additional work or confusion. + +# Ignoring a CVE not affecting osquery + +There are cases where the API returns CVEs that do not affect a third party library directly, but instead affect other software that uses the third party library. There might be something we can do in the future to resolve what seems to be a bug in the API, but for now in the manifest it's possible to list CVEs that should be ignored, so that issues for those are not opened again in the future. + +Additionally we often have the case where a CVE does not affect osquery due to how, or which parts of, the third party library are used, so having a way to ignore a CVE helps with that too. + +The process therefore is to: + + 1. Open a PR which updates the manifest and specifically updates the `ignored-cves` field of the library the CVE comes from. + 2. Describe in the CVE(s) issue(s) the reason why they are going to be closed. + 3. Link the above issues to the PR, so that they are closed when the PR gets merged. + +Important: As with updating a library, one has to ensure that the CI CVE scan job is not running. + +# Adding a new library + +When a new library gets added, the manifest needs to be updated too, otherwise the CI check that verifies the manifest in the PR will fail. + +Currently the JSON format for a third party library as a submodule (taking as an example `libdpkg`) is: + +```json +"libdpkg": { + "product": "dpkg", + "vendor": "debian", + "version": "1.21.7", + "commit": "e61f582015a9c67bbb3791cb93a864cfeb9c7151", + "ignored-cves": [] +}, +``` + +The name of the library, `libdpkg`, and the `commit` field must match the name of the folder containing the submodule source code, and the commit at which the submodule currently is, respectively. + +The `product`, `vendor` and `version` fields are used in the NVD APIs instead, and they must match what the NIST database uses. +This is a matter of using the CPE search at https://nvd.nist.gov/products/cpe/search and trying to find the correct `vendor` and `product`, +using a CPE like `cpe:2.3:a:<vendor>:*:*:*:*:*:*:*:*:*` for the vendor and `cpe:2.3:a:<vendor>:<product>:*:*:*:*:*:*:*:*` for the product. + +Another way could be to download the full dictionary from https://nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml.gz, then play with grep and awk. +For instance, if we want to see all the unique products that the `amazon` vendor has: +```sh +cat official-cpe-dictionary_v2.3.xml | grep -Eo "cpe:2.3:a:amazon:[^\"]*" | awk -F ":" '{ print $5 }' | sort | uniq | less +``` + +For the `ignored-cves` field, refer to [Ignoring a CVE not affecting osquery](#ignoring-a-cve-not-affecting-osquery); the field will likely be empty at the beginning. + +## Special cases + +### Libraries without a CPE + +Some libraries do not have a CPE assigned, so no CVEs will be found in the NIST database. We still track these libraries in the manifest, because the validation script checks for them, but one can provide fewer fields; only `vendor` and `commit` are required.
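+ +As an example, here is a minimal entry of this kind; this one is taken verbatim from the manifest added in this PR: + +```json +"aws-c-auth": { + "vendor": "aws", + "commit": "ace1311f8ef6ea890b26dd376031bed2721648eb" +}, +```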
+ +The script, though, also needs to know that this is a library of that kind, so the contributor has to update the list of libraries that do not have a CPE in `tools/ci/scripts/cve/osquery/manifest_api.py`. + +### Libraries not imported as a submodule + +Right now there's only one case (and ideally it will remain the only one): `openssl` is not a submodule, so there's no `commit` to use to check whether the manifest is up to date. `version` is used instead, and it's parsed from the CMake file at `libraries/cmake/formula/openssl/CMakeLists.txt`. +If osquery ever needs to add another library of this kind, the logic to get its version should be written in the `tools/ci/scripts/cve/validate_manifest_libraries_versions.py` script. + +Additionally the name of the library should be added in `tools/ci/scripts/cve/osquery/manifest_api.py`; the manifest field requirements are then the same as for a normal library, just without the `commit` field. + +### Libraries not used in the release build + +Another case is when the library is only used for testing purposes; this doesn't need to be tracked in the manifest, but still needs to be ignored by the script that checks that all the necessary libraries are present and up to date in the manifest. +The script to update is `tools/ci/scripts/cve/validate_manifest_libraries_versions.py` (the current example is `googletest`). diff --git a/libraries/third_party_libraries_manifest.json b/libraries/third_party_libraries_manifest.json new file mode 100644 index 00000000000..981abcef6a6 --- /dev/null +++ b/libraries/third_party_libraries_manifest.json @@ -0,0 +1,300 @@ +{ + "openssl": { + "product": "openssl", + "vendor": "openssl", + "version": "1.1.1q", + "ignored-cves": [ + "CVE-2007-5536", + "CVE-2019-0190" + ] + }, + "augeas": { + "product": "augeas", + "vendor": "augeas", + "version": "1.12.0", + "commit": "d133d9786350f1560c2f36d49da07d477c52aa76", + "ignored-cves": [] + }, + "aws-c-auth": { + "vendor": "aws", + "commit": "ace1311f8ef6ea890b26dd376031bed2721648eb" + }, + "aws-c-cal": { + "vendor": "aws", + "commit": "aa89aa4950074babe84762413f39bd364ecaf944" + }, + "aws-c-common": { + "vendor": "aws", + "commit": "c258a154bb89db73eff60a467a0750ee5435ebc6" + }, + "aws-c-compression": { + "vendor": "aws", + "commit": "fc1631ea1ce563b0a453cb7a7233fca949e36970" + }, + "aws-c-event-stream": { + "vendor": "aws", + "commit": "e87537be561d753ec82e783bc0929b1979c585f8" + }, + "aws-c-http": { + "vendor": "aws", + "commit": "fa1692ae103dcc40e3d0a9db1b29acfc204a294e" + }, + "aws-c-io": { + "vendor": "aws", + "commit": "14b5e6d73b41eeabf04fc6228276eb1eb59bd99c" + }, + "aws-c-mqtt": { + "vendor": "aws", + "commit": "0a70bf814845e487b7e4862af7ad9e4a1199b5f4" + }, + "aws-c-s3": { + "vendor": "aws", + "commit": "bd1f6910503165963506f9f168c87416996197b6" + }, + "aws-checksums": { + "vendor": "aws", + "commit": "99bb0ad4b89d335d638536694352c45e0d2188f5" + }, + "aws-crt-cpp": { + "vendor": "aws", + "commit": "c2d6ffa5597825111cc76ad71ffc6aef664d0f25" + }, + "aws-sdk-cpp": { + "vendor": "aws", + "commit": "1135c3ac31df6ab68d3bf58fc6679368da1f09e0" + }, + "s2n": { + "vendor": "aws", + "commit": "7f43b102def1d52422f6c3e48d5cb3e6dd26c646" + }, + "gnulib": { + "product": "gnulib", + "vendor": "gnulib", + "date": "2019-04-07", + "commit": "91584ed612fa26a505c8fc4c7f6fb19f7413795d", + "ignored-cves": [] + }, + "boost": { + "product": "boost", + "vendor": "boost", + "version": "1.77.0", + "commit": "9d3f9bcd7d416880d4631d7d39cceeb4e8f25da0", + "ignored-cves": [] + }, +
"bzip2": { + "product": "bzip2", + "vendor": "bzip", + "version": "1.0.8", + "commit": "6a8690fc8d26c815e798c588f796eabe9d684cf0", + "ignored-cves": [] + }, + "dbus": { + "product": "d-bus", + "vendor": "d-bus_project", + "version": "1.12.20", + "commit": "ab88811768f750777d1a8b9d9ab12f13390bfd3a", + "ignored-cves": [] + }, + "ebpfpub": { + "vendor": "trailofbits", + "commit": "71c8554bbbf3f78e9c9ea39fd9f349fccf997dce" + }, + "expat": { + "product": "expat", + "vendor": "libexpat_project", + "version": "2.4.7", + "commit": "27d5b8ba1771f916d9cfea2aac6bdac72071dc66", + "ignored-cves": [] + }, + "gflags": { + "vendor": "google", + "version": "2.2.2", + "commit": "e171aa2d15ed9eb17054558e0b3a6a413bb01067" + }, + "glog": { + "vendor": "google", + "version": "0.5.0", + "commit": "8f9ccfe770add9e4c64e9b25c102658e3c763b73" + }, + "libarchive": { + "product": "libarchive", + "vendor": "libarchive", + "version": "3.6.1", + "commit": "6c3301111caa75c76e1b2acb1afb2d71341932ef", + "ignored-cves": [] + }, + "libaudit": { + "product": "audit-userspace", + "vendor": "audit-userspace_project", + "version": "2.4.3", + "commit": "20204ccf43e856818d7ef695242192b3f5963aae", + "ignored-cves": [] + }, + "libcap": { + "product": "libcap", + "vendor": "libcap_project", + "version": "2.59", + "commit": "9eb56596eef5e55a596aa97ecaf8466ea559d05c", + "ignored-cves": [] + }, + "libcryptsetup": { + "product": "cryptsetup", + "vendor": "cryptsetup_project", + "version": "1.7.5", + "commit": "0ba577666c62bb3c82e90f3c8dd01f3f81a26cf4", + "ignored-cves": [] + }, + "libdevmapper": { + "product": "lvm2", + "vendor": "redhat", + "version": "2.02.173", + "commit": "88f15659374042f7657d73393f73e267d752b4e1", + "ignored-cves": [] + }, + "libdpkg": { + "product": "dpkg", + "vendor": "debian", + "version": "1.21.7", + "commit": "e61f582015a9c67bbb3791cb93a864cfeb9c7151", + "ignored-cves": [] + }, + "libgcrypt": { + "product": "libgcrypt", + "vendor": "gnupg", + "version": "1.8.1", + "commit": "80fd8615048c3897b91a315cca22ab139b056ccd", + "ignored-cves": [] + }, + "libgpg-error": { + "product": "libgpg-error", + "vendor": "gnupg", + "version": "1.27", + "commit": "c1668f61c58ea6f0439e5193d83b4e2ac622b286", + "ignored-cves": [] + }, + "libiptables": { + "product": "iptables", + "vendor": "netfilter", + "version": "1.8.3", + "commit": "1447b15100fe73810237809c1d4ade3c861b6d96", + "ignored-cves": [] + }, + "libmagic": { + "product": "file", + "vendor": "file_project", + "version": "5.40", + "commit": "f49fda6f52a9477d817dbd9c06afab02daf025f8", + "ignored-cves": [] + }, + "librdkafka": { + "product": "librdkafka", + "vendor": "edenhill", + "version": "1.8.0", + "commit": "9ded5eefaf3ba3b65ebc95b0dff7a6d5faaaa38d" + }, + "librpm": { + "product": "rpm", + "vendor": "rpm", + "version": "4.17.0", + "commit": "3e74e8ba2dd5e76a5353d238dc7fc38651ce27b3", + "ignored-cves": [] + }, + "libudev": { + "product": "udev", + "vendor": "udev_project", + "version": "174", + "commit": "b3eccdce81d18ec36c6ea95ac161002fc33d1810", + "ignored-cves": [] + }, + "libxml2": { + "product": "libxml2", + "vendor": "xmlsoft", + "version": "2.9.14", + "commit": "7846b0a677f8d3ce72486125fa281e92ac9970e8", + "ignored-cves": [] + }, + "linenoise-ng": { + "product": "linenoise-ng", + "vendor": "arangodb", + "version": "master", + "commit": "4754bee2d8eb3c4511e6ac87cac62255b2011e2f", + "ignored-cves": [] + }, + "lzma": { + "product": "xz", + "vendor": "tukaani", + "version": "5.2.5", + "commit": "2327a461e1afce862c22269b80d3517801103c1b", + "ignored-cves": [] + }, + 
"popt": { + "product": "popt", + "vendor": "popt_project", + "version": "1.16", + "commit": "abe4af616ffc0e22e54d691e73a67fabc267cc26", + "ignored-cves": [] + }, + "rapidjson": { + "product": "rapidjson", + "vendor": "tencent", + "version": "1.1.0", + "commit": "1a825d24fa322a5fe721624b2ed7a18b6de9b48a", + "ignored-cves": [] + }, + "rocksdb": { + "product": "rocksdb", + "vendor": "facebook", + "version": "6.22.1", + "commit": "51b540921dd7495c9cf2265eb58942dad1f2ef72", + "ignored-cves": [] + }, + "sleuthkit": { + "product": "the_sleuth_kit", + "vendor": "sleuthkit", + "version": "4.11.0", + "commit": "a397493d8fd5198b40d6c0ce1e4135c1f86d9ea9", + "ignored-cves": [] + }, + "sqlite": { + "product": "sqlite", + "vendor": "sqlite", + "version": "3.39.2", + "commit": "cea3fbb89fb5dbf9a613964a3786867df17a0204", + "ignored-cves": [] + }, + "thrift": { + "product": "thrift", + "vendor": "apache", + "version": "0.15.0", + "commit": "8317ec43ea2425b6f8e24e4dc4f5b2360f717eb4", + "ignored-cves": [] + }, + "util-linux": { + "product": "util-linux", + "vendor": "kernel", + "version": "2.27.1", + "commit": "9f5e5bec91a72365b9baa771fa02cbedab804fe3", + "ignored-cves": [] + }, + "yara": { + "product": "yara", + "vendor": "virustotal", + "version": "4.1.3", + "commit": "b99a808cf9955090b909c72d6a0da5295c3cbc7c", + "ignored-cves": [] + }, + "zlib": { + "product": "zlib", + "vendor": "zlib", + "version": "1.2.12", + "commit": "21767c654d31d2dccdde4330529775c6c5fd5389", + "ignored-cves": [] + }, + "zstd": { + "product": "zstandard", + "vendor": "facebook", + "version": "1.4.0", + "commit": "83b51e9f886be7c2a4d477b6e7bc6db831791d8d", + "ignored-cves": ["CVE-2021-24031"] + } +} diff --git a/osquery/core/watcher.cpp b/osquery/core/watcher.cpp index a9298e3ea12..e11a4f9e8cc 100644 --- a/osquery/core/watcher.cpp +++ b/osquery/core/watcher.cpp @@ -598,12 +598,6 @@ Status WatcherRunner::isChildSane(const PlatformProcess& child) const { 1, "Memory limits exceeded: " + std::to_string(change.footprint)); } - // The worker is sane, no action needed. - // Attempt to flush status logs to the well-behaved worker. - if (use_worker_ && child.pid() == watcher_->getWorker().pid()) { - relayStatusLogs(); - } - return Status(0); } diff --git a/osquery/dispatcher/scheduler.cpp b/osquery/dispatcher/scheduler.cpp index f738873cee7..904737859ef 100644 --- a/osquery/dispatcher/scheduler.cpp +++ b/osquery/dispatcher/scheduler.cpp @@ -224,6 +224,17 @@ void SchedulerRunner::maybeScheduleCarves(uint64_t time_step) { void SchedulerRunner::maybeReloadSchedule(uint64_t time_step) { if (FLAGS_schedule_reload > 0 && (time_step % FLAGS_schedule_reload) == 0) { + /* Before resetting the database we want to ensure that there's no pending + log relay thread started by the scheduler thread in a previous loop, + to avoid deadlocks. + This is because resetDatabase logs and also holds an exclusive lock + to the database, so when a log relay thread started via relayStatusLog + is pending, log calls done on the same thread that started it + (in this case the scheduler thread), will wait until the log relaying + thread finishes serializing the logs to the database; but this can't + happen due to the exclusive lock. */ + waitLogRelay(); + if (FLAGS_schedule_reload_sql) { SQLiteDBManager::resetPrimary(); } @@ -232,7 +243,14 @@ void SchedulerRunner::maybeReloadSchedule(uint64_t time_step) { } void SchedulerRunner::maybeFlushLogs(uint64_t time_step) { - // GLog is not re-entrant, so logs must be flushed in a dedicated thread. 
+ /* In daemon mode we start a log relay thread to flush the logs from the + BufferedLogSink to the database. + The thread is started from the scheduler thread, + because if we did it in the send() function of BufferedLogSink, + inline with the log call itself, we would cause deadlocks + when there's recursive logging caused by the logger plugins. + We also do the flush itself in a new thread, so we don't slow down + the scheduler thread too much. */ if ((time_step % 3) == 0) { relayStatusLogs(LoggerRelayMode::Async); } @@ -281,6 +299,10 @@ void SchedulerRunner::start() { } } + /* Wait for the thread relaying/flushing the logs + to prevent race conditions on shutdown */ + waitLogRelay(); + // Scheduler ended. if (!interrupted() && request_shutdown_on_expiration) { LOG(INFO) << "The scheduler ended after " << timeout_ << " seconds"; diff --git a/osquery/events/darwin/endpointsecurity.h b/osquery/events/darwin/endpointsecurity.h index 18dcdad5dd4..ba1ef70e1b6 100644 --- a/osquery/events/darwin/endpointsecurity.h +++ b/osquery/events/darwin/endpointsecurity.h @@ -57,6 +57,7 @@ struct EndpointSecurityEventContext : public EventContext { std::string team_id; std::string cdhash; bool platform_binary; + std::string codesigning_flags; std::string executable; std::string username; diff --git a/osquery/events/darwin/es_utils.cpp b/osquery/events/darwin/es_utils.cpp index 08cdcc10b78..b777627db6d 100644 --- a/osquery/events/darwin/es_utils.cpp +++ b/osquery/events/darwin/es_utils.cpp @@ -7,6 +7,8 @@ * SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) */ +#include <Kernel/kern/cs_blobs.h> +#include <boost/algorithm/string/join.hpp> #include #include #include @@ -64,6 +66,37 @@ std::string getSigningId(const es_process_t* p) { : ""; } +std::string getCodesigningFlags(const es_process_t* p) { + // Parses flags from kern/cs_blobs.h header that are useful for monitoring. + // Flags that are commonly set are inverted to make unusual or potentially + // insecure processes stand out. + + std::vector<std::string> flags; + if (!(p->codesigning_flags & CS_VALID)) { + // Process code signature is invalid, either initially or after paging + // in an invalid page to a previously valid code signature. + flags.push_back("NOT_VALID"); + } + + if (p->codesigning_flags & CS_ADHOC) { + // Process is signed "ad-hoc", without a code signing identity. + flags.push_back("ADHOC"); + } + + if (!(p->codesigning_flags & CS_RUNTIME)) { + // Process is signed without using the hardened runtime. + flags.push_back("NOT_RUNTIME"); + } + + if (p->codesigning_flags & CS_INSTALLER) { + // Process has installer entitlement, which can modify system integrity + // protected (SIP) files. + flags.push_back("INSTALLER"); + } + + return boost::algorithm::join(flags, ", "); +} + std::string getTeamId(const es_process_t* p) { return p->team_id.length > 0 && p->team_id.data != nullptr ? p->team_id.data : ""; @@ -112,6 +145,7 @@ void getProcessProperties(const es_process_t* p, ec->team_id = getTeamId(p); ec->cdhash = getCDHash(p); ec->platform_binary = p->is_platform_binary; + ec->codesigning_flags = getCodesigningFlags(p); auto user = getpwuid(ec->uid); ec->username = user->pw_name != nullptr ?
std::string(user->pw_name) : ""; @@ -119,4 +153,4 @@ void getProcessProperties(const es_process_t* p, ec->cwd = getCwdPathFromPid(ec->pid); } -} // namespace osquery \ No newline at end of file +} // namespace osquery diff --git a/osquery/logger/data_logger.h b/osquery/logger/data_logger.h index 489797947e7..d5e38a6b314 100644 --- a/osquery/logger/data_logger.h +++ b/osquery/logger/data_logger.h @@ -124,6 +124,15 @@ Status logSnapshotQuery(const QueryLogItem& item); */ void relayStatusLogs(LoggerRelayMode relay_mode = LoggerRelayMode::Sync); +/** + * @brief Waits for the relay thread to finish + * + * Waits for the relay thread launched by a previous relayStatusLogs call + * on the current thread to finish. + * Must not be called in a path that can be called by Google Log. + */ +void waitLogRelay(); + /// Inspect the number of internal-buffered status log lines. size_t queuedStatuses(); diff --git a/osquery/logger/logger.cpp b/osquery/logger/logger.cpp index ed8391f48cf..e3733de7011 100644 --- a/osquery/logger/logger.cpp +++ b/osquery/logger/logger.cpp @@ -326,7 +326,7 @@ void BufferedLogSink::send(google::LogSeverity severity, std::string()}); } - // The daemon will relay according to the schedule. + // This is for testing only; the daemon will relay according to the schedule. if (enabled_ && !isDaemon()) { relayStatusLogs(FLAGS_logger_status_sync ? LoggerRelayMode::Sync : LoggerRelayMode::Async); @@ -479,6 +479,18 @@ size_t queuedStatuses() { return BufferedLogSink::get().dump().size(); } +void waitLogRelay() { + if (kOptBufferedLogSinkSender.has_value()) { + /* NOTE: We are not doing the workaround for Windows + used in BufferedLogSink::WaitTillSent, because we are not (and must not + be) in a path called by Google Log; failing to properly wait + for the thread to finish would cause either a race condition or a deadlock + */ + kOptBufferedLogSinkSender->wait(); + kOptBufferedLogSinkSender.reset(); + } +} + void relayStatusLogs(LoggerRelayMode relay_mode) { if (FLAGS_disable_logging || !databaseInitialized()) { // The logger plugins may not be setUp if logging is disabled.
@@ -533,6 +545,9 @@ void relayStatusLogs(LoggerRelayMode relay_mode) { if (relay_mode == LoggerRelayMode::Sync) { sender(); } else { + // Wait on a previous relaying thread, if present + waitLogRelay(); + std::packaged_task<void()> task(std::move(sender)); kOptBufferedLogSinkSender = task.get_future(); std::thread(std::move(task)).detach(); diff --git a/osquery/remote/http_client.cpp b/osquery/remote/http_client.cpp index beb05c04d30..9380c1eb7ce 100644 --- a/osquery/remote/http_client.cpp +++ b/osquery/remote/http_client.cpp @@ -458,7 +458,7 @@ Response Client::put(Request& req, std::string const& content_type) { req.method(beast_http::verb::put); req.body() = body; - if (!content_type.empty() && content_type.size() < 512) { + if (!content_type.empty()) { req.set(beast_http::field::content_type, content_type); } return sendHTTPRequest(req); @@ -480,7 +480,7 @@ Response Client::put(Request& req, std::string const& content_type) { req.method(beast_http::verb::put); req.body() = std::move(body); - if (!content_type.empty() && content_type.size() < 512) { + if (!content_type.empty()) { req.set(beast_http::field::content_type, content_type); } return sendHTTPRequest(req); diff --git a/osquery/tables/applications/posix/docker.cpp b/osquery/tables/applications/posix/docker.cpp index 1f2d9603e66..19a0f26e050 100644 --- a/osquery/tables/applications/posix/docker.cpp +++ b/osquery/tables/applications/posix/docker.cpp @@ -409,8 +409,13 @@ QueryData genContainers(QueryContext& context) { BIGINT(container_details.get_child("State").get("Pid", -1)); r["started_at"] = container_details.get_child("State").get( "StartedAt", ""); - r["finished_at"] = container_details.get_child("State").get( - "FinishedAt", ""); + if (r["state"] != "running") { + r["finished_at"] = + container_details.get_child("State").get("FinishedAt", + ""); + } else { + r["finished_at"] = ""; + } r["privileged"] = container_details.get_child("HostConfig") .get("Privileged", false) ?
INTEGER(1) diff --git a/osquery/tables/events/darwin/es_process_events.cpp b/osquery/tables/events/darwin/es_process_events.cpp index a9fb29e93f1..6d8ef88607a 100644 --- a/osquery/tables/events/darwin/es_process_events.cpp +++ b/osquery/tables/events/darwin/es_process_events.cpp @@ -62,6 +62,7 @@ Status ESProcessEventSubscriber::Callback( r["signing_id"] = ec->signing_id; r["team_id"] = ec->team_id; r["cdhash"] = ec->cdhash; + r["codesigning_flags"] = ec->codesigning_flags; r["cmdline"] = ec->args; r["cmdline_count"] = BIGINT(ec->argc); diff --git a/osquery/tables/system/CMakeLists.txt b/osquery/tables/system/CMakeLists.txt index 8b0e3d85258..2d93e53155b 100644 --- a/osquery/tables/system/CMakeLists.txt +++ b/osquery/tables/system/CMakeLists.txt @@ -202,6 +202,8 @@ function(generateOsqueryTablesSystemSystemtable) windows/registry.cpp windows/scheduled_tasks.cpp windows/secureboot.cpp + windows/security_profile_info_utils.cpp + windows/security_profile_info.cpp windows/services.cpp windows/shared_resources.cpp windows/shellbags.cpp @@ -269,6 +271,7 @@ function(generateOsqueryTablesSystemSystemtable) if(DEFINED PLATFORM_LINUX) target_link_libraries(osquery_tables_system_systemtable PUBLIC osquery_utils_linux + osquery_utils_system_boottime thirdparty_libdevmapper thirdparty_libcryptsetup thirdparty_librpm @@ -348,6 +351,7 @@ function(generateOsqueryTablesSystemSystemtable) windows/certificates.h windows/windows_eventlog.h windows/windows_update_history.h + windows/security_profile_info_utils.h ) endif() diff --git a/osquery/tables/system/darwin/processes.cpp b/osquery/tables/system/darwin/processes.cpp index 2aa21394231..8f44c670f82 100644 --- a/osquery/tables/system/darwin/processes.cpp +++ b/osquery/tables/system/darwin/processes.cpp @@ -8,12 +8,11 @@ */ #include +#include #include #include #include -#include - #include #include #include @@ -36,9 +35,8 @@ namespace tables { // The maximum number of expected memory regions per process. 
#define MAX_MEMORY_MAPS 512 -#define CPU_TIME_RATIO 1000000 -#define START_TIME_RATIO 1000000000 -#define NSECS_IN_USEC 1000 +#define NSEC_TO_MSEC_RATIO 1000000UL +#define USEC_TO_SEC_RATIO NSEC_TO_MSEC_RATIO // Process states are as defined in sys/proc.h // SIDL (1) Process being created by fork @@ -109,13 +107,15 @@ struct proc_cred { } real, effective, saved; }; -inline bool genProcCred(QueryContext& context, - int pid, - proc_cred& cred, - ProcessesRow& r) { +inline bool genProcCredAndStartTime(QueryContext& context, + int pid, + proc_cred& cred, + ProcessesRow& r) { struct proc_bsdinfo bsdinfo; struct proc_bsdshortinfo bsdinfo_short; + r.start_time_col = -1; + if (proc_pidinfo(pid, PROC_PIDTBSDINFO, 1, &bsdinfo, PROC_PIDTBSDINFO_SIZE) == PROC_PIDTBSDINFO_SIZE) { cred.parent = bsdinfo.pbi_ppid; @@ -128,6 +128,10 @@ inline bool genProcCred(QueryContext& context, cred.effective.gid = bsdinfo.pbi_gid; cred.saved.uid = bsdinfo.pbi_svuid; cred.saved.gid = bsdinfo.pbi_svgid; + + r.start_time_col = ((bsdinfo.pbi_start_tvsec * USEC_TO_SEC_RATIO) + + bsdinfo.pbi_start_tvusec) / + USEC_TO_SEC_RATIO; } else if (proc_pidinfo(pid, PROC_PIDT_SHORTBSDINFO, 1, @@ -473,32 +477,21 @@ void genProcResourceUsage(const QueryContext& context, // time information r.user_time_col = ((rusage_info_data.ri_user_time * time_base.numer) / time_base.denom) / - CPU_TIME_RATIO; + NSEC_TO_MSEC_RATIO; + r.system_time_col = ((rusage_info_data.ri_system_time * time_base.numer) / time_base.denom) / - CPU_TIME_RATIO; + NSEC_TO_MSEC_RATIO; // disk i/o information r.disk_bytes_read_col = rusage_info_data.ri_diskio_bytesread; r.disk_bytes_written_col = rusage_info_data.ri_diskio_byteswritten; - - if (context.isAnyColumnUsed(ProcessesRow::START_TIME)) { - uint64_t const absoluteTime = mach_absolute_time(); - auto const process_age = std::chrono::nanoseconds{ - (absoluteTime - rusage_info_data.ri_proc_start_abstime) * - time_base.numer / time_base.denom}; - - r.start_time_col = - std::time(nullptr) - - std::chrono::duration_cast(process_age).count(); - } } else { r.wired_size_col = -1; r.resident_size_col = -1; r.total_size_col = -1; r.user_time_col = -1; r.system_time_col = -1; - r.start_time_col = -1; } } @@ -516,7 +509,7 @@ TableRows genProcesses(QueryContext& context) { genProcRootAndCWD(context, pid, *r); proc_cred cred; - if (!genProcCred(context, pid, cred, *r)) { + if (!genProcCredAndStartTime(context, pid, cred, *r)) { continue; } diff --git a/osquery/tables/system/linux/processes.cpp b/osquery/tables/system/linux/processes.cpp index 3d6ffebd51e..0d486d003db 100644 --- a/osquery/tables/system/linux/processes.cpp +++ b/osquery/tables/system/linux/processes.cpp @@ -28,9 +28,8 @@ #include #include #include - #include -#include +#include #include @@ -50,10 +49,11 @@ inline std::string readProcCMDLine(const std::string& pid) { std::string content; readFile(attr, content); // Remove \0 delimiters. - std::replace_if(content.begin(), - content.end(), - [](const char& c) { return c == 0; }, - ' '); + std::replace_if( + content.begin(), + content.end(), + [](const char& c) { return c == 0; }, + ' '); // Remove trailing delimiter. boost::algorithm::trim(content); return content; @@ -308,12 +308,32 @@ SimpleProcStat::SimpleProcStat(const std::string& pid) { this->name = detail.at(1); } else if (detail.at(0) == "VmRSS") { detail[1].erase(detail.at(1).end() - 3, detail.at(1).end()); - // Memory is reported in kB. - this->resident_size = detail.at(1) + "000"; + // Memory is reported in kB (1024 bytes). 
+ auto resident_size_result = osquery::tryTo<std::uint64_t>(detail.at(1)); + + if (resident_size_result.isError()) { + status = + Status::failure("Failed to convert VmRSS string value to integer"); + return; + } + + const auto resident_size = resident_size_result.get() * 1024; + + this->resident_size = std::to_string(resident_size); } else if (detail.at(0) == "VmSize") { detail[1].erase(detail.at(1).end() - 3, detail.at(1).end()); - // Memory is reported in kB. - this->total_size = detail.at(1) + "000"; + // Memory is reported in kB (1024 bytes). + auto virtual_size_result = osquery::tryTo<std::uint64_t>(detail.at(1)); + + if (virtual_size_result.isError()) { + status = + Status::failure("Failed to convert VmSize string value to integer"); + return; + } + + const auto virtual_size = virtual_size_result.get() * 1024; + + this->total_size = std::to_string(virtual_size); } else if (detail.at(0) == "Gid") { // Format is: R E - - auto gid_detail = osquery::split(detail.at(1), "\t"); @@ -427,7 +447,7 @@ int getOnDisk(const std::string& pid, std::string& path) { } void genProcess(const std::string& pid, - long system_boot_time, + std::uint64_t system_boot_time, QueryContext& context, TableRows& results) { // Parse the process stat and status. @@ -478,8 +498,9 @@ void genProcess(const std::string& pid, auto proc_start_time_exp = tryTo(proc_stat.start_time); if (proc_start_time_exp.isValue() && system_boot_time > 0) { - r["start_time"] = INTEGER(system_boot_time + proc_start_time_exp.take() / - sysconf(_SC_CLK_TCK)); + auto proc_start_time = proc_start_time_exp.take() / sysconf(_SC_CLK_TCK); + + r["start_time"] = BIGINT(system_boot_time + proc_start_time); } else { r["start_time"] = "-1"; } @@ -520,10 +541,7 @@ void genNamespaces(const std::string& pid, QueryData& results) { TableRows genProcesses(QueryContext& context) { TableRows results; - auto system_boot_time = getUptime(); - if (system_boot_time > 0) { - system_boot_time = std::time(nullptr) - system_boot_time; - } + static const std::uint64_t system_boot_time = getBootTime(); auto pidlist = getProcList(context); for (const auto& pid : pidlist) { @@ -565,5 +583,5 @@ QueryData genProcessNamespaces(QueryContext& context) { return results; } -} -} +} // namespace tables +} // namespace osquery diff --git a/osquery/tables/system/windows/logged_in_users.cpp b/osquery/tables/system/windows/logged_in_users.cpp index ea9eca4649c..c288a6d5d34 100644 --- a/osquery/tables/system/windows/logged_in_users.cpp +++ b/osquery/tables/system/windows/logged_in_users.cpp @@ -117,6 +117,24 @@ QueryData genLoggedInUsers(QueryContext& context) { // TODO: IPv6 addresses are given as an array of byte values.
auto addr = reinterpret_cast(wtsClient->ClientAddress); r["host"] = std::string(addr, CLIENTADDRESS_LENGTH); + } else if (wtsClient->ClientAddressFamily == AF_UNSPEC) { + LPWSTR clientName = nullptr; + res = WTSQuerySessionInformationW(WTS_CURRENT_SERVER_HANDLE, + pSessionInfo[i].SessionId, + WTSClientName, + &clientName, + &bytesRet); + + if (res == 0 || clientName == nullptr) { + VLOG(1) << "Error querying WTS clientName information (" + << GetLastError() << ")"; + } else { + r["host"] = wstringToString(clientName); + } + + if (clientName != nullptr) { + WTSFreeMemory(clientName); + } } r["pid"] = INTEGER(-1); diff --git a/osquery/tables/system/windows/pipes.cpp b/osquery/tables/system/windows/pipes.cpp index ce17a91fcaf..67ad5b28ca8 100644 --- a/osquery/tables/system/windows/pipes.cpp +++ b/osquery/tables/system/windows/pipes.cpp @@ -37,8 +37,14 @@ QueryData genPipes(QueryContext& context) { unsigned long pid = 0; auto pipePath = L"\\\\.\\pipe\\" + std::wstring(findFileData.cFileName); - auto pipeHandle = CreateFileW( - pipePath.c_str(), GENERIC_READ, 0, nullptr, OPEN_EXISTING, 0, nullptr); + auto pipeHandle = + CreateFileW(pipePath.c_str(), + GENERIC_READ, + (FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE), + nullptr, + OPEN_EXISTING, + 0, + nullptr); if (pipeHandle == INVALID_HANDLE_VALUE) { results.push_back(r); LOG(INFO) << "Failed to open handle to pipe with " << GetLastError(); diff --git a/osquery/tables/system/windows/security_profile_info.cpp b/osquery/tables/system/windows/security_profile_info.cpp new file mode 100644 index 00000000000..30f6b887a67 --- /dev/null +++ b/osquery/tables/system/windows/security_profile_info.cpp @@ -0,0 +1,105 @@ +/** + * Copyright (c) 2014-present, The osquery authors + * + * This source code is licensed as defined by the LICENSE file found in the + * root directory of this source tree. 
+ * + * SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) + */ + +#include +#include +#include + +namespace osquery { +namespace tables { + +QueryData genSecurityProfileInformation(QueryContext& context) { + QueryData results; + + // Getting system security profile information + SceProfileData data; + const SceClientHelper::SceProfileInfo* profileData = data.getProfileInfo(); + if (profileData == nullptr) { + LOG(ERROR) << "Failed to retrieve security profile information data."; + return results; + } + + // And then populating the table with obtained data + Row seceditRow; + seceditRow["minimum_password_age"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->MinPasswdAge)); + + seceditRow["maximum_password_age"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->MaxPasswdAge)); + + seceditRow["minimum_password_length"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->MinPasswdLen)); + + seceditRow["password_complexity"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->PasswdComplexity)); + + seceditRow["password_history_size"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->PasswdHistSize)); + + seceditRow["lockout_bad_count"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->LockoutBadCount)); + + seceditRow["logon_to_change_password"] = INTEGER( + SceProfileData::getNormalizedInt(profileData->ReqLogonChangePasswd)); + + seceditRow["force_logoff_when_expire"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->ForceLogoffExpire)); + + seceditRow["new_administrator_name"] = + wstringToString(profileData->AdministratorName); + + seceditRow["new_guest_name"] = wstringToString(profileData->GuestName); + + seceditRow["clear_text_password"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->ClearTextPasswd)); + + seceditRow["lsa_anonymous_name_lookup"] = + INTEGER(SceProfileData::getNormalizedInt( + profileData->LsaAllowAnonymousSidLookup)); + + seceditRow["enable_admin_account"] = INTEGER( + SceProfileData::getNormalizedInt(profileData->EnableAdminAccount)); + + seceditRow["enable_guest_account"] = INTEGER( + SceProfileData::getNormalizedInt(profileData->EnableGuestAccount)); + + seceditRow["audit_system_events"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->AuditSystemEvents)); + + seceditRow["audit_logon_events"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->AuditLogonEvents)); + + seceditRow["audit_object_access"] = INTEGER( + SceProfileData::getNormalizedInt(profileData->AuditObjectsAccess)); + + seceditRow["audit_privilege_use"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->AuditPrivilegeUse)); + + seceditRow["audit_policy_change"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->AuditPolicyChange)); + + seceditRow["audit_account_manage"] = INTEGER( + SceProfileData::getNormalizedInt(profileData->AuditAccountManage)); + + seceditRow["audit_process_tracking"] = INTEGER( + SceProfileData::getNormalizedInt(profileData->AuditProcessTracking)); + + seceditRow["audit_ds_access"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->AuditDSAccess)); + + seceditRow["audit_account_logon"] = + INTEGER(SceProfileData::getNormalizedInt(profileData->AuditAccountLogon)); + + results.push_back(std::move(seceditRow)); + + return results; +} + +} // namespace tables +} // namespace osquery diff --git a/osquery/tables/system/windows/security_profile_info_utils.cpp b/osquery/tables/system/windows/security_profile_info_utils.cpp new file mode 100644 index 00000000000..cb4adb294d3 --- /dev/null 
+++ b/osquery/tables/system/windows/security_profile_info_utils.cpp @@ -0,0 +1,247 @@ +/** + * Copyright (c) 2014-present, The osquery authors + * + * This source code is licensed as defined by the LICENSE file found in the + * root directory of this source tree. + * + * SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) + */ + +#include +#include + +namespace osquery { +namespace tables { + +SceClientHelper& SceClientHelper::instance() { + static SceClientHelper instance; + return instance; +} + +SceClientHelper::SceClientHelper() {} + +SceClientHelper::~SceClientHelper() { + if (handleSceDLL_ != nullptr) { + if (!FreeLibrary(handleSceDLL_)) { + LOG(ERROR) << "Failed to free module handle of dll " << kTargetSCEDLL; + } + + handleSceDLL_ = nullptr; + } + + if (sceFreeMemory_ != nullptr) { + sceFreeMemory_ = nullptr; + } + + if (sceGetSecurityProfileInfo_ != nullptr) { + sceGetSecurityProfileInfo_ = nullptr; + } + + initialized_ = false; +} + +Status SceClientHelper::initialize() { + // Checking first if the class is already initialized + if (initialized_) { + return Status::success(); + } + + // Sanity check to ensure that function pointers are not initialized if + // running process is a WoW64 process + Status wow64Status = isWow64Process(); + if (wow64Status.ok()) { + return Status::failure("Init failed: the current process is running under WoW64"); + } + + // Checking if the input DLL is already mapped to memory before loading it. + // If mapped module is not found, LoadLibraryExA() gets called to load the + // module from the system32 folder. + bool increasedRefCount = false; + HMODULE dllHandle = GetModuleHandleA(kTargetSCEDLL.c_str()); + if (dllHandle == nullptr) { + // Library was not there in memory already, so we are loading it here and + // freeing it in the class destructor + increasedRefCount = true; + dllHandle = LoadLibraryExA( + kTargetSCEDLL.c_str(), NULL, LOAD_LIBRARY_SEARCH_SYSTEM32); + } + + // An invalid module handle means that the DLL couldn't be loaded + if (dllHandle == nullptr) { + return Status::failure(kTargetSCEDLL + " dll couldn't be loaded"); + } + + // Getting the address to exported SceFreeMemory function + PVOID sceFreeMemoryAddr = GetProcAddress(dllHandle, kSceFreeMemoryFn.c_str()); + if (sceFreeMemoryAddr == nullptr) { + if (increasedRefCount) { + FreeLibrary(dllHandle); + } + return Status::failure(kSceFreeMemoryFn + " couldn't be loaded"); + } + + // Getting the address to exported SceGetSecurityProfileInfo function + PVOID sceGetProfileInforAddr = + GetProcAddress(dllHandle, kSceGetSecProfileInfoFn.c_str()); + if (sceGetProfileInforAddr == nullptr) { + if (increasedRefCount) { + FreeLibrary(dllHandle); + } + return Status::failure(kSceGetSecProfileInfoFn + " couldn't be loaded"); + } + + // Assigning the address of the exports in memory so they can be called + // through function pointers that match the target function prototypes + sceFreeMemory_ = static_cast(sceFreeMemoryAddr); + + sceGetSecurityProfileInfo_ = + static_cast(sceGetProfileInforAddr); + + // Assigning the handle to the loaded library if ref counter was increased + if (increasedRefCount) { + handleSceDLL_ = dllHandle; + } + + initialized_ = true; + + return Status::success(); +} + +Status SceClientHelper::isValidSceProfileData(const PVOID& profileData) { + // Checking that input pointer is initialized + if (profileData == nullptr) { + return Status::failure("profileData is NULL."); + } + + // Checking that input pointer points to an accessible SceProfileInfo layout + if (IsBadReadPtr(profileData, 
sizeof(SceProfileInfo))) { + return Status::failure("profileData layout is invalid."); + } + + return Status::success(); +} + +Status SceClientHelper::releaseSceProfileData(const PVOID& profileData) { + // Initializing the class if this is the first run + Status initStatus = initialize(); + if (!initStatus.ok()) { + return Status::failure(initStatus.getMessage()); + } + + // Sanity check on input + Status sceProfileDataStatus = isValidSceProfileData(profileData); + if (!sceProfileDataStatus.ok()) { + return Status::failure(sceProfileDataStatus.getMessage()); + } + + // Sanity check on function pointer about to be used + if (sceFreeMemory_ == nullptr) { + return Status::failure(kSceFreeMemoryFn + " cannot be used"); + } + + // Calling the runtime-linked function and checking return code + DWORD retCode = sceFreeMemory_(profileData, kSceAreaAllFlag); + if (retCode != ERROR_SUCCESS) { + return Status::failure( + kSceFreeMemoryFn + + " call failed with error: " + std::to_string(retCode)); + } + + // Freeing RPC-related data + LocalFree(profileData); + + return Status::success(); +} + +Status SceClientHelper::isWow64Process() { + BOOL wow64 = FALSE; + if ((IsWow64Process(GetCurrentProcess(), &wow64)) && (wow64)) { + return Status::success(); + } + + return Status::failure("Current process is not a WoW64 process."); +} + +Status SceClientHelper::getSceSecurityProfileInfo(PVOID& profileData) { + // Initializing the class if this is the first run + Status initStatus = initialize(); + if (!initStatus.ok()) { + return Status::failure(initStatus.getMessage()); + } + + // Sanity check on function pointer about to be used + if (sceGetSecurityProfileInfo_ == nullptr) { + return Status::failure(kSceGetSecProfileInfoFn + " cannot be used"); + } + + // Calling the runtime-linked function and returning the obtained data + PVOID workProfileData = nullptr; + DWORD retCode = sceGetSecurityProfileInfo_( + nullptr, kSceSystemFlag, kSceAreaAllFlag, &workProfileData, nullptr); + + if (retCode != ERROR_SUCCESS) { + return Status::failure( + kSceGetSecProfileInfoFn + + " call failed with error: " + std::to_string(retCode)); + } + + // Sanity check on the returned data + Status sceProfileDataStatus = isValidSceProfileData(workProfileData); + if (!sceProfileDataStatus.ok()) { + return Status::failure( + kSceGetSecProfileInfoFn + + " returned invalid data: " + sceProfileDataStatus.getMessage()); + } + + profileData = workProfileData; + + return Status::success(); +} + +const SceClientHelper::SceProfileInfo* SceProfileData::getProfileInfo() { + SceClientHelper::SceProfileInfo* profilePtr = nullptr; + + // Obtaining the security profile information if this is the first run + if (data_ == nullptr) { + // Grabbing the profile data from the SCE API + auto& sceHelper = SceClientHelper::instance(); + Status secGetProfileData = sceHelper.getSceSecurityProfileInfo(data_); + if (!secGetProfileData.ok()) { + LOG(ERROR) << "Failed to get security profile data: " + << secGetProfileData.getMessage(); + return profilePtr; + } + } + + if (data_ != nullptr) { + profilePtr = reinterpret_cast(data_); + } + + return profilePtr; +} + +SceProfileData::~SceProfileData() { + // Releasing memory allocated by getSceSecurityProfileInfo() call + if (data_ != nullptr) { + auto& sceHelper = SceClientHelper::instance(); + Status secReleaseProfileData = sceHelper.releaseSceProfileData(data_); + if (!secReleaseProfileData.ok()) { + LOG(ERROR) << "Failed to release security profile data: " + << secReleaseProfileData.getMessage(); + } + + data_ = nullptr; + } +} + +int 
SceProfileData::getNormalizedInt(const DWORD& input) { + int workValue = static_cast(input); + if (workValue < 0) { + workValue = -1; + } + + return workValue; +} + +} // namespace tables +} // namespace osquery diff --git a/osquery/tables/system/windows/security_profile_info_utils.h b/osquery/tables/system/windows/security_profile_info_utils.h new file mode 100644 index 00000000000..cb5969d6b2a --- /dev/null +++ b/osquery/tables/system/windows/security_profile_info_utils.h @@ -0,0 +1,253 @@ +/** + * Copyright (c) 2014-present, The osquery authors + * + * This source code is licensed as defined by the LICENSE file found in the + * root directory of this source tree. + * + * SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) + */ + +#pragma once + +#include + +namespace osquery { +namespace tables { + +/** + * @brief Manages access to SCE RPC client API exported in scecli.dll + */ +class SceClientHelper : public boost::noncopyable { + /** + * @brief SCE flag that indicates SCE SYSTEM profile request + */ + static constexpr DWORD kSceSystemFlag = 300; + + /** + * @brief SCE bitmask flag that indicates ALL security information flags + */ + static constexpr DWORD kSceAreaAllFlag = 0xFFFFL; + + /** + * @brief SCE profile array subdata size + */ + static constexpr DWORD kSceInfoMaxArray = 3; + + /** + * @brief Name of the DLL containing the SCE RPC Client API + */ + const std::string kTargetSCEDLL = "scecli.dll"; + + /** + * @brief Name of the SceFreeMemory export function in scecli.dll + */ + const std::string kSceFreeMemoryFn = "SceFreeMemory"; + + /** + * @brief Name of the SceGetSecurityProfileInfo export function in scecli.dll + */ + const std::string kSceGetSecProfileInfoFn = "SceGetSecurityProfileInfo"; + + public: + /** + * @brief Data structure used by the SCE RPC Protocol to return security + * profile information + */ + struct SceProfileInfo { + DWORD Unk0; + DWORD MinPasswdAge; + DWORD MaxPasswdAge; + DWORD MinPasswdLen; + DWORD PasswdComplexity; + DWORD PasswdHistSize; + DWORD LockoutBadCount; + DWORD ResetLockoutCount; + DWORD LockoutDuration; + DWORD ReqLogonChangePasswd; + DWORD ForceLogoffExpire; + PWSTR AdministratorName; + PWSTR GuestName; + DWORD Unk1; + DWORD ClearTextPasswd; + DWORD LsaAllowAnonymousSidLookup; + PVOID Unk2; + PVOID Unk3; + PVOID Unk4; + PVOID Unk5; + PVOID Unk6; + PVOID Unk7; + PVOID Unk8; + PVOID Unk9; + DWORD MaxLogSize[kSceInfoMaxArray]; + DWORD RetentionLog[kSceInfoMaxArray]; + DWORD RetentionLogDays[kSceInfoMaxArray]; + DWORD RestrictAccessGuest[kSceInfoMaxArray]; + DWORD AuditSystemEvents; + DWORD AuditLogonEvents; + DWORD AuditObjectsAccess; + DWORD AuditPrivilegeUse; + DWORD AuditPolicyChange; + DWORD AuditAccountManage; + DWORD AuditProcessTracking; + DWORD AuditDSAccess; + DWORD AuditAccountLogon; + DWORD AuditFull; + DWORD RegInfoCount; + PVOID Unk10; + DWORD EnableAdminAccount; + DWORD EnableGuestAccount; + }; + + public: + /** + * @brief It ensures that SceClientHelper class only has one instance, and + * provides a global point of access to it. + * + * @return Reference to the single global SceClientHelper instance + */ + static SceClientHelper& instance(); + + /** + * @brief This helper returns the system security profile information. + * This is achieved by calling the SceGetSecurityProfileInfo() export in + * scecli.dll. This function talks to the SCE RPC server to obtain the system + * security profile information. 
+ * + * @param profileData SceGetSecurityProfileInfo() receives a void pointer, + * which can be initialized to nullptr as no previous allocation is required. + * On success, this pointer will point to an allocated instance of the + * SceProfileInfo data structure. + * + * @return Status of the call to SceGetSecurityProfileInfo() + */ + Status getSceSecurityProfileInfo(PVOID& profileData); + + /** + * @brief This helper frees any memory allocated from calling the + * SceGetSecurityProfileInfo() exported function in scecli.dll. This is + * achieved by calling the SceFreeMemory() export in scecli.dll. + * + * @param profileData SceFreeMemory() receives a void pointer, + * which should point to an instance of the SceProfileInfo data + * structure. + * + * @return Status of the call to SceFreeMemory() + */ + Status releaseSceProfileData(const PVOID& profileData); + + /** + * @brief This helper determines if the current process is running under + * WoW64 or an Intel64 or x64 processor. WOW64 is the x86 emulator that + * allows 32-bit Windows-based applications to run seamlessly on 64-bit + * Windows. This check is done by calling IsWow64Process(). + * + * @return WoW64 status of the current process + */ + static Status isWow64Process(); + + private: + /** + * @brief Internal helper that checks if the process can access the memory + * layout of the SceProfileInfo data structure pointed to by profileData. + * + * @param profileData This is a pointer to an instance of the SceProfileInfo + * data structure. + * + * @return Valid/Invalid status of the SceProfileInfo pointer + */ + Status isValidSceProfileData(const PVOID& profileData); + + /** + * @brief Default constructor + */ + SceClientHelper(); + + /** + * @brief Default destructor that releases allocated resources + */ + virtual ~SceClientHelper(); + + /** + * @brief This helper initializes function pointers to the SceFreeMemory + * and SceGetSecurityProfileInfo export functions in scecli.dll by performing + * run-time dynamic linking. + * + * @return Status of the run-time dynamic linking process + */ + Status initialize(); + + private: + /** + * @brief This is the function prototype of the undocumented + * SceFreeMemory() function. This prototype has not + * changed since Windows 7. + */ + using SceFreeMemoryPtr = DWORD(WINAPI*)(PVOID data, DWORD securityArea); + + /** + * @brief This is the function prototype of the undocumented + * SceGetSecurityProfileInfo() function. This prototype has not + * changed since Windows 7. + */ + using GetSecProfileInfoFnPtr = DWORD(WINAPI*)(PVOID profileHandle, + DWORD type, + DWORD securityArea, + PVOID profileInfo, + PVOID errorInfo); + + /** + * @brief This handle holds the module reference to scecli.dll + */ + HMODULE handleSceDLL_{nullptr}; + + /** + * @brief This function pointer points to exported SceFreeMemory in + * memory. + */ + SceFreeMemoryPtr sceFreeMemory_{nullptr}; + + /** + * @brief This function pointer points to exported SceGetSecurityProfileInfo + * in memory. + */ + GetSecProfileInfoFnPtr sceGetSecurityProfileInfo_{nullptr}; + + std::atomic initialized_{false}; +}; + +/** + * @brief Scoped management of the security profile memory information + * returned by the SCE RPC Server + */ +struct SceProfileData { + /** + * @brief This helper returns a pointer to the SceProfileInfo data in memory, + * obtained from calling SceGetSecurityProfileInfo(). 
+ * + * @return pointer to SceProfileInfo data + */ + const SceClientHelper::SceProfileInfo* getProfileInfo(); + + /** + * @brief Destructor in charge of performing RAII scoped data + * management and freeing allocated memory by calling SceFreeMemory() + */ + ~SceProfileData(); + + /** + * @brief The SCE RPC protocol IDL uses DWORDs to transport integer values. + * Some policies in the SCE protocol use -1 to indicate a maximum value. + * This helper mimics the behavior of the secedit binary and normalizes + * any negative value stored in these DWORD password-policy fields to -1. + * + * @return normalized negative integer + */ + static int getNormalizedInt(const DWORD& input); + + private: + PVOID data_{nullptr}; +}; + +} // namespace tables +} // namespace osquery diff --git a/osquery/tables/system/windows/shimcache.cpp b/osquery/tables/system/windows/shimcache.cpp index 9a107a80fbd..8e6feeb14d6 100644 --- a/osquery/tables/system/windows/shimcache.cpp +++ b/osquery/tables/system/windows/shimcache.cpp @@ -31,8 +31,11 @@ const std::string kWin10CreatorStart = "34"; const std::string kWin8110ShimcacheDelimiter = "31307473"; // Shimcache can be in multiple ControlSets (ControlSet001, ControlSet002, etc) +// We are only going to check CurrentControlSet, which is symlinked to the +// active ControlSet + const std::string kShimcacheControlset = - "HKEY_LOCAL_MACHINE\\SYSTEM\\%ControlSet%\\Control\\Session " + "HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Session " "Manager\\AppCompatCache"; struct ShimcacheData { diff --git a/osquery/utils/aws/aws_util.cpp b/osquery/utils/aws/aws_util.cpp index d048751cf4c..838ea52aace 100644 --- a/osquery/utils/aws/aws_util.cpp +++ b/osquery/utils/aws/aws_util.cpp @@ -215,17 +215,38 @@ std::shared_ptr OsqueryHttpClient::MakeRequest( auto response = std::make_shared(request_ptr); http::Response resp; + if (osquery::shutdownRequested()) { + /* This is technically a client error, but some AWS requests + consider any client error as retryable. + Since we want to stop the retries, + we instead use a non-retryable response code, + even though we did not receive any response. + We also log the reason for the failure to provide more information */ + + response->SetResponseCode(Aws::Http::HttpResponseCode::BLOCKED); + LOG(WARNING) << "An AWS request has been blocked since a shutdown has been " + "requested"; + + return response; + } + + try { switch (request.GetMethod()) { case Aws::Http::HttpMethod::HTTP_GET: resp = client.get(req); break; - case Aws::Http::HttpMethod::HTTP_POST: - resp = client.post(req, body, request.GetContentType()); + case Aws::Http::HttpMethod::HTTP_POST: { + std::string content_type = + request.HasContentType() ? request.GetContentType() : ""; + resp = client.post(req, body, content_type); break; - case Aws::Http::HttpMethod::HTTP_PUT: - resp = client.put(req, body, request.GetContentType()); + } + case Aws::Http::HttpMethod::HTTP_PUT: { + std::string content_type = + request.HasContentType() ? 
request.GetContentType() : ""; + resp = client.put(req, body, content_type); break; + } case Aws::Http::HttpMethod::HTTP_HEAD: resp = client.head(req); break; @@ -268,8 +289,8 @@ std::shared_ptr OsqueryHttpClient::MakeRequest( request.GetMethod()) << " request to URL (" << url << "): " << e.what(); - response->SetResponseCode( - static_cast(resp.status())); + response->SetClientErrorType(Aws::Client::CoreErrors::NETWORK_CONNECTION); + response->SetClientErrorMessage(e.what()); } return response; @@ -335,11 +356,28 @@ OsquerySTSAWSCredentialsProvider::GetAWSCredentials() { access_key_id_ = sts_result.GetCredentials().GetAccessKeyId(); secret_access_key_ = sts_result.GetCredentials().GetSecretAccessKey(); session_token_ = sts_result.GetCredentials().GetSessionToken(); + // Calculate when our credentials will expire. token_expire_time_ = current_time + FLAGS_aws_sts_timeout; } else { - LOG(ERROR) << "Failed to create STS temporary credentials, error: " - << sts_outcome.GetError().GetMessage(); + const auto& error = sts_outcome.GetError(); + + std::stringstream error_message; + + error_message << static_cast(error.GetErrorType()); + + if (error.GetResponseCode() != + Aws::Http::HttpResponseCode::REQUEST_NOT_MADE) { + error_message << ", HTTP response code: " + << static_cast(error.GetResponseCode()); + } + + if (!error.GetMessage().empty()) { + error_message << ", error message: " << error.GetMessage(); + } + + LOG(ERROR) << "Failed to create STS temporary credentials, error type: " + << error_message.rdbuf(); + } + } + return Aws::Auth::AWSCredentials( diff --git a/osquery/utils/system/CMakeLists.txt b/osquery/utils/system/CMakeLists.txt index 9002bfd863f..f1fd44a5c0b 100644 --- a/osquery/utils/system/CMakeLists.txt +++ b/osquery/utils/system/CMakeLists.txt @@ -28,6 +28,10 @@ function(osqueryUtilsSystemMain) generateOsqueryUtilsSystem() generateOsqueryUtilsSystemUptime() + if(DEFINED PLATFORM_LINUX) + generateOsqueryUtilsSystemBoottime() + endif() + if(DEFINED PLATFORM_WINDOWS) generateOsqueryUtilsSystemUsersGroups() endif() @@ -248,6 +252,30 @@ function(generateOsqueryUtilsSystemUptime) generateIncludeNamespace(osquery_utils_system_uptime "osquery/utils/system" "FILE_ONLY" ${public_header_files}) endfunction() +function(generateOsqueryUtilsSystemBoottime) + + if(DEFINED PLATFORM_LINUX) + set(source_files + linux/boottime.cpp + ) + endif() + + add_osquery_library(osquery_utils_system_boottime EXCLUDE_FROM_ALL + ${source_files} + ) + + target_link_libraries(osquery_utils_system_boottime PUBLIC + osquery_cxx_settings + osquery_filesystem + ) + + set(public_header_files + boottime.h + ) + + generateIncludeNamespace(osquery_utils_system_boottime "osquery/utils/system" "FILE_ONLY" ${public_header_files}) +endfunction() + function(generateOsqueryUtilsSystemUsersGroups) add_osquery_library(osquery_utils_system_usersgroupshelpers EXCLUDE_FROM_ALL windows/users_groups_helpers.cpp diff --git a/osquery/utils/system/boottime.h b/osquery/utils/system/boottime.h new file mode 100644 index 00000000000..e2a3e31214d --- /dev/null +++ b/osquery/utils/system/boottime.h @@ -0,0 +1,16 @@ +/** + * Copyright (c) 2014-present, The osquery authors + * + * This source code is licensed as defined by the LICENSE file found in the + * root directory of this source tree. 
+ * + * SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) + */ + +#include + +#include + +namespace osquery { +std::uint64_t getBootTime(); +} // namespace osquery diff --git a/osquery/utils/system/linux/boottime.cpp b/osquery/utils/system/linux/boottime.cpp new file mode 100644 index 00000000000..0fe424f2db7 --- /dev/null +++ b/osquery/utils/system/linux/boottime.cpp @@ -0,0 +1,50 @@ +/** + * Copyright (c) 2014-present, The osquery authors + * + * This source code is licensed as defined by the LICENSE file found in the + * root directory of this source tree. + * + * SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) + */ + +#include +#include +#include + +#include +#include + +namespace osquery { +std::uint64_t getBootTime() { + std::string content; + auto status = readFile("/proc/stat", content); + + if (!status.ok()) { + return 0; + } + + auto btime_start = content.find("btime"); + + if (btime_start == std::string::npos) { + return 0; + } + + btime_start += 6; + + auto btime_end = content.find("\n", btime_start); + + if (btime_end == std::string::npos) { + return 0; + } + + auto btime = content.substr(btime_start, btime_end - btime_start); + + auto btime_res = tryTo(btime); + + if (btime_res.isError()) { + return 0; + } + + return btime_res.take(); +} +} // namespace osquery diff --git a/packs/incident-response.conf b/packs/incident-response.conf index 3c1922ca8a5..5daba50c748 100644 --- a/packs/incident-response.conf +++ b/packs/incident-response.conf @@ -227,9 +227,9 @@ "process_memory": { "query" : "select * from process_memory_map;", "interval" : "86400", - "platform" : "linux", + "platform" : "posix", "version" : "1.4.5", - "description" : "Retrieves the memory map per process in the target Linux system.", + "description" : "Retrieves the memory map per process in the target Linux or macOS system.", "value" : "Ability to compare with known good. Identify mapped regions corresponding with or containing injected code." }, "arp_cache": { diff --git a/plugins/logger/aws_log_forwarder.h b/plugins/logger/aws_log_forwarder.h index f0e571cca30..4380194f781 100644 --- a/plugins/logger/aws_log_forwarder.h +++ b/plugins/logger/aws_log_forwarder.h @@ -83,8 +83,9 @@ class AwsLogForwarder : public BufferedLogForwarder { } std::stringstream output; - output << name_ << ": The following log records have been discarded " - "because they were too big:\n"; + output << name_ + << ": The following log records have been discarded " + "because they were too big:\n"; for (const auto& record : discarded_records) { output << record << "\n"; @@ -132,8 +133,9 @@ class AwsLogForwarder : public BufferedLogForwarder { if (!status.ok()) { // To achieve behavior parity with TLS logger plugin, skip non-JSON // content - LOG(ERROR) << name_ << ": The following log record has been discarded " - "because it was not in JSON format: " + LOG(ERROR) << name_ + << ": The following log record has been discarded " + "because it was not in JSON format: " << record; continue; @@ -198,6 +200,12 @@ class AwsLogForwarder : public BufferedLogForwarder { (retry == 0 ? 
0 : base_retry_delay) + (retry * 1000U); if (retry_delay != 0) { pause(std::chrono::milliseconds(retry_delay)); + + /* Stop retrying, osquery should shutdown; we fail the send + so that it's attempted again at the next start */ + if (interrupted()) { + return false; + } } // Attempt to send the batch @@ -279,6 +287,15 @@ class AwsLogForwarder : public BufferedLogForwarder { for (auto batch_it = batch_list.begin(); batch_it != batch_list.end();) { auto& batch = *batch_it; if (!sendBatch(batch, status_output)) { + /* Since we are shutting down, we don't want to count this send failure + as a real error; returning with failure here will make + the BufferedLogForwarder try to send this batch again + when osquery starts again */ + if (interrupted()) { + return Status::failure( + "Interrupted sending log batch due to osquery shutdown"); + } + // We couldn't write some of the records; log them locally so that the // administrator will at least be able to inspect them dumpBatchToErrorLog(batch); @@ -339,4 +356,4 @@ class AwsLogForwarder : public BufferedLogForwarder { /// Service endpoint override std::string endpoint_override_; }; -} +} // namespace osquery diff --git a/plugins/logger/buffered.cpp b/plugins/logger/buffered.cpp index b09d020eeb9..37ecbfb6887 100644 --- a/plugins/logger/buffered.cpp +++ b/plugins/logger/buffered.cpp @@ -73,6 +73,10 @@ void BufferedLogForwarder::check() { status = send(results, "result"); if (!status.ok()) { VLOG(1) << "Error sending results to logger: " << status.getMessage(); + + if (interrupted()) { + return; + } } else { // Clear the results logs once they were sent. iterate(indexes, ([this](std::string& index) { @@ -88,6 +92,10 @@ void BufferedLogForwarder::check() { status = send(statuses, "status"); if (!status.ok()) { VLOG(1) << "Error sending status to logger: " << status.getMessage(); + + if (interrupted()) { + return; + } } else { // Clear the status logs once they were sent. iterate(indexes, ([this](std::string& index) { @@ -288,4 +296,4 @@ Status BufferedLogForwarder::deleteValueWithCount(const std::string& domain, } return status; } -} +} // namespace osquery diff --git a/specs/CMakeLists.txt b/specs/CMakeLists.txt index ed49643c6b7..65915dd6827 100644 --- a/specs/CMakeLists.txt +++ b/specs/CMakeLists.txt @@ -302,6 +302,7 @@ function(generateNativeTables) "windows/shellbags.table:windows" "windows/prefetch.table:windows" "windows/tpm_info.table:windows" + "windows/security_profile_info.table:windows" "yara/yara_events.table:linux,macos" "yara/yara.table:linux,macos,windows" ) diff --git a/specs/darwin/es_process_events.table b/specs/darwin/es_process_events.table index fe620b5a0dc..39625dd07cd 100644 --- a/specs/darwin/es_process_events.table +++ b/specs/darwin/es_process_events.table @@ -27,6 +27,7 @@ schema([ Column("time", BIGINT, "Time of execution in UNIX time"), Column("event_type", TEXT, "Type of EndpointSecurity event"), Column("eid", TEXT, "Event ID", hidden=True), + Column("codesigning_flags", TEXT, "Codesigning flags matching one of these options, in a comma separated list: NOT_VALID, ADHOC, NOT_RUNTIME, INSTALLER. 
See kern/cs_blobs.h in XNU for descriptions."), ]) attributes(event_subscriber=True) implementation("events/darwin/es_process_events@es_process_events::genTable") diff --git a/specs/darwin/keychain_items.table b/specs/darwin/keychain_items.table index 0153ec54480..cb5f421dc91 100644 --- a/specs/darwin/keychain_items.table +++ b/specs/darwin/keychain_items.table @@ -5,7 +5,7 @@ schema([ Column("description", TEXT, "Optional item description"), Column("comment", TEXT, "Optional keychain comment"), Column("account", TEXT, "Optional item account"), - Column("created", TEXT, "Data item was created"), + Column("created", TEXT, "Date item was created"), Column("modified", TEXT, "Date of last modification"), Column("type", TEXT, "Keychain item type (class)"), Column("path", TEXT, "Path to keychain containing item", additional=True), diff --git a/specs/windows/security_profile_info.table b/specs/windows/security_profile_info.table new file mode 100644 index 00000000000..0d36a4fe091 --- /dev/null +++ b/specs/windows/security_profile_info.table @@ -0,0 +1,28 @@ +table_name("security_profile_info") +description("Information on the security profile of a given system by listing the system Account and Audit Policies. This table mimics the exported securitypolicy output from the secedit tool.") +schema([ + Column("minimum_password_age", INTEGER, "Determines the minimum number of days that a password must be used before the user can change it"), + Column("maximum_password_age", INTEGER, "Determines the maximum number of days that a password can be used before the client requires the user to change it"), + Column("minimum_password_length", INTEGER, "Determines the least number of characters that can make up a password for a user account"), + Column("password_complexity", INTEGER, "Determines whether passwords must meet a series of strong-password guidelines"), + Column("password_history_size", INTEGER, "Number of unique new passwords that must be associated with a user account before an old password can be reused"), + Column("lockout_bad_count", INTEGER, "Number of failed logon attempts after which a user account MUST be locked out"), + Column("logon_to_change_password", INTEGER, "Determines if logon session is required to change the password"), + Column("force_logoff_when_expire", INTEGER, "Determines whether SMB client sessions with the SMB server will be forcibly disconnected when the client's logon hours expire"), + Column("new_administrator_name", TEXT, "Determines the name of the Administrator account on the local computer"), + Column("new_guest_name", TEXT, "Determines the name of the Guest account on the local computer"), + Column("clear_text_password", INTEGER, "Determines whether passwords MUST be stored by using reversible encryption"), + Column("lsa_anonymous_name_lookup", INTEGER, "Determines if an anonymous user is allowed to query the local LSA policy"), + Column("enable_admin_account", INTEGER, "Determines whether the Administrator account on the local computer is enabled"), + Column("enable_guest_account", INTEGER, "Determines whether the Guest account on the local computer is enabled"), + Column("audit_system_events", INTEGER, "Determines whether the operating system MUST audit System Change, System Startup, System Shutdown, Authentication Component Load, and Loss or Excess of Security events"), + Column("audit_logon_events", INTEGER, "Determines whether the operating system MUST audit each instance of a user attempt to log on or log off this computer"), + Column("audit_object_access", 
INTEGER, "Determines whether the operating system MUST audit each instance of user attempts to access a non-Active Directory object that has its own SACL specified"), + Column("audit_privilege_use", INTEGER, "Determines whether the operating system MUST audit each instance of user attempts to exercise a user right"), + Column("audit_policy_change", INTEGER, "Determines whether the operating system MUST audit each instance of user attempts to change user rights assignment policy, audit policy, account policy, or trust policy"), + Column("audit_account_manage", INTEGER, "Determines whether the operating system MUST audit each event of account management on a computer"), + Column("audit_process_tracking", INTEGER, "Determines whether the operating system MUST audit process-related events"), + Column("audit_ds_access", INTEGER, "Determines whether the operating system MUST audit each instance of user attempts to access an Active Directory object that has its own system access control list (SACL) specified"), + Column("audit_account_logon", INTEGER, "Determines whether the operating system MUST audit each time this computer validates the credentials of an account"), +]) +implementation("security_profile_info@genSecurityProfileInformation") diff --git a/tests/integration/tables/CMakeLists.txt b/tests/integration/tables/CMakeLists.txt index ed4311656fa..6db849ad5b2 100644 --- a/tests/integration/tables/CMakeLists.txt +++ b/tests/integration/tables/CMakeLists.txt @@ -323,6 +323,7 @@ function(generateTestsIntegrationTablesTestsTest) hvci_status.cpp yara.cpp tpm_info.cpp + security_profile_info.cpp ) list(APPEND source_files ${platform_source_files}) diff --git a/tests/integration/tables/security_profile_info.cpp b/tests/integration/tables/security_profile_info.cpp new file mode 100644 index 00000000000..823a579444a --- /dev/null +++ b/tests/integration/tables/security_profile_info.cpp @@ -0,0 +1,62 @@ +/** + * Copyright (c) 2014-present, The osquery authors + * + * This source code is licensed as defined by the LICENSE file found in the + * root directory of this source tree. 
+ * + * SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) + */ + +// Sanity check integration test for security_profile_info +// Spec file: specs/windows/security_profile_info.table + +#include +#include + +namespace osquery { +namespace table_tests { + +class WindowsSecurityProfileTests : public testing::Test { + protected: + void SetUp() override { + setUpEnvironment(); + } +}; + +TEST_F(WindowsSecurityProfileTests, test_sanity_profile_info) { + // Sanity check to ensure that test only runs if this is not a WoW64 process + if (!osquery::tables::SceClientHelper::isWow64Process()) { + auto const data = execute_query("select * from security_profile_info"); + + ValidationMap rowMap = { + {"minimum_password_age", IntType | NonEmpty | NonNull}, + {"maximum_password_age", IntType | NonEmpty | NonNull}, + {"minimum_password_length", IntType | NonEmpty | NonNull}, + {"password_complexity", IntType | NonEmpty | NonNull}, + {"password_history_size", IntType | NonEmpty | NonNull}, + {"lockout_bad_count", IntType | NonEmpty | NonNull}, + {"logon_to_change_password", IntType | NonEmpty | NonNull}, + {"force_logoff_when_expire", IntType | NonEmpty | NonNull}, + {"new_administrator_name", EmptyOk}, + {"new_guest_name", EmptyOk}, + {"clear_text_password", IntType | NonEmpty | NonNull}, + {"lsa_anonymous_name_lookup", IntType | NonEmpty | NonNull}, + {"enable_admin_account", IntType | NonEmpty | NonNull}, + {"enable_guest_account", IntType | NonEmpty | NonNull}, + {"audit_system_events", IntType | NonEmpty | NonNull}, + {"audit_logon_events", IntType | NonEmpty | NonNull}, + {"audit_object_access", IntType | NonEmpty | NonNull}, + {"audit_privilege_use", IntType | NonEmpty | NonNull}, + {"audit_policy_change", IntType | NonEmpty | NonNull}, + {"audit_account_manage", IntType | NonEmpty | NonNull}, + {"audit_process_tracking", IntType | NonEmpty | NonNull}, + {"audit_ds_access", IntType | NonEmpty | NonNull}, + {"audit_account_logon", IntType | NonEmpty | NonNull}, + }; + + validate_rows(data, rowMap); + } +} + +} // namespace table_tests +} // namespace osquery diff --git a/tools/ci/scripts/cve/osquery/__init__.py b/tools/ci/scripts/cve/osquery/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tools/ci/scripts/cve/osquery/github_api.py b/tools/ci/scripts/cve/osquery/github_api.py new file mode 100755 index 00000000000..0a38b2bf106 --- /dev/null +++ b/tools/ci/scripts/cve/osquery/github_api.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2014-present, The osquery authors +# +# This source code is licensed as defined by the LICENSE file found in the +# root directory of this source tree. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) + +from time import sleep +import requests +import time +from datetime import datetime, timedelta +from enum import Enum + + +class IssueCreationError(Exception): + pass + + +class IssuesListingError(Exception): + pass + + +class GithubIssueState(Enum): + Open = "open" + Closed = "closed" + All = "all" + + +class GithubAPI: + def __init__(self, source_repo: str, dest_repo: str, github_token: str, debug=False): + self.github_token_ = github_token + self.source_repo_url = "https://api.github.com/repos/%s/issues" % source_repo + self.dest_repo_url = "https://api.github.com/repos/%s/issues" % dest_repo + self.last_request_time = 0 + self.debug = debug + + def debugPrint(self, message: str): + if self.debug: + print(f"DEBUG: {message}") + + def makePostRequest(self, url: str, data: dict): + time_passed = (time.time_ns() - self.last_request_time) / (1000 * 1000) + + # We should in theory detect if the answer contains a Retry-After header, + # because then that should be the delay to use. + # We do not, because this is currently only used for creating issues, + # and the Retry-After header is not sent in that case. + if time_passed < 5000: + sleep((5000 - time_passed) / 1000.0) + + self.last_request_time = time.time_ns() + + response = requests.post( + url, + json=data, + headers={ + "Accept": "application/vnd.github.v3+json", + "Authorization": "token %s" % self.github_token_, + }, + ) + + return response + + def makeGetRequest(self, url, params: dict): + time_passed = (time.time_ns() - self.last_request_time) / (1000 * 1000) + + # Limit to 1 request every 5 seconds + if time_passed < 5000: + sleep((5000 - time_passed) / 1000.0) + + self.last_request_time = time.time_ns() + + response = requests.get( + url, + params=params, + auth=("Bearer", self.github_token_), + headers={ + "Accept": "application/vnd.github.v3+json", + }, + ) + + return response + + def createIssue(self, title: str, content: str, labels: list): + + data = { + "title": title, + "body": content, + "labels": labels, + } + + attempts = 0 + while attempts < 3: + attempts += 1 + + self.debugPrint(f"Trying to open issue with title \"{data['title']}\"") + + response = self.makePostRequest(self.dest_repo_url, data) + + if response.status_code != 201: + self.debugPrint( + f"Request to {self.dest_repo_url} to create issue with title" + f" {data['title']} failed with {response.status_code}, reason:" + f" {response.reason + ' ' + response.text if response.text else response.reason}" + ) + sleep(attempts * 5) + continue + + return response + + raise IssueCreationError( + "Failed to open issue with status code: %s and reason: %s" + % (response.status_code, response.reason) + ) + + def getRecentOpenIssues( + self, creator="github-actions[bot]", state=GithubIssueState.Open, labels=[] + ) -> "list[list[dict]]": + # Query only issues that are open and have been created or updated in the last 6 months. + # Issues older than that are most likely not interesting, + # because a release fixing them, or an entry ignoring the CVEs, + # has hopefully been created. 
+ since = datetime.utcnow() - timedelta(days=180) + + params = { + "creator": creator, + "since": since.strftime("%Y-%m-%dT%H:%M:%SZ"), + "per_page": 100, + "page": 1, + "state": state.value, + } + + if len(labels) > 0: + params["labels"] = labels + + issues_batches = [] + listed_all_pages = False + + # Github returns max 100 elements per page/response, + # so we loop until we get them all + while not listed_all_pages: + attempts = 0 + while attempts < 3: + self.debugPrint( + f"Trying to request issues from {self.source_repo_url}, page {params['page']}" + ) + + response = self.makeGetRequest(self.source_repo_url, params) + + if response.status_code != 200: + + self.debugPrint( + f"Request to {self.source_repo_url} to list issues at page {params['page']}" + f" failed with {response.status_code}, reason:" + f" {response.reason + ' ' + response.text if response.text else response.reason}" + ) + attempts += 1 + sleep(attempts * 5) + continue + + json_response = response.json() + issues_count = len(json_response) + + # If the answer is empty, the previous one was the last page + if issues_count == 0: + listed_all_pages = True + break + + issues_batches.append(json_response) + + # If we got fewer issues than we requested per page, + # it means there's no other page and we can end early + if issues_count < params["per_page"]: + listed_all_pages = True + else: + params["page"] += 1 + + break + + if attempts == 3: + reason = response.reason + " " + response.text if response.text else response.reason + raise IssuesListingError( + f"Failed to list issues with status code: {response.status_code}" + f" and reason: {reason}" + ) + + return issues_batches diff --git a/tools/ci/scripts/cve/osquery/manifest_api.py b/tools/ci/scripts/cve/osquery/manifest_api.py new file mode 100644 index 00000000000..b63ac9117bb --- /dev/null +++ b/tools/ci/scripts/cve/osquery/manifest_api.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2014-present, The osquery authors +# +# This source code is licensed as defined by the LICENSE file found in the +# root directory of this source tree. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) + +import sys + +# These libraries are not Github submodules, so there's no commit to check +libraries_without_commit = ["openssl"] + +# There are no CPEs for these libraries +libraries_without_cpe = [ + "ebpfpub", + "gflags", + "glog", + "librdkafka", + "linenoise-ng", + "aws-c-auth", + "aws-c-cal", + "aws-c-common", + "aws-c-compression", + "aws-c-event-stream", + "aws-c-http", + "aws-c-io", + "aws-c-mqtt", + "aws-c-s3", + "aws-checksums", + "aws-crt-cpp", + "aws-sdk-cpp", + "s2n", +] + +# These libraries are excluded from the up-to-date version check +libraries_to_ignore = ["googletest"] + +# These libraries have no version; the date will be checked instead +libraries_without_version = ["gnulib"] + + +def print_err(message: str): + print("Error: " + message, file=sys.stderr) + + +def validateManifestFormat(manifest: dict) -> bool: + + fields_no_commit = [("product", str), ("vendor", str), ("version", str), ("ignored-cves", list)] + fields_no_version = [("product", str), ("vendor", str), ("date", str), ("ignored-cves", list)] + fields_no_cpe = [("vendor", str), ("commit", str)] + all_fields = fields_no_commit + [("commit", str)] + + for library_name, library_metadata in manifest.items(): + + if library_name in libraries_without_cpe: + fields_to_check = fields_no_cpe + elif library_name in libraries_without_commit: + fields_to_check = fields_no_commit + elif library_name in libraries_without_version: + fields_to_check = fields_no_version + else: + fields_to_check = all_fields + + if library_name.strip() == "": + print_err("Manifest contains a library without a name") + return False + + for field_name, field_type in fields_to_check: + if field_name not in library_metadata: + print_err(f"Library {library_name}, missing {field_name} field") + return False + + if not isinstance(library_metadata[field_name], field_type): + print_err( + f"Library {library_name}, the {field_name} field is not of type {field_type}" + ) + return False + + if field_type == str: + if library_metadata[field_name].strip() == "": + print_err(f"Library {library_name}, the {field_name} field is empty") + return False + + return True + + +def validateLibrariesVersions( + manifest: dict, versions: "list[tuple]", commits: "list[tuple]" +) -> bool: + + manifest_is_valid = True + + # Remove ignored libraries + versions = [v for v in versions if not v[0] in libraries_to_ignore] + + # First we search for libraries that are present + for library_name, current_library_version in versions: + + if library_name not in manifest: + manifest_is_valid = False + print_err(f"Library {library_name} is missing from the manifest, please add it") + continue + + manifest_library_version = manifest[library_name]["version"] + + if current_library_version != manifest_library_version: + manifest_is_valid = False + print_err( + f"Library {library_name} has an outdated version in the manifest. Expected" + f" {current_library_version}, found {manifest_library_version}. 
Update the manifest" + ) + + # Remove ignored libraries + commits = [c for c in commits if not c[0] in libraries_to_ignore] + + for library_name, current_library_commit in commits: + + if library_name not in manifest: + manifest_is_valid = False + print_err(f"Library {library_name} is missing from the manifest, please add it") + continue + + manifest_library_commit = manifest[library_name]["commit"] + + if current_library_commit != manifest_library_commit: + manifest_is_valid = False + print_err( + f"Library {library_name} has an outdated commit in the manifest. Expected" + f" {current_library_commit}, found {manifest_library_commit}. Please update both" + " the commit and the version when applicable" + ) + + manifest_libraries_names = set(list(zip(*manifest.items()))[0]) + + submodule_names = set(list(zip(*commits))[0]) + externallibs_names = set(list(zip(*versions))[0]) + + all_detected_libs = set.union(submodule_names, externallibs_names) + + diff = list(manifest_libraries_names - all_detected_libs) + + if len(diff) > 0: + print("Additional libraries found in the manifest that can be removed:") + for library_name in diff: + print(library_name) + + return manifest_is_valid diff --git a/tools/ci/scripts/cve/requirements.txt b/tools/ci/scripts/cve/requirements.txt new file mode 100644 index 00000000000..1293e3f20c6 --- /dev/null +++ b/tools/ci/scripts/cve/requirements.txt @@ -0,0 +1,2 @@ +nvdlib==0.6.1 +pygit2==1.10.1 diff --git a/tools/ci/scripts/cve/third_party_libraries_cves_scanner.py b/tools/ci/scripts/cve/third_party_libraries_cves_scanner.py new file mode 100755 index 00000000000..73681f5f73a --- /dev/null +++ b/tools/ci/scripts/cve/third_party_libraries_cves_scanner.py @@ -0,0 +1,390 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2014-present, The osquery authors +# +# This source code is licensed as defined by the LICENSE file found in the +# root directory of this source tree. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only) + +import json +import argparse +import os +import nvdlib +from osquery.github_api import GithubAPI +from osquery.manifest_api import validateManifestFormat +from osquery.manifest_api import libraries_without_cpe +import re +import sys +import time +from datetime import datetime, timedelta, date + +match_cve = re.compile(" (CVE-.*-.*)$") + +DEBUG = False + + +def parseCVEFromTitle(title: str): + match = match_cve.search(title) + if match: + return match.group(1) + else: + return None + + +def print_err(message: str): + print("Error: " + message, file=sys.stderr) + + +def getCVES( + vendor: str, product: str, api_key, interval: int, library_name: str, version=None, date=None +): + + attempt = 0 + max_attempts = 3 + nist_cves = [] + global DEBUG + now = datetime.now() + + while attempt < max_attempts: + try: + if version is not None: + nist_cves = nvdlib.searchCVE( + cpeMatchString="cpe:2.3:a:%s:%s:%s:*:*:*:*:*:*:*" % (vendor, product, version), + sortPublished=True, + key=api_key, + ) + break + else: + # Some libraries have a version that corresponds to a date, + # so we search for CVEs published in the window of time + # between the library last commit date and the current date, + # 120 days at a time, since the NVD API is limited + + if attempt == 0: + start_date = datetime.strptime(date, "%Y-%m-%d") + end_date = start_date + + while end_date < now: + end_date = start_date + timedelta(days=120) + + cves = nvdlib.searchCVE( + cpeMatchString="cpe:2.3:a:%s:%s:*:*:*:*:*:*:*:*" % (vendor, product), + sortPublished=True, + pubStartDate=start_date, + pubEndDate=end_date, + key=api_key, + ) + + start_date = end_date + nist_cves.extend(cves) + + break + + except Exception as e: + if DEBUG: + print(f"Error searching CVE for library {library_name}: {e}. Retrying") + + attempt += 1 + if attempt == max_attempts: + print(f"Failed to get CVEs for product: {library_name}. Skipping") + else: + time.sleep(interval) + interval *= 2 + + continue + + return (nist_cves, attempt > 0) + + +class CVE: + def __init__(self, name, severity, description, url) -> None: + self.name = name + self.severity = severity + self.description = description + self.url = url + + +parser = argparse.ArgumentParser() +parser.add_argument( + "--manifest", + type=str, + required=True, + help="Path to the third party libraries JSON manifest", +) + +parser.add_argument( + "--source-repo", + type=str, + help=( + "Specifies where the already opened issues should be searched. Used for testing the script." + " Defaults to osquery/osquery." + ), + default="osquery/osquery", +) + + +parser.add_argument( + "--dest-repo", + type=str, + help=( + "Specifies where the issues should be opened. Used for testing the script. Defaults to" + " osquery/osquery." 
+ ), + default="osquery/osquery", +) + +parser.add_argument( + "--create_issues", + help="When enabled the script will also create a Github issue for each new CVE found", + required=False, + action="store_true", + default=False, +) + +parser.add_argument( + "--github-token", + required=False, + type=str, + help="Optional Github token to open issues about the detected CVEs", +) + +parser.add_argument( + "--api-key", + required=False, + type=str, + help="Optional API key for the NVD NIST APIs, to reduce rate limiting", +) + +parser.add_argument( + "--debug", action="store_true", default=False, required=False, help="Enable debug prints" +) + +parser.add_argument( + "--libraries", + required=False, + help="List of comma separated library names to check CVEs of", + type=str, +) + +args = parser.parse_args() + +if args.manifest == "": + print_err("No manifest to parse, please provide a path to the json file") + exit(1) + +if not os.path.exists(args.manifest): + print_err("The provided manifest path doesn't exist. Please provide a valid path") + exit(1) + +api_key = args.api_key +if args.api_key is None: + api_key = os.environ.get("NIST_API_KEY") + if api_key is None: + # Needed for the NVD API later, which uses boolean values for a missing value + api_key = False + +base_nvd_retry_interval = 1 if api_key else 6 +nvd_interval = base_nvd_retry_interval + +if args.debug: + DEBUG = True + +libraries = {} + +with open(args.manifest, "r") as manifest_file: + libraries = json.load(manifest_file) + +if not validateManifestFormat(libraries): + print_err("The manifest format is not valid, interrupting") + exit(1) + +cves_per_library = [] +all_ignored_cves = [] +libraries_to_check = [] + +# If not empty, only check the libraries provided in this list +if args.libraries: + libraries_to_check.extend(args.libraries.split(",")) + +# Loop through all the libraries in the manifest +for library_name, library_metadata in libraries.items(): + + # Some libraries do not have CPEs so we cannot query them + if library_name in libraries_without_cpe: + continue + + # Skip if we want to check a specific set of libraries and this is not in it + if len(libraries_to_check) > 0: + if library_name not in libraries_to_check: + continue + + product = library_metadata["product"] + + if "version" in library_metadata: + version = library_metadata["version"] + print(f"Verifying CVEs for library: {library_name} {version}") + date = None + else: + date = library_metadata["date"] + print(f"Verifying CVEs for library: {library_name} {date}") + version = None + + vendor = library_metadata["vendor"] + ignored_cves = library_metadata["ignored-cves"] + all_ignored_cves.extend(ignored_cves) + + # The underlying nvdlib library will wait around base_nvd_retry_interval already, + # which is the NVD API required wait time, but sometimes that's not enough. + # We sleep increasingly more when we are downloading CVEs for a specific library + # and getting errors, but at the next library we would start again + # with a base_nvd_retry_interval sleep; that might not be enough again to avoid + # further errors later. + # So we add some more sleeping between libraries if the previous download had errors. 
+ nvd_interval_diff = nvd_interval - base_nvd_retry_interval + if nvd_interval_diff > 0: + time.sleep(nvd_interval_diff) + + if version: + nist_cves, had_errors = getCVES( + vendor, product, api_key, nvd_interval, library_name, version=version + ) + else: + nist_cves, had_errors = getCVES( + vendor, product, api_key, nvd_interval, library_name, date=date + ) + + # We assume errors are due to rate limiting and double the time we wait for each CVE query + if had_errors: + nvd_interval *= 2 + elif nvd_interval_diff > 0: + # But if we are successful we slowly go down again + nvd_interval = max(nvd_interval - 1, base_nvd_retry_interval) + + if len(nist_cves) == 0: + continue + + cves = [] + + # Sort by id, so the most recent CVEs are on top + nist_cves.sort(key=lambda cve: cve.id, reverse=True) + + for cve in nist_cves: + if hasattr(cve, "v3severity"): + severity = cve.v3severity + elif hasattr(cve, "v2severity"): + severity = cve.v2severity + else: + severity = "UNKNOWN" + + cves.append(CVE(cve.id, severity, cve.cve.description.description_data[0].value, cve.url)) + + cves_per_library.append({"name": library_name, "cves": cves}) + +if len(cves_per_library) == 0: + exit(0) + +# Always print the cves that have been found +print("\nFound the following CVEs:") + +libraries_with_cves = [] +libraries_with_ignored_cves = [] + +# Split ignored CVEs from still valid CVEs and organize both for display +for library in cves_per_library: + cves = library["cves"] + + cves_messages = [] + ignored_cves_messages = [] + + for cve in cves: + if cve.name not in all_ignored_cves: + cves_messages.append(f"\t Name: {cve.name}\tSeverity: {cve.severity}") + else: + ignored_cves_messages.append(f"\t Name: {cve.name}\tSeverity: {cve.severity}") + + if len(cves_messages) > 0: + libraries_with_cves.append((library["name"], cves_messages)) + + if len(ignored_cves_messages) > 0: + libraries_with_ignored_cves.append((library["name"], ignored_cves_messages)) + +for library in libraries_with_cves: + print(f"Library: {library[0]}") + for cve in library[1]: + print(cve) + print() + +print("\nThe following CVEs have been ignored:") + +for library in libraries_with_ignored_cves: + print(f"Library: {library[0]}") + for cve in library[1]: + print(cve) + print() + +if args.create_issues: + + github_token = None + if not args.github_token: + github_token = os.environ.get("GITHUB_TOKEN") + + if github_token is None: + print("Missing github token") + exit(1) + else: + github_token = args.github_token + + github_api = GithubAPI(args.source_repo, args.dest_repo, github_token, DEBUG) + + print("Retrieving already opened issues") + issues_batches = github_api.getRecentOpenIssues(labels=["cve", "security", "libraries"]) + + # Process all opened issues and extract a list of CVEs that have been already reported + opened_cve_issues = set() + for batch in issues_batches: + for issue in batch: + if issue.get("pull_request") is not None: + continue + + cve_id = parseCVEFromTitle(issue["title"]) + + if cve_id is None: + continue + + opened_cve_issues.add(cve_id) + + # Open issues for each new CVE + errors = 0 + count = 0 + print("Opening issues for new CVEs") + for library in cves_per_library: + cves = library["cves"] + # We create the CVEs from older to newer + # so that the newer ones are on top of the Github issues list + for cve in reversed(cves): + if cve.name in opened_cve_issues or cve.name in all_ignored_cves: + continue + + library_name = library["name"] + + try: + issue_description = f"{cve.url}\n\n{cve.description}" + + 
+                github_api.createIssue(
+                    f"Library {library_name} has vulnerability {cve.name}",
+                    issue_description,
+                    [f"severity-{cve.severity.lower()}", "cve", "libraries", "security"],
+                )
+            except Exception as e:
+                print(f"Failed to create issue for library {library_name}: {e}")
+                errors += 1
+                continue
+
+            count += 1
+
+    if errors > 0:
+        print(f"Done. Opened {count} new issues; {errors} could not be opened")
+        exit(1)
+    else:
+        print(f"Done. Opened {count} new issues")
diff --git a/tools/ci/scripts/cve/validate_manifest_libraries_versions.py b/tools/ci/scripts/cve/validate_manifest_libraries_versions.py
new file mode 100755
index 00000000000..c20932f4465
--- /dev/null
+++ b/tools/ci/scripts/cve/validate_manifest_libraries_versions.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3

+# Copyright (c) 2014-present, The osquery authors
+#
+# This source code is licensed as defined by the LICENSE file found in the
+# root directory of this source tree.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only)
+
+from osquery.manifest_api import validateLibrariesVersions, validateManifestFormat
+import argparse
+import pygit2
+import pathlib
+import re
+import json
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument("--repository", type=str, required=True)
+parser.add_argument("--manifest", type=str, required=True)
+
+args = parser.parse_args()
+repository = args.repository
+
+repo = pygit2.Repository(repository)
+
+submodule_relative_paths = repo.listall_submodules()
+
+submodules_name_and_commit = []
+
+for submodule_relative_path in submodule_relative_paths:
+    submodule_info = repo.lookup_submodule(submodule_relative_path)
+
+    submodule_path = pathlib.Path(submodule_relative_path)
+
+    submodule_name = (
+        submodule_path.name if submodule_path.name != "src" else submodule_path.parent.name
+    )
+
+    # When a submodule gets removed/unregistered from the repository,
+    # it's not necessarily removed from the git database,
+    # so it might still appear here.
+    # We check that the submodule path actually exists, relative to the repository root.
+    if not (pathlib.Path(repository) / submodule_relative_path).exists():
+        continue
+
+    submodules_name_and_commit.append((submodule_name, submodule_info.head_id.hex))
+
+external_libraries_name_and_version = []
+
+with open(repository + "/libraries/cmake/formula/openssl/CMakeLists.txt") as openssl_cmake:
+    match_version = re.compile('OPENSSL_VERSION[\\s]*"(.*)"')
+
+    for line in openssl_cmake:
+        match = match_version.search(line)
+        if match:
+            external_libraries_name_and_version.append(("openssl", match.group(1)))
+
+
+print("Found the following libraries in the repository:\n")
+
+for name, version in external_libraries_name_and_version:
+    print(f"Library {name}, version {version}")
+
+for name, commit in submodules_name_and_commit:
+    print(f"Library {name}, commit {commit}")
+
+print("\n\nValidating manifest format and libraries:\n")
+
+
+manifest_path = args.manifest
+
+manifest = {}
+with open(manifest_path, "r") as manifest_file:
+    manifest = json.load(manifest_file)
+
+if not validateManifestFormat(manifest):
+    exit(1)
+
+if not validateLibrariesVersions(
+    manifest, external_libraries_name_and_version, submodules_name_and_commit
+):
+    exit(1)
+
+print("Done. The manifest is valid")
diff --git a/tools/ci/scripts/macos/package_tests.sh b/tools/ci/scripts/macos/package_tests.sh
index 1e5c0ed7ca0..455d94509f4 100755
--- a/tools/ci/scripts/macos/package_tests.sh
+++ b/tools/ci/scripts/macos/package_tests.sh
@@ -29,7 +29,7 @@ main() {
   local launcher_path="${destination}/run.sh"
 
   printf '#!/usr/bin/env bash\n\n' > "${launcher_path}"
-  printf 'export _OSQUERY_PYTHON_INTERPRETER_PATH="$(which python3)"\n' >> "${launcher_path}"
+  printf 'export _OSQUERY_PYTHON_INTERPRETER_PATH="/usr/local/Frameworks/Python.framework/Versions/Current/bin/python3"\n' >> "${launcher_path}"
   printf 'export RUNNER_ROOT_FOLDER="$(pwd)"\n\n' >> "${launcher_path}"
   printf 'ctest --build-nocmake -V\n' >> "${launcher_path}"
   chmod 755 "${launcher_path}" || return 1