diff --git a/bsd/dev/monotonic.c b/bsd/dev/monotonic.c index 8a5d276e3..375a0ca4e 100644 --- a/bsd/dev/monotonic.c +++ b/bsd/dev/monotonic.c @@ -305,8 +305,8 @@ int thread_selfcounts(__unused struct proc *p, { switch (uap->type) { case 1: { - uint64_t counts[2] = {}; - uint64_t thread_counts[MT_CORE_NFIXED]; + uint64_t counts[2] = { 0 }; + uint64_t thread_counts[MT_CORE_NFIXED] = { 0 }; mt_cur_thread_fixed_counts(thread_counts); @@ -338,8 +338,8 @@ static int mt_sysctl SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg2) - uint64_t start[MT_CORE_NFIXED], end[MT_CORE_NFIXED]; - uint64_t counts[2] = {}; + uint64_t start[MT_CORE_NFIXED] = { 0 }, end[MT_CORE_NFIXED] = { 0 }; + uint64_t counts[2] = { 0 }; switch ((enum mt_sysctl)arg1) { case MT_SUPPORTED: diff --git a/bsd/kern/kern_descrip.c b/bsd/kern/kern_descrip.c index efc8616f7..d906cf440 100644 --- a/bsd/kern/kern_descrip.c +++ b/bsd/kern/kern_descrip.c @@ -4749,7 +4749,9 @@ fdexec(proc_t p, short flags, int self_exec) msleep(&p->p_fpdrainwait, &p->p_fdmlock, PRIBIO, "fpdrain", NULL); } - + if (fp->f_flags & FP_WAITEVENT) { + (void)waitevent_close(p, fp); + } closef_locked(fp, fp->f_fglob, p); fileproc_free(fp); diff --git a/bsd/kern/kern_event.c b/bsd/kern/kern_event.c index d8096ba03..df25f3112 100644 --- a/bsd/kern/kern_event.c +++ b/bsd/kern/kern_event.c @@ -8595,7 +8595,11 @@ kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize, goto out; } kq_ids = kalloc(bufsize); - assert(kq_ids != NULL); + if (!kq_ids) { + err = ENOMEM; + goto out; + } + bzero(kq_ids, bufsize); } kqhash_lock(p); @@ -8618,7 +8622,7 @@ kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize, if (kq_ids) { size_t copysize; - if (os_mul_overflow(sizeof(kqueue_id_t), min(ubuflen, nkqueues), ©size)) { + if (os_mul_overflow(sizeof(kqueue_id_t), min(buflen, nkqueues), ©size)) { err = ERANGE; goto out; } diff --git a/bsd/kern/posix_sem.c b/bsd/kern/posix_sem.c index 9dc882363..08a9a0c04 100644 --- a/bsd/kern/posix_sem.c +++ b/bsd/kern/posix_sem.c @@ -819,6 +819,10 @@ sem_close(proc_t p, struct sem_close_args *uap, __unused int32_t *retval) proc_fdunlock(p); return(error); } + if (fp->f_type != DTYPE_PSXSEM) { + proc_fdunlock(p); + return(EBADF); + } procfdtbl_markclosefd(p, fd); fileproc_drain(p, fp); fdrelse(p, fd); diff --git a/bsd/kern/posix_shm.c b/bsd/kern/posix_shm.c index 1fe787812..d220614db 100644 --- a/bsd/kern/posix_shm.c +++ b/bsd/kern/posix_shm.c @@ -865,6 +865,7 @@ pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct vm_map_offset_t user_start_addr; vm_map_size_t map_size, mapped_size; int prot = uap->prot; + int max_prot = VM_PROT_DEFAULT; int flags = uap->flags; vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos; vm_object_offset_t map_pos; @@ -887,8 +888,12 @@ pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct return(EINVAL); - if ((prot & PROT_WRITE) && ((fp->f_flag & FWRITE) == 0)) { - return(EPERM); + /* Can't allow write permission if the shm_open() didn't */ + if (!(fp->f_flag & FWRITE)) { + if (prot & VM_PROT_WRITE) { + return EPERM; + } + max_prot &= ~VM_PROT_WRITE; } if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL ) @@ -1000,7 +1005,7 @@ pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct file_pos - map_pos, docow, prot, - VM_PROT_DEFAULT, + max_prot, VM_INHERIT_SHARE); if (kret != KERN_SUCCESS) goto out; diff --git a/bsd/kern/sysv_shm.c b/bsd/kern/sysv_shm.c index 2fb45c996..9a240bbf9 100644 --- 
a/bsd/kern/sysv_shm.c +++ b/bsd/kern/sysv_shm.c @@ -618,7 +618,7 @@ shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval) } if (IS_64BIT_PROCESS(p)) { - struct user_shmid_ds shmid_ds; + struct user_shmid_ds shmid_ds = {}; memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds)); /* Clear kernel reserved pointer before copying to user space */ @@ -1046,7 +1046,7 @@ shminit(void) return ENOMEM; } - MALLOC(shmsegs, struct shmid_kernel *, sz, M_SHM, M_WAITOK); + MALLOC(shmsegs, struct shmid_kernel *, sz, M_SHM, M_WAITOK | M_ZERO); if (shmsegs == NULL) { return ENOMEM; } diff --git a/bsd/net/necp.c b/bsd/net/necp.c index 17d00fd4f..3fd05ae1b 100644 --- a/bsd/net/necp.c +++ b/bsd/net/necp.c @@ -8826,6 +8826,9 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr if (return_route_rule_id) { *return_route_rule_id = inp->inp_policyresult.results.route_rule_id; } + if (return_skip_policy_id) { + *return_skip_policy_id = inp->inp_policyresult.skip_policy_id; + } } lck_rw_done(&necp_kernel_policy_lock); goto done; diff --git a/bsd/netinet/flow_divert.c b/bsd/netinet/flow_divert.c index b73a86172..b6d668440 100644 --- a/bsd/netinet/flow_divert.c +++ b/bsd/netinet/flow_divert.c @@ -1186,7 +1186,7 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr if (fd_cb->local_address != NULL) { /* socket is bound. */ error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_LOCAL_ADDR, - sizeof(struct sockaddr_storage), fd_cb->local_address); + fd_cb->local_address->sa_len, fd_cb->local_address); if (error) { goto done; } @@ -2017,10 +2017,6 @@ flow_divert_handle_data(struct flow_divert_pcb *fd_cb, mbuf_t packet, size_t off } } socket_unlock(fd_cb->so, 0); - - if (data != NULL) { - mbuf_freem(data); - } } FDUNLOCK(fd_cb); } diff --git a/bsd/netinet/mptcp_subr.c b/bsd/netinet/mptcp_subr.c index 1606cdb62..c7b154796 100644 --- a/bsd/netinet/mptcp_subr.c +++ b/bsd/netinet/mptcp_subr.c @@ -774,8 +774,10 @@ mptcp_trigger_cell_bringup(struct mptses *mpte) uuid_string_t uuidstr; int err; + mpte_unlock(mpte); err = necp_client_assert_bb_radio_manager(mpsotomppcb(mp_so)->necp_client_uuid, TRUE); + mpte_lock(mpte); if (err == 0) mpte->mpte_triggered_cell = 1; diff --git a/config/MASTER.arm64 b/config/MASTER.arm64 index 32189c5fd..a90486db3 100644 --- a/config/MASTER.arm64 +++ b/config/MASTER.arm64 @@ -29,7 +29,10 @@ # FILESYS_DEV = [ FILESYS_BASE fdesc ] # FILESYS_DEBUG = [ FILESYS_BASE fdesc ] # NFS = [ nfsclient nfsserver ] -# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto packet_mangler if_fake ] +# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto if_fake ] +# NETWORKING_RELEASE = [ NETWORKING ] +# NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler ] +# NETWORKING_DEBUG = [ NETWORKING_DEV ] # VPN = [ ipsec flow_divert necp content_filter ] # PF = [ pf ] # MULTIPATH = [ multipath mptcp ] @@ -58,9 +61,9 @@ # VM_DEV = [ VM_BASE dynamic_codesigning ] # VM_DEBUG = [ VM_BASE dynamic_codesigning ] # SECURITY = [ config_macf kernel_integrity ] -# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ] -# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ] -# DEBUG = [ 
KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG SKYWALK_DEBUG NETWORKING PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ] +# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ] +# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING_DEV PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ] +# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ] # KASAN = [ DEVELOPMENT ] # ###################################################################### diff --git a/config/MASTER.arm64.bcm2837 b/config/MASTER.arm64.bcm2837 index 65dd4861b..f6c35b27b 100644 --- a/config/MASTER.arm64.bcm2837 +++ b/config/MASTER.arm64.bcm2837 @@ -29,7 +29,10 @@ # FILESYS_DEV = [ FILESYS_BASE fdesc ] # FILESYS_DEBUG = [ FILESYS_BASE fdesc ] # NFS = [ nfsclient nfsserver ] -# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto packet_mangler if_fake ] +# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto if_fake ] +# NETWORKING_RELEASE = [ NETWORKING ] +# NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler ] +# NETWORKING_DEBUG = [ NETWORKING_DEV ] # VPN = [ ipsec flow_divert necp content_filter ] # PF = [ pf ] # MULTIPATH = [ multipath mptcp ] @@ -58,9 +61,9 @@ # VM_DEV = [ VM_BASE dynamic_codesigning ] # VM_DEBUG = [ VM_BASE dynamic_codesigning ] # SECURITY = [ config_macf kernel_integrity ] -# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ] -# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ] -# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG SKYWALK_DEBUG NETWORKING PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ] +# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ] +# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING_DEV PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ] +# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ] # KASAN = [ DEVELOPMENT ] # ###################################################################### diff --git a/config/MASTER.x86_64 b/config/MASTER.x86_64 index b14a338d6..7f128cc61 100644 --- a/config/MASTER.x86_64 +++ b/config/MASTER.x86_64 @@ -29,7 +29,10 @@ # FILESYS_DEV = [ FILESYS_BASE ] # FILESYS_DEBUG = [ FILESYS_BASE ] # NFS = [ nfsclient nfsserver ] -# NETWORKING = [ inet inet6 ipv6send tcpdrop_synfin bpfilter dummynet traffic_mgt sendfile ah_all_crypto bond vlan gif stf ifnet_input_chk config_mbuf_jumbo if_bridge ipcomp_zlib MULTIPATH packet_mangler if_fake ] +# NETWORKING = [ inet inet6 ipv6send tcpdrop_synfin bpfilter dummynet traffic_mgt sendfile ah_all_crypto bond vlan 
gif stf ifnet_input_chk config_mbuf_jumbo if_bridge ipcomp_zlib MULTIPATH if_fake ] +# NETWORKING_RELEASE = [ NETWORKING ] +# NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler ] +# NETWORKING_DEBUG = [ NETWORKING_DEV ] # VPN = [ ipsec flow_divert necp content_filter ] # PF = [ pf pflog ] # MULTIPATH = [ multipath mptcp ] @@ -52,9 +55,9 @@ # SCHED_DEBUG = [ SCHED_BASE config_sched_grrr config_sched_proto ] # VM = [ vm_pressure_events memorystatus dynamic_codesigning config_code_decryption encrypted_swap phantom_cache config_background_queue] # SECURITY = [ config_macf config_audit config_csr ] -# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS SKYWALK_RELEASE NETWORKING PF VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG MACH_RELEASE SCHED_RELEASE VM SECURITY ] -# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING PF VPN IOKIT_DEV LIBKERN_DEV PERF_DBG MACH_DEV SCHED_DEV VM SECURITY ] -# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS SKYWALK_DEBUG NETWORKING PF VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG MACH_DEBUG SCHED_DEBUG VM SECURITY ] +# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS SKYWALK_RELEASE NETWORKING_RELEASE PF VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG MACH_RELEASE SCHED_RELEASE VM SECURITY ] +# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING_DEV PF VPN IOKIT_DEV LIBKERN_DEV PERF_DBG MACH_DEV SCHED_DEV VM SECURITY ] +# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS SKYWALK_DEBUG NETWORKING_DEBUG PF VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG MACH_DEBUG SCHED_DEBUG VM SECURITY ] # KASAN = [ DEVELOPMENT ] # ###################################################################### diff --git a/config/Unsupported.exports b/config/Unsupported.exports index 7938c5da9..8853251ca 100644 --- a/config/Unsupported.exports +++ b/config/Unsupported.exports @@ -119,6 +119,7 @@ _mach_make_memory_entry_64 _mach_memory_entry_page_op _mach_memory_entry_range_op _mach_msg_rpc_from_kernel_proper +_mach_msg_destroy_from_kernel_proper _mach_vm_region _max_mem _mem_size diff --git a/iokit/Kernel/IOPMrootDomain.cpp b/iokit/Kernel/IOPMrootDomain.cpp index 40a11e05e..85669860f 100644 --- a/iokit/Kernel/IOPMrootDomain.cpp +++ b/iokit/Kernel/IOPMrootDomain.cpp @@ -196,6 +196,51 @@ static void pmEventTimeStamp(uint64_t *recordTS); static const OSSymbol *sleepSupportedPEFunction = NULL; static const OSSymbol *sleepMessagePEFunction = NULL; +static const OSSymbol * gIOPMPSExternalConnectedKey; +static const OSSymbol * gIOPMPSExternalChargeCapableKey; +static const OSSymbol * gIOPMPSBatteryInstalledKey; +static const OSSymbol * gIOPMPSIsChargingKey; +static const OSSymbol * gIOPMPSAtWarnLevelKey; +static const OSSymbol * gIOPMPSAtCriticalLevelKey; +static const OSSymbol * gIOPMPSCurrentCapacityKey; +static const OSSymbol * gIOPMPSMaxCapacityKey; +static const OSSymbol * gIOPMPSDesignCapacityKey; +static const OSSymbol * gIOPMPSTimeRemainingKey; +static const OSSymbol * gIOPMPSAmperageKey; +static const OSSymbol * gIOPMPSVoltageKey; +static const OSSymbol * gIOPMPSCycleCountKey; +static const OSSymbol * gIOPMPSMaxErrKey; +static const OSSymbol * gIOPMPSAdapterInfoKey; +static const OSSymbol * gIOPMPSLocationKey; +static const OSSymbol * gIOPMPSErrorConditionKey; +static const OSSymbol * gIOPMPSManufacturerKey; +static const OSSymbol * gIOPMPSManufactureDateKey; +static const OSSymbol * gIOPMPSModelKey; +static const OSSymbol * gIOPMPSSerialKey; +static const OSSymbol * gIOPMPSLegacyBatteryInfoKey; +static const OSSymbol * 
gIOPMPSBatteryHealthKey; +static const OSSymbol * gIOPMPSHealthConfidenceKey; +static const OSSymbol * gIOPMPSCapacityEstimatedKey; +static const OSSymbol * gIOPMPSBatteryChargeStatusKey; +static const OSSymbol * gIOPMPSBatteryTemperatureKey; +static const OSSymbol * gIOPMPSAdapterDetailsKey; +static const OSSymbol * gIOPMPSChargerConfigurationKey; +static const OSSymbol * gIOPMPSAdapterDetailsIDKey; +static const OSSymbol * gIOPMPSAdapterDetailsWattsKey; +static const OSSymbol * gIOPMPSAdapterDetailsRevisionKey; +static const OSSymbol * gIOPMPSAdapterDetailsSerialNumberKey; +static const OSSymbol * gIOPMPSAdapterDetailsFamilyKey; +static const OSSymbol * gIOPMPSAdapterDetailsAmperageKey; +static const OSSymbol * gIOPMPSAdapterDetailsDescriptionKey; +static const OSSymbol * gIOPMPSAdapterDetailsPMUConfigurationKey; +static const OSSymbol * gIOPMPSAdapterDetailsSourceIDKey; +static const OSSymbol * gIOPMPSAdapterDetailsErrorFlagsKey; +static const OSSymbol * gIOPMPSAdapterDetailsSharedSourceKey; +static const OSSymbol * gIOPMPSAdapterDetailsCloakedKey; +static const OSSymbol * gIOPMPSInvalidWakeSecondsKey; +static const OSSymbol * gIOPMPSPostChargeWaitSecondsKey; +static const OSSymbol * gIOPMPSPostDishargeWaitSecondsKey; + #define kIOSleepSupportedKey "IOSleepSupported" #define kIOPMSystemCapabilitiesKey "System Capabilities" @@ -9591,6 +9636,51 @@ static IOPMPowerState patriarchPowerStates[2] = void IORootParent::initialize( void ) { + + gIOPMPSExternalConnectedKey = OSSymbol::withCStringNoCopy(kIOPMPSExternalConnectedKey); + gIOPMPSExternalChargeCapableKey = OSSymbol::withCStringNoCopy(kIOPMPSExternalChargeCapableKey); + gIOPMPSBatteryInstalledKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryInstalledKey); + gIOPMPSIsChargingKey = OSSymbol::withCStringNoCopy(kIOPMPSIsChargingKey); + gIOPMPSAtWarnLevelKey = OSSymbol::withCStringNoCopy(kIOPMPSAtWarnLevelKey); + gIOPMPSAtCriticalLevelKey = OSSymbol::withCStringNoCopy(kIOPMPSAtCriticalLevelKey); + gIOPMPSCurrentCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSCurrentCapacityKey); + gIOPMPSMaxCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSMaxCapacityKey); + gIOPMPSDesignCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSDesignCapacityKey); + gIOPMPSTimeRemainingKey = OSSymbol::withCStringNoCopy(kIOPMPSTimeRemainingKey); + gIOPMPSAmperageKey = OSSymbol::withCStringNoCopy(kIOPMPSAmperageKey); + gIOPMPSVoltageKey = OSSymbol::withCStringNoCopy(kIOPMPSVoltageKey); + gIOPMPSCycleCountKey = OSSymbol::withCStringNoCopy(kIOPMPSCycleCountKey); + gIOPMPSMaxErrKey = OSSymbol::withCStringNoCopy(kIOPMPSMaxErrKey); + gIOPMPSAdapterInfoKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterInfoKey); + gIOPMPSLocationKey = OSSymbol::withCStringNoCopy(kIOPMPSLocationKey); + gIOPMPSErrorConditionKey = OSSymbol::withCStringNoCopy(kIOPMPSErrorConditionKey); + gIOPMPSManufacturerKey = OSSymbol::withCStringNoCopy(kIOPMPSManufacturerKey); + gIOPMPSManufactureDateKey = OSSymbol::withCStringNoCopy(kIOPMPSManufactureDateKey); + gIOPMPSModelKey = OSSymbol::withCStringNoCopy(kIOPMPSModelKey); + gIOPMPSSerialKey = OSSymbol::withCStringNoCopy(kIOPMPSSerialKey); + gIOPMPSLegacyBatteryInfoKey = OSSymbol::withCStringNoCopy(kIOPMPSLegacyBatteryInfoKey); + gIOPMPSBatteryHealthKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryHealthKey); + gIOPMPSHealthConfidenceKey = OSSymbol::withCStringNoCopy(kIOPMPSHealthConfidenceKey); + gIOPMPSCapacityEstimatedKey = OSSymbol::withCStringNoCopy(kIOPMPSCapacityEstimatedKey); + gIOPMPSBatteryChargeStatusKey = 
OSSymbol::withCStringNoCopy(kIOPMPSBatteryChargeStatusKey); + gIOPMPSBatteryTemperatureKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryTemperatureKey); + gIOPMPSAdapterDetailsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsKey); + gIOPMPSChargerConfigurationKey = OSSymbol::withCStringNoCopy(kIOPMPSChargerConfigurationKey); + gIOPMPSAdapterDetailsIDKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsIDKey); + gIOPMPSAdapterDetailsWattsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsWattsKey); + gIOPMPSAdapterDetailsRevisionKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsRevisionKey); + gIOPMPSAdapterDetailsSerialNumberKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSerialNumberKey); + gIOPMPSAdapterDetailsFamilyKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsFamilyKey); + gIOPMPSAdapterDetailsAmperageKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsAmperageKey); + gIOPMPSAdapterDetailsDescriptionKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsDescriptionKey); + gIOPMPSAdapterDetailsPMUConfigurationKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsPMUConfigurationKey); + gIOPMPSAdapterDetailsSourceIDKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSourceIDKey); + gIOPMPSAdapterDetailsErrorFlagsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsErrorFlagsKey); + gIOPMPSAdapterDetailsSharedSourceKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSharedSourceKey); + gIOPMPSAdapterDetailsCloakedKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsCloakedKey); + gIOPMPSInvalidWakeSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSInvalidWakeSecondsKey); + gIOPMPSPostChargeWaitSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSPostChargeWaitSecondsKey); + gIOPMPSPostDishargeWaitSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSPostDishargeWaitSecondsKey); } bool IORootParent::start( IOService * nub ) diff --git a/osfmk/arm/machine_routines.c b/osfmk/arm/machine_routines.c index 0a6777ea5..94fc76bf4 100644 --- a/osfmk/arm/machine_routines.c +++ b/osfmk/arm/machine_routines.c @@ -1009,6 +1009,8 @@ ml_delay_should_spin(uint64_t interval) } } +void ml_delay_on_yield(void) {} + boolean_t ml_thread_is64bit(thread_t thread) { return (thread_is_64bit_addr(thread)); diff --git a/osfmk/arm/machine_routines.h b/osfmk/arm/machine_routines.h index 4a7061b65..d5a77dd9b 100644 --- a/osfmk/arm/machine_routines.h +++ b/osfmk/arm/machine_routines.h @@ -444,6 +444,8 @@ void ml_init_lock_timeout(void); boolean_t ml_delay_should_spin(uint64_t interval); +void ml_delay_on_yield(void); + uint32_t ml_get_decrementer(void); #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME diff --git a/osfmk/arm64/machine_routines.c b/osfmk/arm64/machine_routines.c index e9a22430d..bf9633ed5 100644 --- a/osfmk/arm64/machine_routines.c +++ b/osfmk/arm64/machine_routines.c @@ -72,6 +72,8 @@ uint32_t LockTimeOutUsec; uint64_t MutexSpin; boolean_t is_clock_configured = FALSE; +uint32_t yield_delay_us = 42; /* Less than cpu_idle_latency to ensure ml_delay_should_spin is true */ + extern int mach_assert; extern volatile uint32_t debug_enabled; @@ -438,6 +440,8 @@ machine_startup(__unused boot_args * args) default_bg_preemption_rate = boot_arg; } + PE_parse_boot_argn("yield_delay_us", &yield_delay_us, sizeof (yield_delay_us)); + machine_conf(); /* @@ -1811,6 +1815,11 @@ ml_delay_should_spin(uint64_t interval) } } +void +ml_delay_on_yield(void) +{ +} + boolean_t ml_thread_is64bit(thread_t thread) { return (thread_is_64bit_addr(thread)); } diff --git a/osfmk/atm/atm.c b/osfmk/atm/atm.c index 
fcf59e3d3..0c90d585b 100644 --- a/osfmk/atm/atm.c +++ b/osfmk/atm/atm.c @@ -87,8 +87,6 @@ static kern_return_t atm_value_register(atm_value_t atm_value, atm_task_descript static kern_return_t atm_listener_delete(atm_value_t atm_value, atm_task_descriptor_t task_descriptor, atm_guard_t guard); static void atm_link_get_reference(atm_link_object_t link_object) __unused; static void atm_link_dealloc(atm_link_object_t link_object); -kern_return_t atm_invoke_collection(atm_value_t atm_value, mach_atm_subaid_t subaid, uint32_t flags); -kern_return_t atm_send_user_notification(aid_t aid, mach_atm_subaid_t sub_aid, mach_port_t *buffers_array, uint64_t *sizes_array, mach_msg_type_number_t count, uint32_t flags); kern_return_t atm_release_value( @@ -479,43 +477,15 @@ atm_command( uint32_t aid_array_count = 0; atm_task_descriptor_t task_descriptor = ATM_TASK_DESCRIPTOR_NULL; task_t task; - uint32_t collection_flags = ATM_ACTION_LOGFAIL; kern_return_t kr = KERN_SUCCESS; atm_guard_t guard; switch (command) { case ATM_ACTION_COLLECT: - collection_flags = ATM_ACTION_COLLECT; /* Fall through */ - case ATM_ACTION_LOGFAIL: { - mach_atm_subaid_t sub_aid = 0; - - if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE)) - return KERN_NOT_SUPPORTED; - - /* find the first non-default atm_value */ - for (i = 0; i < value_count; i++) { - atm_value = HANDLE_TO_ATM_VALUE(values[i]); - if (atm_value != VAM_DEFAULT_VALUE) - break; - } - - /* if we are not able to find any atm values - * in stack then this call was made in error - */ - if (atm_value == NULL) { - return KERN_FAILURE; - } - - if (in_content_size >= sizeof(mach_atm_subaid_t)) { - sub_aid = *(mach_atm_subaid_t *)(void *)in_content; - } - - *out_content_size = 0; - kr = atm_invoke_collection(atm_value, sub_aid, collection_flags); - break; - } + case ATM_ACTION_LOGFAIL: + return KERN_NOT_SUPPORTED; case ATM_FIND_MIN_SUB_AID: if ((in_content_size/sizeof(aid_t)) > (*out_content_size/sizeof(mach_atm_subaid_t))) @@ -611,202 +581,6 @@ atm_release( } -/* - * Routine: atm_invoke_collection - * Purpose: Sends a notification with array of memory buffer. - * Note: may block till user daemon responds. 
- */ -kern_return_t -atm_invoke_collection( - atm_value_t atm_value, - mach_atm_subaid_t sub_aid, - uint32_t flags) -{ - aid_t aid = atm_value->aid; - kern_return_t kr = KERN_SUCCESS; - uint32_t array_count = 0, i = 0, j = 0, requestor_index = 0; - uint64_t *sizes_array = NULL; - atm_link_object_t link_object = NULL; - mach_port_t *mem_array = NULL; - boolean_t need_swap_first = FALSE; - atm_task_descriptor_t requesting_descriptor = current_task()->atm_context; - - lck_mtx_lock(&atm_value->listener_lock); - array_count = atm_value->listener_count; - lck_mtx_unlock(&atm_value->listener_lock); - - if (array_count == 0){ - return KERN_SUCCESS; - } - - mem_array = kalloc(sizeof(mach_port_t) * array_count); - if (mem_array == NULL){ - return KERN_NO_SPACE; - } - - sizes_array = kalloc(sizeof(uint64_t) * array_count); - if (sizes_array == NULL){ - kfree(mem_array, sizeof(mach_port_t) * array_count); - return KERN_NO_SPACE; - } - - lck_mtx_lock(&atm_value->listener_lock); - queue_iterate(&atm_value->listeners, link_object, atm_link_object_t, listeners_element) { - if (i >= array_count){ - break; - } - - if (!need_swap_first && requesting_descriptor == link_object->descriptor){ - assert(requesting_descriptor != NULL); - requestor_index = i; - need_swap_first = TRUE; - } - - sizes_array[i] = link_object->descriptor->trace_buffer_size; - mem_array[i] = ipc_port_copy_send(link_object->descriptor->trace_buffer); - if (!IPC_PORT_VALID(mem_array[i])){ - mem_array[i] = NULL; - } - i++; - } - lck_mtx_unlock(&atm_value->listener_lock); - - /* - * Swap the position of requesting task ahead, diagnostics can - * process its buffers the first. - */ - if (need_swap_first && requestor_index != 0){ - assert(requestor_index < array_count); - mach_port_t tmp_port = mem_array[0]; - uint64_t tmp_size = sizes_array[0]; - mem_array[0] = mem_array[requestor_index]; - sizes_array[0] = sizes_array[requestor_index]; - mem_array[requestor_index] = tmp_port; - sizes_array[requestor_index] = tmp_size; - } - - if (i > 0) { - kr = atm_send_user_notification(aid, sub_aid, mem_array, sizes_array, i, flags); - } - - for (j = 0; j < i; j++) { - if (mem_array[j] != NULL) - ipc_port_release_send(mem_array[j]); - } - - kfree(mem_array, sizeof(mach_port_t) * array_count); - kfree(sizes_array, sizeof(uint64_t) * array_count); - - return kr; -} - -/* - * Routine: atm_send_user_notification - * Purpose: Make an upcall to user space daemon if its listening for atm notifications. - * Returns: KERN_SUCCESS for successful delivery. - * KERN_FAILURE if port is dead or NULL. - */ -kern_return_t -atm_send_user_notification( - aid_t aid, - mach_atm_subaid_t sub_aid, - mach_port_t *buffers_array, - uint64_t *sizes_array, - mach_msg_type_number_t count, - uint32_t flags) -{ - mach_port_t user_port; - int error; - thread_t th = current_thread(); - kern_return_t kr; - - error = host_get_atm_notification_port(host_priv_self(), &user_port); - if ((error != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) { - return KERN_FAILURE; - } - - thread_set_honor_qlimit(th); - kr = atm_collect_trace_info(user_port, aid, sub_aid, flags, buffers_array, count, sizes_array, count); - thread_clear_honor_qlimit(th); - - if (kr != KERN_SUCCESS) { - ipc_port_release_send(user_port); - - if (kr == MACH_SEND_TIMED_OUT) { - kr = KERN_SUCCESS; - } - } - - return kr; -} - -/* - * Routine: atm_send_proc_inspect_notification - * Purpose: Make an upcall to user space daemon if its listening for trace - * notifications for per process inspection. 
- * Returns: KERN_SUCCESS for successful delivery. - * KERN_FAILURE if port is dead or NULL. - */ - -kern_return_t -atm_send_proc_inspect_notification( - task_t task, - int32_t traced_pid, - uint64_t traced_uniqueid) -{ - mach_port_t user_port = MACH_PORT_NULL; - mach_port_t memory_port = MACH_PORT_NULL; - kern_return_t kr; - atm_task_descriptor_t task_descriptor = ATM_TASK_DESCRIPTOR_NULL; - uint64_t buffer_size = 0; - int error; - thread_t th = current_thread(); - - if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE)) - return KERN_NOT_SUPPORTED; - - /* look for the requested memory in target task */ - if (!task) - return KERN_INVALID_TASK; - - task_lock(task); - if (task->atm_context){ - task_descriptor = task->atm_context; - atm_descriptor_get_reference(task_descriptor); - } - task_unlock(task); - - if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL){ - return KERN_FAILURE; - } - - memory_port = ipc_port_copy_send(task_descriptor->trace_buffer); - buffer_size = task_descriptor->trace_buffer_size; - atm_task_descriptor_dealloc(task_descriptor); - - /* get the communication port */ - error = host_get_atm_notification_port(host_priv_self(), &user_port); - if ((error != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) { - ipc_port_release_send(memory_port); - return KERN_FAILURE; - } - - thread_set_honor_qlimit(th); - kr = atm_inspect_process_buffer(user_port, traced_pid, traced_uniqueid, buffer_size, memory_port); - thread_clear_honor_qlimit(th); - - if (kr != KERN_SUCCESS) { - ipc_port_release_send(user_port); - - if (kr == MACH_SEND_TIMED_OUT) { - kr = KERN_SUCCESS; - } - } - - ipc_port_release_send(memory_port); - return kr; -} - /* * Routine: atm_value_alloc_init * Purpose: Allocates an atm value struct and initialize it. diff --git a/osfmk/i386/machine_routines.c b/osfmk/i386/machine_routines.c index e62e821c6..611470eaa 100644 --- a/osfmk/i386/machine_routines.c +++ b/osfmk/i386/machine_routines.c @@ -760,6 +760,8 @@ ml_delay_should_spin(uint64_t interval) return (interval < delay_spin_threshold) ? TRUE : FALSE; } +void ml_delay_on_yield(void) {} + /* * This is called from the machine-independent layer * to perform machine-dependent info updates. Defer to cpu_thread_init(). 
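The ml_delay_on_yield() hook added above for i386 (and declared in the machine_routines.h diff that follows, with matching arm/arm64 stubs elsewhere in this patch) is left empty on every architecture; the yield paths in osfmk/kern/syscall_subr.c simply call it after re-enabling preemption. As a rough illustration only, a platform that wanted the hook to do real work could spin for a short, bounded interval before yielding, reusing the yield_delay_us tunable this patch adds for arm64 together with the existing delay_for_interval()/ml_delay_should_spin() primitives. The wiring below is an assumption for illustration, not part of this change:

/*
 * Hypothetical sketch only -- in this patch ml_delay_on_yield() is an empty stub.
 * The idea: burn a short, bounded delay before giving up the CPU so that very
 * short waits never pay the cost of a full reschedule.
 */
extern uint32_t yield_delay_us;	/* boot-arg tunable added for arm64 in this patch */

void
ml_delay_on_yield(void)
{
	uint64_t interval_abs;

	if (yield_delay_us == 0) {
		return;
	}
	nanoseconds_to_absolutetime((uint64_t)yield_delay_us * NSEC_PER_USEC, &interval_abs);
	/* Only spin if the wait is shorter than the cost of entering idle. */
	if (ml_delay_should_spin(interval_abs)) {
		delay_for_interval(yield_delay_us, NSEC_PER_USEC);
	}
}

Because swtch_continue(), swtch_pri_continue(), and thread_switch_continue() invoke ml_delay_on_yield() with preemption enabled, any such delay has to stay far below the scheduler quantum; the arm64 hunk picks a default of 42us for yield_delay_us precisely because it is below cpu_idle_latency, keeping ml_delay_should_spin() true.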
diff --git a/osfmk/i386/machine_routines.h b/osfmk/i386/machine_routines.h index 802099052..487cc6b61 100644 --- a/osfmk/i386/machine_routines.h +++ b/osfmk/i386/machine_routines.h @@ -88,6 +88,8 @@ void ml_init_delay_spin_threshold(int); boolean_t ml_delay_should_spin(uint64_t interval); +extern void ml_delay_on_yield(void); + vm_offset_t ml_static_ptovirt( vm_offset_t); diff --git a/osfmk/ipc/ipc_kmsg.c b/osfmk/ipc/ipc_kmsg.c index 81776c729..7955cf45d 100644 --- a/osfmk/ipc/ipc_kmsg.c +++ b/osfmk/ipc/ipc_kmsg.c @@ -4615,13 +4615,13 @@ ipc_kmsg_copyout_to_kernel( ipc_space_t space) { ipc_object_t dest; - ipc_object_t reply; + mach_port_t reply; mach_msg_type_name_t dest_type; mach_msg_type_name_t reply_type; - mach_port_name_t dest_name, reply_name; + mach_port_name_t dest_name; dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port; - reply = (ipc_object_t) kmsg->ikm_header->msgh_local_port; + reply = kmsg->ikm_header->msgh_local_port; dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits); reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits); @@ -4637,13 +4637,11 @@ ipc_kmsg_copyout_to_kernel( dest_name = MACH_PORT_DEAD; } - reply_name = CAST_MACH_PORT_TO_NAME(reply); - kmsg->ikm_header->msgh_bits = (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) | MACH_MSGH_BITS(reply_type, dest_type)); kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name); - kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name); + kmsg->ikm_header->msgh_remote_port = reply; } #if IKM_SUPPORT_LEGACY diff --git a/osfmk/kern/bsd_kern.c b/osfmk/kern/bsd_kern.c index d017ae520..b28d396c2 100644 --- a/osfmk/kern/bsd_kern.c +++ b/osfmk/kern/bsd_kern.c @@ -1104,7 +1104,7 @@ fill_task_monotonic_rusage(task_t task, rusage_info_current *ri) assert(task != TASK_NULL); - uint64_t counts[MT_CORE_NFIXED] = {}; + uint64_t counts[MT_CORE_NFIXED] = { 0 }; mt_fixed_task_counts(task, counts); #ifdef MT_CORE_INSTRS ri->ri_instructions = counts[MT_CORE_INSTRS]; diff --git a/osfmk/kern/debug.c b/osfmk/kern/debug.c index 0d15f8f8e..d58ac47f7 100644 --- a/osfmk/kern/debug.c +++ b/osfmk/kern/debug.c @@ -645,11 +645,8 @@ void panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...) { va_list panic_str_args; - __assert_only uint32_t th_ref_count; assert_thread_magic(thread); - th_ref_count = atomic_load_explicit(&thread->ref_count, memory_order_acquire); - assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count); /* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */ thread_reference(thread); diff --git a/osfmk/kern/ipc_mig.c b/osfmk/kern/ipc_mig.c index ddbfa0e5a..818854bf6 100644 --- a/osfmk/kern/ipc_mig.c +++ b/osfmk/kern/ipc_mig.c @@ -500,6 +500,92 @@ mach_msg_rpc_from_kernel_body( return mr; } +/* + * Routine: mach_msg_destroy_from_kernel_proper + * Purpose: + * mach_msg_destroy_from_kernel_proper is used to destroy + * an unwanted/unexpected reply message from a MIG + * kernel-specific user-side stub. It is like ipc_kmsg_destroy(), + * except we no longer have the kmsg - just the contents. 
+ */ +void +mach_msg_destroy_from_kernel_proper(mach_msg_header_t *msg) +{ + mach_msg_bits_t mbits = msg->msgh_bits; + ipc_object_t object; + + object = (ipc_object_t) msg->msgh_remote_port; + if (IO_VALID(object)) { + ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits)); + } + + /* + * The destination (now in msg->msgh_local_port via + * ipc_kmsg_copyout_to_kernel) has been consumed with + * ipc_object_copyout_dest. + */ + + /* MIG kernel users don't receive vouchers */ + assert(!MACH_PORT_VALID(msg->msgh_voucher_port)); + + /* For simple messages, we're done */ + if ((mbits & MACH_MSGH_BITS_COMPLEX) == 0) { + return; + } + + /* Discard descriptor contents */ + mach_msg_body_t *body = (mach_msg_body_t *)(msg + 1); + mach_msg_descriptor_t *daddr = (mach_msg_descriptor_t *)(body + 1); + mach_msg_size_t i; + + for (i = 0 ; i < body->msgh_descriptor_count; i++, daddr++ ) { + switch (daddr->type.type) { + + case MACH_MSG_PORT_DESCRIPTOR: { + mach_msg_port_descriptor_t *dsc = &daddr->port; + if (IO_VALID((ipc_object_t) dsc->name)) { + ipc_object_destroy((ipc_object_t) dsc->name, dsc->disposition); + } + break; + } + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR : { + mach_msg_ool_descriptor_t *dsc = + (mach_msg_ool_descriptor_t *)&daddr->out_of_line; + + if (dsc->size > 0) { + vm_map_copy_discard((vm_map_copy_t) dsc->address); + } else { + assert(dsc->address == (void *) 0); + } + break; + } + case MACH_MSG_OOL_PORTS_DESCRIPTOR : { + ipc_object_t *objects; + mach_msg_type_number_t j; + mach_msg_ool_ports_descriptor_t *dsc; + + dsc = (mach_msg_ool_ports_descriptor_t *)&daddr->ool_ports; + objects = (ipc_object_t *) dsc->address; + + if (dsc->count == 0) { + break; + } + assert(objects != 0); + for (j = 0; j < dsc->count; j++) { + object = objects[j]; + if (IO_VALID(object)) { + ipc_object_destroy(object, dsc->disposition); + } + } + kfree(dsc->address, (vm_size_t) dsc->count * sizeof(mach_port_t)); + break; + } + default : + break; + } + } +} /************** These Calls are set up for kernel-loaded tasks/threads **************/ diff --git a/osfmk/kern/ipc_mig.h b/osfmk/kern/ipc_mig.h index 92fe442c5..cf1af4da4 100644 --- a/osfmk/kern/ipc_mig.h +++ b/osfmk/kern/ipc_mig.h @@ -155,6 +155,12 @@ mach_msg_rpc_from_kernel_proper( #define mach_msg_rpc_from_kernel mach_msg_rpc_from_kernel_proper +extern void +mach_msg_destroy_from_kernel_proper( + mach_msg_header_t *msg); + +#define mach_msg_destroy_from_kernel mach_msg_destroy_from_kernel_proper + #ifdef XNU_KERNEL_PRIVATE extern mach_msg_return_t mach_msg_send_from_kernel_with_options_legacy( mach_msg_header_t *msg, diff --git a/osfmk/kern/kern_monotonic.c b/osfmk/kern/kern_monotonic.c index 0c9d825e9..75315a764 100644 --- a/osfmk/kern/kern_monotonic.c +++ b/osfmk/kern/kern_monotonic.c @@ -199,21 +199,19 @@ mt_fixed_task_counts(task_t task, uint64_t *counts_out) assert(task != TASK_NULL); assert(counts_out != NULL); - uint64_t counts[MT_CORE_NFIXED]; if (!mt_core_supported) { - for (int i = 0; i < MT_CORE_NFIXED; i++) { - counts[i] = 0; - } - return 0; + memset(counts_out, 0, sizeof(*counts_out) * MT_CORE_NFIXED); + return 1; } task_lock(task); + uint64_t counts[MT_CORE_NFIXED] = { 0 }; for (int i = 0; i < MT_CORE_NFIXED; i++) { counts[i] = task->task_monotonic.mtk_counts[i]; } - uint64_t thread_counts[MT_CORE_NFIXED] = {}; + uint64_t thread_counts[MT_CORE_NFIXED] = { 0 }; thread_t thread = THREAD_NULL; thread_t curthread = current_thread(); bool needs_current = false; @@ -357,9 +355,7 @@ void 
mt_cur_thread_fixed_counts(uint64_t *counts) { if (!mt_core_supported) { - for (int i = 0; i < MT_CORE_NFIXED; i++) { - counts[i] = 0; - } + memset(counts, 0, sizeof(*counts) * MT_CORE_NFIXED); return; } diff --git a/osfmk/kern/ledger.c b/osfmk/kern/ledger.c index 001cad83b..481378d2f 100644 --- a/osfmk/kern/ledger.c +++ b/osfmk/kern/ledger.c @@ -378,7 +378,7 @@ ledger_instantiate(ledger_template_t template, int entry_type) ledger->l_template = template; ledger->l_id = ledger_cnt++; - ledger->l_refs = 1; + os_ref_init(&ledger->l_refs, NULL); ledger->l_size = (int32_t)cnt; template_lock(template); @@ -429,7 +429,7 @@ ledger_reference(ledger_t ledger) { if (!LEDGER_VALID(ledger)) return (KERN_INVALID_ARGUMENT); - OSIncrementAtomic(&ledger->l_refs); + os_ref_retain(&ledger->l_refs); return (KERN_SUCCESS); } @@ -439,7 +439,7 @@ ledger_reference_count(ledger_t ledger) if (!LEDGER_VALID(ledger)) return (-1); - return (ledger->l_refs); + return os_ref_get_count(&ledger->l_refs); } /* @@ -449,16 +449,10 @@ ledger_reference_count(ledger_t ledger) kern_return_t ledger_dereference(ledger_t ledger) { - int v; - if (!LEDGER_VALID(ledger)) return (KERN_INVALID_ARGUMENT); - v = OSDecrementAtomic(&ledger->l_refs); - ASSERT(v >= 1); - - /* Just released the last reference. Free it. */ - if (v == 1) { + if (os_ref_release(&ledger->l_refs) == 0) { if (ledger->l_template->lt_zone) { zfree(ledger->l_template->lt_zone, ledger); } else { diff --git a/osfmk/kern/ledger.h b/osfmk/kern/ledger.h index 78eb4f848..55faa7f52 100644 --- a/osfmk/kern/ledger.h +++ b/osfmk/kern/ledger.h @@ -34,6 +34,10 @@ #include /* ledger_t */ +#ifdef MACH_KERNEL_PRIVATE +#include +#endif /* MACH_KERNEL_PRIVATE */ + #define LEDGER_INFO 0 #define LEDGER_ENTRY_INFO 1 #define LEDGER_TEMPLATE_INFO 2 @@ -92,7 +96,7 @@ struct ledger_entry { struct ledger { uint64_t l_id; - int32_t l_refs; + struct os_refcnt l_refs; int32_t l_size; struct ledger_template *l_template; struct ledger_entry l_entries[0] __attribute__((aligned(8))); diff --git a/osfmk/kern/syscall_subr.c b/osfmk/kern/syscall_subr.c index 1732d7ab2..507d5ec35 100644 --- a/osfmk/kern/syscall_subr.c +++ b/osfmk/kern/syscall_subr.c @@ -110,6 +110,8 @@ swtch_continue(void) result = SCHED(thread_should_yield)(myprocessor, current_thread()); enable_preemption(); + ml_delay_on_yield(); + thread_syscall_return(result); /*NOTREACHED*/ } @@ -147,6 +149,8 @@ swtch_pri_continue(void) result = SCHED(thread_should_yield)(myprocessor, current_thread()); mp_enable_preemption(); + ml_delay_on_yield(); + thread_syscall_return(result); /*NOTREACHED*/ } @@ -182,6 +186,8 @@ thread_switch_continue(void *parameter, __unused int ret) if (option == SWITCH_OPTION_DEPRESS || option == SWITCH_OPTION_OSLOCK_DEPRESS) thread_depress_abort(self); + ml_delay_on_yield(); + thread_syscall_return(KERN_SUCCESS); /*NOTREACHED*/ } @@ -314,10 +320,22 @@ thread_switch( thread_deallocate(thread); } - if (wait_option) + if (wait_option) { assert_wait_timeout((event_t)assert_wait_timeout, interruptible, option_time, scale_factor); - else if (depress_option) - thread_depress_ms(option_time); + } else { + disable_preemption(); + bool should_yield = SCHED(thread_should_yield)(current_processor(), current_thread()); + enable_preemption(); + + if (should_yield == false) { + /* Early-return if yielding to the scheduler will not be beneficial */ + return KERN_SUCCESS; + } + + if (depress_option) { + thread_depress_ms(option_time); + } + } thread_yield_with_continuation(thread_switch_continue, (void *)(intptr_t)option); 
__builtin_unreachable(); diff --git a/osfmk/kern/task.c b/osfmk/kern/task.c index 20eef5136..c80e30d30 100644 --- a/osfmk/kern/task.c +++ b/osfmk/kern/task.c @@ -655,24 +655,24 @@ task_reference_internal(task_t task) void * bt[TASK_REF_BTDEPTH]; int numsaved = 0; + os_ref_retain(&task->ref_count); + numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH); - - (void)hw_atomic_add(&(task)->ref_count, 1); btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_INCR, bt, numsaved); } -uint32_t +os_ref_count_t task_deallocate_internal(task_t task) { void * bt[TASK_REF_BTDEPTH]; int numsaved = 0; numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH); - btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_DECR, bt, numsaved); - return hw_atomic_sub(&(task)->ref_count, 1); + + return os_ref_release(&task->ref_count); } #endif /* TASK_REFERENCE_LEAK_DEBUG */ @@ -1115,6 +1115,8 @@ init_task_ledgers(void) task_ledger_template = t; } +os_refgrp_decl(static, task_refgrp, "task", NULL); + kern_return_t task_create_internal( task_t parent_task, @@ -1136,7 +1138,7 @@ task_create_internal( return(KERN_RESOURCE_SHORTAGE); /* one ref for just being alive; one for our caller */ - new_task->ref_count = 2; + os_ref_init_count(&new_task->ref_count, &task_refgrp, 2); /* allocate with active entries */ assert(task_ledger_template != NULL); @@ -1530,7 +1532,7 @@ task_deallocate( task_t task) { ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups; - uint32_t refs; + os_ref_count_t refs; if (task == TASK_NULL) return; @@ -1538,31 +1540,24 @@ task_deallocate( refs = task_deallocate_internal(task); #if IMPORTANCE_INHERITANCE - if (refs > 1) - return; - - atomic_load_explicit(&task->ref_count, memory_order_acquire); - if (refs == 1) { /* * If last ref potentially comes from the task's importance, * disconnect it. But more task refs may be added before * that completes, so wait for the reference to go to zero - * naturually (it may happen on a recursive task_deallocate() + * naturally (it may happen on a recursive task_deallocate() * from the ipc_importance_disconnect_task() call). */ if (IIT_NULL != task->task_imp_base) ipc_importance_disconnect_task(task); return; } -#else - if (refs > 0) - return; - - atomic_load_explicit(&task->ref_count, memory_order_acquire); - #endif /* IMPORTANCE_INHERITANCE */ + if (refs > 0) { + return; + } + lck_mtx_lock(&tasks_threads_lock); queue_remove(&terminated_tasks, task, task_t, tasks); terminated_tasks_count--; @@ -3653,22 +3648,13 @@ host_security_set_task_token( kern_return_t task_send_trace_memory( - task_t target_task, + __unused task_t target_task, __unused uint32_t pid, __unused uint64_t uniqueid) { - kern_return_t kr = KERN_INVALID_ARGUMENT; - if (target_task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); - -#if CONFIG_ATM - kr = atm_send_proc_inspect_notification(target_task, - pid, - uniqueid); - -#endif - return (kr); + return KERN_INVALID_ARGUMENT; } + /* * This routine was added, pretty much exclusively, for registering the * RPC glue vector for in-kernel short circuited tasks. 
Rather than @@ -6202,7 +6188,7 @@ task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor, switch (flavor) { case TASK_INSPECT_BASIC_COUNTS: { struct task_inspect_basic_counts *bc; - uint64_t task_counts[MT_CORE_NFIXED]; + uint64_t task_counts[MT_CORE_NFIXED] = { 0 }; if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) { kr = KERN_INVALID_ARGUMENT; diff --git a/osfmk/kern/task.h b/osfmk/kern/task.h index fe43b2db1..12a216230 100644 --- a/osfmk/kern/task.h +++ b/osfmk/kern/task.h @@ -127,6 +127,7 @@ #include #include #include +#include #ifdef CONFIG_ATM #include @@ -147,7 +148,7 @@ struct _cpu_time_qos_stats { struct task { /* Synchronization/destruction information */ decl_lck_mtx_data(,lock) /* Task's lock */ - _Atomic uint32_t ref_count; /* Number of references to me */ + os_refcnt_t ref_count; /* Number of references to me */ boolean_t active; /* Task has not been terminated */ boolean_t halting; /* Task is being halted */ /* Virtual timers */ @@ -477,13 +478,10 @@ task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *); #if TASK_REFERENCE_LEAK_DEBUG extern void task_reference_internal(task_t task); -extern uint32_t task_deallocate_internal(task_t task); +extern os_ref_count_t task_deallocate_internal(task_t task); #else -#define task_reference_internal(task) \ - (void)atomic_fetch_add_explicit(&(task)->ref_count, 1, memory_order_relaxed) - -#define task_deallocate_internal(task) \ - (atomic_fetch_sub_explicit(&task->ref_count, 1, memory_order_release) - 1) +#define task_reference_internal(task) os_ref_retain(&(task)->ref_count) +#define task_deallocate_internal(task) os_ref_release(&(task)->ref_count) #endif #define task_reference(task) \ diff --git a/osfmk/kern/thread.c b/osfmk/kern/thread.c index 81f934a17..84e0277b0 100644 --- a/osfmk/kern/thread.c +++ b/osfmk/kern/thread.c @@ -244,6 +244,8 @@ void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(t */ #define MINIMUM_CPULIMIT_INTERVAL_MS 1 +os_refgrp_decl(static, thread_refgrp, "thread", NULL); + void thread_bootstrap(void) { @@ -257,8 +259,6 @@ thread_bootstrap(void) thread_template.runq = PROCESSOR_NULL; - thread_template.ref_count = 2; - thread_template.reason = AST_NONE; thread_template.at_safe_point = FALSE; thread_template.wait_event = NO_EVENT64; @@ -413,6 +413,7 @@ thread_bootstrap(void) thread_template.th_work_interval = NULL; init_thread = thread_template; + machine_set_current_thread(&init_thread); } @@ -687,49 +688,34 @@ thread_terminate_self(void) /*NOTREACHED*/ } -/* Drop a thread refcount safely without triggering a zfree */ -void -thread_deallocate_safe(thread_t thread) +static bool +thread_ref_release(thread_t thread) { - __assert_only uint32_t th_ref_count; - - if (thread == THREAD_NULL) - return; + if (thread == THREAD_NULL) { + return false; + } assert_thread_magic(thread); - if (__probable(atomic_fetch_sub_explicit(&thread->ref_count, 1, - memory_order_release) - 1 > 0)) { - return; - } - - th_ref_count = atomic_load_explicit(&thread->ref_count, memory_order_acquire); - assert(th_ref_count == 0); - - /* enqueue the thread for thread deallocate deamon to call thread_deallocate_complete */ - thread_deallocate_enqueue(thread); + return os_ref_release(&thread->ref_count) == 0; } +/* Drop a thread refcount safely without triggering a zfree */ void -thread_deallocate( - thread_t thread) +thread_deallocate_safe(thread_t thread) { - __assert_only uint32_t th_ref_count; - - if (thread == THREAD_NULL) - return; - - assert_thread_magic(thread); - - if 
(__probable(atomic_fetch_sub_explicit(&thread->ref_count, 1, - memory_order_release) - 1 > 0)) { - return; - } - - th_ref_count = atomic_load_explicit(&thread->ref_count, memory_order_acquire); - assert(th_ref_count == 0); + if (__improbable(thread_ref_release(thread))) { + /* enqueue the thread for thread deallocate deamon to call thread_deallocate_complete */ + thread_deallocate_enqueue(thread); + } +} - thread_deallocate_complete(thread); +void +thread_deallocate(thread_t thread) +{ + if (__improbable(thread_ref_release(thread))) { + thread_deallocate_complete(thread); + } } void @@ -740,7 +726,7 @@ thread_deallocate_complete( assert_thread_magic(thread); - assert(thread->ref_count == 0); + assert(os_ref_get_count(&thread->ref_count) == 0); assert(thread_owned_workloops_count(thread) == 0); @@ -1332,6 +1318,8 @@ thread_create_internal( if (new_thread != first_thread) *new_thread = thread_template; + os_ref_init_count(&new_thread->ref_count, &thread_refgrp, 2); + #ifdef MACH_BSD new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0); if (new_thread->uthread == NULL) { diff --git a/osfmk/kern/thread.h b/osfmk/kern/thread.h index d2cf4278e..d0a5212a2 100644 --- a/osfmk/kern/thread.h +++ b/osfmk/kern/thread.h @@ -128,6 +128,7 @@ #include #include +#include #include @@ -287,7 +288,7 @@ struct thread { int16_t promotions; /* level of promotion */ int iotier_override; /* atomic operations to set, cleared on ret to user */ - _Atomic uint32_t ref_count; /* number of references to me */ + struct os_refcnt ref_count; /* number of references to me */ lck_mtx_t* waiting_for_mutex; /* points to mutex we're waiting for until we acquire it */ @@ -646,7 +647,7 @@ extern void thread_init(void); extern void thread_daemon_init(void); #define thread_reference_internal(thread) \ - (void)atomic_fetch_add_explicit(&(thread)->ref_count, 1, memory_order_relaxed) + os_ref_retain(&(thread)->ref_count); #define thread_reference(thread) \ MACRO_BEGIN \ diff --git a/osfmk/kern/trustcache.h b/osfmk/kern/trustcache.h index 4fd57d53a..889e407d9 100644 --- a/osfmk/kern/trustcache.h +++ b/osfmk/kern/trustcache.h @@ -73,7 +73,7 @@ struct trust_cache_module1 { #define TC_LOOKUP_RESULT_MASK 0xffL #define TC_LOOKUP_FOUND 1 -#define TC_LOOKUP_FALLBACK 2 +// #define TC_LOOKUP_FALLBACK 2 /* obsolete with removal of legacy static trust caches */ #ifdef XNU_KERNEL_PRIVATE @@ -86,22 +86,6 @@ struct serialized_trust_caches { } __attribute__((__packed__)); -// Legacy Static Trust Cache - -/* This is the old legacy trust cache baked into the AMFI kext. - * We support it for a transitionary period, until external trust caches - * are fully established, and the AMFI trust cache can be removed. 
*/ - -struct legacy_trust_cache_bucket { - uint16_t count; - uint16_t offset; -} __attribute__((__packed__)); - -#define LEGACY_TRUST_CACHE_ENTRY_LEN (CS_CDHASH_LEN-1) -#define LEGACY_TRUST_CACHE_BUCKET_COUNT (256) - -typedef uint8_t pmap_cs_legacy_stc_entry[CS_CDHASH_LEN-1]; // bucketized with first byte - void trust_cache_init(void); uint32_t lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]); diff --git a/osfmk/kperf/kperf.c b/osfmk/kperf/kperf.c index 831f3afd8..7d33d9f1b 100644 --- a/osfmk/kperf/kperf.c +++ b/osfmk/kperf/kperf.c @@ -344,7 +344,7 @@ kperf_port_to_pid(mach_port_name_t portname) } pid_t pid = task_pid(task); /* drop the ref taken by port_name_to_task */ - task_deallocate_internal(task); + (void)task_deallocate_internal(task); return pid; } diff --git a/osfmk/vm/vm_compressor_pager.c b/osfmk/vm/vm_compressor_pager.c index c8ec4fed0..aa9d4662e 100644 --- a/osfmk/vm/vm_compressor_pager.c +++ b/osfmk/vm/vm_compressor_pager.c @@ -707,8 +707,10 @@ vm_compressor_pager_put( { compressor_pager_t pager; compressor_slot_t *slot_p; +#if __arm__ || __arm64__ unsigned int prev_wimg = VM_WIMG_DEFAULT; boolean_t set_cache_attr = FALSE; +#endif compressor_pager_stats.put++; @@ -749,6 +751,7 @@ vm_compressor_pager_put( *compressed_count_delta_p -= 1; } +#if __arm__ || __arm64__ /* * cacheability should be set to the system default (usually writeback) * during compressor operations, both for performance and correctness, @@ -772,6 +775,11 @@ vm_compressor_pager_put( pmap_set_cache_attributes(ppnum, prev_wimg); return KERN_RESOURCE_SHORTAGE; } +#else + if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) { + return KERN_RESOURCE_SHORTAGE; + } +#endif *compressed_count_delta_p += 1; return KERN_SUCCESS; @@ -820,6 +828,7 @@ vm_compressor_pager_get( if (kr == KERN_SUCCESS) { int retval; +#if __arm__ || __arm64__ unsigned int prev_wimg = VM_WIMG_DEFAULT; boolean_t set_cache_attr = FALSE; @@ -835,7 +844,7 @@ vm_compressor_pager_get( set_cache_attr = TRUE; pmap_set_cache_attributes(ppnum, VM_WIMG_DEFAULT); } - +#endif /* get the page from the compressor */ retval = vm_compressor_get(ppnum, slot_p, flags); if (retval == -1) @@ -846,8 +855,10 @@ vm_compressor_pager_get( assert((flags & C_DONT_BLOCK)); kr = KERN_FAILURE; } +#if __arm__ || __arm64__ if (set_cache_attr) pmap_set_cache_attributes(ppnum, prev_wimg); +#endif } if (kr == KERN_SUCCESS) { diff --git a/osfmk/vm/vm_pageout.c b/osfmk/vm/vm_pageout.c index bf722548b..7aaef6cdd 100644 --- a/osfmk/vm/vm_pageout.c +++ b/osfmk/vm/vm_pageout.c @@ -1742,6 +1742,7 @@ void update_vm_info(void) record_memory_pressure(); } +extern boolean_t hibernation_vmqueues_inspection; void vm_page_balance_inactive(int max_to_move) @@ -1750,6 +1751,17 @@ vm_page_balance_inactive(int max_to_move) LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); + if (hibernation_vmqueues_inspection == TRUE) { + /* + * It is likely that the hibernation code path is + * dealing with these very queues as we are about + * to move pages around in/from them and completely + * change the linkage of the pages. + * + * And so we skip the rebalancing of these queues. 
+ */ + return; + } vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count + vm_page_inactive_count + vm_page_speculative_count); diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c index 748c42754..860bde4d5 100644 --- a/osfmk/vm/vm_resident.c +++ b/osfmk/vm/vm_resident.c @@ -134,6 +134,8 @@ int speculative_age_index = 0; int speculative_steal_index = 0; struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1]; +boolean_t hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues. + * Updated and checked behind the vm_page_queues_lock. */ __private_extern__ void vm_page_init_lck_grp(void); @@ -7043,6 +7045,10 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, lck_mtx_lock(&vm_page_queue_free_lock); } + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); + + hibernation_vmqueues_inspection = TRUE; + m = (vm_page_t) hibernate_gobble_queue; while (m) { @@ -7325,6 +7331,8 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active; + hibernation_vmqueues_inspection = FALSE; + #if MACH_ASSERT || DEBUG if (!preflight) { diff --git a/tests/monotonic_core.c b/tests/monotonic_core.c index 3feaeba94..9c04f491a 100644 --- a/tests/monotonic_core.c +++ b/tests/monotonic_core.c @@ -155,6 +155,68 @@ T_DECL(core_fixed_kdebug, "check that the kdebug macros for monotonic work", dispatch_main(); } +static void * +spin_thread_self_counts(__unused void *arg) +{ + extern int thread_selfcounts(int, void *, size_t); + uint64_t counts[2] = { 0 }; + while (true) { + (void)thread_selfcounts(1, &counts, sizeof(counts)); + } +} + +static void * +spin_task_inspect(__unused void *arg) +{ + task_t task = mach_task_self(); + uint64_t counts[2] = { 0 }; + unsigned int size = 0; + while (true) { + size = (unsigned int)sizeof(counts); + (void)task_inspect(task, TASK_INSPECT_BASIC_COUNTS, + (task_inspect_info_t)&counts[0], &size); + /* + * Not realistic for a process to see count values with the high bit + * set, but kernel pointers will be that high. + */ + T_QUIET; T_ASSERT_LT(counts[0], 1ULL << 63, + "check for valid count entry 1"); + T_QUIET; T_ASSERT_LT(counts[1], 1ULL << 63, + "check for valid count entry 2"); + } +} + +T_DECL(core_fixed_stack_leak_race, + "ensure no stack data is leaked by TASK_INSPECT_BASIC_COUNTS") +{ + T_SETUPBEGIN; + + int ncpus = 0; + T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.logicalcpu_max", &ncpus, + &(size_t){ sizeof(ncpus) }, NULL, 0), "get number of CPUs"); + T_QUIET; T_ASSERT_GT(ncpus, 0, "got non-zero number of CPUs"); + pthread_t *threads = calloc((unsigned long)ncpus, sizeof(*threads)); + + T_QUIET; T_ASSERT_NOTNULL(threads, "allocated space for threads"); + + T_LOG("creating %d threads to attempt to race around task counts", ncpus); + /* + * Have half the threads hammering thread_self_counts and the other half + * trying to get an error to occur inside TASK_INSPECT_BASIC_COUNTS and see + * uninitialized kernel memory. + */ + for (int i = 0; i < ncpus; i++) { + T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&threads[i], NULL, + i & 1 ? spin_task_inspect : spin_thread_self_counts, NULL), + NULL); + } + + T_SETUPEND; + + sleep(10); + T_PASS("ending test after 10 seconds"); +} + static void perf_sysctl_deltas(const char *sysctl_name, const char *stat_name) {