xnu-1699.26.8
Darwin authored and das committed Jun 4, 2017
1 parent f681061 commit eba9946
Showing 92 changed files with 2,812 additions and 1,473 deletions.
1 change: 0 additions & 1 deletion bsd/dev/i386/sysctl.c
@@ -440,7 +440,6 @@ SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, energy_policy,
sizeof(boolean_t),
cpu_thermal, "I", "Energy Efficient Policy Support");


SYSCTL_NODE(_machdep_cpu, OID_AUTO, xsave, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
"xsave");

157 changes: 64 additions & 93 deletions bsd/hfs/hfs_readwrite.c
@@ -259,6 +259,7 @@ hfs_vnop_write(struct vnop_write_args *ap)
int do_snapshot = 1;
time_t orig_ctime=VTOC(vp)->c_ctime;
int took_truncate_lock = 0;
struct rl_entry *invalid_range;

#if HFS_COMPRESSION
if ( hfs_file_is_compressed(VTOC(vp), 1) ) { /* 1 == don't take the cnode lock */
@@ -328,7 +329,14 @@ hfs_vnop_write(struct vnop_write_args *ap)

again:
/* Protect against a size change. */
if (ioflag & IO_APPEND) {
/*
* Protect against a size change.
*
* Note: If took_truncate_lock is true, then we previously got the lock shared
* but needed to upgrade to exclusive. So try getting it exclusive from the
* start.
*/
if (ioflag & IO_APPEND || took_truncate_lock) {
hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
}
else {
@@ -350,17 +358,42 @@ hfs_vnop_write(struct vnop_write_args *ap)
writelimit = offset + resid;
filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;

/* If the truncate lock is shared, and if we either have virtual
* blocks or will need to extend the file, upgrade the truncate
* to exclusive lock. If upgrade fails, we lose the lock and
* have to get exclusive lock again. Note that we want to
* grab the truncate lock exclusive even if we're not allocating new blocks
* because we could still be growing past the LEOF.
/*
* We may need an exclusive truncate lock for several reasons, all
* of which are because we may be writing to a (portion of a) block
* for the first time, and we need to make sure no readers see the
* prior, uninitialized contents of the block. The cases are:
*
* 1. We have unallocated (delayed allocation) blocks. We may be
* allocating new blocks to the file and writing to them.
* (A more precise check would be whether the range we're writing
* to contains delayed allocation blocks.)
* 2. We need to extend the file. The bytes between the old EOF
* and the new EOF are not yet initialized. This is important
* even if we're not allocating new blocks to the file. If the
* old EOF and new EOF are in the same block, we still need to
* protect that range of bytes until they are written for the
* first time.
* 3. The write overlaps some invalid ranges (delayed zero fill; that
* part of the file has been allocated, but not yet written).
*
* If we had a shared lock with the above cases, we need to try to upgrade
* to an exclusive lock. If the upgrade fails, we will lose the shared
* lock, and will need to take the truncate lock again; the took_truncate_lock
* flag will still be set, causing us to try for an exclusive lock next time.
*
* NOTE: Testing for #3 (delayed zero fill) needs to be done while the cnode
* lock is held, since it protects the range lists.
*/
if ((cp->c_truncatelockowner == HFS_SHARED_OWNER) &&
((fp->ff_unallocblocks != 0) || (writelimit > origFileSize))) {
/* Lock upgrade failed and we lost our shared lock, try again */
((fp->ff_unallocblocks != 0) ||
(writelimit > origFileSize))) {
if (lck_rw_lock_shared_to_exclusive(&cp->c_truncatelock) == FALSE) {
/*
* Lock upgrade failed and we lost our shared lock, try again.
* Note: we do not set took_truncate_lock=0 here. Leaving it
* set to 1 will cause us to try to get the lock exclusive.
*/
goto again;
}
else {
@@ -374,11 +407,28 @@ hfs_vnop_write(struct vnop_write_args *ap)
}
cnode_locked = 1;

if (cp->c_truncatelockowner == HFS_SHARED_OWNER) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_START,
(int)offset, uio_resid(uio), (int)fp->ff_size,
(int)filebytes, 0);
/*
* Now that we have the cnode lock, see if there are delayed zero fill ranges
* overlapping our write. If so, we need the truncate lock exclusive (see above).
*/
if ((cp->c_truncatelockowner == HFS_SHARED_OWNER) &&
(rl_scan(&fp->ff_invalidranges, offset, writelimit-1, &invalid_range) != RL_NOOVERLAP)) {
/*
* When testing, it appeared that calling lck_rw_lock_shared_to_exclusive() causes
* a deadlock, rather than simply returning failure. (That is, it apparently does
* not behave like a "try_lock"). Since this condition is rare, just drop the
* cnode lock and try again. Since took_truncate_lock is set, we will
* automatically take the truncate lock exclusive.
*/
hfs_unlock(cp);
cnode_locked = 0;
hfs_unlock_truncate(cp, 0);
goto again;
}

KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_START,
(int)offset, uio_resid(uio), (int)fp->ff_size,
(int)filebytes, 0);

/* Check if we do not need to extend the file */
if (writelimit <= filebytes) {
@@ -452,7 +502,6 @@ hfs_vnop_write(struct vnop_write_args *ap)
off_t inval_end;
off_t io_start;
int lflag;
struct rl_entry *invalid_range;

if (writelimit > fp->ff_size)
filesize = writelimit;
@@ -1966,85 +2015,7 @@ hfs_vnop_ioctl( struct vnop_ioctl_args /* {

case F_READBOOTSTRAP:
case F_WRITEBOOTSTRAP:
{
struct vnode *devvp = NULL;
user_fbootstraptransfer_t *user_bootstrapp;
int devBlockSize;
int error;
uio_t auio;
daddr64_t blockNumber;
u_int32_t blockOffset;
u_int32_t xfersize;
struct buf *bp;
user_fbootstraptransfer_t user_bootstrap;

if (!vnode_isvroot(vp))
return (EINVAL);
/* LP64 - when caller is a 64 bit process then we are passed a pointer
* to a user_fbootstraptransfer_t else we get a pointer to a
* fbootstraptransfer_t which we munge into a user_fbootstraptransfer_t
*/
if ((hfsmp->hfs_flags & HFS_READ_ONLY)
&& (ap->a_command == F_WRITEBOOTSTRAP)) {
return (EROFS);
}
if (is64bit) {
user_bootstrapp = (user_fbootstraptransfer_t *)ap->a_data;
}
else {
user32_fbootstraptransfer_t *bootstrapp = (user32_fbootstraptransfer_t *)ap->a_data;
user_bootstrapp = &user_bootstrap;
user_bootstrap.fbt_offset = bootstrapp->fbt_offset;
user_bootstrap.fbt_length = bootstrapp->fbt_length;
user_bootstrap.fbt_buffer = CAST_USER_ADDR_T(bootstrapp->fbt_buffer);
}

if ((user_bootstrapp->fbt_offset < 0) || (user_bootstrapp->fbt_offset > 1024) ||
(user_bootstrapp->fbt_length > 1024)) {
return EINVAL;
}

if (user_bootstrapp->fbt_offset + user_bootstrapp->fbt_length > 1024)
return EINVAL;

devvp = VTOHFS(vp)->hfs_devvp;
auio = uio_create(1, user_bootstrapp->fbt_offset,
is64bit ? UIO_USERSPACE64 : UIO_USERSPACE32,
(ap->a_command == F_WRITEBOOTSTRAP) ? UIO_WRITE : UIO_READ);
uio_addiov(auio, user_bootstrapp->fbt_buffer, user_bootstrapp->fbt_length);

devBlockSize = vfs_devblocksize(vnode_mount(vp));

while (uio_resid(auio) > 0) {
blockNumber = uio_offset(auio) / devBlockSize;
error = (int)buf_bread(devvp, blockNumber, devBlockSize, cred, &bp);
if (error) {
if (bp) buf_brelse(bp);
uio_free(auio);
return error;
};

blockOffset = uio_offset(auio) % devBlockSize;
xfersize = devBlockSize - blockOffset;
error = uiomove((caddr_t)buf_dataptr(bp) + blockOffset, (int)xfersize, auio);
if (error) {
buf_brelse(bp);
uio_free(auio);
return error;
};
if (uio_rw(auio) == UIO_WRITE) {
error = VNOP_BWRITE(bp);
if (error) {
uio_free(auio);
return error;
}
} else {
buf_brelse(bp);
};
};
uio_free(auio);
};
return 0;
return 0;

case _IOC(IOC_OUT,'h', 4, 0): /* Create date in local time */
{
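The hfs_vnop_write hunks above all hinge on one retry pattern: take the truncate lock shared, try to upgrade to exclusive when needed, and if the upgrade fails (losing the shared lock in the process) loop back and take the lock exclusive from the start, driven by a sticky flag. Below is a minimal userspace sketch of that pattern, not the kernel code itself: rw_try_upgrade() is a hypothetical stand-in for lck_rw_lock_shared_to_exclusive(), and since pthread rwlocks cannot be upgraded, the stub always drops the lock and reports failure, which is exactly the case the retry loop has to handle.

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t truncate_lock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * Hypothetical stand-in for lck_rw_lock_shared_to_exclusive(): on
 * failure the shared lock is already gone and the caller must relock.
 */
static bool
rw_try_upgrade(pthread_rwlock_t *l)
{
	pthread_rwlock_unlock(l);
	return false;
}

static void
write_under_truncate_lock(bool need_exclusive)
{
	bool took_lock_exclusive = false;

again:
	if (took_lock_exclusive) {
		/* An upgrade already failed once; go straight for
		 * exclusive, mirroring the took_truncate_lock check. */
		pthread_rwlock_wrlock(&truncate_lock);
	} else {
		pthread_rwlock_rdlock(&truncate_lock);
	}

	if (need_exclusive && !took_lock_exclusive) {
		if (!rw_try_upgrade(&truncate_lock)) {
			/* Shared lock lost; retry, exclusive this time. */
			took_lock_exclusive = true;
			goto again;
		}
		took_lock_exclusive = true;
	}

	/* ... perform the write under the appropriate lock ... */

	pthread_rwlock_unlock(&truncate_lock);
}

int
main(void)
{
	write_under_truncate_lock(true);
	return 0;
}

The same shape appears a second time in the diff for the delayed zero fill case: there the code does not even attempt the upgrade (the commit notes it can deadlock), so it drops both the cnode lock and the truncate lock and relies on the sticky flag to come back exclusive.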
7 changes: 7 additions & 0 deletions bsd/hfs/hfscommon/Misc/FileExtentMapping.c
@@ -497,6 +497,7 @@ OSErr MapFileBlockC (
//
// Determine the end of the available space. It will either be the end of the extent,
// or the file's PEOF, whichever is smaller.

//
dataEnd = (off_t)((off_t)(nextFABN) * (off_t)(allocBlockSize)); // Assume valid data through end of this extent
if (((off_t)fcb->ff_blocks * (off_t)allocBlockSize) < dataEnd) // Is PEOF shorter?
@@ -529,6 +530,12 @@ OSErr MapFileBlockC (
if (availableBytes)
{
tmpOff = dataEnd - offset;
/*
* Disallow negative runs.
*/
if (tmpOff <= 0) {
return EINVAL;
}
if (tmpOff > (off_t)(numberOfBytes))
*availableBytes = numberOfBytes; // more there than they asked for, so pin the output
else
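The MapFileBlockC change is pure range arithmetic: if the caller's offset sits at or beyond the end of valid data, dataEnd - offset goes nonpositive and must be rejected rather than returned as a run length. A small standalone model of the clamp follows; the names mirror the kernel code, but this is a sketch, not the real function.

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

/*
 * dataEnd is the end of valid data in the current extent, offset the
 * caller's file offset, numberOfBytes the requested length. Returns 0
 * and stores the pinned run length, or EINVAL for a negative run.
 */
static int
pin_available_bytes(off_t dataEnd, off_t offset, off_t numberOfBytes,
    off_t *availableBytes)
{
	off_t tmpOff = dataEnd - offset;

	if (tmpOff <= 0)
		return EINVAL;	/* disallow negative (or empty) runs */

	if (tmpOff > numberOfBytes)
		*availableBytes = numberOfBytes;	/* more there than asked for: pin the output */
	else
		*availableBytes = tmpOff;
	return 0;
}

int
main(void)
{
	off_t avail;

	/* Offset past dataEnd: before this commit, a negative run. */
	printf("%d\n", pin_available_bytes(4096, 8192, 512, &avail));
	return 0;
}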
2 changes: 0 additions & 2 deletions bsd/kern/kern_fork.c
@@ -1373,8 +1373,6 @@ uthread_zone_init(void)
THREAD_CHUNK * sizeof(struct uthread),
"uthreads");
uthread_zone_inited = 1;

zone_change(uthread_zone, Z_NOENCRYPT, TRUE);
}
}

8 changes: 7 additions & 1 deletion bsd/kern/kern_panicinfo.c
@@ -43,6 +43,7 @@

/* prototypes not exported by osfmk/console. */
extern void panic_dialog_test( void );
extern void noroot_icon_test(void);
extern int panic_dialog_set_image( const unsigned char * ptr, unsigned int size );
extern void panic_dialog_get_image( unsigned char ** ptr, unsigned int * size );

@@ -51,7 +52,6 @@ static int sysctl_dopanicinfo SYSCTL_HANDLER_ARGS;


#define PANIC_IMAGE_SIZE_LIMIT (32 * 4096) /* 128K - Maximum amount of memory consumed for the panic UI */
#define KERN_PANICINFO_TEST (KERN_PANICINFO_IMAGE+2) /* Allow the panic UI to be tested by root without causing a panic */

/* Local data */
static int image_size_limit = PANIC_IMAGE_SIZE_LIMIT;
@@ -92,6 +92,12 @@ sysctl_dopanicinfo SYSCTL_HANDLER_ARGS
panic_dialog_test();
break;

case KERN_PANICINFO_NOROOT_TEST:
printf("Testing noroot icon \n");

noroot_icon_test();
break;

case KERN_PANICINFO_MAXSIZE:

/* return the image size limits */
16 changes: 10 additions & 6 deletions bsd/kern/kern_proc.c
@@ -2679,9 +2679,11 @@ cs_invalid_page(
if (p->p_csflags & CS_KILL) {
p->p_csflags |= CS_KILLED;
proc_unlock(p);
printf("CODE SIGNING: cs_invalid_page(0x%llx): "
"p=%d[%s] honoring CS_KILL, final status 0x%x\n",
vaddr, p->p_pid, p->p_comm, p->p_csflags);
if (cs_debug) {
printf("CODE SIGNING: cs_invalid_page(0x%llx): "
"p=%d[%s] honoring CS_KILL, final status 0x%x\n",
vaddr, p->p_pid, p->p_comm, p->p_csflags);
}
cs_procs_killed++;
psignal(p, SIGKILL);
proc_lock(p);
@@ -2690,9 +2692,11 @@ cs_invalid_page(
/* CS_HARD means fail the mapping operation so the process stays valid. */
if (p->p_csflags & CS_HARD) {
proc_unlock(p);
printf("CODE SIGNING: cs_invalid_page(0x%llx): "
"p=%d[%s] honoring CS_HARD\n",
vaddr, p->p_pid, p->p_comm);
if (cs_debug) {
printf("CODE SIGNING: cs_invalid_page(0x%llx): "
"p=%d[%s] honoring CS_HARD\n",
vaddr, p->p_pid, p->p_comm);
}
retval = 1;
} else {
if (p->p_csflags & CS_VALID) {
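The kern_proc.c hunks do not change code signing policy; they only gate the CS_KILL and CS_HARD diagnostics behind cs_debug. A compact model of the resulting flow is below; the flag values follow the shape of bsd/sys/codesign.h but should be treated as illustrative, and the kernel-only pieces (psignal, proc locking) are reduced to comments.

#include <stdio.h>

#define CS_HARD 0x0100	/* fail the mapping; process stays valid */
#define CS_KILL 0x0200	/* kill the process once it becomes invalid */

static int cs_debug = 0;	/* diagnostics are now opt-in */

/*
 * Sketch of cs_invalid_page()'s policy after this commit: behavior is
 * unchanged, only the printfs are conditional on cs_debug. Returns
 * nonzero if the mapping operation should fail (the CS_HARD case).
 */
static int
handle_invalid_page(unsigned int csflags, int pid, const char *comm)
{
	int retval = 0;

	if (csflags & CS_KILL) {
		if (cs_debug)
			printf("CODE SIGNING: p=%d[%s] honoring CS_KILL\n",
			    pid, comm);
		/* the kernel sends SIGKILL here via psignal() */
	}
	if (csflags & CS_HARD) {
		if (cs_debug)
			printf("CODE SIGNING: p=%d[%s] honoring CS_HARD\n",
			    pid, comm);
		retval = 1;
	}
	return retval;
}

int
main(void)
{
	cs_debug = 1;
	return handle_invalid_page(CS_HARD | CS_KILL, 123, "example") ? 0 : 1;
}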