/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98    cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
#endif

extern "C"
{


mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern kern_return_t kmem_suballoc(
    vm_map_t    parent,
    vm_offset_t *addr,
    vm_size_t   size,
    boolean_t   pageable,
    boolean_t   anywhere,
    vm_map_t    *new_map);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger.
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint  = 0;
    gIOKitPageableSpace.count = 1;

    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
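
/*
 * Example: creating a worker thread with IOCreateThread and ending it with
 * IOExitThread. A minimal sketch; MyWorkerThread and myArg are hypothetical.
 * Note that IOCreateThread drops its reference via thread_deallocate(), so
 * the returned IOThread identifies the thread but is not an owned reference.
 *
 *     static void MyWorkerThread(void * arg)
 *     {
 *         // ... perform work ...
 *         IOExitThread();   // terminates the calling thread; does not return
 *     }
 *
 *     IOThread thread = IOCreateThread(&MyWorkerThread, myArg);
 *     if (!thread)
 *         IOLog("MyDriver: failed to create worker thread\n");
 */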

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address) {
        debug_iomalloc_size += size;
    }
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}
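
/*
 * Example: IOMalloc and IOFree must be paired with the same size, since
 * kfree() requires the original allocation size. A minimal sketch;
 * kBufferSize is hypothetical.
 *
 *     enum { kBufferSize = 1024 };
 *     UInt8 * buffer = (UInt8 *) IOMalloc(kBufferSize);
 *     if (buffer) {
 *         // ... use buffer; IOMalloc may block, so avoid interrupt context ...
 *         IOFree(buffer, kBufferSize);
 *     }
 */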

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                            adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address) {
        debug_iomalloc_size += size;
    }
#endif

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
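
/*
 * Layout of a sub-page IOMallocAligned allocation. The returned pointer is
 * preceded by two hidden fields that IOFreeAligned reads back to recover
 * the underlying allocation (offsets scale with sizeof(vm_size_t) and
 * sizeof(vm_address_t)):
 *
 *     allocationAddress                                        address
 *     |                                                        |
 *     v                                                        v
 *     +------ padding ------+--------------+-------------------+-- user data
 *     |                     | adjustedSize | allocationAddress |
 *     +---------------------+--------------+-------------------+------------
 *
 * Example usage; kDMAAlignment is hypothetical:
 *
 *     enum { kDMAAlignment = 256 };
 *     void * p = IOMallocAligned(512, kDMAAlignment);
 *     if (p) {
 *         assert(0 == ((vm_address_t) p & (kDMAAlignment - 1)));
 *         IOFreeAligned(p, 512);
 *     }
 */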

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreeContiguous(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

mach_vm_address_t
IOKernelAllocateContiguous(mach_vm_size_t size, mach_vm_size_t alignment)
{
    kern_return_t     kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    if (adjustedSize >= page_size)
    {
        vm_offset_t virt;
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, 0, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

#if IOALLOCDEBUG
    if (address) {
        debug_iomalloc_size += size;
    }
#endif

    return (address);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                           IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateContiguous(size, alignment);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = 0xFFFFFFFF ^ (alignMask & PAGE_MASK);
        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreeContiguous((mach_vm_address_t) address, size);
    }
}
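
/*
 * Example: allocating physically contiguous memory and retrieving its
 * physical address for DMA. A minimal sketch; kRingSize is hypothetical.
 * When a physical address is requested, the allocation is backed by an
 * IOBufferMemoryDescriptor tracked on gIOMallocContiguousEntries, so that
 * IOFreeContiguous can locate and release the descriptor.
 *
 *     enum { kRingSize = 4096 };
 *     IOPhysicalAddress physAddr;
 *     void * ring = IOMallocContiguous(kRingSize, page_size, &physAddr);
 *     if (ring) {
 *         // ... program the device with physAddr ...
 *         IOFreeContiguous(ring, kRingSize);
 *     }
 */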

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}
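
/*
 * Example: pageable kernel memory suits large allocations that are only
 * touched in thread context, since accesses may fault pages in from the
 * pageable submaps managed above. A minimal sketch; kTableSize is
 * hypothetical.
 *
 *     enum { kTableSize = 256 * 1024 };
 *     void * table = IOMallocPageable(kTableSize, sizeof(UInt32));
 *     if (table) {
 *         // ... never touch from interrupt context; accesses may fault ...
 *         IOFreePageable(table, kTableSize);
 *     }
 */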

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}
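
/*
 * Example: changing the cache mode of an existing kernel mapping, e.g. to
 * make a buffer shared with a device write-through. A minimal sketch; vaddr
 * and len are hypothetical and must describe a valid kernel_task mapping.
 * Only kernel_task is supported, per the check above.
 *
 *     IOReturn rc = IOSetProcessorCacheMode(kernel_task, vaddr, len,
 *                                           kIOWriteThruCache);
 *     if (kIOReturnSuccess != rc)
 *         IOLog("cache mode change failed: 0x%x\n", rc);
 */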


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                  IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep (block) the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for the indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for the indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}
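
/*
 * Choosing among the delay routines (a sketch of the documented semantics,
 * not a statement about this particular implementation): IOSleep may block
 * the calling thread and is only legal where blocking is allowed; IODelay
 * and IOPause are intended for short waits in contexts that cannot block.
 *
 *     IOSleep(10);     // ~10 ms; thread context only
 *     IODelay(50);     // ~50 us; keep spins brief
 *     IOPause(100);    // ~100 ns
 */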

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic("%s", reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80]; // shared static buffer; not thread safe

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
        const IONamedValue *regValueArray,
        int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
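
/*
 * Example: a lookup table for IOFindNameForValue/IOFindValueForName. The
 * array is scanned until an entry with a NULL name terminates it. A minimal
 * sketch; the state names are hypothetical.
 *
 *     static const IONamedValue gPowerStateNames[] = {
 *         { 0, "off"  },
 *         { 1, "idle" },
 *         { 2, "on"   },
 *         { 0, NULL   }
 *     };
 *
 *     const char * name = IOFindNameForValue(2, gPowerStateNames); // "on"
 *     int value;
 *     IOReturn rc = IOFindValueForName("idle", gPowerStateNames, &value);
 */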

OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;
    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
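
/*
 * Example: IOSizeToAlignment returns the bit position of the highest set
 * bit in size (i.e. floor(log2(size))), and IOAlignmentToSize computes the
 * inverse for exact powers of two:
 *
 *     IOAlignment align = IOSizeToAlignment(4096);   // 12
 *     unsigned int size = IOAlignmentToSize(align);  // 4096
 */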

} /* extern "C" */