1 /*
2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/task.h>
31 #include <mach/thread_act.h>
32
33 #include <kern/kern_types.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/kalloc.h>
37
38 #include <chud/chud_xnu.h>
39 #include <chud/chud_xnu_private.h>
40 #include <chud/chud_thread.h>
41
42 #include <machine/machine_routines.h>
43
44 #include <libkern/OSAtomic.h>
45
46 // include the correct file to find real_ncpus
47 #if defined(__i386__) || defined(__x86_64__)
48 # include <i386/mp.h>
49 #elif defined(__ppc__) || defined(__ppc64__)
50 # include <ppc/cpu_internal.h>
51 #elif defined(__arm__)
52 # include <arm/cpu_internal.h>
53 #else
54 // fall back on declaring it extern. The linker will sort us out.
55 extern unsigned int real_ncpus;
56 #endif
57
58 // Mask for supported options
59 #define T_CHUD_BIND_OPT_MASK (-1UL)
60
61 #pragma mark **** thread binding ****
62
63 /*
64 * This method will bind a given thread to the requested CPU starting at the
65 * next time quantum. If the thread is the current thread, this method will
66 * force a thread_block(). The result is that if you call this method on the
67 * current thread, you will be on the requested CPU when this method returns.
68 */
69 __private_extern__ kern_return_t
70 chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
71 {
72 processor_t proc = NULL;
73
74 if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
75 return KERN_FAILURE;
76
77 // temporary restriction until after phase 2 of the scheduler
78 if(thread != current_thread())
79 return KERN_FAILURE;
80
81 proc = cpu_to_processor(cpu);
82
83 /*
84 * Potentially racey, but mainly to prevent bind to shutdown
85 * processor.
86 */
87 if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
88 !(proc->state == PROCESSOR_SHUTDOWN)) {
89
90 thread_bind(proc);
91
92 /*
93 * If we're trying to bind the current thread, and
94 * we're not on the target cpu, and not at interrupt
95 * context, block the current thread to force a
96 * reschedule on the target CPU.
97 */
98 if(thread == current_thread() &&
99 !(ml_at_interrupt_context() && cpu_number() == cpu)) {
100 (void)thread_block(THREAD_CONTINUE_NULL);
101 }
102 return KERN_SUCCESS;
103 }
104 return KERN_FAILURE;
105 }
106
107 __private_extern__ kern_return_t
108 chudxnu_unbind_thread(thread_t thread, __unused int options)
109 {
110 if(thread == current_thread())
111 thread_bind(PROCESSOR_NULL);
112 return KERN_SUCCESS;
113 }
114
115 __private_extern__ boolean_t
116 chudxnu_thread_get_idle(thread_t thread) {
117 /*
118 * Instantaneous snapshot of the idle state of
119 * a given thread.
120 *
121 * Should be called only on an interrupted or
122 * suspended thread to avoid a race.
123 */
124 return ((thread->state & TH_IDLE) == TH_IDLE);
125 }
126
127 #pragma mark **** task and thread info ****
128
129 __private_extern__ boolean_t
130 chudxnu_is_64bit_task(task_t task)
131 {
132 return (task_has_64BitAddr(task));
133 }
134
135 #define THING_TASK 0
136 #define THING_THREAD 1
137
// an exact copy of processor_set_things() except no mig conversion at the end!
//
// Collects a referenced snapshot of either every task (THING_TASK) or every
// thread (THING_THREAD) in the system into a kalloc'd array of `actual`
// pointers.  The caller owns both the references and the array storage
// (released via chudxnu_free_task_list / chudxnu_free_thread_list).
// Only the default processor set (&pset0) is accepted.
static kern_return_t
chudxnu_private_processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	/*
	 * Allocate-outside-the-lock retry loop: read the current count under
	 * tasks_threads_lock; if our buffer is too small, drop the lock, grow
	 * the buffer, and re-check (the count can change while unlocked).
	 * On loop exit the lock is still HELD and `addr` is large enough.
	 */
	for (;;) {
		mutex_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		mutex_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t task, *task_list = (task_t *)addr;

		/* take a ref on each task so the snapshot stays valid
		 * after the lock is dropped */
		for (task = (task_t)queue_first(&tasks);
				!queue_end(&tasks, (queue_entry_t)task);
				task = (task_t)queue_next(&task->tasks)) {
			task_reference_internal(task);
			task_list[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t thread, *thread_list = (thread_t *)addr;

		/* same idea for threads; the `i = 0` init here is vestigial
		 * (i is only used in the error paths below) */
		for (i = 0, thread = (thread_t)queue_first(&threads);
				!queue_end(&threads, (queue_entry_t)thread);
				thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}
	}

	mutex_unlock(&tasks_threads_lock);

	/* things may have exited while unlocked earlier; shrink to fit */
	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* shrink failed: drop every reference we
				 * took above, then bail */
				switch (type) {

				case THING_TASK:
				{
					task_t *task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t *thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;
	}

	return (KERN_SUCCESS);
}
276
// an exact copy of task_threads() except no mig conversion at the end!
//
// Collects a referenced snapshot of every thread in `task` into a kalloc'd
// array.  The caller owns the thread references and the array storage
// (released via chudxnu_free_thread_list).
static kern_return_t
chudxnu_private_task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t actual;
	thread_t *thread_list;
	thread_t thread;
	vm_size_t size, size_needed;
	void *addr;
	unsigned int i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	/*
	 * Allocate-outside-the-lock retry loop: read thread_count under the
	 * task lock; if the buffer is too small, drop the lock, grow it, and
	 * re-check.  On loop exit the task lock is still HELD.
	 */
	for (;;) {
		task_lock(task);
		if (!task->active) {
			/* task is terminating; clean up and fail */
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	/* bounded by `actual` (read under the lock), so no overflow */
	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	/* count and queue contents must agree while locked */
	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* shrink failed: release all refs and bail */
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;
	}

	return (KERN_SUCCESS);
}
381
382
383 __private_extern__ kern_return_t
384 chudxnu_all_tasks(
385 task_array_t *task_list,
386 mach_msg_type_number_t *count)
387 {
388 return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)task_list, count, THING_TASK);
389 }
390
391 __private_extern__ kern_return_t
392 chudxnu_free_task_list(
393 task_array_t *task_list,
394 mach_msg_type_number_t *count)
395 {
396 vm_size_t size = (*count)*sizeof(mach_port_t);
397 void *addr = *task_list;
398
399 if(addr) {
400 int i, maxCount = *count;
401 for(i=0; i<maxCount; i++) {
402 task_deallocate((*task_list)[i]);
403 }
404 kfree(addr, size);
405 *task_list = NULL;
406 *count = 0;
407 return KERN_SUCCESS;
408 } else {
409 return KERN_FAILURE;
410 }
411 }
412 __private_extern__ kern_return_t
413 chudxnu_all_threads(
414 thread_array_t *thread_list,
415 mach_msg_type_number_t *count)
416 {
417 return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)thread_list, count, THING_THREAD);
418 }
419
420 __private_extern__ kern_return_t
421 chudxnu_task_threads(
422 task_t task,
423 thread_array_t *thread_list,
424 mach_msg_type_number_t *count)
425 {
426 return chudxnu_private_task_threads(task, thread_list, count);
427 }
428
429 __private_extern__ kern_return_t
430 chudxnu_free_thread_list(
431 thread_array_t *thread_list,
432 mach_msg_type_number_t *count)
433 {
434 vm_size_t size = (*count)*sizeof(mach_port_t);
435 void *addr = *thread_list;
436
437 if(addr) {
438 int i, maxCount = *count;
439 for(i=0; i<maxCount; i++) {
440 thread_deallocate((*thread_list)[i]);
441 }
442 kfree(addr, size);
443 *thread_list = NULL;
444 *count = 0;
445 return KERN_SUCCESS;
446 } else {
447 return KERN_FAILURE;
448 }
449 }
450
451 __private_extern__ task_t
452 chudxnu_current_task(void)
453 {
454 return current_task();
455 }
456
457 __private_extern__ thread_t
458 chudxnu_current_thread(void)
459 {
460 return current_thread();
461 }
462
463 __private_extern__ task_t
464 chudxnu_task_for_thread(thread_t thread)
465 {
466 return get_threadtask(thread);
467 }
468
469 __private_extern__ kern_return_t
470 chudxnu_thread_info(
471 thread_t thread,
472 thread_flavor_t flavor,
473 thread_info_t thread_info_out,
474 mach_msg_type_number_t *thread_info_count)
475 {
476 return thread_info(thread, flavor, thread_info_out, thread_info_count);
477 }
478
479
480 __private_extern__ kern_return_t
481 chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
482 {
483 *timestamp = thread->last_switch;
484 return KERN_SUCCESS;
485 }
486
487 /* thread marking stuff */
488
489 __private_extern__ boolean_t
490 chudxnu_thread_get_marked(thread_t thread)
491 {
492 if(thread)
493 return ((thread->t_chud & T_CHUD_MARKED) != 0);
494 return FALSE;
495 }
496
497 __private_extern__ boolean_t
498 chudxnu_thread_set_marked(thread_t thread, boolean_t new_value)
499 {
500 boolean_t old_val;
501
502 if(thread) {
503 if(new_value) {
504 // set the marked bit
505 old_val = OSBitOrAtomic(T_CHUD_MARKED, (UInt32 *) &(thread->t_chud));
506 } else {
507 // clear the marked bit
508 old_val = OSBitAndAtomic(~T_CHUD_MARKED, (UInt32 *) &(thread->t_chud));
509 }
510 return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
511 }
512 return FALSE;
513 }
514