FreeBSD/Linux Kernel Cross Reference
sys/kern/profile.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26
27 /*
28 * Copyright 1991 by Open Software Foundation,
29 * Grenoble, FRANCE
30 *
31 * All Rights Reserved
32 *
33 * Permission to use, copy, modify, and distribute this software and
34 * its documentation for any purpose and without fee is hereby granted,
35 * provided that the above copyright notice appears in all copies and
36 * that both the copyright notice and this permission notice appear in
37 * supporting documentation, and that the name of OSF or Open Software
38 * Foundation not be used in advertising or publicity pertaining to
39 * distribution of the software without specific, written prior
40 * permission.
41 *
42 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
43 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
44 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
45 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
46 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
47 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
48 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 */
50
51 /*
52 * HISTORY
53 * $Log: profile.c,v $
54 * Revision 2.2 93/01/14 17:36:06 danner
55 * Updated argument casts for calls to assert_wait, thread_sleep and thread_wakeup.
56 * [93/01/12 danner]
57 * Proper spl typing. 64bit cleanup.
58 * [92/12/01 af]
59 *
60 * Revision 2.1 91/09/26 04:48:09 bernadat
61 * Created.
62 *
63 * Profiling support
64 * (Bernard Tabib & Andrei Danes @ gr.osf.org)
65 */
66
67
68 #include <kern/thread.h>
69 #include <kern/queue.h>
70 #include <mach/profil.h>
71 #include <kern/sched_prim.h>
72 #include <ipc/ipc_space.h>
73
74 extern vm_map_t kernel_map; /* can be discarded, defined in <vm/vm_kern.h> */
75
76 thread_t profile_thread_id = THREAD_NULL;
77
78
/*
 * profile_thread: kernel service thread that drains full PC-sample
 * buffers queued on prof_queue (by profile()/send_last_sample_buf())
 * and ships each one to the profiled task's reply port via mach_msg().
 *
 * Runs until awakened with a wait_result other than THREAD_AWAKENED,
 * then drains any remaining queued buffers without sending them and
 * halts itself.
 */
void profile_thread()
{
	/*
	 * Outgoing message: header, a single inline 32-bit integer array
	 * type descriptor, and up to SIZE_PROF_BUFFER samples plus one
	 * trailing element carrying the actual sample count.
	 */
	struct message {
		mach_msg_header_t head;
		mach_msg_type_t type;
		int arg[SIZE_PROF_BUFFER+1];
	} msg;

	register spl_t s;
	buf_to_send_t buf_entry;
	queue_entry_t prof_queue_entry;
	prof_data_t pbuf;
	simple_lock_t lock;	/* NOTE(review): never initialized before use below — verify thread_sleep tolerates this on all configurations */
	msg_return_t mr;
	int j;

	/* Initialise the queue header for the prof_queue */
	mpqueue_init(&prof_queue);

	/* Template initialisation of header and type structures */
	msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE);
	msg.head.msgh_size = sizeof(msg);
	msg.head.msgh_local_port = MACH_PORT_NULL;
	msg.head.msgh_kind = MACH_MSGH_KIND_NORMAL;
	msg.head.msgh_id = 666666;	/* arbitrary marker id for sample messages */

	msg.type.msgt_name = MACH_MSG_TYPE_INTEGER_32;
	msg.type.msgt_size = 32;
	msg.type.msgt_number = SIZE_PROF_BUFFER+1;
	msg.type.msgt_inline = TRUE;
	msg.type.msgt_longform = FALSE;
	msg.type.msgt_deallocate = FALSE;
	msg.type.msgt_unused = 0;

	while (TRUE) {

		/*
		 * Dequeue the first buffer.  Interrupts are masked because
		 * hardclock()-level code enqueues entries on prof_queue.
		 */
		s = splsched();
		mpdequeue_head(&prof_queue, &prof_queue_entry);
		splx(s);

		if ((buf_entry = (buf_to_send_t) prof_queue_entry) == NULLBTS)
		{
			/* Queue empty: sleep until a producer wakes us up. */
			thread_sleep((event_t) profile_thread, lock, TRUE);
			/* Any wakeup other than a normal one means "exit". */
			if (current_thread()->wait_result != THREAD_AWAKENED)
				break;
		}
		else {
			task_t curr_task;
			thread_t curr_th;
			register int *sample;
			int curr_buf;
			int imax;

			curr_th = (thread_t) buf_entry->thread;
			curr_buf = (int) buf_entry->number;
			pbuf = curr_th->profil_buffer;

			/* Set the remote port */
			msg.head.msgh_remote_port = (mach_port_t) pbuf->prof_port;


			/* Copy the samples out of the kernel buffer. */
			sample = pbuf->prof_area[curr_buf].p_zone;
			imax = pbuf->prof_area[curr_buf].p_index;
			for(j=0 ;j<imax; j++,sample++)
				msg.arg[j] = *sample;

			/* Let hardclock() know you've finished the dirty job */
			pbuf->prof_area[curr_buf].p_full = FALSE;

			/*
			 * Store the number of samples actually sent
			 * as the last element of the array.
			 */
			msg.arg[SIZE_PROF_BUFFER] = imax;

			mr = mach_msg(&(msg.head), MACH_SEND_MSG,
				      sizeof(struct message), 0,
				      MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
				      MACH_PORT_NULL);

			if (mr != MACH_MSG_SUCCESS) {
				printf("profile_thread: mach_msg failed returned %x\n",(int)mr);
			}

			/* A synchronous producer may be blocked on wakeme. */
			if (buf_entry->wakeme)
				thread_wakeup((event_t) &buf_entry->wakeme);
			kmem_free(kernel_map, (buf_to_send_t) buf_entry,
				  sizeof(struct buf_to_send));

		}

	}
	/* The profile thread has been signalled to exit.  There may still
	   be sample data queued for us, which we must now throw away.
	   Once we set profile_thread_id to null, hardclock() will stop
	   queueing any additional samples, so we do not need to alter
	   the interrupt level. */
	profile_thread_id = THREAD_NULL;
	while (1) {
		mpdequeue_head(&prof_queue, &prof_queue_entry);
		if ((buf_entry = (buf_to_send_t) prof_queue_entry) == NULLBTS)
			break;
		/* Unblock any waiter before discarding its buffer. */
		if (buf_entry->wakeme)
			thread_wakeup((event_t) &buf_entry->wakeme);
		kmem_free(kernel_map, (buf_to_send_t) buf_entry,
			  sizeof(struct buf_to_send));
	}

	thread_halt_self();
}
190
191
192
193 #include <mach/message.h>
194
195 void
196 send_last_sample_buf(th)
197 thread_t th;
198 {
199 register spl_t s;
200 buf_to_send_t buf_entry;
201 vm_offset_t vm_buf_entry;
202
203 if (th->profil_buffer == NULLPBUF)
204 return;
205
206 /* Ask for the sending of the last PC buffer.
207 * Make a request to the profile_thread by inserting
208 * the buffer in the send queue, and wake it up.
209 * The last buffer must be inserted at the head of the
210 * send queue, so the profile_thread handles it immediatly.
211 */
212 if (kmem_alloc( kernel_map, &vm_buf_entry,
213 sizeof(struct buf_to_send)) != KERN_SUCCESS)
214 return;
215 buf_entry = (buf_to_send_t) vm_buf_entry;
216 buf_entry->thread = (int *) th;
217 buf_entry->number = th->profil_buffer->prof_index;
218
219 /* Watch out in case profile thread exits while we are about to
220 queue data for it. */
221 s = splsched();
222 if (profile_thread_id != THREAD_NULL) {
223 simple_lock_t lock;
224 buf_entry->wakeme = 1;
225 mpenqueue_tail( &prof_queue, &(buf_entry->list));
226 thread_wakeup((event_t) profile_thread);
227 assert_wait((event_t) &buf_entry->wakeme, TRUE);
228 splx(s);
229 thread_block((void (*)()) 0);
230 } else {
231 splx(s);
232 kmem_free(kernel_map, vm_buf_entry, sizeof(struct buf_to_send));
233 }
234 }
235
236 /*
237 * Profile current thread
238 */
239
240 profile(pc) {
241
242 /* Find out which thread has been interrupted. */
243 thread_t it_thread = current_thread();
244 int inout_val = pc;
245 buf_to_send_t buf_entry;
246 vm_offset_t vm_buf_entry;
247 int *val;
248 /*
249 * Test if the current thread is to be sampled
250 */
251 if (it_thread->thread_profiled) {
252 /* Inserts the PC value in the buffer of the thread */
253 set_pbuf_value(it_thread->profil_buffer, &inout_val);
254 switch(inout_val) {
255 case 0:
256 if (profile_thread_id == THREAD_NULL) {
257 reset_pbuf_area(it_thread->profil_buffer);
258 } else printf("ERROR : hardclock : full buffer unsent\n");
259 break;
260 case 1:
261 /* Normal case, value successfully inserted */
262 break;
263 case 2 :
264 /*
265 * The value we have just inserted caused the
266 * buffer to be full, and ready to be sent.
267 * If profile_thread_id is null, the profile
268 * thread has been killed. Since this generally
269 * happens only when the O/S server task of which
270 * it is a part is killed, it is not a great loss
271 * to throw away the data.
272 */
273 if (profile_thread_id == THREAD_NULL ||
274 kmem_alloc(kernel_map,
275 &vm_buf_entry ,
276 sizeof(struct buf_to_send)) !=
277 KERN_SUCCESS) {
278 reset_pbuf_area(it_thread->profil_buffer);
279 break;
280 }
281 buf_entry = (buf_to_send_t) vm_buf_entry;
282 buf_entry->thread = (int *)it_thread;
283 buf_entry->number =
284 (it_thread->profil_buffer)->prof_index;
285 mpenqueue_tail(&prof_queue, &(buf_entry->list));
286
287 /* Switch to another buffer */
288 reset_pbuf_area(it_thread->profil_buffer);
289
290 /* Wake up the profile thread */
291 if (profile_thread_id != THREAD_NULL)
292 thread_wakeup((event_t) profile_thread);
293 break;
294
295 default:
296 printf("ERROR: profile : unexpected case\n");
297 }
298 }
299 }
300
301
302 /* The task parameter in this and the subsequent routine is needed for
303 MiG, even though it is not used in the function itself. */
304
305 kern_return_t
306 mach_sample_thread (task, reply, cur_thread)
307 ipc_space_t task;
308 ipc_object_t reply;
309 thread_t cur_thread;
310 {
311 /*
312 * This routine is called every time that a new thread has made
313 * a request for the sampling service. We must keep track of the
314 * correspondance between it's identity (cur_thread) and the port
315 * we are going to use as a reply port to send out the samples resulting
316 * from its execution.
317 */
318 prof_data_t pbuf;
319 vm_offset_t vmpbuf;
320
321 if (reply != MACH_PORT_NULL) {
322 if (cur_thread->thread_profiled && cur_thread->thread_profiled_own) {
323 if (reply == cur_thread->profil_buffer->prof_port)
324 return KERN_SUCCESS;
325 mach_sample_thread(MACH_PORT_NULL, cur_thread);
326 }
327 /* Start profiling this thread , do the initialization. */
328 alloc_pbuf_area(pbuf, vmpbuf);
329 if ((cur_thread->profil_buffer = pbuf) == NULLPBUF) {
330 printf("ERROR:mach_sample_thread:cannot allocate pbuf\n");
331 return KERN_RESOURCE_SHORTAGE;
332 } else {
333 if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
334 printf("ERROR:mach_sample_thread:cannot set pbuf_nb\n");
335 return KERN_FAILURE;
336 }
337 reset_pbuf_area(pbuf);
338 }
339
340 pbuf->prof_port = reply;
341 cur_thread->thread_profiled = TRUE;
342 cur_thread->thread_profiled_own = TRUE;
343 if (profile_thread_id == THREAD_NULL)
344 profile_thread_id = kernel_thread(current_task(), profile_thread);
345 } else {
346 if (!cur_thread->thread_profiled_own)
347 cur_thread->thread_profiled = FALSE;
348 if (!cur_thread->thread_profiled)
349 return KERN_SUCCESS;
350
351 send_last_sample_buf(cur_thread);
352
353 /* Stop profiling this thread, do the cleanup. */
354
355 cur_thread->thread_profiled_own = FALSE;
356 cur_thread->thread_profiled = FALSE;
357 dealloc_pbuf_area(cur_thread->profil_buffer);
358 cur_thread->profil_buffer = NULLPBUF;
359 }
360
361 return KERN_SUCCESS;
362 }
363
/*
 * mach_sample_task: start or stop PC-sampling for every thread of a
 * task that is not already being profiled on its own behalf.
 *
 * reply != MACH_PORT_NULL turns sampling on (allocating the task-wide
 * buffer if needed); reply == MACH_PORT_NULL turns it off, flushing one
 * final buffer and releasing the task's buffer.  The `task` parameter
 * exists for the MiG interface and is only forwarded on recursion.
 *
 * Returns KERN_SUCCESS, KERN_RESOURCE_SHORTAGE, or KERN_FAILURE.
 */
kern_return_t
mach_sample_task (task, reply, cur_task)
	ipc_space_t task;
	ipc_object_t reply;
	task_t cur_task;
{
	prof_data_t pbuf=cur_task->profil_buffer;
	vm_offset_t vmpbuf;
	int turnon = (reply != MACH_PORT_NULL);

	if (turnon) {
		/* Re-target: stop the old session if it used another port. */
		if (cur_task->task_profiled) {
			if (cur_task->profil_buffer->prof_port == reply)
				return KERN_SUCCESS;
			(void) mach_sample_task(task, MACH_PORT_NULL, cur_task);
		}
		if (pbuf == NULLPBUF) {
			alloc_pbuf_area(pbuf, vmpbuf);
			if (pbuf == NULLPBUF) {
				return KERN_RESOURCE_SHORTAGE;
			}
			cur_task->profil_buffer = pbuf;
		}
		if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
			return KERN_FAILURE;
		}
		reset_pbuf_area(pbuf);
		pbuf->prof_port = reply;
	}

	/* Only walk the thread list when the on/off state actually flips. */
	if (turnon != cur_task->task_profiled) {
		int actual,i,sentone;
		thread_t thread;

		if (turnon && profile_thread_id == THREAD_NULL)
			profile_thread_id =
				kernel_thread(current_task(), profile_thread);
		cur_task->task_profiled = turnon;
		actual = cur_task->thread_count;
		sentone = 0;
		/*
		 * Propagate the new state to every thread that is not
		 * running its own private profiling session.  On turn-off
		 * only one thread needs to flush the shared task buffer.
		 */
		for (i=0, thread=(thread_t) queue_first(&cur_task->thread_list);
		     i < actual;
		     i++, thread=(thread_t) queue_next(&thread->thread_list)) {
			if (!thread->thread_profiled_own) {
				thread->thread_profiled = turnon;
				if (turnon)
					thread->profil_buffer = cur_task->profil_buffer;
				else if (!sentone) {
					send_last_sample_buf(thread);
					sentone = 1;
				}
			}
		}
		if (!turnon) {
			dealloc_pbuf_area(pbuf);
			cur_task->profil_buffer = NULLPBUF;
		}
	}

	return KERN_SUCCESS;
}
425
Cache object: 6273d8434bc162f9bd8029ebe5c88e60
|