1 /*
2 *
3 * ===================================
4 * HARP | Host ATM Research Platform
5 * ===================================
6 *
7 *
8 * This Host ATM Research Platform ("HARP") file (the "Software") is
9 * made available by Network Computing Services, Inc. ("NetworkCS")
10 * "AS IS". NetworkCS does not provide maintenance, improvements or
11 * support of any kind.
12 *
13 * NETWORKCS MAKES NO WARRANTIES OR REPRESENTATIONS, EXPRESS OR IMPLIED,
14 * INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY
15 * AND FITNESS FOR A PARTICULAR PURPOSE, AS TO ANY ELEMENT OF THE
16 * SOFTWARE OR ANY SUPPORT PROVIDED IN CONNECTION WITH THIS SOFTWARE.
17 * In no event shall NetworkCS be responsible for any damages, including
18 * but not limited to consequential damages, arising from or relating to
19 * any use of the Software or related support.
20 *
21 * Copyright 1994-1998 Network Computing Services, Inc.
22 *
23 * Copies of this Software may be made, however, the above copyright
24 * notice must be reproduced on all copies.
25 *
26 * @(#) $FreeBSD: src/sys/dev/hea/eni_transmit.c,v 1.6 1999/12/21 08:24:35 eivind Exp $
27 * @(#) $DragonFly: src/sys/dev/atm/hea/eni_transmit.c,v 1.8 2008/03/01 22:03:13 swildner Exp $
28 */
29
30 /*
31 * Efficient ENI Adapter Support
32 * -----------------------------
33 *
34 * Transmit queue management and PDU output processing
35 *
36 */
37
38
39 #include <netproto/atm/kern_include.h>
40
41 #include "eni_stats.h"
42 #include "eni.h"
43 #include "eni_var.h"
44
45 /*
46 * Make a variable which controls printing of PDUs
47 * as they travel through the driver.
48 */
49 #ifdef DIAGNOSTIC
50 int eni_pdu_print = 0;
51 #endif
52
53 /*
54 * Some PCI chipsets do not handle one or more of the 8WORD or
55 * 4WORD DMA transfer sizes. Default to using only 1WORD transfer
56 * sizes unless the user wishes to experiment.
57 *
 * Unfortunately, these settings can only be changed here, in this module.
59 */
60 #define DMA_USE_8WORD
61 #define DMA_USE_4WORD
62
63 /*
64 * Create a DMA list entry
65 *
66 * DMA entries consist of a control word and a physical address.
67 * Control words are comprised of a DMA type, a count of type transfers
68 * to occur, and a variable which for TX requests is the TX channel
69 * number and for RX requests is the VCC number.
70 *
71 * Arguments:
72 * eup pointer to unit structure
73 * rx set if receiving
74 * dma_list pointer to DMA list structure
75 * list_size length of DMA list structure
76 * idx pointer to current list entry
77 * val TX channel or RX vcc
78 * addr virtual DMA address of data buffer
79 * size size in bytes of DMA request to be built
80 *
81 * Returns:
82 * dma_list updated with new entries
83 * idx points to next list entry
84 * -1 no room in DMA list structure or DMA_GET_ADDR failed
85 */
86 int
87 eni_set_dma(Eni_unit *eup, int rx, u_long *dma_list, int list_size, long *idx,
88 int val, u_long addr, int size)
89 {
90 int dsize; /* Size of current DMA request */
91
92 /*
93 * Round up to multiple of word and convert to number
94 * of words rather then number of bytes.
95 */
96 size = ( size + 3 ) >> 2;
97
98 #ifdef DMA_USE_8WORD
99 /*
100 * Check for room in DMA list - we need two entires
101 */
102 if ( *idx + 2 >= list_size )
103 return ( -1 );
104
105 /*
106 * Here is the big win. Move as much data possible with
107 * n 8WORD DMAs.
108 */
109 /*
110 * Check if we can do one or more 8WORD DMAs
111 */
112 dsize = size & ~7;
113 if ( dsize ) {
114 dma_list[(*idx)++] = ( dsize >> 3 ) << DMA_COUNT_SHIFT |
115 val << DMA_VCC_SHIFT | DMA_8WORD;
116 dma_list[*idx] = (u_long)DMA_GET_ADDR ( addr, dsize, 0, 0 );
117 if ( dma_list[*idx] == 0 ) {
118 if ( rx )
119 eup->eu_stats.eni_st_drv.drv_rv_segdma++;
120 else
121 eup->eu_stats.eni_st_drv.drv_xm_segdma++;
122 return ( -1 ); /* DMA_GET_ADDR failed */
123 } else
124 (*idx)++; /* increment index */
125 /*
126 * Adjust addr and size
127 */
128 addr += dsize << 2;
129 size &= 7;
130 }
131 #endif /* DMA_USE_8WORD */
132
133 #ifdef DMA_USE_4WORD
134 /*
135 * Check for room in DMA list - we need two entries
136 */
137 if ( *idx + 2 >= list_size )
138 return ( -1 );
139
140 /*
141 * Kindof a tossup from this point on. Since we hacked as many
142 * 8WORD DMAs off as possible, we are left with 0-7 words
143 * of remaining data. We could do upto one 4WORD with 0-3
144 * words left, or upto three 2WORDS with 0-1 words left,
145 * or upto seven WORDS with nothing left. Someday we should
146 * experiment with performance and see if any particular
147 * combination is a better win then some other...
148 */
149 /*
150 * Check if we can do one or more 4WORD DMAs
151 */
152 dsize = size & ~3;
153 if ( dsize ) {
154 dma_list[(*idx)++] = ( dsize >> 2 ) << DMA_COUNT_SHIFT |
155 val << DMA_VCC_SHIFT | DMA_4WORD;
156 dma_list[*idx] = (u_long)DMA_GET_ADDR ( addr, dsize, 0, 0 );
157 if ( dma_list[*idx] == 0 ) {
158 if ( rx )
159 eup->eu_stats.eni_st_drv.drv_rv_segdma++;
160 else
161 eup->eu_stats.eni_st_drv.drv_xm_segdma++;
162 return ( -1 ); /* DMA_GET_ADDR failed */
163 } else
164 (*idx)++; /* increment index */
165 /*
166 * Adjust addr and size
167 */
168 addr += dsize << 2;
169 size &= 3;
170 }
171 #endif /* DMA_USE_4WORD */
172
173 /*
174 * Check for room in DMA list - we need two entries
175 */
176 if ( *idx + 2 >= list_size )
177 return ( -1 );
178
179 /*
180 * Hard to know if one 2WORD and 0/1 WORD DMA would be better
181 * then 2/3 WORD DMAs. For now, skip 2WORD DMAs in favor of
182 * WORD DMAs.
183 */
184
185 /*
186 * Finish remaining size a 1WORD DMAs
187 */
188 if ( size ) {
189 dma_list[(*idx)++] = ( size ) << DMA_COUNT_SHIFT |
190 val << DMA_VCC_SHIFT | DMA_WORD;
191 dma_list[*idx] = (u_long)DMA_GET_ADDR ( addr, size, 0, 0 );
192 if ( dma_list[*idx] == 0 ) {
193 if ( rx )
194 eup->eu_stats.eni_st_drv.drv_rv_segdma++;
195 else
196 eup->eu_stats.eni_st_drv.drv_xm_segdma++;
197 return ( -1 ); /* DMA_GET_ADDR failed */
198 } else
199 (*idx)++; /* increment index */
200 }
201
202 /*
203 * Inserted descriptor okay
204 */
205 return 0;
206 }
207
208 /*
209 * Drain Transmit queue
210 *
211 * As PDUs are given to the adapter to be transmitted, we
212 * place them into a private ifqueue so that we can free
213 * any resources AFTER we know they've been successfully DMAed.
214 * As part of the output processing, we record the PDUs start
215 * and stop entries in the DMA list, and prevent wrapping. When
216 * we pull the top element off, we simply check that the current
217 * DMA location is outside this PDU and if so, it's okay to free
218 * things.
219 *
220 * PDUs are always in ascending order in the queue.
221 *
222 * Arguments:
223 * eup pointer to device unit structure
224 *
225 * Returns:
226 * none
227 *
228 */
229 void
230 eni_xmit_drain(Eni_unit *eup)
231 {
232 KBuffer *m;
233 Eni_vcc *evp;
234 struct vccb *vcp;
235 u_long pdulen;
236 u_long start, stop;
237 u_long dmap;
238
239 crit_enter();
240
241 /*
242 * Pull the top element (PDU) off
243 */
244 IF_DEQUEUE ( &eup->eu_txqueue, m );
245 /*
246 * As long as there are valid elements
247 */
248 while ( m ) {
249 u_long *up;
250
251 /*
252 * Find start of buffer
253 */
254 KB_DATASTART ( m, up, u_long * );
255
256 /*
257 * First word is the VCC for this PDU
258 */
259 /*
260 * NOTE: There is a potential problem here in that
261 * if the VCC is closed after this buffer was transmitted
262 * but before we get here, that while evp is non-null,
263 * it will not reference a valid vccb. We need to either
264 * delay closing the VCC until all references are removed
265 * from the drain stacks, actually go through the drain
266 * stacks and remove any references, or find someway of
267 * indicating that this vccb is nolonger usable.
268 */
269 evp = (Eni_vcc *)*up++;
270 /*
271 * Second word is the start and stop DMA pointers
272 */
273 start = *up >> 16;
274 stop = *up++ & 0xffff;
275 /*
276 * Find out where the TX engine is at
277 */
278 dmap = eup->eu_midway[MIDWAY_TX_RD];
279 /*
280 * Check to see if TX engine has processed this
281 * PDU yet. Remember that everything is circular
282 * and that stop might be less than start numerically.
283 */
284 if ( start > stop ) {
285 if ( !(dmap >= stop && dmap < start) ) {
286 /*
287 * Haven't finished this PDU yet - replace
288 * it as the head of list.
289 */
290 IF_PREPEND ( &eup->eu_txqueue, m );
291 /*
292 * If this one isn't done, none of the others
293 * are either.
294 */
295 crit_exit();
296 return;
297 }
298 } else {
299 if ( dmap < stop && dmap >= start ) {
300 /*
301 * Haven't finished this PDU yet - replace
302 * it as the head of list.
303 */
304 IF_PREPEND ( &eup->eu_txqueue, m );
305 /*
306 * If this one isn't done, none of the others
307 * are either.
308 */
309 crit_exit();
310 return;
311 }
312 }
313
314 /*
315 * Count the PDU stats for this interface
316 */
317 eup->eu_pif.pif_opdus++;
318 /*
319 * Third word is PDU length from eni_output().
320 */
321 pdulen = *up++;
322 eup->eu_txfirst = (eup->eu_txfirst + *up) &
323 (eup->eu_txsize - 1);
324 eup->eu_pif.pif_obytes += pdulen;
325
326 /*
327 * Now lookup the VCC entry and counts the stats for
328 * this VC.
329 */
330 if ( evp ) {
331 vcp = evp->ev_connvc->cvc_vcc;
332 if ( vcp ) {
333 vcp->vc_opdus++;
334 vcp->vc_obytes += pdulen;
335 /*
336 * If we also have a network interface, count the PDU
337 * there also.
338 */
339 if ( vcp->vc_nif ) {
340 vcp->vc_nif->nif_obytes += pdulen;
341 vcp->vc_nif->nif_if.if_opackets++;
342 vcp->vc_nif->nif_if.if_obytes += pdulen;
343 }
344 }
345 }
346 /*
347 * Free the buffer chain
348 */
349 KB_FREEALL ( m );
350
351 /*
352 * Advance DMA write okay pointer
353 */
354 eup->eu_txdmawr = stop;
355
356 /*
357 * Look for next completed transmit PDU
358 */
359 IF_DEQUEUE ( &eup->eu_txqueue, m );
360 }
361 /*
362 * We've drained the queue...
363 */
364 crit_exit();
365 }
366
367 /*
368 * Output a PDU
369 *
370 * This function is called via the common driver code after receiving a
371 * stack *_DATA* command. The common code has already validated most of
372 * the request so we just need to check a few more ENI-specific details.
373 * Then we just build a segmentation structure for the PDU and place the
374 * address into the DMA_Transmit_queue.
375 *
376 * Arguments:
377 * cup pointer to device common unit
378 * cvp pointer to common VCC entry
379 * m pointer to output PDU buffer chain head
380 *
381 * Returns:
382 * none
383 *
384 */
void
eni_output(Cmn_unit *cup, Cmn_vcc *cvp, KBuffer *m)
{
	Eni_unit	*eup = (Eni_unit *)cup;
	Eni_vcc		*evp = (Eni_vcc *)cvp;
	int		pdulen = 0;	/* total payload bytes in the chain */
	u_long		size;		/* adapter buffer words needed */
	u_long		buf_avail;	/* free adapter TX buffer words */
	u_long		dma_rd, dma_wr;	/* Midway DMA queue pointers */
	u_long		dma[TEMP_DMA_SIZE];	/* local DMA list being built */
	int		aal5, i;
	long		j;		/* next free slot in dma[] */
	u_long		dma_avail;	/* free slots in adapter DMA queue */
	u_long		dma_start;	/* dma_wr snapshot = PDU's start ptr */
	Eni_mem		tx_send;	/* adapter-memory TX buffer base */
	u_long		*up;
	KBuffer		*m0 = m, *m1, *mprev = NULL;
	caddr_t		cp, bfr;
	u_int		len, align;
	int		compressed = 0;	/* set once atm_dev_compress() tried */

#ifdef	DIAGNOSTIC
	if ( eni_pdu_print )
		atm_dev_pdu_print ( cup, cvp, m, "eni output" );
#endif

	/*
	 * Re-entry point for after buffer compression (if needed)
	 */
retry:

	/*
	 * We can avoid traversing the buffer list twice by building
	 * the middle (minus header and trailer) dma list at the
	 * same time we massage address and size alignments. Since
	 * this list remains local until we determine we've enough
	 * room, we're not going to trash anything by not checking
	 * sizes, etc. yet. Skip first entry to be used later to skip
	 * descriptor word.
	 */
	j = 2;		/* dma[0]/dma[1] reserved for the header JK entry */
	/*
	 * Do data positioning for address and length alignment
	 */
	while ( m ) {
		u_long	buf_addr;	/* For passing addr to eni_set_dma() */

		/*
		 * Get rid of any zero length buffers
		 */
		if ( KB_LEN ( m ) == 0 ) {
			if ( mprev ) {
				KB_UNLINK ( m, mprev, m1 );
			} else {
				/* dropping the head - track new chain head */
				KB_UNLINKHEAD ( m, m1 );
				m0 = m1;
			}
			m = m1;
			continue;
		}
		/*
		 * Get start of data onto full-word alignment
		 */
		KB_DATASTART ( m, cp, caddr_t );
		if ((align = ((u_int)cp) & (sizeof(u_long)-1)) != 0) {
			/*
			 * Gotta slide the data up - copy in place to the
			 * preceding word boundary and adjust the buffer's
			 * data offset to match.
			 */
			eup->eu_stats.eni_st_drv.drv_xm_segnoal++;
			bfr = cp - align;
			KM_COPY ( cp, bfr, KB_LEN ( m ) );
			KB_HEADMOVE ( m, -align );
		} else {
			/*
			 * Data already aligned
			 */
			bfr = cp;
		}
		/*
		 * Now work on getting the data length correct
		 */
		len = KB_LEN ( m );
		while ( ( align = ( len & (sizeof(u_long)-1))) &&
			(m1 = KB_NEXT ( m ) ) ) {

			/*
			 * Have to move some data from following buffer(s)
			 * to word-fill this buffer
			 */
			u_int ncopy = MIN ( sizeof(u_long) - align,
				KB_LEN ( m1 ) );

			if ( ncopy ) {
				/*
				 * Move data to current buffer
				 */
				caddr_t	dest;

				eup->eu_stats.eni_st_drv.drv_xm_seglen++;
				KB_DATASTART ( m1, cp, caddr_t );
				dest = bfr + len;
				KB_HEADADJ ( m1, -ncopy );
				KB_TAILADJ ( m, ncopy );
				len += ncopy;
				while ( ncopy-- ) {
					*dest++ = *cp++;
				}
			}

			/*
			 * If we've drained the buffer, free it
			 */
			if ( KB_LEN ( m1 ) == 0 ) {
				KBuffer	*m2;

				KB_UNLINK ( m1, m, m2 );
			}
		}

		/*
		 * Address and size are now aligned. Build dma list
		 * using TX channel 0. Also, round length up to a word
		 * size which should only effect the last buffer in the
		 * chain. This works because the PDU length is maintained
		 * separately and we're not really adjusting the buffer's
		 * idea of its length.
		 */
		KB_DATASTART ( m, buf_addr, u_long );
		if ( eni_set_dma ( eup, 0, dma, TEMP_DMA_SIZE, &j, 0,
			buf_addr, KB_LEN ( m ) ) < 0 ) {
			/*
			 * Failed to build DMA list. First, we'll try to
			 * compress the buffer chain into a smaller number
			 * of buffers. After compressing, we'll try to send
			 * the new buffer chain. If we still fail, then
			 * we'll drop the pdu.
			 */
			if ( compressed ) {
				/* already compressed once - give up */
#ifdef	DO_LOG
				log ( LOG_ERR,
					"eni_output: eni_set_dma failed\n" );
#endif
				eup->eu_pif.pif_oerrors++;
				KB_FREEALL ( m0 );
				return;
			}

			eup->eu_stats.eni_st_drv.drv_xm_maxpdu++;

			m = atm_dev_compress ( m0 );
			if ( m == NULL ) {
				/* NOTE: atm_dev_compress() frees the
				 * original chain on failure - presumably,
				 * since m0 is not freed here; verify
				 * against its implementation. */
#ifdef	DO_LOG
				log ( LOG_ERR,
					"eni_output: atm_dev_compress() failed\n" );
#endif
				eup->eu_pif.pif_oerrors++;
				return;
			}

			/*
			 * Reset to new head of buffer chain
			 */
			m0 = m;

			/*
			 * Indicate we've been through here
			 */
			compressed = 1;

			/*
			 * Retry to build the DMA descriptors for the newly
			 * compressed buffer chain
			 */
			goto retry;

		}

		/*
		 * Now count the length
		 */
		pdulen += KB_LEN ( m );

		/*
		 * Bump counters and get ready for next buffer
		 */
		mprev = m;
		m = KB_NEXT ( m );
	}

	/*
	 * Get a buffer to use in a private queue so that we can
	 * reclaim resources after the DMA has finished.
	 * eni_xmit_drain() consumes these buffers.
	 */
	KB_ALLOC ( m, ENI_SMALL_BSIZE, KB_F_NOWAIT, KB_T_DATA );
	if ( m ) {
		/*
		 * Link the PDU onto our new head
		 */
		KB_NEXT ( m ) = m0;
	} else {
		/*
		 * Drop this PDU and let the sender try again.
		 */
		eup->eu_stats.eni_st_drv.drv_xm_norsc++;
#ifdef	DO_LOG
		log(LOG_ERR, "eni_output: Unable to allocate drain buffer.\n");
#endif
		eup->eu_pif.pif_oerrors++;
		KB_FREEALL ( m0 );
		return;
	}

	crit_enter();

	/*
	 * Calculate size of buffer necessary to store PDU. If this
	 * is an AAL5 PDU, we'll need to know where to stuff the length
	 * value in the trailer.
	 */
	/*
	 * AAL5 PDUs need an extra two words for control/length and
	 * CRC. Check for AAL5 and add requirements here.
	 */
	if ((aal5 = (evp->ev_connvc->cvc_attr.aal.type == ATM_AAL5)) != 0)
		size = pdulen + 2 * sizeof(long);
	else
		size = pdulen;
	/*
	 * Pad to next complete cell boundary
	 */
	size += (BYTES_PER_CELL - 1);
	size -= size % BYTES_PER_CELL;
	/*
	 * Convert size to words and add 2 words overhead for every
	 * PDU (descriptor and cell header).
	 */
	size = (size >> 2) + 2;

	/*
	 * First, check to see if there's enough buffer space to
	 * store the PDU. We do this by checking to see if the size
	 * required crosses the eu_txfirst pointer. However, we don't
	 * want to exactly fill the buffer, because we won't be able to
	 * distinguish between a full and empty buffer.
	 */
	if ( eup->eu_txpos == eup->eu_txfirst )
		buf_avail = eup->eu_txsize;
	else
	    if ( eup->eu_txpos > eup->eu_txfirst )
		buf_avail = eup->eu_txsize - ( eup->eu_txpos - eup->eu_txfirst );
	    else
		buf_avail = eup->eu_txfirst - eup->eu_txpos;

	if ( size >= buf_avail )
	{
		/*
		 * No buffer space in the adapter to store this PDU.
		 * Drop PDU and return.
		 */
		eup->eu_stats.eni_st_drv.drv_xm_nobuf++;
#ifdef	DO_LOG
		log ( LOG_ERR,
			"eni_output: not enough room in buffer\n" );
#endif
		eup->eu_pif.pif_oerrors++;
		KB_FREEALL ( m );
		crit_exit();
		return;
	}

	/*
	 * Find out where current DMA pointers are at
	 */
	dma_start = dma_wr = eup->eu_midway[MIDWAY_TX_WR];
	dma_rd = eup->eu_midway[MIDWAY_TX_RD];

	/*
	 * Figure out how much DMA room we have available
	 */
	if ( dma_rd == dma_wr ) {		/* Queue is empty */
		dma_avail = DMA_LIST_SIZE;
	} else {
		dma_avail = ( dma_rd + DMA_LIST_SIZE - dma_wr )
		    & ( DMA_LIST_SIZE - 1 );
	}
	/*
	 * Check to see if we can describe this PDU or if we're:
	 * out of room, will wrap past recovered resources.
	 * (j / 2 counts descriptor pairs built so far; + 4 covers the
	 * header and possible trailer pairs still to be added.)
	 */
	if ( dma_avail < (j / 2 + 4) ||
	    ( dma_wr + (j / 2 + 4) > eup->eu_txdmawr + DMA_LIST_SIZE ) ) {
		/*
		 * No space to insert DMA list into queue. Drop this PDU.
		 */
		eup->eu_stats.eni_st_drv.drv_xm_nodma++;
#ifdef	DO_LOG
		log ( LOG_ERR,
			"eni_output: not enough room in DMA queue\n" );
#endif
		eup->eu_pif.pif_oerrors++;
		KB_FREEALL( m );
		crit_exit();
		return;
	}

	/*
	 * Create DMA descriptor for header. There is a descriptor word
	 * and also a cell header word which we'll set manually.
	 * The JK (just-kidding) entry advances the adapter's buffer
	 * pointer without transferring host data.
	 */
	dma[0] = (((int)(eup->eu_txpos + 2) & (eup->eu_txsize-1)) <<
		DMA_COUNT_SHIFT) | DMA_JK;
	dma[1] = 0;

	/*
	 * JK for AAL5 trailer. Set END bit as well.
	 */
	if ( aal5 ) {
		dma[j++] = (((int)(eup->eu_txpos+size) & (eup->eu_txsize-1)) <<
			DMA_COUNT_SHIFT) | DMA_END_BIT | DMA_JK;
		dma[j++] = 0;
	} else {
		dma[j-2] |= DMA_END_BIT;	/* Backup and set END bit */
	}

	/*
	 * Find out where in adapter memory this TX buffer starts.
	 */
	tx_send = (Eni_mem)
	    ((((int)eup->eu_midway[MIDWAY_TXPLACE] & 0x7ff) << ENI_LOC_PREDIV) +
		(int)eup->eu_ram);

	/*
	 * Set descriptor word
	 */
	tx_send[eup->eu_txpos] =
	    (MIDWAY_UNQ_ID << 28) | (aal5 ? 1 << 27 : 0)
		| (size / WORDS_PER_CELL);
	/*
	 * Set cell header
	 */
	tx_send[(eup->eu_txpos+1)&(eup->eu_txsize-1)] =
	    evp->ev_connvc->cvc_vcc->vc_vci << 4;

	/*
	 * We've got all our resources, count the stats
	 */
	if ( aal5 ) {
		/*
		 * If this is an AAL5 PDU, we need to set the length
		 * in the trailer (second to last word of the buffer).
		 */
		tx_send[(eup->eu_txpos+size-2) &
		    (eup->eu_txsize-1)] = pdulen;
		/*
		 * Increment AAL5 stats
		 */
		eup->eu_stats.eni_st_aal5.aal5_pdu_xmit++;
		eup->eu_stats.eni_st_aal5.aal5_xmit += (size - 2) / WORDS_PER_CELL;
	} else {
		/*
		 * Increment AAL0 stats
		 */
		eup->eu_stats.eni_st_aal0.aal0_xmit += (size - 2) / WORDS_PER_CELL;
	}
	/*
	 * Increment ATM stats
	 */
	eup->eu_stats.eni_st_atm.atm_xmit += (size - 2) / WORDS_PER_CELL;

	/*
	 * Store the DMA list into the adapter's circular queue,
	 * two words per entry.
	 */
	j = j >> 1;
	for ( i = 0; i < j; i++ ) {
		eup->eu_txdma[dma_wr*2] = dma[i*2];
		eup->eu_txdma[dma_wr*2+1] = dma[i*2+1];
		dma_wr = (dma_wr+1) & (DMA_LIST_SIZE-1);
	}

	/*
	 * Build drain buffer
	 *
	 * We toss four words in to help keep track of this
	 * PDU. The first is a pointer to the VC control block
	 * so we can find which VCI this went out on, the second
	 * is the start and stop pointers for the DMA list which
	 * describes this PDU, the third is the PDU length
	 * since we'll want to know that for stats gathering,
	 * and the fourth is the number of DMA words.
	 * eni_xmit_drain() unpacks these in the same order.
	 */
	KB_DATASTART ( m, up, u_long * );
	*up++ = (u_long)cvp;
	*up++ = dma_start << 16 | dma_wr;
	*up++ = pdulen;
	*up = size;

	/*
	 * Set length of our buffer
	 */
	KB_LEN ( m ) = 4 * sizeof ( long );

	/*
	 * Place buffers onto transmit queue for draining
	 */
	IF_ENQUEUE ( &eup->eu_txqueue, m );

	/*
	 * Update next word to be stored
	 */
	eup->eu_txpos = ((eup->eu_txpos + size) & (eup->eu_txsize - 1));

	/*
	 * Update MIDWAY_TX_WR pointer - this hands the new DMA
	 * entries to the adapter.
	 */
	eup->eu_midway[MIDWAY_TX_WR] = dma_wr;

	crit_exit();
}
802
Cache object: 2b3c593ed1a5882b1da502e04e1da6d9
|