1 /*
2 *
3 * ===================================
4 * HARP | Host ATM Research Platform
5 * ===================================
6 *
7 *
8 * This Host ATM Research Platform ("HARP") file (the "Software") is
9 * made available by Network Computing Services, Inc. ("NetworkCS")
10 * "AS IS". NetworkCS does not provide maintenance, improvements or
11 * support of any kind.
12 *
13 * NETWORKCS MAKES NO WARRANTIES OR REPRESENTATIONS, EXPRESS OR IMPLIED,
14 * INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY
15 * AND FITNESS FOR A PARTICULAR PURPOSE, AS TO ANY ELEMENT OF THE
16 * SOFTWARE OR ANY SUPPORT PROVIDED IN CONNECTION WITH THIS SOFTWARE.
17 * In no event shall NetworkCS be responsible for any damages, including
18 * but not limited to consequential damages, arising from or relating to
19 * any use of the Software or related support.
20 *
21 * Copyright 1994-1998 Network Computing Services, Inc.
22 *
23 * Copies of this Software may be made, however, the above copyright
24 * notice must be reproduced on all copies.
25 *
26 * @(#) $FreeBSD$
27 *
28 */
29
30 /*
31 * Core ATM Services
32 * -----------------
33 *
34 * ATM device support functions
35 *
36 */
37
38 #include <netatm/kern_include.h>
39 #include <net/bpf.h>
40
41 #ifndef lint
42 __RCSID("@(#) $FreeBSD$");
43 #endif
44
45
46 /*
47 * Private structures for managing allocated kernel memory resources
48 *
49 * For each allocation of kernel memory, one Mem_ent will be used.
50 * The Mem_ent structures will be allocated in blocks inside of a
51 * Mem_blk structure.
52 */
#define	MEM_NMEMENT	10	/* How many Mem_ent's in a Mem_blk */

/*
 * Bookkeeping for one kernel-memory allocation made through
 * atm_dev_alloc().  me_kaddr/me_ksize record what was actually
 * obtained from the kernel allocator, while me_uaddr is the
 * (possibly higher, alignment-adjusted) address handed back to
 * the caller.  me_uaddr is the lookup key used by atm_dev_free();
 * me_uaddr == NULL marks a free entry.
 */
struct mem_ent {
	void		*me_kaddr;	/* Allocated memory address */
	u_int		me_ksize;	/* Allocated memory length */
	void		*me_uaddr;	/* Memory address returned to caller */
	u_int		me_flags;	/* Flags (see below) */
};
typedef	struct mem_ent	Mem_ent;

/*
 * Memory entry flags
 */
#define	MEF_NONCACHE	1	/* Memory is noncacheable */

/*
 * Mem_ent's are carved out of Mem_blk's, which are chained off
 * atm_mem_head.  Blocks are added on demand by atm_dev_alloc()
 * and only reclaimed at module unload (see atm_unload()).
 */
struct mem_blk {
	struct mem_blk	*mb_next;	/* Next block in chain */
	Mem_ent		mb_mement[MEM_NMEMENT];	/* Allocated memory entries */
};
typedef	struct mem_blk	Mem_blk;

static Mem_blk	*atm_mem_head = NULL;

/*
 * Canned cause code used to abort a connection whenever a
 * device-level VCC open fails or a duplicate VPI/VCI is found.
 */
static struct t_atm_cause	atm_dev_cause = {
	T_ATM_ITU_CODING,
	T_ATM_LOC_USER,
	T_ATM_CAUSE_VPCI_VCI_ASSIGNMENT_FAILURE,
	{0, 0, 0, 0}
};
83
84
85 /*
86 * ATM Device Stack Instantiation
87 *
88 * Called at splnet.
89 *
90 * Arguments
91 * ssp pointer to array of stack definition pointers
92 * for connection
93 * ssp[0] points to upper layer's stack definition
94 * ssp[1] points to this layer's stack definition
95 * ssp[2] points to lower layer's stack definition
96 * cvcp pointer to connection vcc for this stack
97 *
98 * Returns
99 * 0 instantiation successful
100 * err instantiation failed - reason indicated
101 *
102 */
int
atm_dev_inst(ssp, cvcp)
	struct stack_defn	**ssp;
	Atm_connvc	*cvcp;
{
	Cmn_unit	*cup = (Cmn_unit *)cvcp->cvc_attr.nif->nif_pif;
	Cmn_vcc		*cvp;
	int		err;

	/*
	 * Check to see if device has been initialized
	 */
	if ((cup->cu_flags & CUF_INITED) == 0)
		return ( EIO );

	/*
	 * Validate lower SAP
	 */
	/*
	 * Device driver is the lowest layer - no need to validate
	 */

	/*
	 * Validate PVC vpi.vci
	 */
	if (cvcp->cvc_attr.called.addr.address_format == T_ATM_PVC_ADDR) {
		/*
		 * Look through existing circuits - return error if found.
		 * A type of 0 matches any VCC on this (vpi,vci).
		 */
		Atm_addr_pvc	*pp;

		pp = (Atm_addr_pvc *)cvcp->cvc_attr.called.addr.address;
		if (atm_dev_vcc_find(cup, ATM_PVC_GET_VPI(pp),
				ATM_PVC_GET_VCI(pp), 0))
			return ( EADDRINUSE );
	}

	/*
	 * Validate our SAP type - we only terminate AAL3/4, AAL5
	 * and raw ATM stacks
	 */
	switch ((*(ssp+1))->sd_sap) {
	case SAP_CPCS_AAL3_4:
	case SAP_CPCS_AAL5:
	case SAP_ATM:
		break;
	default:
		return (EINVAL);
	}

	/*
	 * Allocate a VCC control block
	 */
	if ( ( cvp = (Cmn_vcc *)atm_allocate(cup->cu_vcc_pool) ) == NULL )
		return ( ENOMEM );

	/* Record the upper layer's token/entry so we can hand data up */
	cvp->cv_state = CVS_INST;
	cvp->cv_toku = (*ssp)->sd_toku;
	cvp->cv_upper = (*ssp)->sd_upper;
	cvp->cv_connvc = cvcp;

	/*
	 * Let device have a look at the connection request
	 */
	err = (*cup->cu_instvcc)(cup, cvp);
	if (err) {
		atm_free((caddr_t)cvp);
		return (err);
	}

	/*
	 * Looks good so far, so link in device VCC
	 */
	LINK2TAIL ( cvp, Cmn_vcc, cup->cu_vcc, cv_next );

	/*
	 * Save my token - after the increment ssp points at THIS
	 * layer's stack_defn (ssp[1]), whose token the upper layer
	 * will pass back on every stack command.
	 */
	(*++ssp)->sd_toku = cvp;

	/*
	 * Pass instantiation down the stack
	 */
	/*
	 * No need - we're the lowest point.
	 */
	/* err = (*(ssp + 1))->sd_inst(ssp, cvcp); */

	/*
	 * Save the lower layer's interface info
	 */
	/*
	 * No need - we're the lowest point
	 */
	/* cvp->cv_lower = (*++ssp)->sd_lower; */
	/* cvp->cv_tok1 = (*ssp)->sd_toku; */

	return (0);
}
201
202
203 /*
204 * ATM Device Stack Command Handler
205 *
206 * Arguments
207 * cmd stack command code
208 * tok session token (Cmn_vcc)
209 * arg1 command specific argument
210 * arg2 command specific argument
211 *
212 * Returns
213 * none
214 *
215 */
216 /*ARGSUSED*/
void
atm_dev_lower(cmd, tok, arg1, arg2)
	int	cmd;
	void	*tok;
	int	arg1;
	int	arg2;
{
	Cmn_vcc		*cvp = (Cmn_vcc *)tok;
	Atm_connvc	*cvcp = cvp->cv_connvc;
	Cmn_unit	*cup = (Cmn_unit *)cvcp->cvc_attr.nif->nif_pif;
	struct vccb	*vcp;
	u_int		state;
	int		s;

	switch ( cmd ) {

	case CPCS_INIT:
		/*
		 * Sanity check - VCC must still be in the freshly
		 * instantiated state set by atm_dev_inst()
		 */
		if ( cvp->cv_state != CVS_INST ) {
			log ( LOG_ERR,
				"atm_dev_lower: INIT: tok=%p, state=%d\n",
				tok, cvp->cv_state );
			break;
		}

		vcp = cvp->cv_connvc->cvc_vcc;

		/*
		 * Validate SVC vpi.vci - the only VCC on this
		 * interface matching (vpi,vci,direction) must be us
		 */
		if ( vcp->vc_type & VCC_SVC ) {

			if (atm_dev_vcc_find(cup, vcp->vc_vpi, vcp->vc_vci,
					vcp->vc_type & (VCC_IN | VCC_OUT))
						!= cvp){
				log ( LOG_ERR,
					"atm_dev_lower: dup SVC (%d,%d) tok=%p\n",
					vcp->vc_vpi, vcp->vc_vci, tok );
				atm_cm_abort(cvp->cv_connvc, &atm_dev_cause);
				break;
			}
		}

		/*
		 * Tell the device to open the VCC; abort the connection
		 * if the driver refuses.  State is set before the call
		 * since the driver may deliver an activation interrupt.
		 */
		cvp->cv_state = CVS_INITED;
		s = splimp();
		if ((*cup->cu_openvcc)(cup, cvp)) {
			atm_cm_abort(cvp->cv_connvc, &atm_dev_cause);
			(void) splx(s);
			break;
		}
		(void) splx(s);
		break;

	case CPCS_TERM: {
		KBuffer		*m, *prev, *next;
		int		*ip;

		s = splimp();

		/*
		 * Disconnect the VCC - ignore return code
		 */
		if ((cvp->cv_state == CVS_INITED) ||
		    (cvp->cv_state == CVS_ACTIVE)) {
			(void) (*cup->cu_closevcc)(cup, cvp);
		}
		cvp->cv_state = CVS_TERM;

		/*
		 * Remove from interface list
		 */
		UNLINK ( cvp, Cmn_vcc, cup->cu_vcc, cv_next );

		/*
		 * Free any buffers from this VCC on the ATM interrupt queue
		 */
		prev = NULL;
		for (m = atm_intrq.ifq_head; m; m = next) {
			next = KB_QNEXT(m);

			/*
			 * See if this entry is for the terminating VCC.
			 * Presumably the second int of each queued buffer
			 * is the VCC token stored by the receive path -
			 * verify against the enqueue side.
			 * NOTE(review): comparing *ip against (int)cvp
			 * truncates the pointer on LP64 platforms.
			 */
			KB_DATASTART(m, ip, int *);
			ip++;
			if (*ip == (int)cvp) {
				/*
				 * Yep, so dequeue the entry.  The queue is
				 * singly linked, so head, tail and the
				 * predecessor link are patched by hand.
				 */
				if (prev == NULL)
					atm_intrq.ifq_head = next;
				else
					KB_QNEXT(prev) = next;

				if (next == NULL)
					atm_intrq.ifq_tail = prev;

				atm_intrq.ifq_len--;

				/*
				 * Free the unwanted buffers
				 */
				KB_FREEALL(m);
			} else {
				prev = m;
			}
		}
		(void) splx(s);

		/*
		 * Free VCC resources
		 */
		(void) atm_free((caddr_t)cvp);
		break;
	}

	case CPCS_UNITDATA_INV:

		/*
		 * Sanity check
		 *
		 * Use temp state variable since we dont want to lock out
		 * interrupts, but initial VC activation interrupt may
		 * happen here, changing state somewhere in the middle.
		 */
		state = cvp->cv_state;
		if ((state != CVS_ACTIVE) &&
		    (state != CVS_INITED)) {
			log ( LOG_ERR,
				"atm_dev_lower: UNITDATA: tok=%p, state=%d\n",
				tok, state );
			KB_FREEALL((KBuffer *)arg1);
			break;
		}

		/*
		 * Send the packet to the interface's bpf if this vc has one.
		 */
		if (cvcp->cvc_vcc != NULL && cvcp->cvc_vcc->vc_nif != NULL) {
			struct ifnet *ifp =
			    (struct ifnet *)cvcp->cvc_vcc->vc_nif;

			if (ifp->if_bpf)
				bpf_mtap(ifp, (KBuffer *)arg1);
		}

		/*
		 * Hand the data off to the device (arg1 is the buffer chain)
		 */
		(*cup->cu_output)(cup, cvp, (KBuffer *)arg1);

		break;

	case CPCS_UABORT_INV:
		log ( LOG_ERR,
			"atm_dev_lower: unimplemented stack cmd 0x%x, tok=%p\n",
			cmd, tok );
		break;

	default:
		log ( LOG_ERR,
			"atm_dev_lower: unknown stack cmd 0x%x, tok=%p\n",
			cmd, tok );

	}

	return;
}
390
391
392
393 /*
394 * Allocate kernel memory block
395 *
396 * This function will allocate a kernel memory block of the type specified
397 * in the flags parameter. The returned address will point to a memory
398 * block of the requested size and alignment. The memory block will also
399 * be zeroed. The alloc/free functions will manage/mask both the OS-specific
400 * kernel memory management requirements and the bookkeeping required to
401 * deal with data alignment issues.
402 *
403 * This function should not be called from interrupt level.
404 *
405 * Arguments:
406 * size size of memory block to allocate
407 * align data alignment requirement
408 * flags allocation flags (ATM_DEV_*)
409 *
410 * Returns:
411 * uaddr pointer to aligned memory block
412 * NULL unable to allocate memory
413 *
414 */
void *
atm_dev_alloc(size, align, flags)
	u_int	size;
	u_int	align;
	u_int	flags;
{
	Mem_blk	*mbp;
	Mem_ent	*mep;
	u_int	kalign, ksize;
	int	s, i;

	/* Raise spl - the Mem_blk chain is shared with atm_dev_free() */
	s = splimp();

	/*
	 * Find a free Mem_ent (me_uaddr == NULL marks a free slot)
	 */
	mep = NULL;
	for (mbp = atm_mem_head; mbp && mep == NULL; mbp = mbp->mb_next) {
		for (i = 0; i < MEM_NMEMENT; i++) {
			if (mbp->mb_mement[i].me_uaddr == NULL) {
				mep = &mbp->mb_mement[i];
				break;
			}
		}
	}

	/*
	 * If there are no free Mem_ent's, then allocate a new Mem_blk
	 * and link it into the chain
	 */
	if (mep == NULL) {
		mbp = (Mem_blk *) KM_ALLOC(sizeof(Mem_blk), M_DEVBUF, M_NOWAIT);
		if (mbp == NULL) {
			log(LOG_ERR, "atm_dev_alloc: Mem_blk failure\n");
			(void) splx(s);
			return (NULL);
		}
		KM_ZERO(mbp, sizeof(Mem_blk));

		mbp->mb_next = atm_mem_head;
		atm_mem_head = mbp;
		mep = mbp->mb_mement;
	}

	/*
	 * Now we need to get the kernel's allocation alignment minimum
	 *
	 * This is obviously very OS-specific stuff
	 */
#ifdef sun
	if (flags & ATM_DEV_NONCACHE) {
		/* Byte-aligned */
		kalign = sizeof(long);
	} else {
		/* Doubleword-aligned */
		kalign = sizeof(double);
	}
#elif (defined(BSD) && (BSD >= 199103))
	kalign = MINALLOCSIZE;
#else
#error Unsupported/unconfigured OS
#endif

	/*
	 * Figure out how much memory we must allocate to satify the
	 * user's size and alignment needs.  If the caller wants stricter
	 * alignment than the kernel guarantees, over-allocate so the
	 * aligned address still leaves 'size' usable bytes.
	 */
	if (align <= kalign)
		ksize = size;
	else
		ksize = size + align - kalign;

	/*
	 * Finally, go get the memory
	 */
	if (flags & ATM_DEV_NONCACHE) {
#ifdef sun
		mep->me_kaddr = IOPBALLOC(ksize);
#elif defined(__i386__)
		mep->me_kaddr = KM_ALLOC(ksize, M_DEVBUF, M_NOWAIT);
#else
#error Unsupported/unconfigured OS
#endif
	} else {
		mep->me_kaddr = KM_ALLOC(ksize, M_DEVBUF, M_NOWAIT);
	}

	if (mep->me_kaddr == NULL) {
		log(LOG_ERR, "atm_dev_alloc: %skernel memory unavailable\n",
			(flags & ATM_DEV_NONCACHE) ? "non-cacheable " : "");
		(void) splx(s);
		return (NULL);
	}

	/*
	 * Calculate correct alignment address to pass back to user.
	 * NOTE(review): the (u_int) cast truncates the pointer on LP64
	 * platforms - this code assumes a 32-bit address space.
	 */
	mep->me_uaddr = (void *) roundup((u_int)mep->me_kaddr, align);
	mep->me_ksize = ksize;
	mep->me_flags = flags;

	/*
	 * Clear memory for user
	 */
	KM_ZERO(mep->me_uaddr, size);

	ATM_DEBUG4("atm_dev_alloc: size=%d, align=%d, flags=%d, uaddr=%p\n",
		size, align, flags, mep->me_uaddr);

	(void) splx(s);

	return (mep->me_uaddr);
}
528
529
530 /*
531 * Free kernel memory block
532 *
533 * This function will free a kernel memory block previously allocated by
534 * the atm_dev_alloc function.
535 *
536 * This function should not be called from interrupt level.
537 *
538 * Arguments:
539 * uaddr pointer to allocated aligned memory block
540 *
541 * Returns:
542 * none
543 *
544 */
545 void
546 atm_dev_free(uaddr)
547 void *uaddr;
548 {
549 Mem_blk *mbp;
550 Mem_ent *mep;
551 int s, i;
552
553 ATM_DEBUG1("atm_dev_free: uaddr=%p\n", uaddr);
554
555 s = splimp();
556
557 /*
558 * Protect ourselves...
559 */
560 if (uaddr == NULL)
561 panic("atm_dev_free: trying to free null address");
562
563 /*
564 * Find our associated entry
565 */
566 mep = NULL;
567 for (mbp = atm_mem_head; mbp && mep == NULL; mbp = mbp->mb_next) {
568 for (i = 0; i < MEM_NMEMENT; i++) {
569 if (mbp->mb_mement[i].me_uaddr == uaddr) {
570 mep = &mbp->mb_mement[i];
571 break;
572 }
573 }
574 }
575
576 /*
577 * If we didn't find our entry, then unceremoniously let the caller
578 * know they screwed up (it certainly couldn't be a bug here...)
579 */
580 if (mep == NULL)
581 panic("atm_dev_free: trying to free unknown address");
582
583 /*
584 * Give the memory space back to the kernel
585 */
586 if (mep->me_flags & ATM_DEV_NONCACHE) {
587 #ifdef sun
588 IOPBFREE(mep->me_kaddr, mep->me_ksize);
589 #elif defined(__i386__)
590 KM_FREE(mep->me_kaddr, mep->me_ksize, M_DEVBUF);
591 #else
592 #error Unsupported/unconfigured OS
593 #endif
594 } else {
595 KM_FREE(mep->me_kaddr, mep->me_ksize, M_DEVBUF);
596 }
597
598 /*
599 * Free our entry
600 */
601 mep->me_uaddr = NULL;
602
603 (void) splx(s);
604
605 return;
606 }
607
608
609 #ifdef sun4m
610
611 typedef int (*func_t)();
612
613 /*
614 * Map an address into DVMA space
615 *
616 * This function will take a kernel virtual address and map it to
617 * a DMA virtual address which can be used during SBus DMA cycles.
618 *
619 * Arguments:
620 * addr kernel virtual address
621 * len length of DVMA space requested
622 * flags allocation flags (ATM_DEV_*)
623 *
624 * Returns:
625 * a DVMA address
626 * NULL unable to map into DMA space
627 *
628 */
629 void *
630 atm_dma_map(addr, len, flags)
631 caddr_t addr;
632 int len;
633 int flags;
634 {
635 if (flags & ATM_DEV_NONCACHE)
636 /*
637 * Non-cacheable memory is already DMA'able
638 */
639 return ((void *)addr);
640 else
641 return ((void *)mb_nbmapalloc(bigsbusmap, addr, len,
642 MDR_BIGSBUS|MB_CANTWAIT, (func_t)NULL, (caddr_t)NULL));
643 }
644
645
646 /*
647 * Free a DVMA map address
648 *
649 * This function will free DVMA map resources (addresses) previously
650 * allocated with atm_dma_map().
651 *
652 * Arguments:
653 * addr DMA virtual address
654 * flags allocation flags (ATM_DEV_*)
655 *
656 * Returns:
657 * none
658 *
659 */
660 void
661 atm_dma_free(addr, flags)
662 caddr_t addr;
663 int flags;
664 {
665 if ((flags & ATM_DEV_NONCACHE) == 0)
666 mb_mapfree(bigsbusmap, (int)&addr);
667
668 return;
669 }
670 #endif /* sun4m */
671
672
673 /*
674 * Compress buffer chain
675 *
676 * This function will compress a supplied buffer chain into a minimum number
677 * of kernel buffers. Typically, this function will be used because the
678 * number of buffers in an output buffer chain is too large for a device's
679 * DMA capabilities. This should only be called as a last resort, since
680 * all the data copying will surely kill any hopes of decent performance.
681 *
682 * Arguments:
683 * m pointer to source buffer chain
684 *
685 * Returns:
686 * n pointer to compressed buffer chain
687 *
688 */
KBuffer *
atm_dev_compress(m)
	KBuffer	*m;
{
	KBuffer		*n, *n0, **np;
	int		len, space;
	caddr_t		src, dst;

	/*
	 * n0 is the head of the new chain; np always points at the
	 * link field where the next new buffer should be hung.
	 */
	n = n0 = NULL;
	np = &n0;
	dst = NULL;
	space = 0;

	/*
	 * Copy each source buffer into compressed chain
	 */
	while (m) {

		if (space == 0) {

			/*
			 * Allocate another buffer for compressed chain -
			 * try a large external buffer first, then fall
			 * back to a small one
			 */
			KB_ALLOCEXT(n, ATM_DEV_CMPR_LG, KB_F_NOWAIT, KB_T_DATA);
			if (n) {
				space = ATM_DEV_CMPR_LG;
			} else {
				KB_ALLOC(n, ATM_DEV_CMPR_SM, KB_F_NOWAIT,
					KB_T_DATA);
				if (n) {
					space = ATM_DEV_CMPR_SM;
				} else {
					/*
					 * Unable to get any new buffers, so
					 * just return the partially compressed
					 * chain and hope...
					 * (the remaining source chain is
					 * appended uncompressed)
					 */
					*np = m;
					break;
				}
			}

			/* Fresh buffer: empty, dst tracks its fill point */
			KB_HEADSET(n, 0);
			KB_LEN(n) = 0;
			KB_BFRSTART(n, dst, caddr_t);

			*np = n;
			np = &KB_NEXT(n);
		}

		/*
		 * Copy what we can from source buffer
		 */
		len = MIN(space, KB_LEN(m));
		KB_DATASTART(m, src, caddr_t);
		KM_COPY(src, dst, len);

		/*
		 * Adjust for copied data - advance the destination fill
		 * point, consume from the source head and grow the
		 * destination tail
		 */
		dst += len;
		space -= len;

		KB_HEADADJ(m, -len);
		KB_TAILADJ(n, len);

		/*
		 * If we've exhausted our current source buffer, free it
		 * and move to the next one
		 */
		if (KB_LEN(m) == 0) {
			KB_FREEONE(m, m);
		}
	}

	return (n0);
}
766
767
768 /*
769 * Locate VCC entry
770 *
771 * This function will return the VCC entry for a specified interface and
772 * VPI/VCI value.
773 *
774 * Arguments:
775 * cup pointer to interface unit structure
776 * vpi VPI value
777 * vci VCI value
778 * type VCC type
779 *
780 * Returns:
781 * vcp pointer to located VCC entry matching
782 * NULL no VCC found
783 *
784 */
785 Cmn_vcc *
786 atm_dev_vcc_find(cup, vpi, vci, type)
787 Cmn_unit *cup;
788 u_int vpi;
789 u_int vci;
790 u_int type;
791 {
792 Cmn_vcc *cvp;
793 int s = splnet();
794
795 /*
796 * Go find VCC
797 *
798 * (Probably should stick in a hash table some time)
799 */
800 for (cvp = cup->cu_vcc; cvp; cvp = cvp->cv_next) {
801 struct vccb *vcp;
802
803 vcp = cvp->cv_connvc->cvc_vcc;
804 if ((vcp->vc_vci == vci) && (vcp->vc_vpi == vpi) &&
805 ((vcp->vc_type & type) == type))
806 break;
807 }
808
809 (void) splx(s);
810 return (cvp);
811 }
812
813
814 #ifdef notdef
815 /*
816 * Module unloading notification
817 *
818 * This function must be called just prior to unloading the module from
819 * memory. All allocated memory will be freed here and anything else that
820 * needs cleaning up.
821 *
822 * Arguments:
823 * none
824 *
825 * Returns:
826 * none
827 *
828 */
829 void
830 atm_unload()
831 {
832 Mem_blk *mbp;
833 Mem_ent *mep;
834 int s, i;
835
836 s = splimp();
837
838 /*
839 * Free up all of our memory management storage
840 */
841 while (mbp = atm_mem_head) {
842
843 /*
844 * Make sure users have freed up all of their memory
845 */
846 for (i = 0; i < MEM_NMEMENT; i++) {
847 if (mbp->mb_mement[i].me_uaddr != NULL) {
848 panic("atm_unload: unfreed memory");
849 }
850 }
851
852 atm_mem_head = mbp->mb_next;
853
854 /*
855 * Hand this block back to the kernel
856 */
857 KM_FREE((caddr_t) mbp, sizeof(Mem_blk), M_DEVBUF);
858 }
859
860 (void) splx(s);
861
862 return;
863 }
864 #endif /* notdef */
865
866
867 /*
868 * Print a PDU
869 *
870 * Arguments:
871 * cup pointer to device unit
872 * cvp pointer to VCC control block
873 * m pointer to pdu buffer chain
874 * msg pointer to message string
875 *
876 * Returns:
877 * none
878 *
879 */
880 void
881 atm_dev_pdu_print(const Cmn_unit *cup, const Cmn_vcc *cvp,
882 const KBuffer *m, const char *msg)
883 {
884 char buf[128];
885
886 snprintf(buf, sizeof(buf), "%s vcc=(%d,%d)", msg,
887 cvp->cv_connvc->cvc_vcc->vc_vpi,
888 cvp->cv_connvc->cvc_vcc->vc_vci);
889
890 atm_pdu_print(m, buf);
891 }