FreeBSD/Linux Kernel Cross Reference
sys/mips/nlm/hal/fmn.c
1 /*-
2 * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
3 * reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in
13 * the documentation and/or other materials provided with the
14 * distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * NETLOGIC_BSD */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 #include <sys/types.h>
33 #include <sys/systm.h>
34
35 #include <machine/cpufunc.h>
36 #include <mips/nlm/hal/mips-extns.h>
37 #include <mips/nlm/hal/haldefs.h>
38 #include <mips/nlm/hal/iomap.h>
39 #include <mips/nlm/hal/fmn.h>
40
/*
 * XLP can take up to 16K of FMN messages per hardware queue as spill.
 * But configuring all 16K causes the total spill memory required to
 * blow up to 192MB for a single chip configuration, and 768MB in a
 * four chip configuration.  Hence for now, we set up the per-queue
 * spill as 1K FMN messages.  With this, the total spill memory needed
 * for 1024 hardware queues (with 12 bytes per single-entry FMN
 * message) becomes (1*1024)*12*1024queues = 12MB.  For the four chip
 * config, the memory needed = 12 * 4 = 48MB.
 */
uint64_t nlm_cms_spill_total_messages = 1 * 1024;
51
/* On a XLP832, we have the following FMN stations:
 * CPU stations: 8
 * PCIE0 stations: 1
 * PCIE1 stations: 1
 * PCIE2 stations: 1
 * PCIE3 stations: 1
 * GDX stations: 1
 * CRYPTO stations: 1
 * RSA stations: 1
 * CMP stations: 1
 * POE stations: 1
 * NAE stations: 1
 * ==================
 * Total : 18 stations per chip
 *
 * For all 4 nodes, there are 18*4 = 72 FMN stations
 */
uint32_t nlm_cms_total_stations = 18 * 4 /*xlp_num_nodes*/;
/* Per-bank bitmap of 32-entry on-chip output-buffer segments already
 * handed out by nlm_cms_alloc_onchip_q(); bit i set == segment i taken. */
uint32_t cms_onchip_seg_availability[CMS_ON_CHIP_PER_QUEUE_SPACE];
71
72 /**
73 * Takes inputs as node, queue_size and maximum number of queues.
74 * Calculates the base, start & end and returns the same for a
75 * defined qid.
76 *
77 * The output queues are maintained in the internal output buffer
78 * which is a on-chip SRAM structure. For the actial hardware
79 * internal implementation, It is a structure which consists
80 * of eight banks of 4096-entry x message-width SRAMs. The SRAM
81 * implementation is designed to run at 1GHz with a 1-cycle read/write
82 * access. A read/write transaction can be initiated for each bank
83 * every cycle for a total of eight accesses per cycle. Successive
84 * entries of the same output queue are placed in successive banks.
85 * This is done to spread different read & write accesses to same/different
86 * output queue over as many different banks as possible so that they
87 * can be scheduled concurrently. Spreading the accesses to as many banks
88 * as possible to maximize the concurrency internally is important for
89 * achieving the desired peak throughput. This is done by h/w implementation
90 * itself.
91 *
92 * Output queues are allocated from this internal output buffer by
93 * software. The total capacity of the output buffer is 32K-entry.
94 * Each output queue can be sized from 32-entry to 1024-entry in
95 * increments of 32-entry. This is done by specifying a Start & a
96 * End pointer: pointers to the first & last 32-entry chunks allocated
97 * to the output queue.
98 *
99 * To optimize the storage required for 1024 OQ pointers, the upper 5-bits
100 * are shared by the Start & the End pointer. The side-effect of this
101 * optimization is that an OQ can't cross a 1024-entry boundary. Also, the
102 * lower 5-bits don't need to be specified in the Start & the End pointer
103 * as the allocation is in increments of 32-entries.
104 *
105 * Queue occupancy is tracked by a Head & a Tail pointer. Tail pointer
106 * indicates the location to which next entry will be written & Head
107 * pointer indicates the location from which next entry will be read. When
108 * these pointers reach the top of the allocated space (indicated by the
109 * End pointer), they are reset to the bottom of the allocated space
110 * (indicated by the Start pointer).
111 *
112 * Output queue pointer information:
113 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
114 *
115 * 14 10 9 5 4 0
116 * ------------------
117 * | base ptr |
118 * ------------------
119 * ----------------
120 * | start ptr |
121 * ----------------
122 * ----------------
123 * | end ptr |
124 * ----------------
125 * ------------------------------------
126 * | head ptr |
127 * ------------------------------------
128 * ------------------------------------
129 * | tail ptr |
130 * ------------------------------------
131 * Note:
132 * A total of 1024 segments can sit on one software-visible "bank"
133 * of internal SRAM. Each segment contains 32 entries. Also note
134 * that sw-visible "banks" are not the same as the actual internal
135 * 8-bank implementation of hardware. It is an optimization of
136 * internal access.
137 *
138 */
139
140 void nlm_cms_setup_credits(uint64_t base, int destid, int srcid, int credit)
141 {
142 uint32_t val;
143
144 val = ((credit << 24) | (destid << 12) | (srcid << 0));
145 nlm_write_cms_reg(base, CMS_OUTPUTQ_CREDIT_CFG, val);
146
147 }
148
/*
 * Apply the default configuration to output queue 'qid': one 32-entry
 * on-chip segment and, when 'spill_en' is non-zero, a 16KB spill area
 * carved from 'spill_base'.
 *
 * Always returns 0.  NOTE(review): the return values of
 * nlm_cms_alloc_onchip_q() and nlm_cms_alloc_spill_q() are ignored, so
 * allocation conflicts go unreported to the caller.
 */
int nlm_cms_config_onchip_queue (uint64_t base, uint64_t spill_base,
					int qid, int spill_en)
{

	/* Configure 32 as onchip queue depth */
	nlm_cms_alloc_onchip_q(base, qid, 1);

	/* Spill configuration */
	if (spill_en) {
		/* Configure 4*4KB = 16K as spill size */
		nlm_cms_alloc_spill_q(base, qid, spill_base, 4);
	}

#if 0
	/* configure credits for src cpu0, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_CPU0_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src cpu1, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_CPU1_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src cpu2, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_CPU2_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src cpu3, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_CPU3_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src cpu4, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_CPU4_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src cpu5, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_CPU5_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src cpu6, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_CPU6_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src cpu7, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_CPU7_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src pcie0, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_PCIE0_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src pcie1, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_PCIE1_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src pcie2, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_PCIE2_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src pcie3, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_PCIE3_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src dte, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_DTE_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src rsa_ecc, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_RSA_ECC_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src crypto, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_CRYPTO_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src cmp, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_CMP_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src poe, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_POE_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));

	/* configure credits for src nae, on this queue */
	nlm_cms_setup_credits(base, qid, CMS_NAE_SRC_STID,
		CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
		nlm_cms_spill_total_messages));
#endif

	return 0;
}
256
257 /*
258 * base - CMS module base address for this node.
259 * qid - is the output queue id otherwise called as vc id
260 * spill_base - is the 40-bit physical address of spill memory. Must be
261 4KB aligned.
262 * nsegs - No of segments where a "1" indicates 4KB. Spill size must be
263 * a multiple of 4KB.
264 */
/*
 * Attach spill memory to output queue 'qid'.
 *
 * base       - CMS module base address for this node.
 * qid        - output queue id, otherwise called the vc id.
 * spill_base - 40-bit physical address of spill memory; must be 4KB
 *              aligned.
 * nsegs      - number of segments, where a "1" indicates 4KB; spill
 *              size must be a multiple of 4KB.
 *
 * Returns 0 on success, 1 when 'nsegs' exceeds the per-queue limit.
 */
int nlm_cms_alloc_spill_q(uint64_t base, int qid, uint64_t spill_base,
				int nsegs)
{
	uint64_t queue_config;
	uint32_t spill_start;

	if(nsegs > CMS_MAX_SPILL_SEGMENTS_PER_QUEUE) {
		return 1;
	}

	/* NOTE(review): this read is dead -- queue_config is fully
	 * overwritten below, so any fields programmed earlier (e.g. the
	 * on-chip allocation) are discarded by the final write.  Confirm
	 * against the XLP CMS register spec whether a read-modify-write
	 * was intended here. */
	queue_config = nlm_read_cms_reg(base,(CMS_OUTPUTQ_CONFIG(qid)));

	/* 6-bit spill start pointer, in 4KB units of the base address */
	spill_start = ((spill_base >> 12) & 0x3F);
	/* Spill configuration: enable bit, 22-bit base, end and start
	 * pointers.  End pointer = start + nsegs - 1 (inclusive). */
	queue_config = (((uint64_t)CMS_SPILL_ENA << 62) |
				(((spill_base >> 18) & 0x3FFFFF) << 27) |
				(spill_start + nsegs - 1) << 21 |
				(spill_start << 15));

	nlm_write_cms_reg(base,(CMS_OUTPUTQ_CONFIG(qid)),queue_config);

	return 0;
}
288
289 /*
290 * base - CMS module base address for this node.
291 * qid - is the output queue id otherwise called as vc id
292 * nsegs - No of segments where a "1" indicates 32 credits. On chip
293 * credits must be a multiple of 32.
294 */
/*
 * Allocate 'nsegs' 32-entry segments of the on-chip output buffer to
 * queue 'qid' and program the queue's base/start/end pointers.
 *
 * base  - CMS module base address for this node.
 * qid   - output queue id, otherwise called the vc id.
 * nsegs - number of segments, where a "1" indicates 32 credits; on-chip
 *         credits must be a multiple of 32.
 *
 * Returns 0 on success, 1 on an invalid request or when one of the
 * segments is already allocated.
 *
 * NOTE(review): allocation state lives in the function-local static
 * 'curr_end' plus the global availability bitmap, so this function is
 * neither reentrant nor SMP safe -- presumably only called during
 * single-threaded init; verify before using elsewhere.
 */
int nlm_cms_alloc_onchip_q(uint64_t base, int qid, int nsegs)
{
	static uint32_t curr_end = 0;	/* next free segment, linear index */
	uint64_t queue_config;
	int onchipbase, start, last;
	uint8_t i;

	if( ((curr_end + nsegs) > CMS_MAX_ONCHIP_SEGMENTS) ||
		(nsegs > CMS_ON_CHIP_PER_QUEUE_SPACE) ) {
		/* Invalid configuration */
		return 1;
	}
	/* A queue may not cross a 1024-entry (32-segment) boundary; if
	 * the request would, round up to the next 32-segment bank. */
	if(((curr_end % 32) + nsegs - 1) <= 31) {
		onchipbase = (curr_end / 32);
		start = (curr_end % 32);
		curr_end += nsegs;
	} else {
		onchipbase = (curr_end / 32) + 1;
		start = 0;
		curr_end = ((onchipbase * 32) + nsegs);
	}
	last = start + nsegs - 1;

	/* Fail if any requested segment in this bank is already taken */
	for(i = start;i <= last;i++) {
		if(cms_onchip_seg_availability[onchipbase] & (1 << i)) {
			/* Conflict!!! segment is already allocated */
			return 1;
		}
	}
	/* Update the availability bitmap as consumed */
	for(i = start; i <= last; i++) {
		cms_onchip_seg_availability[onchipbase] |= (1 << i);
	}

	/* NOTE(review): dead read -- queue_config is completely
	 * overwritten below; confirm whether a read-modify-write of the
	 * other config fields was intended. */
	queue_config = nlm_read_cms_reg(base,(CMS_OUTPUTQ_CONFIG(qid)));

	/* On chip configuration: enable bit, 5-bit base/end/start ptrs */
	queue_config = (((uint64_t)CMS_QUEUE_ENA << 63) |
			((onchipbase & 0x1f) << 10) |
			((last & 0x1f) << 5) |
			(start & 0x1f));

	nlm_write_cms_reg(base,(CMS_OUTPUTQ_CONFIG(qid)),queue_config);

	return 0;
}
341
342 void nlm_cms_default_setup(int node, uint64_t spill_base, int spill_en,
343 int popq_en)
344 {
345 int j, k, vc;
346 int queue;
347 uint64_t base;
348
349 base = nlm_get_cms_regbase(node);
350 for(j=0; j<1024; j++) {
351 printf("Qid:0x%04d Val:0x%016jx\n",j,
352 (uintmax_t)nlm_cms_get_onchip_queue (base, j));
353 }
354 /* Enable all cpu push queues */
355 for (j=0; j<XLP_MAX_CORES; j++)
356 for (k=0; k<XLP_MAX_THREADS; k++)
357 for (vc=0; vc<CMS_MAX_VCPU_VC; vc++) {
358 /* TODO : remove this once SMP works */
359 if( (j == 0) && (k == 0) )
360 continue;
361 queue = CMS_CPU_PUSHQ(node, j, k, vc);
362 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
363 }
364
365 /* Enable pcie 0 push queue */
366 for (j=CMS_PCIE0_QID(0); j<CMS_PCIE0_MAXQID; j++) {
367 queue = CMS_IO_PUSHQ(node, j);
368 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
369 }
370
371 /* Enable pcie 1 push queue */
372 for (j=CMS_PCIE1_QID(0); j<CMS_PCIE1_MAXQID; j++) {
373 queue = CMS_IO_PUSHQ(node, j);
374 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
375 }
376
377 /* Enable pcie 2 push queue */
378 for (j=CMS_PCIE2_QID(0); j<CMS_PCIE2_MAXQID; j++) {
379 queue = CMS_IO_PUSHQ(node, j);
380 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
381 }
382
383 /* Enable pcie 3 push queue */
384 for (j=CMS_PCIE3_QID(0); j<CMS_PCIE3_MAXQID; j++) {
385 queue = CMS_IO_PUSHQ(node, j);
386 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
387 }
388
389 /* Enable DTE push queue */
390 for (j=CMS_DTE_QID(0); j<CMS_DTE_MAXQID; j++) {
391 queue = CMS_IO_PUSHQ(node, j);
392 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
393 }
394
395 /* Enable RSA/ECC push queue */
396 for (j=CMS_RSA_ECC_QID(0); j<CMS_RSA_ECC_MAXQID; j++) {
397 queue = CMS_IO_PUSHQ(node, j);
398 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
399 }
400
401 /* Enable crypto push queue */
402 for (j=CMS_CRYPTO_QID(0); j<CMS_CRYPTO_MAXQID; j++) {
403 queue = CMS_IO_PUSHQ(node, j);
404 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
405 }
406
407 /* Enable CMP push queue */
408 for (j=CMS_CMP_QID(0); j<CMS_CMP_MAXQID; j++) {
409 queue = CMS_IO_PUSHQ(node, j);
410 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
411 }
412
413 /* Enable POE push queue */
414 for (j=CMS_POE_QID(0); j<CMS_POE_MAXQID; j++) {
415 queue = CMS_IO_PUSHQ(node, j);
416 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
417 }
418
419 /* Enable NAE push queue */
420 for (j=CMS_NAE_QID(0); j<CMS_NAE_MAXQID; j++) {
421 queue = CMS_IO_PUSHQ(node, j);
422 nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
423 }
424
425 /* Enable all pop queues */
426 if (popq_en) {
427 for (j=CMS_POPQ_QID(0); j<CMS_POPQ_MAXQID; j++) {
428 queue = CMS_POPQ(node, j);
429 nlm_cms_config_onchip_queue(base, spill_base, queue,
430 spill_en);
431 }
432 }
433 }
434
/* Read back the raw configuration register of output queue 'qid'. */
uint64_t nlm_cms_get_onchip_queue (uint64_t base, int qid)
{
	uint64_t cfg;

	cfg = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));
	return cfg;
}
439
/*
 * OR the bits of 'val' into the configuration register of output
 * queue 'qid' (read-modify-write; existing bits are preserved).
 */
void nlm_cms_set_onchip_queue (uint64_t base, int qid, uint64_t val)
{
	uint64_t cfg;

	cfg = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid)) | val;
	nlm_write_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid), cfg);
}
448
/*
 * Set the level-interrupt sub-type (bits 54-55) and value (bits 56-..)
 * fields in queue 'qid's configuration register.  Bits are OR-ed in,
 * so previously set field bits are not cleared.
 */
void nlm_cms_per_queue_level_intr(uint64_t base, int qid, int sub_type,
	int intr_val)
{
	uint64_t cfg;

	cfg = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));
	cfg |= (uint64_t)intr_val << 56;
	cfg |= (uint64_t)sub_type << 54;
	nlm_write_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid), cfg);
}
461
462 void nlm_cms_level_intr(int node, int sub_type, int intr_val)
463 {
464 int j, k, vc;
465 int queue;
466 uint64_t base;
467
468 base = nlm_get_cms_regbase(node);
469 /* setup level intr config on all cpu push queues */
470 for (j=0; j<XLP_MAX_CORES; j++)
471 for (k=0; k<XLP_MAX_THREADS; k++)
472 for (vc=0; vc<CMS_MAX_VCPU_VC; vc++) {
473 queue = CMS_CPU_PUSHQ(node, j, k, vc);
474 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
475 }
476
477 /* setup level intr config on all pcie 0 push queue */
478 for (j=CMS_PCIE0_QID(0); j<CMS_PCIE0_MAXQID; j++) {
479 queue = CMS_IO_PUSHQ(node, j);
480 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
481 }
482
483 /* setup level intr config on all pcie 1 push queue */
484 for (j=CMS_PCIE1_QID(0); j<CMS_PCIE1_MAXQID; j++) {
485 queue = CMS_IO_PUSHQ(node, j);
486 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
487 }
488
489 /* setup level intr config on all pcie 2 push queue */
490 for (j=CMS_PCIE2_QID(0); j<CMS_PCIE2_MAXQID; j++) {
491 queue = CMS_IO_PUSHQ(node, j);
492 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
493 }
494
495 /* setup level intr config on all pcie 3 push queue */
496 for (j=CMS_PCIE3_QID(0); j<CMS_PCIE3_MAXQID; j++) {
497 queue = CMS_IO_PUSHQ(node, j);
498 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
499 }
500
501 /* setup level intr config on all DTE push queue */
502 for (j=CMS_DTE_QID(0); j<CMS_DTE_MAXQID; j++) {
503 queue = CMS_IO_PUSHQ(node, j);
504 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
505 }
506
507 /* setup level intr config on all RSA/ECC push queue */
508 for (j=CMS_RSA_ECC_QID(0); j<CMS_RSA_ECC_MAXQID; j++) {
509 queue = CMS_IO_PUSHQ(node, j);
510 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
511 }
512
513 /* setup level intr config on all crypto push queue */
514 for (j=CMS_CRYPTO_QID(0); j<CMS_CRYPTO_MAXQID; j++) {
515 queue = CMS_IO_PUSHQ(node, j);
516 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
517 }
518
519 /* setup level intr config on all CMP push queue */
520 for (j=CMS_CMP_QID(0); j<CMS_CMP_MAXQID; j++) {
521 queue = CMS_IO_PUSHQ(node, j);
522 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
523 }
524
525 /* setup level intr config on all POE push queue */
526 for (j=CMS_POE_QID(0); j<CMS_POE_MAXQID; j++) {
527 queue = CMS_IO_PUSHQ(node, j);
528 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
529 }
530
531 /* setup level intr config on all NAE push queue */
532 for (j=CMS_NAE_QID(0); j<CMS_NAE_MAXQID; j++) {
533 queue = CMS_IO_PUSHQ(node, j);
534 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
535 }
536
537 /* setup level intr config on all pop queues */
538 for (j=CMS_POPQ_QID(0); j<CMS_POPQ_MAXQID; j++) {
539 queue = CMS_POPQ(node, j);
540 nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
541 }
542 }
543
/*
 * Set the timer-interrupt sub-type (bits 49-50) and value (bits 51-..)
 * fields in queue 'qid's configuration register.  Bits are OR-ed in,
 * so previously set field bits are not cleared.
 */
void nlm_cms_per_queue_timer_intr(uint64_t base, int qid, int sub_type,
	int intr_val)
{
	uint64_t cfg;

	cfg = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));
	cfg |= (uint64_t)intr_val << 51;
	cfg |= (uint64_t)sub_type << 49;
	nlm_write_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid), cfg);
}
556
557 void nlm_cms_timer_intr(int node, int en, int sub_type, int intr_val)
558 {
559 int j, k, vc;
560 int queue;
561 uint64_t base;
562
563 base = nlm_get_cms_regbase(node);
564 /* setup timer intr config on all cpu push queues */
565 for (j=0; j<XLP_MAX_CORES; j++)
566 for (k=0; k<XLP_MAX_THREADS; k++)
567 for (vc=0; vc<CMS_MAX_VCPU_VC; vc++) {
568 queue = CMS_CPU_PUSHQ(node, j, k, vc);
569 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
570 }
571
572 /* setup timer intr config on all pcie 0 push queue */
573 for (j=CMS_PCIE0_QID(0); j<CMS_PCIE0_MAXQID; j++) {
574 queue = CMS_IO_PUSHQ(node, j);
575 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
576 }
577
578 /* setup timer intr config on all pcie 1 push queue */
579 for (j=CMS_PCIE1_QID(0); j<CMS_PCIE1_MAXQID; j++) {
580 queue = CMS_IO_PUSHQ(node, j);
581 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
582 }
583
584 /* setup timer intr config on all pcie 2 push queue */
585 for (j=CMS_PCIE2_QID(0); j<CMS_PCIE2_MAXQID; j++) {
586 queue = CMS_IO_PUSHQ(node, j);
587 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
588 }
589
590 /* setup timer intr config on all pcie 3 push queue */
591 for (j=CMS_PCIE3_QID(0); j<CMS_PCIE3_MAXQID; j++) {
592 queue = CMS_IO_PUSHQ(node, j);
593 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
594 }
595
596 /* setup timer intr config on all DTE push queue */
597 for (j=CMS_DTE_QID(0); j<CMS_DTE_MAXQID; j++) {
598 queue = CMS_IO_PUSHQ(node, j);
599 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
600 }
601
602 /* setup timer intr config on all RSA/ECC push queue */
603 for (j=CMS_RSA_ECC_QID(0); j<CMS_RSA_ECC_MAXQID; j++) {
604 queue = CMS_IO_PUSHQ(node, j);
605 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
606 }
607
608 /* setup timer intr config on all crypto push queue */
609 for (j=CMS_CRYPTO_QID(0); j<CMS_CRYPTO_MAXQID; j++) {
610 queue = CMS_IO_PUSHQ(node, j);
611 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
612 }
613
614 /* setup timer intr config on all CMP push queue */
615 for (j=CMS_CMP_QID(0); j<CMS_CMP_MAXQID; j++) {
616 queue = CMS_IO_PUSHQ(node, j);
617 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
618 }
619
620 /* setup timer intr config on all POE push queue */
621 for (j=CMS_POE_QID(0); j<CMS_POE_MAXQID; j++) {
622 queue = CMS_IO_PUSHQ(node, j);
623 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
624 }
625
626 /* setup timer intr config on all NAE push queue */
627 for (j=CMS_NAE_QID(0); j<CMS_NAE_MAXQID; j++) {
628 queue = CMS_IO_PUSHQ(node, j);
629 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
630 }
631
632 /* setup timer intr config on all pop queues */
633 for (j=CMS_POPQ_QID(0); j<CMS_POPQ_MAXQID; j++) {
634 queue = CMS_POPQ(node, j);
635 nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
636 }
637 }
638
639 /* returns 1 if interrupt has been generated for this output queue */
/* Returns 1 if an interrupt has been generated for this output queue
 * (bit 59 of the queue configuration register), else 0. */
int nlm_cms_outputq_intr_check(uint64_t base, int qid)
{
	uint64_t cfg;

	cfg = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));
	return (int)((cfg >> 59) & 0x1);
}
647
/* Clear the pending-interrupt indication for output queue 'qid' by
 * writing bit 59 back into the configuration register. */
void nlm_cms_outputq_clr_intr(uint64_t base, int qid)
{
	uint64_t cfg;

	cfg = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid)) | (1ULL << 59);
	nlm_write_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid), cfg);
}
655
656 void nlm_cms_illegal_dst_error_intr(uint64_t base, int en)
657 {
658 uint64_t val;
659
660 val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
661 val |= (en<<8);
662 nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
663 }
664
665 void nlm_cms_timeout_error_intr(uint64_t base, int en)
666 {
667 uint64_t val;
668
669 val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
670 val |= (en<<7);
671 nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
672 }
673
674 void nlm_cms_biu_error_resp_intr(uint64_t base, int en)
675 {
676 uint64_t val;
677
678 val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
679 val |= (en<<6);
680 nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
681 }
682
683 void nlm_cms_spill_uncorrectable_ecc_error_intr(uint64_t base, int en)
684 {
685 uint64_t val;
686
687 val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
688 val |= (en<<5) | (en<<3);
689 nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
690 }
691
692 void nlm_cms_spill_correctable_ecc_error_intr(uint64_t base, int en)
693 {
694 uint64_t val;
695
696 val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
697 val |= (en<<4) | (en<<2);
698 nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
699 }
700
701 void nlm_cms_outputq_uncorrectable_ecc_error_intr(uint64_t base, int en)
702 {
703 uint64_t val;
704
705 val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
706 val |= (en<<1);
707 nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
708 }
709
710 void nlm_cms_outputq_correctable_ecc_error_intr(uint64_t base, int en)
711 {
712 uint64_t val;
713
714 val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
715 val |= (en<<0);
716 nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
717 }
718
719 uint64_t nlm_cms_network_error_status(uint64_t base)
720 {
721 return nlm_read_cms_reg(base, CMS_MSG_ERR);
722 }
723
/* Extract the 4-bit error code (bits 12-15) from an error status word. */
int nlm_cms_get_net_error_code(uint64_t err)
{
	return (int)((err & 0xf000ULL) >> 12);
}
728
/* Extract the 9-bit ECC syndrome (bits 32-40) from an error status word. */
int nlm_cms_get_net_error_syndrome(uint64_t err)
{
	return (int)((err & (0x1ffULL << 32)) >> 32);
}
733
/* Extract the 15-bit RAM index (bits 44-58) from an error status word. */
int nlm_cms_get_net_error_ramindex(uint64_t err)
{
	return (int)((err & (0x7fffULL << 44)) >> 44);
}
738
/* Extract the 12-bit output queue id (bits 16-27) from an error status word. */
int nlm_cms_get_net_error_outputq(uint64_t err)
{
	return (int)((err & (0xfffULL << 16)) >> 16);
}
743
744 /*========================= FMN Tracing related APIs ================*/
745
746 void nlm_cms_trace_setup(uint64_t base, int en, uint64_t trace_base,
747 uint64_t trace_limit, int match_dstid_en,
748 int dst_id, int match_srcid_en, int src_id,
749 int wrap)
750 {
751 uint64_t val;
752
753 nlm_write_cms_reg(base, CMS_TRACE_BASE_ADDR, trace_base);
754 nlm_write_cms_reg(base, CMS_TRACE_LIMIT_ADDR, trace_limit);
755
756 val = nlm_read_cms_reg(base, CMS_TRACE_CONFIG);
757 val |= (((uint64_t)match_dstid_en << 39) |
758 ((dst_id & 0xfff) << 24) |
759 (match_srcid_en << 23) |
760 ((src_id & 0xfff) << 8) |
761 (wrap << 1) |
762 (en << 0));
763 nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
764 }
765
766 void nlm_cms_endian_byte_swap (uint64_t base, int en)
767 {
768 nlm_write_cms_reg(base, CMS_MSG_ENDIAN_SWAP, en);
769 }
Cache object: 3fed16131274a15ee70e844cd0b62f7e
|