/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/printk.h>

#include <dev/mlx5/mlx5_en/en.h>

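/*
 * Return the CQE at the CQ's current consumer index, or NULL when the
 * hardware has not yet released ownership of that entry to software.
 */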
struct mlx5_cqe64 *
mlx5e_get_cqe(struct mlx5e_cq *cq)
{
	struct mlx5_cqe64 *cqe;

	cqe = mlx5_cqwq_get_wqe(&cq->wq, mlx5_cqwq_get_ci(&cq->wq));

	/*
	 * The CQE is owned by software only when its ownership bit matches
	 * the expected value for the current pass over the CQ ring.
	 */
	if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK)
		return (NULL);

	/* ensure cqe content is read after cqe ownership bit */
	atomic_thread_fence_acq();

	return (cqe);
}

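/*
 * Completion queue error event handler; logs the CQ number and the
 * asynchronous event type reported by the hardware.
 */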
void
mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

	mlx5_en_err(cq->priv->ifp, "cqn=0x%.6x event=0x%.2x\n",
	    mcq->cqn, event);
}

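/*
 * Log and hex-dump an error CQE, except for the "flushed in error"
 * syndromes that are expected when a queue is torn down or has been
 * moved to the error state by software.
 */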
void
mlx5e_dump_err_cqe(struct mlx5e_cq *cq, u32 qn, const struct mlx5_err_cqe *err_cqe)
{
	u32 ci;

	/* Don't print flushed in error syndromes. */
	if (err_cqe->vendor_err_synd == 0xf9 && err_cqe->syndrome == 0x05)
		return;
	/* Don't print when the queue is set to error state by software. */
	if (err_cqe->vendor_err_synd == 0xf5 && err_cqe->syndrome == 0x05)
		return;

	/* Compute the consumer index of the CQE just consumed. */
	ci = (cq->wq.cc - 1) & cq->wq.sz_m1;

	mlx5_en_err(cq->priv->ifp,
	    "Error CQE on CQN 0x%x, CI 0x%x, QN 0x%x, OPCODE 0x%x, SYNDROME 0x%x, VENDOR SYNDROME 0x%x\n",
	    cq->mcq.cqn, ci, qn, err_cqe->op_own >> 4,
	    err_cqe->syndrome, err_cqe->vendor_err_synd);

	print_hex_dump(NULL, NULL, DUMP_PREFIX_OFFSET,
	    16, 1, err_cqe, sizeof(*err_cqe), false);
}