1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
3 *
4 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
5 * Copyright (c) 2005 Intel Corporation. All rights reserved.
6 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
7 * Copyright (c) 2009 HNR Consulting. All rights reserved.
8 * Copyright (c) 2014 Intel Corporation. All rights reserved.
9 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
37 */
38
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41
42 #define LINUXKPI_PARAM_PREFIX ibcore_
43 #define KBUILD_MODNAME "ibcore"
44
45 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46
47 #include <linux/dma-mapping.h>
48 #include <linux/slab.h>
49 #include <linux/module.h>
50 #include <rdma/ib_cache.h>
51
52 #include "mad_priv.h"
53 #include "mad_rmpp.h"
54 #include "smi.h"
55 #include "opa_smi.h"
56 #include "agent.h"
57 #include "core_priv.h"
58
59 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
60 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
61
62 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
63 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
64 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
65 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
66
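/*
 * All MAD-capable ports are kept on ib_mad_port_list.  Each newly registered
 * agent is assigned the next value of ib_mad_client_id as the high 32 bits of
 * its transaction IDs (hi_tid), which is later used to route response MADs
 * back to that agent.
 */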
67 static struct list_head ib_mad_port_list;
68 static u32 ib_mad_client_id = 0;
69
70 /* Port list lock */
71 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
72
73 /* Forward declarations */
74 static int method_in_use(struct ib_mad_mgmt_method_table **method,
75 struct ib_mad_reg_req *mad_reg_req);
76 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
77 static struct ib_mad_agent_private *find_mad_agent(
78 struct ib_mad_port_private *port_priv,
79 const struct ib_mad_hdr *mad);
80 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
81 struct ib_mad_private *mad);
82 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
83 static void timeout_sends(struct work_struct *work);
84 static void local_completions(struct work_struct *work);
85 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
86 struct ib_mad_agent_private *agent_priv,
87 u8 mgmt_class);
88 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
89 struct ib_mad_agent_private *agent_priv);
90 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
91 struct ib_wc *wc);
92 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
93
94 /*
95 * Returns an ib_mad_port_private structure or NULL for a device/port.
96 * Assumes ib_mad_port_list_lock is held.
97 */
98 static inline struct ib_mad_port_private *
99 __ib_get_mad_port(struct ib_device *device, int port_num)
100 {
101 struct ib_mad_port_private *entry;
102
103 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
104 if (entry->device == device && entry->port_num == port_num)
105 return entry;
106 }
107 return NULL;
108 }
109
110 /*
111 * Wrapper function to return an ib_mad_port_private structure or NULL
112 * for a device/port, taking the port list lock around the lookup.
113 */
114 static inline struct ib_mad_port_private *
115 ib_get_mad_port(struct ib_device *device, int port_num)
116 {
117 struct ib_mad_port_private *entry;
118 unsigned long flags;
119
120 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
121 entry = __ib_get_mad_port(device, port_num);
122 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
123
124 return entry;
125 }
126
127 static inline u8 convert_mgmt_class(u8 mgmt_class)
128 {
129 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
130 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
131 0 : mgmt_class;
132 }
133
134 static int get_spl_qp_index(enum ib_qp_type qp_type)
135 {
136 switch (qp_type)
137 {
138 case IB_QPT_SMI:
139 return 0;
140 case IB_QPT_GSI:
141 return 1;
142 default:
143 return -1;
144 }
145 }
146
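/*
 * Management classes 0x30-0x4F ("vendor range 2") carry an OUI in the MAD
 * header; vendor_class_index() converts such a class into an index for the
 * per-OUI vendor class tables.
 */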
147 static int vendor_class_index(u8 mgmt_class)
148 {
149 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
150 }
151
152 static int is_vendor_class(u8 mgmt_class)
153 {
154 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
155 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
156 return 0;
157 return 1;
158 }
159
160 static int is_vendor_oui(char *oui)
161 {
162 if (oui[0] || oui[1] || oui[2])
163 return 1;
164 return 0;
165 }
166
167 static int is_vendor_method_in_use(
168 struct ib_mad_mgmt_vendor_class *vendor_class,
169 struct ib_mad_reg_req *mad_reg_req)
170 {
171 struct ib_mad_mgmt_method_table *method;
172 int i;
173
174 for (i = 0; i < MAX_MGMT_OUI; i++) {
175 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
176 method = vendor_class->method_table[i];
177 if (method) {
178 if (method_in_use(&method, mad_reg_req))
179 return 1;
180 else
181 break;
182 }
183 }
184 }
185 return 0;
186 }
187
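/*
 * A MAD is a response if the R bit of its method is set, if it is a
 * TrapRepress, or, for the BM class, if the response bit of attr_mod is set.
 */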
188 int ib_response_mad(const struct ib_mad_hdr *hdr)
189 {
190 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
191 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
192 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
193 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
194 }
195 EXPORT_SYMBOL(ib_response_mad);
196
197 /*
198 * ib_register_mad_agent - Register to send/receive MADs
199 */
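/*
 * Registration sketch (illustrative only; the handler names and context
 * below belong to the caller, not to this file):
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */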
200 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
201 u8 port_num,
202 enum ib_qp_type qp_type,
203 struct ib_mad_reg_req *mad_reg_req,
204 u8 rmpp_version,
205 ib_mad_send_handler send_handler,
206 ib_mad_recv_handler recv_handler,
207 void *context,
208 u32 registration_flags)
209 {
210 struct ib_mad_port_private *port_priv;
211 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
212 struct ib_mad_agent_private *mad_agent_priv;
213 struct ib_mad_reg_req *reg_req = NULL;
214 struct ib_mad_mgmt_class_table *class;
215 struct ib_mad_mgmt_vendor_class_table *vendor;
216 struct ib_mad_mgmt_vendor_class *vendor_class;
217 struct ib_mad_mgmt_method_table *method;
218 int ret2, qpn;
219 unsigned long flags;
220 u8 mgmt_class, vclass;
221
222 if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
223 (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
224 return ERR_PTR(-EPROTONOSUPPORT);
225
226 /* Validate parameters */
227 qpn = get_spl_qp_index(qp_type);
228 if (qpn == -1) {
229 dev_notice(&device->dev,
230 "ib_register_mad_agent: invalid QP Type %d\n",
231 qp_type);
232 goto error1;
233 }
234
235 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
236 dev_notice(&device->dev,
237 "ib_register_mad_agent: invalid RMPP Version %u\n",
238 rmpp_version);
239 goto error1;
240 }
241
242 /* Validate MAD registration request if supplied */
243 if (mad_reg_req) {
244 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
245 dev_notice(&device->dev,
246 "ib_register_mad_agent: invalid Class Version %u\n",
247 mad_reg_req->mgmt_class_version);
248 goto error1;
249 }
250 if (!recv_handler) {
251 dev_notice(&device->dev,
252 "ib_register_mad_agent: no recv_handler\n");
253 goto error1;
254 }
255 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
256 /*
257 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
258 * one in this range currently allowed
259 */
260 if (mad_reg_req->mgmt_class !=
261 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
262 dev_notice(&device->dev,
263 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
264 mad_reg_req->mgmt_class);
265 goto error1;
266 }
267 } else if (mad_reg_req->mgmt_class == 0) {
268 /*
269 * Class 0 is reserved in IBA and is used for
270 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
271 */
272 dev_notice(&device->dev,
273 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
274 goto error1;
275 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
276 /*
277 * If class is in "new" vendor range,
278 * ensure supplied OUI is not zero
279 */
280 if (!is_vendor_oui(mad_reg_req->oui)) {
281 dev_notice(&device->dev,
282 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
283 mad_reg_req->mgmt_class);
284 goto error1;
285 }
286 }
287 /* Make sure the supplied class is consistent with RMPP */
288 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
289 if (rmpp_version) {
290 dev_notice(&device->dev,
291 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
292 mad_reg_req->mgmt_class);
293 goto error1;
294 }
295 }
296
297 /* Make sure the supplied class is consistent with the QP type */
298 if (qp_type == IB_QPT_SMI) {
299 if ((mad_reg_req->mgmt_class !=
300 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
301 (mad_reg_req->mgmt_class !=
302 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
303 dev_notice(&device->dev,
304 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
305 mad_reg_req->mgmt_class);
306 goto error1;
307 }
308 } else {
309 if ((mad_reg_req->mgmt_class ==
310 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
311 (mad_reg_req->mgmt_class ==
312 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
313 dev_notice(&device->dev,
314 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
315 mad_reg_req->mgmt_class);
316 goto error1;
317 }
318 }
319 } else {
320 /* No registration request supplied */
321 if (!send_handler)
322 goto error1;
323 if (registration_flags & IB_MAD_USER_RMPP)
324 goto error1;
325 }
326
327 /* Validate device and port */
328 port_priv = ib_get_mad_port(device, port_num);
329 if (!port_priv) {
330 dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
331 ret = ERR_PTR(-ENODEV);
332 goto error1;
333 }
334
335 /* Verify that the requested QP is supported. For example, Ethernet
336 * devices will not have QP0. */
337 if (!port_priv->qp_info[qpn].qp) {
338 dev_notice(&device->dev,
339 "ib_register_mad_agent: QP %d not supported\n", qpn);
340 ret = ERR_PTR(-EPROTONOSUPPORT);
341 goto error1;
342 }
343
344 /* Allocate structures */
345 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
346 if (!mad_agent_priv) {
347 ret = ERR_PTR(-ENOMEM);
348 goto error1;
349 }
350
351 if (mad_reg_req) {
352 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
353 if (!reg_req) {
354 ret = ERR_PTR(-ENOMEM);
355 goto error3;
356 }
357 }
358
359 /* Now, fill in the various structures */
360 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
361 mad_agent_priv->reg_req = reg_req;
362 mad_agent_priv->agent.rmpp_version = rmpp_version;
363 mad_agent_priv->agent.device = device;
364 mad_agent_priv->agent.recv_handler = recv_handler;
365 mad_agent_priv->agent.send_handler = send_handler;
366 mad_agent_priv->agent.context = context;
367 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
368 mad_agent_priv->agent.port_num = port_num;
369 mad_agent_priv->agent.flags = registration_flags;
370 spin_lock_init(&mad_agent_priv->lock);
371 INIT_LIST_HEAD(&mad_agent_priv->send_list);
372 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
373 INIT_LIST_HEAD(&mad_agent_priv->done_list);
374 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
375 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
376 INIT_LIST_HEAD(&mad_agent_priv->local_list);
377 INIT_WORK(&mad_agent_priv->local_work, local_completions);
378 atomic_set(&mad_agent_priv->refcount, 1);
379 init_completion(&mad_agent_priv->comp);
380
381 spin_lock_irqsave(&port_priv->reg_lock, flags);
382 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
383
384 /*
385 * Make sure the MAD registration (if supplied)
386 * does not overlap with any existing registrations
387 */
388 if (mad_reg_req) {
389 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
390 if (!is_vendor_class(mgmt_class)) {
391 class = port_priv->version[mad_reg_req->
392 mgmt_class_version].class;
393 if (class) {
394 method = class->method_table[mgmt_class];
395 if (method) {
396 if (method_in_use(&method,
397 mad_reg_req))
398 goto error4;
399 }
400 }
401 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
402 mgmt_class);
403 } else {
404 /* "New" vendor class range */
405 vendor = port_priv->version[mad_reg_req->
406 mgmt_class_version].vendor;
407 if (vendor) {
408 vclass = vendor_class_index(mgmt_class);
409 vendor_class = vendor->vendor_class[vclass];
410 if (vendor_class) {
411 if (is_vendor_method_in_use(
412 vendor_class,
413 mad_reg_req))
414 goto error4;
415 }
416 }
417 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
418 }
419 if (ret2) {
420 ret = ERR_PTR(ret2);
421 goto error4;
422 }
423 }
424
425 /* Add the MAD agent to the port's agent list */
426 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
427 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
428
429 return &mad_agent_priv->agent;
430
431 error4:
432 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
433 kfree(reg_req);
434 error3:
435 kfree(mad_agent_priv);
436 error1:
437 return ret;
438 }
439 EXPORT_SYMBOL(ib_register_mad_agent);
440
441 static inline int is_snooping_sends(int mad_snoop_flags)
442 {
443 return (mad_snoop_flags &
444 (/*IB_MAD_SNOOP_POSTED_SENDS |
445 IB_MAD_SNOOP_RMPP_SENDS |*/
446 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
447 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
448 }
449
450 static inline int is_snooping_recvs(int mad_snoop_flags)
451 {
452 return (mad_snoop_flags &
453 (IB_MAD_SNOOP_RECVS /*|
454 IB_MAD_SNOOP_RMPP_RECVS*/));
455 }
456
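/*
 * Find a free slot in the QP's snoop table for this snoop agent, growing the
 * table by one entry if it is full.  Returns the slot index or -ENOMEM.
 */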
457 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
458 struct ib_mad_snoop_private *mad_snoop_priv)
459 {
460 struct ib_mad_snoop_private **new_snoop_table;
461 unsigned long flags;
462 int i;
463
464 spin_lock_irqsave(&qp_info->snoop_lock, flags);
465 /* Check for empty slot in array. */
466 for (i = 0; i < qp_info->snoop_table_size; i++)
467 if (!qp_info->snoop_table[i])
468 break;
469
470 if (i == qp_info->snoop_table_size) {
471 /* Grow table. */
472 new_snoop_table = krealloc(qp_info->snoop_table,
473 sizeof mad_snoop_priv *
474 (qp_info->snoop_table_size + 1),
475 GFP_ATOMIC);
476 if (!new_snoop_table) {
477 i = -ENOMEM;
478 goto out;
479 }
480
481 qp_info->snoop_table = new_snoop_table;
482 qp_info->snoop_table_size++;
483 }
484 qp_info->snoop_table[i] = mad_snoop_priv;
485 atomic_inc(&qp_info->snoop_count);
486 out:
487 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
488 return i;
489 }
490
491 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
492 u8 port_num,
493 enum ib_qp_type qp_type,
494 int mad_snoop_flags,
495 ib_mad_snoop_handler snoop_handler,
496 ib_mad_recv_handler recv_handler,
497 void *context)
498 {
499 struct ib_mad_port_private *port_priv;
500 struct ib_mad_agent *ret;
501 struct ib_mad_snoop_private *mad_snoop_priv;
502 int qpn;
503
504 /* Validate parameters */
505 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
506 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
507 ret = ERR_PTR(-EINVAL);
508 goto error1;
509 }
510 qpn = get_spl_qp_index(qp_type);
511 if (qpn == -1) {
512 ret = ERR_PTR(-EINVAL);
513 goto error1;
514 }
515 port_priv = ib_get_mad_port(device, port_num);
516 if (!port_priv) {
517 ret = ERR_PTR(-ENODEV);
518 goto error1;
519 }
520 /* Allocate structures */
521 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
522 if (!mad_snoop_priv) {
523 ret = ERR_PTR(-ENOMEM);
524 goto error1;
525 }
526
527 /* Now, fill in the various structures */
528 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
529 mad_snoop_priv->agent.device = device;
530 mad_snoop_priv->agent.recv_handler = recv_handler;
531 mad_snoop_priv->agent.snoop_handler = snoop_handler;
532 mad_snoop_priv->agent.context = context;
533 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
534 mad_snoop_priv->agent.port_num = port_num;
535 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
536 init_completion(&mad_snoop_priv->comp);
537 mad_snoop_priv->snoop_index = register_snoop_agent(
538 &port_priv->qp_info[qpn],
539 mad_snoop_priv);
540 if (mad_snoop_priv->snoop_index < 0) {
541 ret = ERR_PTR(mad_snoop_priv->snoop_index);
542 goto error2;
543 }
544
545 atomic_set(&mad_snoop_priv->refcount, 1);
546 return &mad_snoop_priv->agent;
547
548 error2:
549 kfree(mad_snoop_priv);
550 error1:
551 return ret;
552 }
553 EXPORT_SYMBOL(ib_register_mad_snoop);
554
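/*
 * Dropping the last reference completes ->comp, which the unregister paths
 * wait on before freeing the agent.
 */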
555 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
556 {
557 if (atomic_dec_and_test(&mad_agent_priv->refcount))
558 complete(&mad_agent_priv->comp);
559 }
560
561 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
562 {
563 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
564 complete(&mad_snoop_priv->comp);
565 }
566
567 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
568 {
569 struct ib_mad_port_private *port_priv;
570 unsigned long flags;
571
572 /* Note that we could still be handling received MADs */
573
574 /*
575 * Canceling all sends results in dropping received response
576 * MADs, preventing us from queuing additional work
577 */
578 cancel_mads(mad_agent_priv);
579 port_priv = mad_agent_priv->qp_info->port_priv;
580 cancel_delayed_work_sync(&mad_agent_priv->timed_work);
581
582 spin_lock_irqsave(&port_priv->reg_lock, flags);
583 remove_mad_reg_req(mad_agent_priv);
584 list_del(&mad_agent_priv->agent_list);
585 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
586
587 flush_workqueue(port_priv->wq);
588 ib_cancel_rmpp_recvs(mad_agent_priv);
589
590 deref_mad_agent(mad_agent_priv);
591 wait_for_completion(&mad_agent_priv->comp);
592
593 kfree(mad_agent_priv->reg_req);
594 kfree(mad_agent_priv);
595 }
596
597 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
598 {
599 struct ib_mad_qp_info *qp_info;
600 unsigned long flags;
601
602 qp_info = mad_snoop_priv->qp_info;
603 spin_lock_irqsave(&qp_info->snoop_lock, flags);
604 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
605 atomic_dec(&qp_info->snoop_count);
606 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
607
608 deref_snoop_agent(mad_snoop_priv);
609 wait_for_completion(&mad_snoop_priv->comp);
610
611 kfree(mad_snoop_priv);
612 }
613
614 /*
615 * ib_unregister_mad_agent - Unregisters a client from using MAD services
616 */
617 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
618 {
619 struct ib_mad_agent_private *mad_agent_priv;
620 struct ib_mad_snoop_private *mad_snoop_priv;
621
622 /* If the TID is zero, the agent can only snoop. */
623 if (mad_agent->hi_tid) {
624 mad_agent_priv = container_of(mad_agent,
625 struct ib_mad_agent_private,
626 agent);
627 unregister_mad_agent(mad_agent_priv);
628 } else {
629 mad_snoop_priv = container_of(mad_agent,
630 struct ib_mad_snoop_private,
631 agent);
632 unregister_mad_snoop(mad_snoop_priv);
633 }
634 return 0;
635 }
636 EXPORT_SYMBOL(ib_unregister_mad_agent);
637
638 static void dequeue_mad(struct ib_mad_list_head *mad_list)
639 {
640 struct ib_mad_queue *mad_queue;
641 unsigned long flags;
642
643 BUG_ON(!mad_list->mad_queue);
644 mad_queue = mad_list->mad_queue;
645 spin_lock_irqsave(&mad_queue->lock, flags);
646 list_del(&mad_list->list);
647 mad_queue->count--;
648 spin_unlock_irqrestore(&mad_queue->lock, flags);
649 }
650
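/*
 * snoop_send()/snoop_recv() hand completed sends and received MADs to every
 * snoop agent whose flags match.  The snoop lock is dropped around each
 * callback while a reference is held on the snoop agent.
 */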
651 static void snoop_send(struct ib_mad_qp_info *qp_info,
652 struct ib_mad_send_buf *send_buf,
653 struct ib_mad_send_wc *mad_send_wc,
654 int mad_snoop_flags)
655 {
656 struct ib_mad_snoop_private *mad_snoop_priv;
657 unsigned long flags;
658 int i;
659
660 spin_lock_irqsave(&qp_info->snoop_lock, flags);
661 for (i = 0; i < qp_info->snoop_table_size; i++) {
662 mad_snoop_priv = qp_info->snoop_table[i];
663 if (!mad_snoop_priv ||
664 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
665 continue;
666
667 atomic_inc(&mad_snoop_priv->refcount);
668 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
669 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
670 send_buf, mad_send_wc);
671 deref_snoop_agent(mad_snoop_priv);
672 spin_lock_irqsave(&qp_info->snoop_lock, flags);
673 }
674 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
675 }
676
677 static void snoop_recv(struct ib_mad_qp_info *qp_info,
678 struct ib_mad_recv_wc *mad_recv_wc,
679 int mad_snoop_flags)
680 {
681 struct ib_mad_snoop_private *mad_snoop_priv;
682 unsigned long flags;
683 int i;
684
685 spin_lock_irqsave(&qp_info->snoop_lock, flags);
686 for (i = 0; i < qp_info->snoop_table_size; i++) {
687 mad_snoop_priv = qp_info->snoop_table[i];
688 if (!mad_snoop_priv ||
689 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
690 continue;
691
692 atomic_inc(&mad_snoop_priv->refcount);
693 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
694 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
695 mad_recv_wc);
696 deref_snoop_agent(mad_snoop_priv);
697 spin_lock_irqsave(&qp_info->snoop_lock, flags);
698 }
699 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
700 }
701
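/*
 * Build a synthetic receive work completion for an SMP that is processed
 * locally, so that the normal receive handling can be reused.
 */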
702 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
703 u16 pkey_index, u8 port_num, struct ib_wc *wc)
704 {
705 memset(wc, 0, sizeof *wc);
706 wc->wr_cqe = cqe;
707 wc->status = IB_WC_SUCCESS;
708 wc->opcode = IB_WC_RECV;
709 wc->pkey_index = pkey_index;
710 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
711 wc->src_qp = IB_QP0;
712 wc->qp = qp;
713 wc->slid = slid;
714 wc->sl = 0;
715 wc->dlid_path_bits = 0;
716 wc->port_num = port_num;
717 }
718
719 static size_t mad_priv_size(const struct ib_mad_private *mp)
720 {
721 return sizeof(struct ib_mad_private) + mp->mad_size;
722 }
723
724 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
725 {
726 size_t size = sizeof(struct ib_mad_private) + mad_size;
727 struct ib_mad_private *ret = kzalloc(size, flags);
728
729 if (ret)
730 ret->mad_size = mad_size;
731
732 return ret;
733 }
734
735 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
736 {
737 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
738 }
739
740 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
741 {
742 return sizeof(struct ib_grh) + mp->mad_size;
743 }
744
745 /*
746 * Return 0 if SMP is to be sent
747 * Return 1 if SMP was consumed locally (whether or not solicited)
748 * Return < 0 on error
749 */
750 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
751 struct ib_mad_send_wr_private *mad_send_wr)
752 {
753 int ret = 0;
754 struct ib_smp *smp = mad_send_wr->send_buf.mad;
755 struct opa_smp *opa_smp = (struct opa_smp *)smp;
756 unsigned long flags;
757 struct ib_mad_local_private *local;
758 struct ib_mad_private *mad_priv;
759 struct ib_mad_port_private *port_priv;
760 struct ib_mad_agent_private *recv_mad_agent = NULL;
761 struct ib_device *device = mad_agent_priv->agent.device;
762 u8 port_num;
763 struct ib_wc mad_wc;
764 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
765 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
766 u16 out_mad_pkey_index = 0;
767 u16 drslid;
768 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
769 mad_agent_priv->qp_info->port_priv->port_num);
770
771 if (rdma_cap_ib_switch(device) &&
772 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
773 port_num = send_wr->port_num;
774 else
775 port_num = mad_agent_priv->agent.port_num;
776
777 /*
778 * Directed route handling starts if the initial LID routed part of
779 * a request or the ending LID routed part of a response is empty.
780 * If we are at the start of the LID routed part, don't update the
781 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
782 */
783 if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
784 u32 opa_drslid;
785
786 if ((opa_get_smp_direction(opa_smp)
787 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
788 OPA_LID_PERMISSIVE &&
789 opa_smi_handle_dr_smp_send(opa_smp,
790 rdma_cap_ib_switch(device),
791 port_num) == IB_SMI_DISCARD) {
792 ret = -EINVAL;
793 dev_err(&device->dev, "OPA Invalid directed route\n");
794 goto out;
795 }
796 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
797 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
798 opa_drslid & 0xffff0000) {
799 ret = -EINVAL;
800 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
801 opa_drslid);
802 goto out;
803 }
804 drslid = (u16)(opa_drslid & 0x0000ffff);
805
806 /* Decide whether to post the send on the QP or process it locally */
807 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
808 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
809 goto out;
810 } else {
811 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
812 IB_LID_PERMISSIVE &&
813 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
814 IB_SMI_DISCARD) {
815 ret = -EINVAL;
816 dev_err(&device->dev, "Invalid directed route\n");
817 goto out;
818 }
819 drslid = be16_to_cpu(smp->dr_slid);
820
821 /* Decide whether to post the send on the QP or process it locally */
822 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
823 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
824 goto out;
825 }
826
827 local = kmalloc(sizeof *local, GFP_ATOMIC);
828 if (!local) {
829 ret = -ENOMEM;
830 goto out;
831 }
832 local->mad_priv = NULL;
833 local->recv_mad_agent = NULL;
834 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
835 if (!mad_priv) {
836 ret = -ENOMEM;
837 kfree(local);
838 goto out;
839 }
840
841 build_smp_wc(mad_agent_priv->agent.qp,
842 send_wr->wr.wr_cqe, drslid,
843 send_wr->pkey_index,
844 send_wr->port_num, &mad_wc);
845
846 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
847 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
848 + mad_send_wr->send_buf.data_len
849 + sizeof(struct ib_grh);
850 }
851
852 /* No GRH for DR SMP */
853 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
854 (const struct ib_mad_hdr *)smp, mad_size,
855 (struct ib_mad_hdr *)mad_priv->mad,
856 &mad_size, &out_mad_pkey_index);
857 switch (ret)
858 {
859 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
860 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
861 mad_agent_priv->agent.recv_handler) {
862 local->mad_priv = mad_priv;
863 local->recv_mad_agent = mad_agent_priv;
864 /*
865 * Hold a reference on the MAD agent until the receive
866 * side of the local completion has been handled
867 */
868 atomic_inc(&mad_agent_priv->refcount);
869 } else
870 kfree(mad_priv);
871 break;
872 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
873 kfree(mad_priv);
874 break;
875 case IB_MAD_RESULT_SUCCESS:
876 /* Treat like an incoming receive MAD */
877 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
878 mad_agent_priv->agent.port_num);
879 if (port_priv) {
880 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
881 recv_mad_agent = find_mad_agent(port_priv,
882 (const struct ib_mad_hdr *)mad_priv->mad);
883 }
884 if (!port_priv || !recv_mad_agent) {
885 /*
886 * No receiving agent, so drop the packet and
887 * generate a send completion.
888 */
889 kfree(mad_priv);
890 break;
891 }
892 local->mad_priv = mad_priv;
893 local->recv_mad_agent = recv_mad_agent;
894 break;
895 default:
896 kfree(mad_priv);
897 kfree(local);
898 ret = -EINVAL;
899 goto out;
900 }
901
902 local->mad_send_wr = mad_send_wr;
903 if (opa) {
904 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
905 local->return_wc_byte_len = mad_size;
906 }
907 /* Hold a reference on the MAD agent until the send side of the local completion has been handled */
908 atomic_inc(&mad_agent_priv->refcount);
909 /* Queue local completion to local list */
910 spin_lock_irqsave(&mad_agent_priv->lock, flags);
911 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
912 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
913 queue_work(mad_agent_priv->qp_info->port_priv->wq,
914 &mad_agent_priv->local_work);
915 ret = 1;
916 out:
917 return ret;
918 }
919
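/*
 * RMPP payloads are carved into segments of (mad_size - hdr_len) bytes; the
 * pad computed here rounds data_len up to a whole number of segments.
 */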
920 static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
921 {
922 int seg_size, pad;
923
924 seg_size = mad_size - hdr_len;
925 if (data_len && seg_size) {
926 pad = seg_size - data_len % seg_size;
927 return pad == seg_size ? 0 : pad;
928 } else
929 return seg_size;
930 }
931
932 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
933 {
934 struct ib_rmpp_segment *s, *t;
935
936 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
937 list_del(&s->list);
938 kfree(s);
939 }
940 }
941
942 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
943 size_t mad_size, gfp_t gfp_mask)
944 {
945 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
946 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
947 struct ib_rmpp_segment *seg = NULL;
948 int left, seg_size, pad;
949
950 send_buf->seg_size = mad_size - send_buf->hdr_len;
951 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
952 seg_size = send_buf->seg_size;
953 pad = send_wr->pad;
954
955 /* Allocate data segments. */
956 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
957 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
958 if (!seg) {
959 free_send_rmpp_list(send_wr);
960 return -ENOMEM;
961 }
962 seg->num = ++send_buf->seg_count;
963 list_add_tail(&seg->list, &send_wr->rmpp_list);
964 }
965
966 /* Zero any padding */
967 if (pad)
968 memset(seg->data + seg_size - pad, 0, pad);
969
970 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
971 agent.rmpp_version;
972 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
973 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
974
975 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
976 struct ib_rmpp_segment, list);
977 send_wr->last_ack_seg = send_wr->cur_seg;
978 return 0;
979 }
980
981 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
982 {
983 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
984 }
985 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
986
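/*
 * Allocation/post sketch (illustrative values only; callers size hdr_len and
 * data_len for their management class, see ib_get_mad_data_offset()):
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 */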
987 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
988 u32 remote_qpn, u16 pkey_index,
989 int rmpp_active,
990 int hdr_len, int data_len,
991 gfp_t gfp_mask,
992 u8 base_version)
993 {
994 struct ib_mad_agent_private *mad_agent_priv;
995 struct ib_mad_send_wr_private *mad_send_wr;
996 int pad, message_size, ret, size;
997 void *buf;
998 size_t mad_size;
999 bool opa;
1000
1001 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1002 agent);
1003
1004 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
1005
1006 if (opa && base_version == OPA_MGMT_BASE_VERSION)
1007 mad_size = sizeof(struct opa_mad);
1008 else
1009 mad_size = sizeof(struct ib_mad);
1010
1011 pad = get_pad_size(hdr_len, data_len, mad_size);
1012 message_size = hdr_len + data_len + pad;
1013
1014 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1015 if (!rmpp_active && message_size > mad_size)
1016 return ERR_PTR(-EINVAL);
1017 } else
1018 if (rmpp_active || message_size > mad_size)
1019 return ERR_PTR(-EINVAL);
1020
1021 size = rmpp_active ? hdr_len : mad_size;
1022 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1023 if (!buf)
1024 return ERR_PTR(-ENOMEM);
1025
1026 mad_send_wr = (struct ib_mad_send_wr_private *)((char *)buf + size);
1027 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1028 mad_send_wr->send_buf.mad = buf;
1029 mad_send_wr->send_buf.hdr_len = hdr_len;
1030 mad_send_wr->send_buf.data_len = data_len;
1031 mad_send_wr->pad = pad;
1032
1033 mad_send_wr->mad_agent_priv = mad_agent_priv;
1034 mad_send_wr->sg_list[0].length = hdr_len;
1035 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
1036
1037 /* OPA MADs don't have to be the full 2048 bytes */
1038 if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1039 data_len < mad_size - hdr_len)
1040 mad_send_wr->sg_list[1].length = data_len;
1041 else
1042 mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1043
1044 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1045
1046 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1047
1048 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1049 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
1050 mad_send_wr->send_wr.wr.num_sge = 2;
1051 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
1052 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
1053 mad_send_wr->send_wr.remote_qpn = remote_qpn;
1054 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
1055 mad_send_wr->send_wr.pkey_index = pkey_index;
1056
1057 if (rmpp_active) {
1058 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1059 if (ret) {
1060 kfree(buf);
1061 return ERR_PTR(ret);
1062 }
1063 }
1064
1065 mad_send_wr->send_buf.mad_agent = mad_agent;
1066 atomic_inc(&mad_agent_priv->refcount);
1067 return &mad_send_wr->send_buf;
1068 }
1069 EXPORT_SYMBOL(ib_create_send_mad);
1070
1071 int ib_get_mad_data_offset(u8 mgmt_class)
1072 {
1073 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1074 return IB_MGMT_SA_HDR;
1075 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1076 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1077 (mgmt_class == IB_MGMT_CLASS_BIS))
1078 return IB_MGMT_DEVICE_HDR;
1079 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1080 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1081 return IB_MGMT_VENDOR_HDR;
1082 else
1083 return IB_MGMT_MAD_HDR;
1084 }
1085 EXPORT_SYMBOL(ib_get_mad_data_offset);
1086
1087 int ib_is_mad_class_rmpp(u8 mgmt_class)
1088 {
1089 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1090 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1091 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1092 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1093 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1094 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1095 return 1;
1096 return 0;
1097 }
1098 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1099
1100 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1101 {
1102 struct ib_mad_send_wr_private *mad_send_wr;
1103 struct list_head *list;
1104
1105 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1106 send_buf);
1107 list = &mad_send_wr->cur_seg->list;
1108
1109 if (mad_send_wr->cur_seg->num < seg_num) {
1110 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1111 if (mad_send_wr->cur_seg->num == seg_num)
1112 break;
1113 } else if (mad_send_wr->cur_seg->num > seg_num) {
1114 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1115 if (mad_send_wr->cur_seg->num == seg_num)
1116 break;
1117 }
1118 return mad_send_wr->cur_seg->data;
1119 }
1120 EXPORT_SYMBOL(ib_get_rmpp_segment);
1121
1122 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1123 {
1124 if (mad_send_wr->send_buf.seg_count)
1125 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1126 mad_send_wr->seg_num);
1127 else
1128 return (char *)mad_send_wr->send_buf.mad +
1129 mad_send_wr->send_buf.hdr_len;
1130 }
1131
1132 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1133 {
1134 struct ib_mad_agent_private *mad_agent_priv;
1135 struct ib_mad_send_wr_private *mad_send_wr;
1136
1137 mad_agent_priv = container_of(send_buf->mad_agent,
1138 struct ib_mad_agent_private, agent);
1139 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1140 send_buf);
1141
1142 free_send_rmpp_list(mad_send_wr);
1143 kfree(send_buf->mad);
1144 deref_mad_agent(mad_agent_priv);
1145 }
1146 EXPORT_SYMBOL(ib_free_send_mad);
1147
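/*
 * DMA-map the MAD header and payload and post the work request; if the send
 * queue is already at max_active, the request is parked on the overflow list
 * and posted later, once an outstanding send completes.
 */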
1148 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1149 {
1150 struct ib_mad_qp_info *qp_info;
1151 struct list_head *list;
1152 const struct ib_send_wr *bad_send_wr;
1153 struct ib_mad_agent *mad_agent;
1154 struct ib_sge *sge;
1155 unsigned long flags;
1156 int ret;
1157
1158 /* Set WR ID to find mad_send_wr upon completion */
1159 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1160 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1161 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1162 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1163
1164 mad_agent = mad_send_wr->send_buf.mad_agent;
1165 sge = mad_send_wr->sg_list;
1166 sge[0].addr = ib_dma_map_single(mad_agent->device,
1167 mad_send_wr->send_buf.mad,
1168 sge[0].length,
1169 DMA_TO_DEVICE);
1170 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1171 return -ENOMEM;
1172
1173 mad_send_wr->header_mapping = sge[0].addr;
1174
1175 sge[1].addr = ib_dma_map_single(mad_agent->device,
1176 ib_get_payload(mad_send_wr),
1177 sge[1].length,
1178 DMA_TO_DEVICE);
1179 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1180 ib_dma_unmap_single(mad_agent->device,
1181 mad_send_wr->header_mapping,
1182 sge[0].length, DMA_TO_DEVICE);
1183 return -ENOMEM;
1184 }
1185 mad_send_wr->payload_mapping = sge[1].addr;
1186
1187 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1188 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1189 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1190 &bad_send_wr);
1191 list = &qp_info->send_queue.list;
1192 } else {
1193 ret = 0;
1194 list = &qp_info->overflow_list;
1195 }
1196
1197 if (!ret) {
1198 qp_info->send_queue.count++;
1199 list_add_tail(&mad_send_wr->mad_list.list, list);
1200 }
1201 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1202 if (ret) {
1203 ib_dma_unmap_single(mad_agent->device,
1204 mad_send_wr->header_mapping,
1205 sge[0].length, DMA_TO_DEVICE);
1206 ib_dma_unmap_single(mad_agent->device,
1207 mad_send_wr->payload_mapping,
1208 sge[1].length, DMA_TO_DEVICE);
1209 }
1210 return ret;
1211 }
1212
1213 /*
1214 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1215 * with the registered client
1216 */
1217 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1218 struct ib_mad_send_buf **bad_send_buf)
1219 {
1220 struct ib_mad_agent_private *mad_agent_priv;
1221 struct ib_mad_send_buf *next_send_buf;
1222 struct ib_mad_send_wr_private *mad_send_wr;
1223 unsigned long flags;
1224 int ret = -EINVAL;
1225
1226 /* Walk list of send WRs and post each on send list */
1227 for (; send_buf; send_buf = next_send_buf) {
1228
1229 mad_send_wr = container_of(send_buf,
1230 struct ib_mad_send_wr_private,
1231 send_buf);
1232 mad_agent_priv = mad_send_wr->mad_agent_priv;
1233
1234 if (!send_buf->mad_agent->send_handler ||
1235 (send_buf->timeout_ms &&
1236 !send_buf->mad_agent->recv_handler)) {
1237 ret = -EINVAL;
1238 goto error;
1239 }
1240
1241 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1242 if (mad_agent_priv->agent.rmpp_version) {
1243 ret = -EINVAL;
1244 goto error;
1245 }
1246 }
1247
1248 /*
1249 * Save pointer to next work request to post in case the
1250 * current one completes, and the user modifies the work
1251 * request associated with the completion
1252 */
1253 next_send_buf = send_buf->next;
1254 mad_send_wr->send_wr.ah = send_buf->ah;
1255
1256 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1257 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1258 ret = handle_outgoing_dr_smp(mad_agent_priv,
1259 mad_send_wr);
1260 if (ret < 0) /* error */
1261 goto error;
1262 else if (ret == 1) /* locally consumed */
1263 continue;
1264 }
1265
1266 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1267 /* Timeout will be updated after send completes */
1268 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1269 mad_send_wr->max_retries = send_buf->retries;
1270 mad_send_wr->retries_left = send_buf->retries;
1271 send_buf->retries = 0;
1272 /* One reference for the work request posted to the QP, plus one more if a response is expected */
1273 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1274 mad_send_wr->status = IB_WC_SUCCESS;
1275
1276 /* Reference MAD agent until send completes */
1277 atomic_inc(&mad_agent_priv->refcount);
1278 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1279 list_add_tail(&mad_send_wr->agent_list,
1280 &mad_agent_priv->send_list);
1281 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1282
1283 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1284 ret = ib_send_rmpp_mad(mad_send_wr);
1285 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1286 ret = ib_send_mad(mad_send_wr);
1287 } else
1288 ret = ib_send_mad(mad_send_wr);
1289 if (ret < 0) {
1290 /* Fail send request */
1291 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1292 list_del(&mad_send_wr->agent_list);
1293 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1294 atomic_dec(&mad_agent_priv->refcount);
1295 goto error;
1296 }
1297 }
1298 return 0;
1299 error:
1300 if (bad_send_buf)
1301 *bad_send_buf = send_buf;
1302 return ret;
1303 }
1304 EXPORT_SYMBOL(ib_post_send_mad);
1305
1306 /*
1307 * ib_free_recv_mad - Returns data buffers used to receive
1308 * a MAD to the access layer
1309 */
1310 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1311 {
1312 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1313 struct ib_mad_private_header *mad_priv_hdr;
1314 struct ib_mad_private *priv;
1315 struct list_head free_list;
1316
1317 INIT_LIST_HEAD(&free_list);
1318 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1319
1320 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1321 &free_list, list) {
1322 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1323 recv_buf);
1324 mad_priv_hdr = container_of(mad_recv_wc,
1325 struct ib_mad_private_header,
1326 recv_wc);
1327 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1328 header);
1329 kfree(priv);
1330 }
1331 }
1332 EXPORT_SYMBOL(ib_free_recv_mad);
1333
1334 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1335 u8 rmpp_version,
1336 ib_mad_send_handler send_handler,
1337 ib_mad_recv_handler recv_handler,
1338 void *context)
1339 {
1340 return ERR_PTR(-EINVAL); /* XXX: for now */
1341 }
1342 EXPORT_SYMBOL(ib_redirect_mad_qp);
1343
1344 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1345 struct ib_wc *wc)
1346 {
1347 dev_err(&mad_agent->device->dev,
1348 "ib_process_mad_wc() not implemented yet\n");
1349 return 0;
1350 }
1351 EXPORT_SYMBOL(ib_process_mad_wc);
1352
1353 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1354 struct ib_mad_reg_req *mad_reg_req)
1355 {
1356 int i;
1357
1358 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1359 if ((*method)->agent[i]) {
1360 pr_err("Method %d already in use\n", i);
1361 return -EINVAL;
1362 }
1363 }
1364 return 0;
1365 }
1366
1367 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1368 {
1369 /* Allocate management method table */
1370 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1371 return (*method) ? 0 : (-ENOMEM);
1372 }
1373
1374 /*
1375 * Check to see if there are any methods still in use
1376 */
1377 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1378 {
1379 int i;
1380
1381 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1382 if (method->agent[i])
1383 return 1;
1384 return 0;
1385 }
1386
1387 /*
1388 * Check to see if there are any method tables for this class still in use
1389 */
1390 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1391 {
1392 int i;
1393
1394 for (i = 0; i < MAX_MGMT_CLASS; i++)
1395 if (class->method_table[i])
1396 return 1;
1397 return 0;
1398 }
1399
1400 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1401 {
1402 int i;
1403
1404 for (i = 0; i < MAX_MGMT_OUI; i++)
1405 if (vendor_class->method_table[i])
1406 return 1;
1407 return 0;
1408 }
1409
1410 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1411 const char *oui)
1412 {
1413 int i;
1414
1415 for (i = 0; i < MAX_MGMT_OUI; i++)
1416 /* Is there a matching OUI for this vendor class? */
1417 if (!memcmp(vendor_class->oui[i], oui, 3))
1418 return i;
1419
1420 return -1;
1421 }
1422
1423 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1424 {
1425 int i;
1426
1427 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1428 if (vendor->vendor_class[i])
1429 return 1;
1430
1431 return 0;
1432 }
1433
1434 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1435 struct ib_mad_agent_private *agent)
1436 {
1437 int i;
1438
1439 /* Remove any methods for this mad agent */
1440 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1441 if (method->agent[i] == agent) {
1442 method->agent[i] = NULL;
1443 }
1444 }
1445 }
1446
1447 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1448 struct ib_mad_agent_private *agent_priv,
1449 u8 mgmt_class)
1450 {
1451 struct ib_mad_port_private *port_priv;
1452 struct ib_mad_mgmt_class_table **class;
1453 struct ib_mad_mgmt_method_table **method;
1454 int i, ret;
1455
1456 port_priv = agent_priv->qp_info->port_priv;
1457 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1458 if (!*class) {
1459 /* Allocate management class table for "new" class version */
1460 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1461 if (!*class) {
1462 ret = -ENOMEM;
1463 goto error1;
1464 }
1465
1466 /* Allocate method table for this management class */
1467 method = &(*class)->method_table[mgmt_class];
1468 if ((ret = allocate_method_table(method)))
1469 goto error2;
1470 } else {
1471 method = &(*class)->method_table[mgmt_class];
1472 if (!*method) {
1473 /* Allocate method table for this management class */
1474 if ((ret = allocate_method_table(method)))
1475 goto error1;
1476 }
1477 }
1478
1479 /* Now, make sure methods are not already in use */
1480 if (method_in_use(method, mad_reg_req))
1481 goto error3;
1482
1483 /* Finally, add in methods being registered */
1484 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1485 (*method)->agent[i] = agent_priv;
1486
1487 return 0;
1488
1489 error3:
1490 /* Remove any methods for this mad agent */
1491 remove_methods_mad_agent(*method, agent_priv);
1492 /* Now, check to see if there are any methods in use */
1493 if (!check_method_table(*method)) {
1494 /* If not, release management method table */
1495 kfree(*method);
1496 *method = NULL;
1497 }
1498 ret = -EINVAL;
1499 goto error1;
1500 error2:
1501 kfree(*class);
1502 *class = NULL;
1503 error1:
1504 return ret;
1505 }
1506
1507 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1508 struct ib_mad_agent_private *agent_priv)
1509 {
1510 struct ib_mad_port_private *port_priv;
1511 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1512 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1513 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1514 struct ib_mad_mgmt_method_table **method;
1515 int i, ret = -ENOMEM;
1516 u8 vclass;
1517
1518 /* "New" vendor (with OUI) class */
1519 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1520 port_priv = agent_priv->qp_info->port_priv;
1521 vendor_table = &port_priv->version[
1522 mad_reg_req->mgmt_class_version].vendor;
1523 if (!*vendor_table) {
1524 /* Allocate mgmt vendor class table for "new" class version */
1525 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1526 if (!vendor)
1527 goto error1;
1528
1529 *vendor_table = vendor;
1530 }
1531 if (!(*vendor_table)->vendor_class[vclass]) {
1532 /* Allocate table for this management vendor class */
1533 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1534 if (!vendor_class)
1535 goto error2;
1536
1537 (*vendor_table)->vendor_class[vclass] = vendor_class;
1538 }
1539 for (i = 0; i < MAX_MGMT_OUI; i++) {
1540 /* Is there a matching OUI for this vendor class? */
1541 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1542 mad_reg_req->oui, 3)) {
1543 method = &(*vendor_table)->vendor_class[
1544 vclass]->method_table[i];
1545 if (!*method)
1546 goto error3;
1547 goto check_in_use;
1548 }
1549 }
1550 for (i = 0; i < MAX_MGMT_OUI; i++) {
1551 /* Is an OUI slot available? */
1552 if (!is_vendor_oui((*vendor_table)->vendor_class[
1553 vclass]->oui[i])) {
1554 method = &(*vendor_table)->vendor_class[
1555 vclass]->method_table[i];
1556 /* Allocate method table for this OUI */
1557 if (!*method) {
1558 ret = allocate_method_table(method);
1559 if (ret)
1560 goto error3;
1561 }
1562 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1563 mad_reg_req->oui, 3);
1564 goto check_in_use;
1565 }
1566 }
1567 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1568 goto error3;
1569
1570 check_in_use:
1571 /* Now, make sure methods are not already in use */
1572 if (method_in_use(method, mad_reg_req))
1573 goto error4;
1574
1575 /* Finally, add in methods being registered */
1576 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1577 (*method)->agent[i] = agent_priv;
1578
1579 return 0;
1580
1581 error4:
1582 /* Remove any methods for this mad agent */
1583 remove_methods_mad_agent(*method, agent_priv);
1584 /* Now, check to see if there are any methods in use */
1585 if (!check_method_table(*method)) {
1586 /* If not, release management method table */
1587 kfree(*method);
1588 *method = NULL;
1589 }
1590 ret = -EINVAL;
1591 error3:
1592 if (vendor_class) {
1593 (*vendor_table)->vendor_class[vclass] = NULL;
1594 kfree(vendor_class);
1595 }
1596 error2:
1597 if (vendor) {
1598 *vendor_table = NULL;
1599 kfree(vendor);
1600 }
1601 error1:
1602 return ret;
1603 }
1604
1605 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1606 {
1607 struct ib_mad_port_private *port_priv;
1608 struct ib_mad_mgmt_class_table *class;
1609 struct ib_mad_mgmt_method_table *method;
1610 struct ib_mad_mgmt_vendor_class_table *vendor;
1611 struct ib_mad_mgmt_vendor_class *vendor_class;
1612 int index;
1613 u8 mgmt_class;
1614
1615 /*
1616 * Was a MAD registration request supplied
1617 * with the original registration?
1618 */
1619 if (!agent_priv->reg_req) {
1620 goto out;
1621 }
1622
1623 port_priv = agent_priv->qp_info->port_priv;
1624 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1625 class = port_priv->version[
1626 agent_priv->reg_req->mgmt_class_version].class;
1627 if (!class)
1628 goto vendor_check;
1629
1630 method = class->method_table[mgmt_class];
1631 if (method) {
1632 /* Remove any methods for this mad agent */
1633 remove_methods_mad_agent(method, agent_priv);
1634 /* Now, check to see if there are any methods still in use */
1635 if (!check_method_table(method)) {
1636 /* If not, release management method table */
1637 kfree(method);
1638 class->method_table[mgmt_class] = NULL;
1639 /* Any management classes left ? */
1640 if (!check_class_table(class)) {
1641 /* If not, release management class table */
1642 kfree(class);
1643 port_priv->version[
1644 agent_priv->reg_req->
1645 mgmt_class_version].class = NULL;
1646 }
1647 }
1648 }
1649
1650 vendor_check:
1651 if (!is_vendor_class(mgmt_class))
1652 goto out;
1653
1654 /* Normalize mgmt_class to an index into vendor range 2 */
1655 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1656 vendor = port_priv->version[
1657 agent_priv->reg_req->mgmt_class_version].vendor;
1658
1659 if (!vendor)
1660 goto out;
1661
1662 vendor_class = vendor->vendor_class[mgmt_class];
1663 if (vendor_class) {
1664 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1665 if (index < 0)
1666 goto out;
1667 method = vendor_class->method_table[index];
1668 if (method) {
1669 /* Remove any methods for this mad agent */
1670 remove_methods_mad_agent(method, agent_priv);
1671 /*
1672 * Now, check to see if there are
1673 * any methods still in use
1674 */
1675 if (!check_method_table(method)) {
1676 /* If not, release management method table */
1677 kfree(method);
1678 vendor_class->method_table[index] = NULL;
1679 memset(vendor_class->oui[index], 0, 3);
1680 /* Any OUIs left ? */
1681 if (!check_vendor_class(vendor_class)) {
1682 /* If not, release vendor class table */
1683 kfree(vendor_class);
1684 vendor->vendor_class[mgmt_class] = NULL;
1685 /* Any other vendor classes left ? */
1686 if (!check_vendor_table(vendor)) {
1687 kfree(vendor);
1688 port_priv->version[
1689 agent_priv->reg_req->
1690 mgmt_class_version].
1691 vendor = NULL;
1692 }
1693 }
1694 }
1695 }
1696 }
1697
1698 out:
1699 return;
1700 }
1701
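/*
 * Response MADs are routed to the sending agent via the high 32 bits of the
 * TID; request MADs are routed by class version, management class, method
 * and, for vendor range 2 classes, OUI.
 */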
1702 static struct ib_mad_agent_private *
1703 find_mad_agent(struct ib_mad_port_private *port_priv,
1704 const struct ib_mad_hdr *mad_hdr)
1705 {
1706 struct ib_mad_agent_private *mad_agent = NULL;
1707 unsigned long flags;
1708
1709 spin_lock_irqsave(&port_priv->reg_lock, flags);
1710 if (ib_response_mad(mad_hdr)) {
1711 u32 hi_tid;
1712 struct ib_mad_agent_private *entry;
1713
1714 /*
1715 * Response routing is based on the high 32 bits
1716 * of the MAD's transaction ID.
1717 */
1718 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1719 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1720 if (entry->agent.hi_tid == hi_tid) {
1721 mad_agent = entry;
1722 break;
1723 }
1724 }
1725 } else {
1726 struct ib_mad_mgmt_class_table *class;
1727 struct ib_mad_mgmt_method_table *method;
1728 struct ib_mad_mgmt_vendor_class_table *vendor;
1729 struct ib_mad_mgmt_vendor_class *vendor_class;
1730 const struct ib_vendor_mad *vendor_mad;
1731 int index;
1732
1733 /*
1734 * Routing is based on version, class, and method
1735 * For "newer" vendor MADs, also based on OUI
1736 */
1737 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1738 goto out;
1739 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1740 class = port_priv->version[
1741 mad_hdr->class_version].class;
1742 if (!class)
1743 goto out;
1744 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1745 ARRAY_SIZE(class->method_table))
1746 goto out;
1747 method = class->method_table[convert_mgmt_class(
1748 mad_hdr->mgmt_class)];
1749 if (method)
1750 mad_agent = method->agent[mad_hdr->method &
1751 ~IB_MGMT_METHOD_RESP];
1752 } else {
1753 vendor = port_priv->version[
1754 mad_hdr->class_version].vendor;
1755 if (!vendor)
1756 goto out;
1757 vendor_class = vendor->vendor_class[vendor_class_index(
1758 mad_hdr->mgmt_class)];
1759 if (!vendor_class)
1760 goto out;
1761 /* Find matching OUI */
1762 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1763 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1764 if (index == -1)
1765 goto out;
1766 method = vendor_class->method_table[index];
1767 if (method) {
1768 mad_agent = method->agent[mad_hdr->method &
1769 ~IB_MGMT_METHOD_RESP];
1770 }
1771 }
1772 }
1773
1774 if (mad_agent) {
1775 if (mad_agent->agent.recv_handler)
1776 atomic_inc(&mad_agent->refcount);
1777 else {
1778 dev_notice(&port_priv->device->dev,
1779 "No receive handler for client %p on port %d\n",
1780 &mad_agent->agent, port_priv->port_num);
1781 mad_agent = NULL;
1782 }
1783 }
1784 out:
1785 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1786
1787 return mad_agent;
1788 }
1789
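/*
 * Basic sanity checks on a received MAD: the base version must be supported,
 * SM class MADs may only arrive on QP0, all other classes only on QP1, and CM
 * attributes other than ClassPortInfo must use the Send method.
 */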
1790 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1791 const struct ib_mad_qp_info *qp_info,
1792 bool opa)
1793 {
1794 int valid = 0;
1795 u32 qp_num = qp_info->qp->qp_num;
1796
1797 /* Make sure MAD base version is understood */
1798 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1799 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1800 pr_err("MAD received with unsupported base version %d %s\n",
1801 mad_hdr->base_version, opa ? "(opa)" : "");
1802 goto out;
1803 }
1804
1805 /* Filter SMI packets sent to a QP other than QP0 */
1806 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1807 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1808 if (qp_num == 0)
1809 valid = 1;
1810 } else {
1811 /* CM attributes other than ClassPortInfo only use Send method */
1812 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1813 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1814 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1815 goto out;
1816 /* Filter GSI packets sent to QP0 */
1817 if (qp_num != 0)
1818 valid = 1;
1819 }
1820
1821 out:
1822 return valid;
1823 }
1824
1825 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1826 const struct ib_mad_hdr *mad_hdr)
1827 {
1828 const struct ib_rmpp_mad *rmpp_mad;
1829
1830 rmpp_mad = (const struct ib_rmpp_mad *)mad_hdr;
1831 return !mad_agent_priv->agent.rmpp_version ||
1832 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1833 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1834 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1835 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1836 }
1837
1838 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1839 const struct ib_mad_recv_wc *rwc)
1840 {
1841 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1842 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1843 }
1844
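/*
 * Check whether a sent MAD and a received MAD form a request/response
 * pair based on addressing: exactly one of the two must be a response,
 * and the LID/path bits or GRH GIDs must match the send's address
 * handle.  Returns 0 on any mismatch or uncertainty.
 */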
1845 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1846 const struct ib_mad_send_wr_private *wr,
1847 const struct ib_mad_recv_wc *rwc)
1848 {
1849 struct ib_ah_attr attr;
1850 u8 send_resp, rcv_resp;
1851 union ib_gid sgid;
1852 struct ib_device *device = mad_agent_priv->agent.device;
1853 u8 port_num = mad_agent_priv->agent.port_num;
1854 u8 lmc;
1855
1856 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1857 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1858
1859 if (send_resp == rcv_resp)
1860 /* both are requests or both are responses; treat GIDs as different */
1861 return 0;
1862
1863 if (ib_query_ah(wr->send_buf.ah, &attr))
1864 /* Assume not equal, to avoid false positives. */
1865 return 0;
1866
1867 if (!!(attr.ah_flags & IB_AH_GRH) !=
1868 !!(rwc->wc->wc_flags & IB_WC_GRH))
1869 /* one has a GRH, the other does not; assume different */
1870 return 0;
1871
1872 if (!send_resp && rcv_resp) {
1873 /* the send is a request and the receive is a response */
1874 if (!(attr.ah_flags & IB_AH_GRH)) {
1875 if (ib_get_cached_lmc(device, port_num, &lmc))
1876 return 0;
1877 return (!lmc || !((attr.src_path_bits ^
1878 rwc->wc->dlid_path_bits) &
1879 ((1 << lmc) - 1)));
1880 } else {
1881 if (ib_get_cached_gid(device, port_num,
1882 attr.grh.sgid_index, &sgid, NULL))
1883 return 0;
1884 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1885 16);
1886 }
1887 }
1888
1889 if (!(attr.ah_flags & IB_AH_GRH))
1890 return attr.dlid == rwc->wc->slid;
1891 else
1892 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1893 16);
1894 }
1895
1896 static inline int is_direct(u8 class)
1897 {
1898 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1899 }
1900
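/*
 * Find the send WR that a received response answers, matching on TID,
 * management class and (except for directed-route MADs) GID.  The send
 * list is searched as well as the wait list since a response can arrive
 * before the send completion.  Caller must hold mad_agent_priv->lock.
 */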
1901 struct ib_mad_send_wr_private*
1902 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1903 const struct ib_mad_recv_wc *wc)
1904 {
1905 struct ib_mad_send_wr_private *wr;
1906 const struct ib_mad_hdr *mad_hdr;
1907
1908 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1909
1910 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1911 if ((wr->tid == mad_hdr->tid) &&
1912 rcv_has_same_class(wr, wc) &&
1913 /*
1914 * Don't check GID for direct routed MADs.
1915 * These might have permissive LIDs.
1916 */
1917 (is_direct(mad_hdr->mgmt_class) ||
1918 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1919 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1920 }
1921
1922 /*
1923 * It's possible to receive the response before we've
1924 * been notified that the send has completed
1925 */
1926 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1927 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1928 wr->tid == mad_hdr->tid &&
1929 wr->timeout &&
1930 rcv_has_same_class(wr, wc) &&
1931 /*
1932 * Don't check GID for direct routed MADs.
1933 * These might have permissive LIDs.
1934 */
1935 (is_direct(mad_hdr->mgmt_class) ||
1936 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1937 /* Verify request has not been canceled */
1938 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1939 }
1940 return NULL;
1941 }
1942
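/*
 * Mark a request as answered: clear its timeout and, if only the send
 * completion reference remains, move it to the agent's done list.
 */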
1943 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1944 {
1945 mad_send_wr->timeout = 0;
1946 if (mad_send_wr->refcount == 1)
1947 list_move_tail(&mad_send_wr->agent_list,
1948 &mad_send_wr->mad_agent_priv->done_list);
1949 }
1950
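/*
 * Deliver a received MAD to its agent.  Kernel RMPP agents get RMPP
 * reassembly first; responses are matched to the originating request so
 * that the response is reported to the client before the send completes.
 */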
1951 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1952 struct ib_mad_recv_wc *mad_recv_wc)
1953 {
1954 struct ib_mad_send_wr_private *mad_send_wr;
1955 struct ib_mad_send_wc mad_send_wc;
1956 unsigned long flags;
1957
1958 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1959 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1960 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1961 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1962 mad_recv_wc);
1963 if (!mad_recv_wc) {
1964 deref_mad_agent(mad_agent_priv);
1965 return;
1966 }
1967 }
1968
1969 /* Complete corresponding request */
1970 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1971 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1972 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1973 if (!mad_send_wr) {
1974 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1975 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1976 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1977 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1978 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1979 /* user rmpp is in effect
1980 * and this is an active RMPP MAD
1981 */
1982 mad_agent_priv->agent.recv_handler(
1983 &mad_agent_priv->agent, NULL,
1984 mad_recv_wc);
1985 atomic_dec(&mad_agent_priv->refcount);
1986 } else {
1987 /* not user rmpp, revert to normal behavior and
1988 * drop the mad */
1989 ib_free_recv_mad(mad_recv_wc);
1990 deref_mad_agent(mad_agent_priv);
1991 return;
1992 }
1993 } else {
1994 ib_mark_mad_done(mad_send_wr);
1995 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1996
1997 /* Defined behavior is to complete response before request */
1998 mad_agent_priv->agent.recv_handler(
1999 &mad_agent_priv->agent,
2000 &mad_send_wr->send_buf,
2001 mad_recv_wc);
2002 atomic_dec(&mad_agent_priv->refcount);
2003
2004 mad_send_wc.status = IB_WC_SUCCESS;
2005 mad_send_wc.vendor_err = 0;
2006 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2007 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2008 }
2009 } else {
2010 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2011 mad_recv_wc);
2012 deref_mad_agent(mad_agent_priv);
2013 }
2014 }
2015
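/*
 * Process an incoming directed-route SMP: update and validate the hop
 * fields, then decide whether it is handled locally, discarded, or (on
 * a switch) forwarded out another port.
 */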
2016 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2017 const struct ib_mad_qp_info *qp_info,
2018 const struct ib_wc *wc,
2019 int port_num,
2020 struct ib_mad_private *recv,
2021 struct ib_mad_private *response)
2022 {
2023 enum smi_forward_action retsmi;
2024 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2025
2026 if (smi_handle_dr_smp_recv(smp,
2027 rdma_cap_ib_switch(port_priv->device),
2028 port_num,
2029 port_priv->device->phys_port_cnt) ==
2030 IB_SMI_DISCARD)
2031 return IB_SMI_DISCARD;
2032
2033 retsmi = smi_check_forward_dr_smp(smp);
2034 if (retsmi == IB_SMI_LOCAL)
2035 return IB_SMI_HANDLE;
2036
2037 if (retsmi == IB_SMI_SEND) { /* don't forward */
2038 if (smi_handle_dr_smp_send(smp,
2039 rdma_cap_ib_switch(port_priv->device),
2040 port_num) == IB_SMI_DISCARD)
2041 return IB_SMI_DISCARD;
2042
2043 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2044 return IB_SMI_DISCARD;
2045 } else if (rdma_cap_ib_switch(port_priv->device)) {
2046 /* forward case for switches */
2047 memcpy(response, recv, mad_priv_size(response));
2048 response->header.recv_wc.wc = &response->header.wc;
2049 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2050 response->header.recv_wc.recv_buf.grh = &response->grh;
2051
2052 agent_send_response((const struct ib_mad_hdr *)response->mad,
2053 &response->grh, wc,
2054 port_priv->device,
2055 smi_get_fwd_port(smp),
2056 qp_info->qp->qp_num,
2057 response->mad_size,
2058 false);
2059
2060 return IB_SMI_DISCARD;
2061 }
2062 return IB_SMI_HANDLE;
2063 }
2064
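/*
 * Build a reply for a Get/Set request that no agent claimed, echoing the
 * request with a GetResp method and an "unsupported method/attribute"
 * status (plus the direction bit for directed-route SMPs).  Returns false
 * for any other method.
 */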
2065 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2066 struct ib_mad_private *response,
2067 size_t *resp_len, bool opa)
2068 {
2069 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2070 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2071
2072 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2073 recv_hdr->method == IB_MGMT_METHOD_SET) {
2074 memcpy(response, recv, mad_priv_size(response));
2075 response->header.recv_wc.wc = &response->header.wc;
2076 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2077 response->header.recv_wc.recv_buf.grh = &response->grh;
2078 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2079 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2080 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2081 resp_hdr->status |= IB_SMP_DIRECTION;
2082
2083 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2084 if (recv_hdr->mgmt_class ==
2085 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2086 recv_hdr->mgmt_class ==
2087 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2088 *resp_len = opa_get_smp_header_size(
2089 (const struct opa_smp *)recv->mad);
2090 else
2091 *resp_len = sizeof(struct ib_mad_hdr);
2092 }
2093
2094 return true;
2095 } else {
2096 return false;
2097 }
2098 }
2099
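/*
 * OPA counterpart of handle_ib_smi(): the same directed-route processing
 * applied to OPA-format SMPs.
 */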
2100 static enum smi_action
2101 handle_opa_smi(struct ib_mad_port_private *port_priv,
2102 struct ib_mad_qp_info *qp_info,
2103 struct ib_wc *wc,
2104 int port_num,
2105 struct ib_mad_private *recv,
2106 struct ib_mad_private *response)
2107 {
2108 enum smi_forward_action retsmi;
2109 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2110
2111 if (opa_smi_handle_dr_smp_recv(smp,
2112 rdma_cap_ib_switch(port_priv->device),
2113 port_num,
2114 port_priv->device->phys_port_cnt) ==
2115 IB_SMI_DISCARD)
2116 return IB_SMI_DISCARD;
2117
2118 retsmi = opa_smi_check_forward_dr_smp(smp);
2119 if (retsmi == IB_SMI_LOCAL)
2120 return IB_SMI_HANDLE;
2121
2122 if (retsmi == IB_SMI_SEND) { /* don't forward */
2123 if (opa_smi_handle_dr_smp_send(smp,
2124 rdma_cap_ib_switch(port_priv->device),
2125 port_num) == IB_SMI_DISCARD)
2126 return IB_SMI_DISCARD;
2127
2128 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2129 IB_SMI_DISCARD)
2130 return IB_SMI_DISCARD;
2131
2132 } else if (rdma_cap_ib_switch(port_priv->device)) {
2133 /* forward case for switches */
2134 memcpy(response, recv, mad_priv_size(response));
2135 response->header.recv_wc.wc = &response->header.wc;
2136 response->header.recv_wc.recv_buf.opa_mad =
2137 (struct opa_mad *)response->mad;
2138 response->header.recv_wc.recv_buf.grh = &response->grh;
2139
2140 agent_send_response((const struct ib_mad_hdr *)response->mad,
2141 &response->grh, wc,
2142 port_priv->device,
2143 opa_smi_get_fwd_port(smp),
2144 qp_info->qp->qp_num,
2145 recv->header.wc.byte_len,
2146 true);
2147
2148 return IB_SMI_DISCARD;
2149 }
2150
2151 return IB_SMI_HANDLE;
2152 }
2153
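/*
 * Dispatch an incoming SMP to the OPA or IB directed-route handler based
 * on its base and class version.
 */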
2154 static enum smi_action
2155 handle_smi(struct ib_mad_port_private *port_priv,
2156 struct ib_mad_qp_info *qp_info,
2157 struct ib_wc *wc,
2158 int port_num,
2159 struct ib_mad_private *recv,
2160 struct ib_mad_private *response,
2161 bool opa)
2162 {
2163 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2164
2165 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2166 mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
2167 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2168 response);
2169
2170 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2171 }
2172
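/*
 * Receive completion handler: unmap and validate the MAD, perform
 * directed-route SMP handling, give the driver first refusal via
 * process_mad, then hand the MAD to the matching agent or generate an
 * unmatched response, and finally repost a receive buffer.
 */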
2173 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2174 {
2175 struct ib_mad_port_private *port_priv = cq->cq_context;
2176 struct ib_mad_list_head *mad_list =
2177 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2178 struct ib_mad_qp_info *qp_info;
2179 struct ib_mad_private_header *mad_priv_hdr;
2180 struct ib_mad_private *recv, *response = NULL;
2181 struct ib_mad_agent_private *mad_agent;
2182 int port_num;
2183 int ret = IB_MAD_RESULT_SUCCESS;
2184 size_t mad_size;
2185 u16 resp_mad_pkey_index = 0;
2186 bool opa;
2187
2188 if (list_empty_careful(&port_priv->port_list))
2189 return;
2190
2191 if (wc->status != IB_WC_SUCCESS) {
2192 /*
2193 * Receive errors indicate that the QP has entered the error
2194 * state - error handling/shutdown code will cleanup
2195 */
2196 return;
2197 }
2198
2199 qp_info = mad_list->mad_queue->qp_info;
2200 dequeue_mad(mad_list);
2201
2202 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2203 qp_info->port_priv->port_num);
2204
2205 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2206 mad_list);
2207 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2208 ib_dma_unmap_single(port_priv->device,
2209 recv->header.mapping,
2210 mad_priv_dma_size(recv),
2211 DMA_FROM_DEVICE);
2212
2213 /* Set up the MAD receive work completion from the "normal" work completion */
2214 recv->header.wc = *wc;
2215 recv->header.recv_wc.wc = &recv->header.wc;
2216
2217 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2218 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2219 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2220 } else {
2221 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2222 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2223 }
2224
2225 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2226 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2227
2228 if (atomic_read(&qp_info->snoop_count))
2229 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2230
2231 /* Validate MAD */
2232 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2233 goto out;
2234
2235 mad_size = recv->mad_size;
2236 response = alloc_mad_private(mad_size, GFP_KERNEL);
2237 if (!response)
2238 goto out;
2239
2240 if (rdma_cap_ib_switch(port_priv->device))
2241 port_num = wc->port_num;
2242 else
2243 port_num = port_priv->port_num;
2244
2245 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2246 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2247 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2248 response, opa)
2249 == IB_SMI_DISCARD)
2250 goto out;
2251 }
2252
2253 /* Give driver "right of first refusal" on incoming MAD */
2254 if (port_priv->device->process_mad) {
2255 ret = port_priv->device->process_mad(port_priv->device, 0,
2256 port_priv->port_num,
2257 wc, &recv->grh,
2258 (const struct ib_mad_hdr *)recv->mad,
2259 recv->mad_size,
2260 (struct ib_mad_hdr *)response->mad,
2261 &mad_size, &resp_mad_pkey_index);
2262
2263 if (opa)
2264 wc->pkey_index = resp_mad_pkey_index;
2265
2266 if (ret & IB_MAD_RESULT_SUCCESS) {
2267 if (ret & IB_MAD_RESULT_CONSUMED)
2268 goto out;
2269 if (ret & IB_MAD_RESULT_REPLY) {
2270 agent_send_response((const struct ib_mad_hdr *)response->mad,
2271 &recv->grh, wc,
2272 port_priv->device,
2273 port_num,
2274 qp_info->qp->qp_num,
2275 mad_size, opa);
2276 goto out;
2277 }
2278 }
2279 }
2280
2281 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2282 if (mad_agent) {
2283 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2284 /*
2285 * recv is freed in error cases by ib_mad_complete_recv()
2286 * or via the recv_handler it invokes
2287 */
2288 recv = NULL;
2289 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2290 generate_unmatched_resp(recv, response, &mad_size, opa)) {
2291 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2292 port_priv->device, port_num,
2293 qp_info->qp->qp_num, mad_size, opa);
2294 }
2295
2296 out:
2297 /* Post another receive request for this QP */
2298 if (response) {
2299 ib_mad_post_receive_mads(qp_info, response);
2300 kfree(recv);
2301 } else
2302 ib_mad_post_receive_mads(qp_info, recv);
2303 }
2304
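/*
 * Cancel the timeout work when the wait list is empty, or pull it in
 * when the earliest waiting request now expires sooner than currently
 * scheduled.  Caller must hold mad_agent_priv->lock.
 */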
2305 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2306 {
2307 struct ib_mad_send_wr_private *mad_send_wr;
2308 unsigned long delay;
2309
2310 if (list_empty(&mad_agent_priv->wait_list)) {
2311 cancel_delayed_work(&mad_agent_priv->timed_work);
2312 } else {
2313 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2314 struct ib_mad_send_wr_private,
2315 agent_list);
2316
2317 if (time_after(mad_agent_priv->timeout,
2318 mad_send_wr->timeout)) {
2319 mad_agent_priv->timeout = mad_send_wr->timeout;
2320 delay = mad_send_wr->timeout - jiffies;
2321 if ((long)delay <= 0)
2322 delay = 1;
2323 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2324 &mad_agent_priv->timed_work, delay);
2325 }
2326 }
2327 }
2328
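/*
 * Move a send onto the agent's wait list, kept sorted by absolute
 * timeout, and reschedule the timeout work if this entry now expires
 * first.  Caller must hold mad_agent_priv->lock.
 */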
2329 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2330 {
2331 struct ib_mad_agent_private *mad_agent_priv;
2332 struct ib_mad_send_wr_private *temp_mad_send_wr;
2333 struct list_head *list_item;
2334 unsigned long delay;
2335
2336 mad_agent_priv = mad_send_wr->mad_agent_priv;
2337 list_del(&mad_send_wr->agent_list);
2338
2339 delay = mad_send_wr->timeout;
2340 mad_send_wr->timeout += jiffies;
2341
2342 if (delay) {
2343 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2344 temp_mad_send_wr = list_entry(list_item,
2345 struct ib_mad_send_wr_private,
2346 agent_list);
2347 if (time_after(mad_send_wr->timeout,
2348 temp_mad_send_wr->timeout))
2349 break;
2350 }
2351 } else
2353 list_item = &mad_agent_priv->wait_list;
2354 list_add(&mad_send_wr->agent_list, list_item);
2355
2356 /* Reschedule a work item if we have a shorter timeout */
2357 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2358 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2359 &mad_agent_priv->timed_work, delay);
2360 }
2361
2362 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2363 int timeout_ms)
2364 {
2365 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2366 wait_for_response(mad_send_wr);
2367 }
2368
2369 /*
2370 * Process a send work completion
2371 */
2372 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2373 struct ib_mad_send_wc *mad_send_wc)
2374 {
2375 struct ib_mad_agent_private *mad_agent_priv;
2376 unsigned long flags;
2377 int ret;
2378
2379 mad_agent_priv = mad_send_wr->mad_agent_priv;
2380 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2381 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2382 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2383 if (ret == IB_RMPP_RESULT_CONSUMED)
2384 goto done;
2385 } else
2386 ret = IB_RMPP_RESULT_UNHANDLED;
2387
2388 if (mad_send_wc->status != IB_WC_SUCCESS &&
2389 mad_send_wr->status == IB_WC_SUCCESS) {
2390 mad_send_wr->status = mad_send_wc->status;
2391 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2392 }
2393
2394 if (--mad_send_wr->refcount > 0) {
2395 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2396 mad_send_wr->status == IB_WC_SUCCESS) {
2397 wait_for_response(mad_send_wr);
2398 }
2399 goto done;
2400 }
2401
2402 /* Remove send from MAD agent and notify client of completion */
2403 list_del(&mad_send_wr->agent_list);
2404 adjust_timeout(mad_agent_priv);
2405 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2406
2407 if (mad_send_wr->status != IB_WC_SUCCESS)
2408 mad_send_wc->status = mad_send_wr->status;
2409 if (ret == IB_RMPP_RESULT_INTERNAL)
2410 ib_rmpp_send_handler(mad_send_wc);
2411 else
2412 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2413 mad_send_wc);
2414
2415 /* Release reference on agent taken when sending */
2416 deref_mad_agent(mad_agent_priv);
2417 return;
2418 done:
2419 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2420 }
2421
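/*
 * Send completion handler: unmap the request, complete it toward the
 * client, and post the next send that was queued on the overflow list
 * while the send queue was full.
 */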
2422 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2423 {
2424 struct ib_mad_port_private *port_priv = cq->cq_context;
2425 struct ib_mad_list_head *mad_list =
2426 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2427 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2428 struct ib_mad_qp_info *qp_info;
2429 struct ib_mad_queue *send_queue;
2430 const struct ib_send_wr *bad_send_wr;
2431 struct ib_mad_send_wc mad_send_wc;
2432 unsigned long flags;
2433 int ret;
2434
2435 if (list_empty_careful(&port_priv->port_list))
2436 return;
2437
2438 if (wc->status != IB_WC_SUCCESS) {
2439 if (!ib_mad_send_error(port_priv, wc))
2440 return;
2441 }
2442
2443 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2444 mad_list);
2445 send_queue = mad_list->mad_queue;
2446 qp_info = send_queue->qp_info;
2447
2448 retry:
2449 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2450 mad_send_wr->header_mapping,
2451 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2452 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2453 mad_send_wr->payload_mapping,
2454 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2455 queued_send_wr = NULL;
2456 spin_lock_irqsave(&send_queue->lock, flags);
2457 list_del(&mad_list->list);
2458
2459 /* Move queued send to the send queue */
2460 if (send_queue->count-- > send_queue->max_active) {
2461 mad_list = container_of(qp_info->overflow_list.next,
2462 struct ib_mad_list_head, list);
2463 queued_send_wr = container_of(mad_list,
2464 struct ib_mad_send_wr_private,
2465 mad_list);
2466 list_move_tail(&mad_list->list, &send_queue->list);
2467 }
2468 spin_unlock_irqrestore(&send_queue->lock, flags);
2469
2470 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2471 mad_send_wc.status = wc->status;
2472 mad_send_wc.vendor_err = wc->vendor_err;
2473 if (atomic_read(&qp_info->snoop_count))
2474 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2475 IB_MAD_SNOOP_SEND_COMPLETIONS);
2476 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2477
2478 if (queued_send_wr) {
2479 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2480 &bad_send_wr);
2481 if (ret) {
2482 dev_err(&port_priv->device->dev,
2483 "ib_post_send failed: %d\n", ret);
2484 mad_send_wr = queued_send_wr;
2485 wc->status = IB_WC_LOC_QP_OP_ERR;
2486 goto retry;
2487 }
2488 }
2489 }
2490
2491 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2492 {
2493 struct ib_mad_send_wr_private *mad_send_wr;
2494 struct ib_mad_list_head *mad_list;
2495 unsigned long flags;
2496
2497 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2498 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2499 mad_send_wr = container_of(mad_list,
2500 struct ib_mad_send_wr_private,
2501 mad_list);
2502 mad_send_wr->retry = 1;
2503 }
2504 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2505 }
2506
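/*
 * Handle a failed send completion.  Flushed sends marked for retry are
 * reposted; other errors move the QP from SQE back to RTS and mark the
 * outstanding sends for retry.  Returns false when a successful repost
 * consumed the completion.
 */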
2507 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2508 struct ib_wc *wc)
2509 {
2510 struct ib_mad_list_head *mad_list =
2511 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2512 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2513 struct ib_mad_send_wr_private *mad_send_wr;
2514 int ret;
2515
2516 /*
2517 * Send errors will transition the QP to SQE - move
2518 * QP to RTS and repost flushed work requests
2519 */
2520 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2521 mad_list);
2522 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2523 if (mad_send_wr->retry) {
2524 /* Repost send */
2525 const struct ib_send_wr *bad_send_wr;
2526
2527 mad_send_wr->retry = 0;
2528 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2529 &bad_send_wr);
2530 if (!ret)
2531 return false;
2532 }
2533 } else {
2534 struct ib_qp_attr *attr;
2535
2536 /* Transition QP to RTS and fail offending send */
2537 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2538 if (attr) {
2539 attr->qp_state = IB_QPS_RTS;
2540 attr->cur_qp_state = IB_QPS_SQE;
2541 ret = ib_modify_qp(qp_info->qp, attr,
2542 IB_QP_STATE | IB_QP_CUR_STATE);
2543 kfree(attr);
2544 if (ret)
2545 dev_err(&port_priv->device->dev,
2546 "%s - ib_modify_qp to RTS: %d\n",
2547 __func__, ret);
2548 else
2549 mark_sends_for_retry(qp_info);
2550 }
2551 }
2552
2553 return true;
2554 }
2555
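/*
 * Flush an agent's outstanding work at unregistration time: mark
 * in-flight sends as flushed and immediately complete everything on the
 * wait list back to the client with IB_WC_WR_FLUSH_ERR.
 */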
2556 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2557 {
2558 unsigned long flags;
2559 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2560 struct ib_mad_send_wc mad_send_wc;
2561 struct list_head cancel_list;
2562
2563 INIT_LIST_HEAD(&cancel_list);
2564
2565 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2566 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2567 &mad_agent_priv->send_list, agent_list) {
2568 if (mad_send_wr->status == IB_WC_SUCCESS) {
2569 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2570 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2571 }
2572 }
2573
2574 /* Empty wait list to prevent receives from finding a request */
2575 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2576 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2577
2578 /* Report all cancelled requests */
2579 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2580 mad_send_wc.vendor_err = 0;
2581
2582 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2583 &cancel_list, agent_list) {
2584 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2585 list_del(&mad_send_wr->agent_list);
2586 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2587 &mad_send_wc);
2588 atomic_dec(&mad_agent_priv->refcount);
2589 }
2590 }
2591
2592 static struct ib_mad_send_wr_private*
2593 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2594 struct ib_mad_send_buf *send_buf)
2595 {
2596 struct ib_mad_send_wr_private *mad_send_wr;
2597
2598 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2599 agent_list) {
2600 if (&mad_send_wr->send_buf == send_buf)
2601 return mad_send_wr;
2602 }
2603
2604 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2605 agent_list) {
2606 if (is_rmpp_data_mad(mad_agent_priv,
2607 mad_send_wr->send_buf.mad) &&
2608 &mad_send_wr->send_buf == send_buf)
2609 return mad_send_wr;
2610 }
2611 return NULL;
2612 }
2613
2614 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2615 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2616 {
2617 struct ib_mad_agent_private *mad_agent_priv;
2618 struct ib_mad_send_wr_private *mad_send_wr;
2619 unsigned long flags;
2620 int active;
2621
2622 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2623 agent);
2624 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2625 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2626 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2627 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2628 return -EINVAL;
2629 }
2630
2631 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2632 if (!timeout_ms) {
2633 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2634 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2635 }
2636
2637 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2638 if (active)
2639 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2640 else
2641 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2642
2643 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2644 return 0;
2645 }
2646 EXPORT_SYMBOL(ib_modify_mad);
2647
2648 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2649 struct ib_mad_send_buf *send_buf)
2650 {
2651 ib_modify_mad(mad_agent, send_buf, 0);
2652 }
2653 EXPORT_SYMBOL(ib_cancel_mad);
2654
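/*
 * Work handler for MADs completed locally (loopback on the same port):
 * deliver the receive side to the destination agent first, then report
 * the send completion to the sender.
 */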
2655 static void local_completions(struct work_struct *work)
2656 {
2657 struct ib_mad_agent_private *mad_agent_priv;
2658 struct ib_mad_local_private *local;
2659 struct ib_mad_agent_private *recv_mad_agent;
2660 unsigned long flags;
2661 int free_mad;
2662 struct ib_wc wc;
2663 struct ib_mad_send_wc mad_send_wc;
2664 bool opa;
2665
2666 mad_agent_priv =
2667 container_of(work, struct ib_mad_agent_private, local_work);
2668
2669 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2670 mad_agent_priv->qp_info->port_priv->port_num);
2671
2672 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2673 while (!list_empty(&mad_agent_priv->local_list)) {
2674 local = list_entry(mad_agent_priv->local_list.next,
2675 struct ib_mad_local_private,
2676 completion_list);
2677 list_del(&local->completion_list);
2678 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2679 free_mad = 0;
2680 if (local->mad_priv) {
2681 u8 base_version;
2682 recv_mad_agent = local->recv_mad_agent;
2683 if (!recv_mad_agent) {
2684 dev_err(&mad_agent_priv->agent.device->dev,
2685 "No receive MAD agent for local completion\n");
2686 free_mad = 1;
2687 goto local_send_completion;
2688 }
2689
2690 /*
2691 * Defined behavior is to complete response
2692 * before request
2693 */
2694 build_smp_wc(recv_mad_agent->agent.qp,
2695 local->mad_send_wr->send_wr.wr.wr_cqe,
2696 be16_to_cpu(IB_LID_PERMISSIVE),
2697 local->mad_send_wr->send_wr.pkey_index,
2698 recv_mad_agent->agent.port_num, &wc);
2699
2700 local->mad_priv->header.recv_wc.wc = &wc;
2701
2702 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2703 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2704 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2705 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2706 } else {
2707 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2708 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2709 }
2710
2711 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2712 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2713 &local->mad_priv->header.recv_wc.rmpp_list);
2714 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2715 local->mad_priv->header.recv_wc.recv_buf.mad =
2716 (struct ib_mad *)local->mad_priv->mad;
2717 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2718 snoop_recv(recv_mad_agent->qp_info,
2719 &local->mad_priv->header.recv_wc,
2720 IB_MAD_SNOOP_RECVS);
2721 recv_mad_agent->agent.recv_handler(
2722 &recv_mad_agent->agent,
2723 &local->mad_send_wr->send_buf,
2724 &local->mad_priv->header.recv_wc);
2725 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2726 atomic_dec(&recv_mad_agent->refcount);
2727 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2728 }
2729
2730 local_send_completion:
2731 /* Complete send */
2732 mad_send_wc.status = IB_WC_SUCCESS;
2733 mad_send_wc.vendor_err = 0;
2734 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2735 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2736 snoop_send(mad_agent_priv->qp_info,
2737 &local->mad_send_wr->send_buf,
2738 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2739 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2740 &mad_send_wc);
2741
2742 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2743 atomic_dec(&mad_agent_priv->refcount);
2744 if (free_mad)
2745 kfree(local->mad_priv);
2746 kfree(local);
2747 }
2748 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2749 }
2750
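/*
 * Retransmit a timed-out send if retries remain; on success the WR is
 * placed back on the agent's send list.  Returns -ETIMEDOUT once the
 * retries are exhausted.
 */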
2751 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2752 {
2753 int ret;
2754
2755 if (!mad_send_wr->retries_left)
2756 return -ETIMEDOUT;
2757
2758 mad_send_wr->retries_left--;
2759 mad_send_wr->send_buf.retries++;
2760
2761 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2762
2763 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2764 ret = ib_retry_rmpp(mad_send_wr);
2765 switch (ret) {
2766 case IB_RMPP_RESULT_UNHANDLED:
2767 ret = ib_send_mad(mad_send_wr);
2768 break;
2769 case IB_RMPP_RESULT_CONSUMED:
2770 ret = 0;
2771 break;
2772 default:
2773 ret = -ECOMM;
2774 break;
2775 }
2776 } else
2777 ret = ib_send_mad(mad_send_wr);
2778
2779 if (!ret) {
2780 mad_send_wr->refcount++;
2781 list_add_tail(&mad_send_wr->agent_list,
2782 &mad_send_wr->mad_agent_priv->send_list);
2783 }
2784 return ret;
2785 }
2786
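/*
 * Delayed-work handler that expires requests on the wait list, retrying
 * them when possible and otherwise reporting IB_WC_RESP_TIMEOUT_ERR (or
 * the stored error status) to the client.
 */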
2787 static void timeout_sends(struct work_struct *work)
2788 {
2789 struct ib_mad_agent_private *mad_agent_priv;
2790 struct ib_mad_send_wr_private *mad_send_wr;
2791 struct ib_mad_send_wc mad_send_wc;
2792 unsigned long flags, delay;
2793
2794 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2795 timed_work.work);
2796 mad_send_wc.vendor_err = 0;
2797
2798 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2799 while (!list_empty(&mad_agent_priv->wait_list)) {
2800 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2801 struct ib_mad_send_wr_private,
2802 agent_list);
2803
2804 if (time_after(mad_send_wr->timeout, jiffies)) {
2805 delay = mad_send_wr->timeout - jiffies;
2806 if ((long)delay <= 0)
2807 delay = 1;
2808 queue_delayed_work(mad_agent_priv->qp_info->
2809 port_priv->wq,
2810 &mad_agent_priv->timed_work, delay);
2811 break;
2812 }
2813
2814 list_del(&mad_send_wr->agent_list);
2815 if (mad_send_wr->status == IB_WC_SUCCESS &&
2816 !retry_send(mad_send_wr))
2817 continue;
2818
2819 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2820
2821 if (mad_send_wr->status == IB_WC_SUCCESS)
2822 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2823 else
2824 mad_send_wc.status = mad_send_wr->status;
2825 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2826 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2827 &mad_send_wc);
2828
2829 atomic_dec(&mad_agent_priv->refcount);
2830 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2831 }
2832 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2833 }
2834
2835 /*
2836 * Allocate receive MADs and post receive WRs for them
2837 */
2838 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2839 struct ib_mad_private *mad)
2840 {
2841 unsigned long flags;
2842 int post, ret;
2843 struct ib_mad_private *mad_priv;
2844 struct ib_sge sg_list;
2845 struct ib_recv_wr recv_wr;
2846 const struct ib_recv_wr *bad_recv_wr;
2847 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2848
2849 /* Initialize common scatter list fields */
2850 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2851
2852 /* Initialize common receive WR fields */
2853 recv_wr.next = NULL;
2854 recv_wr.sg_list = &sg_list;
2855 recv_wr.num_sge = 1;
2856
2857 do {
2858 /* Allocate and map receive buffer */
2859 if (mad) {
2860 mad_priv = mad;
2861 mad = NULL;
2862 } else {
2863 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2864 GFP_ATOMIC);
2865 if (!mad_priv) {
2866 ret = -ENOMEM;
2867 break;
2868 }
2869 }
2870 sg_list.length = mad_priv_dma_size(mad_priv);
2871 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2872 &mad_priv->grh,
2873 mad_priv_dma_size(mad_priv),
2874 DMA_FROM_DEVICE);
2875 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2876 sg_list.addr))) {
2877 kfree(mad_priv);
2878 ret = -ENOMEM;
2879 break;
2880 }
2881 mad_priv->header.mapping = sg_list.addr;
2882 mad_priv->header.mad_list.mad_queue = recv_queue;
2883 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2884 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2885
2886 /* Post receive WR */
2887 spin_lock_irqsave(&recv_queue->lock, flags);
2888 post = (++recv_queue->count < recv_queue->max_active);
2889 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2890 spin_unlock_irqrestore(&recv_queue->lock, flags);
2891 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2892 if (ret) {
2893 spin_lock_irqsave(&recv_queue->lock, flags);
2894 list_del(&mad_priv->header.mad_list.list);
2895 recv_queue->count--;
2896 spin_unlock_irqrestore(&recv_queue->lock, flags);
2897 ib_dma_unmap_single(qp_info->port_priv->device,
2898 mad_priv->header.mapping,
2899 mad_priv_dma_size(mad_priv),
2900 DMA_FROM_DEVICE);
2901 kfree(mad_priv);
2902 dev_err(&qp_info->port_priv->device->dev,
2903 "ib_post_recv failed: %d\n", ret);
2904 break;
2905 }
2906 } while (post);
2907
2908 return ret;
2909 }
2910
2911 /*
2912 * Return all the posted receive MADs
2913 */
2914 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2915 {
2916 struct ib_mad_private_header *mad_priv_hdr;
2917 struct ib_mad_private *recv;
2918 struct ib_mad_list_head *mad_list;
2919
2920 if (!qp_info->qp)
2921 return;
2922
2923 while (!list_empty(&qp_info->recv_queue.list)) {
2924
2925 mad_list = list_entry(qp_info->recv_queue.list.next,
2926 struct ib_mad_list_head, list);
2927 mad_priv_hdr = container_of(mad_list,
2928 struct ib_mad_private_header,
2929 mad_list);
2930 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2931 header);
2932
2933 /* Remove from posted receive MAD list */
2934 list_del(&mad_list->list);
2935
2936 ib_dma_unmap_single(qp_info->port_priv->device,
2937 recv->header.mapping,
2938 mad_priv_dma_size(recv),
2939 DMA_FROM_DEVICE);
2940 kfree(recv);
2941 }
2942
2943 qp_info->recv_queue.count = 0;
2944 }
2945
2946 /*
2947 * Start the port
2948 */
2949 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2950 {
2951 int ret, i;
2952 struct ib_qp_attr *attr;
2953 struct ib_qp *qp;
2954 u16 pkey_index;
2955
2956 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2957 if (!attr)
2958 return -ENOMEM;
2959
2960 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2961 IB_DEFAULT_PKEY_FULL, &pkey_index);
2962 if (ret)
2963 pkey_index = 0;
2964
2965 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2966 qp = port_priv->qp_info[i].qp;
2967 if (!qp)
2968 continue;
2969
2970 /*
2971 * PKey index for QP1 is irrelevant but
2972 * one is needed for the Reset to Init transition
2973 */
2974 attr->qp_state = IB_QPS_INIT;
2975 attr->pkey_index = pkey_index;
2976 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2977 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2978 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2979 if (ret) {
2980 dev_err(&port_priv->device->dev,
2981 "Couldn't change QP%d state to INIT: %d\n",
2982 i, ret);
2983 goto out;
2984 }
2985
2986 attr->qp_state = IB_QPS_RTR;
2987 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2988 if (ret) {
2989 dev_err(&port_priv->device->dev,
2990 "Couldn't change QP%d state to RTR: %d\n",
2991 i, ret);
2992 goto out;
2993 }
2994
2995 attr->qp_state = IB_QPS_RTS;
2996 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2997 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2998 if (ret) {
2999 dev_err(&port_priv->device->dev,
3000 "Couldn't change QP%d state to RTS: %d\n",
3001 i, ret);
3002 goto out;
3003 }
3004 }
3005
3006 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3007 if (ret) {
3008 dev_err(&port_priv->device->dev,
3009 "Failed to request completion notification: %d\n",
3010 ret);
3011 goto out;
3012 }
3013
3014 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3015 if (!port_priv->qp_info[i].qp)
3016 continue;
3017
3018 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3019 if (ret) {
3020 dev_err(&port_priv->device->dev,
3021 "Couldn't post receive WRs\n");
3022 goto out;
3023 }
3024 }
3025 out:
3026 kfree(attr);
3027 return ret;
3028 }
3029
3030 static void qp_event_handler(struct ib_event *event, void *qp_context)
3031 {
3032 struct ib_mad_qp_info *qp_info = qp_context;
3033
3034 /* It's worse than that! He's dead, Jim! */
3035 dev_err(&qp_info->port_priv->device->dev,
3036 "Fatal error (%d) on MAD QP (%d)\n",
3037 event->event, qp_info->qp->qp_num);
3038 }
3039
3040 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3041 struct ib_mad_queue *mad_queue)
3042 {
3043 mad_queue->qp_info = qp_info;
3044 mad_queue->count = 0;
3045 spin_lock_init(&mad_queue->lock);
3046 INIT_LIST_HEAD(&mad_queue->list);
3047 }
3048
3049 static void init_mad_qp(struct ib_mad_port_private *port_priv,
3050 struct ib_mad_qp_info *qp_info)
3051 {
3052 qp_info->port_priv = port_priv;
3053 init_mad_queue(qp_info, &qp_info->send_queue);
3054 init_mad_queue(qp_info, &qp_info->recv_queue);
3055 INIT_LIST_HEAD(&qp_info->overflow_list);
3056 spin_lock_init(&qp_info->snoop_lock);
3057 qp_info->snoop_table = NULL;
3058 qp_info->snoop_table_size = 0;
3059 atomic_set(&qp_info->snoop_count, 0);
3060 }
3061
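/*
 * Create one of the special QPs (SMI or GSI) used by the MAD layer,
 * sized according to the send/receive queue module parameters.
 */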
3062 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3063 enum ib_qp_type qp_type)
3064 {
3065 struct ib_qp_init_attr qp_init_attr;
3066 int ret;
3067
3068 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3069 qp_init_attr.send_cq = qp_info->port_priv->cq;
3070 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3071 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3072 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3073 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3074 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3075 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3076 qp_init_attr.qp_type = qp_type;
3077 qp_init_attr.port_num = qp_info->port_priv->port_num;
3078 qp_init_attr.qp_context = qp_info;
3079 qp_init_attr.event_handler = qp_event_handler;
3080 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3081 if (IS_ERR(qp_info->qp)) {
3082 dev_err(&qp_info->port_priv->device->dev,
3083 "Couldn't create ib_mad QP%d\n",
3084 get_spl_qp_index(qp_type));
3085 ret = PTR_ERR(qp_info->qp);
3086 goto error;
3087 }
3088 /* Use minimum queue sizes unless the CQ is resized */
3089 qp_info->send_queue.max_active = mad_sendq_size;
3090 qp_info->recv_queue.max_active = mad_recvq_size;
3091 return 0;
3092
3093 error:
3094 return ret;
3095 }
3096
3097 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3098 {
3099 if (!qp_info->qp)
3100 return;
3101
3102 ib_destroy_qp(qp_info->qp);
3103 kfree(qp_info->snoop_table);
3104 }
3105
3106 /*
3107 * Open the port
3108 * Create the QPs, PD, and CQ if needed
3109 */
3110 static int ib_mad_port_open(struct ib_device *device,
3111 int port_num)
3112 {
3113 int ret, cq_size;
3114 struct ib_mad_port_private *port_priv;
3115 unsigned long flags;
3116 char name[sizeof "ib_mad123"];
3117 int has_smi;
3118
3119 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3120 return -EFAULT;
3121
3122 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3123 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3124 return -EFAULT;
3125
3126 /* Create new device info */
3127 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3128 if (!port_priv)
3129 return -ENOMEM;
3130
3131 port_priv->device = device;
3132 port_priv->port_num = port_num;
3133 spin_lock_init(&port_priv->reg_lock);
3134 INIT_LIST_HEAD(&port_priv->agent_list);
3135 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3136 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3137
3138 cq_size = mad_sendq_size + mad_recvq_size;
3139 has_smi = rdma_cap_ib_smi(device, port_num);
3140 if (has_smi)
3141 cq_size *= 2;
3142
3143 port_priv->pd = ib_alloc_pd(device, 0);
3144 if (IS_ERR(port_priv->pd)) {
3145 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3146 ret = PTR_ERR(port_priv->pd);
3147 goto error3;
3148 }
3149
3150 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3151 IB_POLL_WORKQUEUE);
3152 if (IS_ERR(port_priv->cq)) {
3153 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3154 ret = PTR_ERR(port_priv->cq);
3155 goto error4;
3156 }
3157
3158 if (has_smi) {
3159 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3160 if (ret)
3161 goto error6;
3162 }
3163 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3164 if (ret)
3165 goto error7;
3166
3167 snprintf(name, sizeof name, "ib_mad%d", port_num);
3168 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3169 if (!port_priv->wq) {
3170 ret = -ENOMEM;
3171 goto error8;
3172 }
3173
3174 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3175 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3176 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3177
3178 ret = ib_mad_port_start(port_priv);
3179 if (ret) {
3180 dev_err(&device->dev, "Couldn't start port\n");
3181 goto error9;
3182 }
3183
3184 return 0;
3185
3186 error9:
3187 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3188 list_del_init(&port_priv->port_list);
3189 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3190
3191 destroy_workqueue(port_priv->wq);
3192 error8:
3193 destroy_mad_qp(&port_priv->qp_info[1]);
3194 error7:
3195 destroy_mad_qp(&port_priv->qp_info[0]);
3196 error6:
3197 ib_free_cq(port_priv->cq);
3198 cleanup_recv_queue(&port_priv->qp_info[1]);
3199 cleanup_recv_queue(&port_priv->qp_info[0]);
3200 error4:
3201 ib_dealloc_pd(port_priv->pd);
3202 error3:
3203 kfree(port_priv);
3204
3205 return ret;
3206 }
3207
3208 /*
3209 * Close the port
3210 * If there are no classes using the port, free the port
3211 * resources (CQ, PD, QPs) and remove the port's info structure
3212 */
3213 static int ib_mad_port_close(struct ib_device *device, int port_num)
3214 {
3215 struct ib_mad_port_private *port_priv;
3216 unsigned long flags;
3217
3218 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3219 port_priv = __ib_get_mad_port(device, port_num);
3220 if (port_priv == NULL) {
3221 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3222 dev_err(&device->dev, "Port %d not found\n", port_num);
3223 return -ENODEV;
3224 }
3225 list_del_init(&port_priv->port_list);
3226 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3227
3228 destroy_workqueue(port_priv->wq);
3229 destroy_mad_qp(&port_priv->qp_info[1]);
3230 destroy_mad_qp(&port_priv->qp_info[0]);
3231 ib_free_cq(port_priv->cq);
3232 ib_dealloc_pd(port_priv->pd);
3233 cleanup_recv_queue(&port_priv->qp_info[1]);
3234 cleanup_recv_queue(&port_priv->qp_info[0]);
3235 /* XXX: Handle deallocation of MAD registration tables */
3236
3237 kfree(port_priv);
3238
3239 return 0;
3240 }
3241
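/*
 * Client add callback: open MAD and agent services on every port of the
 * device that supports IB management datagrams, unwinding on failure.
 */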
3242 static void ib_mad_init_device(struct ib_device *device)
3243 {
3244 int start, i;
3245
3246 start = rdma_start_port(device);
3247
3248 for (i = start; i <= rdma_end_port(device); i++) {
3249 if (!rdma_cap_ib_mad(device, i))
3250 continue;
3251
3252 if (ib_mad_port_open(device, i)) {
3253 dev_err(&device->dev, "Couldn't open port %d\n", i);
3254 goto error;
3255 }
3256 if (ib_agent_port_open(device, i)) {
3257 dev_err(&device->dev,
3258 "Couldn't open port %d for agents\n", i);
3259 goto error_agent;
3260 }
3261 }
3262 return;
3263
3264 error_agent:
3265 if (ib_mad_port_close(device, i))
3266 dev_err(&device->dev, "Couldn't close port %d\n", i);
3267
3268 error:
3269 while (--i >= start) {
3270 if (!rdma_cap_ib_mad(device, i))
3271 continue;
3272
3273 if (ib_agent_port_close(device, i))
3274 dev_err(&device->dev,
3275 "Couldn't close port %d for agents\n", i);
3276 if (ib_mad_port_close(device, i))
3277 dev_err(&device->dev, "Couldn't close port %d\n", i);
3278 }
3279 }
3280
3281 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3282 {
3283 int i;
3284
3285 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3286 if (!rdma_cap_ib_mad(device, i))
3287 continue;
3288
3289 if (ib_agent_port_close(device, i))
3290 dev_err(&device->dev,
3291 "Couldn't close port %d for agents\n", i);
3292 if (ib_mad_port_close(device, i))
3293 dev_err(&device->dev, "Couldn't close port %d\n", i);
3294 }
3295 }
3296
3297 static struct ib_client mad_client = {
3298 .name = "mad",
3299 .add = ib_mad_init_device,
3300 .remove = ib_mad_remove_device
3301 };
3302
3303 int ib_mad_init(void)
3304 {
3305 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3306 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3307
3308 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3309 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3310
3311 INIT_LIST_HEAD(&ib_mad_port_list);
3312
3313 if (ib_register_client(&mad_client)) {
3314 pr_err("Couldn't register ib_mad client\n");
3315 return -EINVAL;
3316 }
3317
3318 return 0;
3319 }
3320
3321 void ib_mad_cleanup(void)
3322 {
3323 ib_unregister_client(&mad_client);
3324 }