/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/wait.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
#include <rdma/iw_portmap.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const iwcm_rej_reason_strs[] = {
	[ECONNRESET]	= "reset by remote host",
	[ECONNREFUSED]	= "refused by remote application",
	[ETIMEDOUT]	= "setup timeout",
};

const char *__attribute_const__ iwcm_reject_msg(int reason)
{
	size_t index;

	/* iWARP uses negative errnos */
	index = -reason;

	if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
	    iwcm_rej_reason_strs[index])
		return iwcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(iwcm_reject_msg);

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
	struct work_struct	work;
	struct iwcm_id_private	*cm_id;
	struct list_head	list;
	struct iw_cm_event	event;
	struct list_head	free_list;
};

static unsigned int default_backlog = 256;

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements. The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed. cm_event_handler() returns -ENOMEM in this case. It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method. This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

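/* Take a pre-allocated work element off the cm_id's free list, if any. */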
static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

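/* Return a work element to its cm_id's free list for reuse. */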
static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
		kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}

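/* Free the pre-allocated work element pool along with the cm_id itself. */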
static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, free the cm_id and return 1.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
		return 1;
	}

	return 0;
}

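/*
 * Reference helpers installed as the cm_id add_ref/rem_ref operations so
 * that providers and the CM core can pin a cm_id across asynchronous
 * event handling.
 */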
static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	(void)iwcm_deref_id(cm_id_priv);
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

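/*
 * Allocate a new cm_id in the IDLE state. The caller holds the initial
 * reference, which is dropped by iw_destroy_cm_id().
 */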
struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	atomic_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);

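/* Move the QP to the ERROR state to force an abrupt close. */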
static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for user-mode clients */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * the connect_request event was delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if the wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			(void) iwcm_modify_qp_err(qp);
		else
			(void) iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	/*
	 * Since we're deleting the cm_id, drop any events that
	 * might arrive before the last dereference.
	 */
	set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		cm_id->device->iwcm->destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(cm_id_priv->qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving the connection request event, or returned a
		 * nonzero value from the event callback function.
		 * In either case, we must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->iwcm->reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/**
 * iw_cm_map - Use portmapper to map the ports
 * @cm_id: connection manager pointer
 * @active: Indicates the active side when true
 * returns nonzero for error only if iwpm_create_mapinfo() fails
 *
 * Tries to add a mapping for a port using the Portmapper. If
 * successful in mapping the IP/Port it will check the remote
 * mapped IP address for a wildcard IP address and replace the
 * zero IP address with the remote_addr.
 */
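/*
 * In this implementation no port-mapper request is issued; the local and
 * remote addresses are simply copied through to their mapped counterparts.
 */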
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
	cm_id->m_local_addr = cm_id->local_addr;
	cm_id->m_remote_addr = cm_id->remote_addr;

	return 0;
}

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	if (!backlog)
		backlog = default_backlog;

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = iw_cm_map(cm_id, false);
		if (!ret)
			ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_destroy_cm_id will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		ret = -EINVAL;
		goto err;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		ret = -EINVAL;
		goto err;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = iw_cm_map(cm_id, true);
	if (!ret)
		ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (!ret)
		return 0;	/* success */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->qp) {
		cm_id->device->iwcm->rem_ref(qp);
		cm_id_priv->qp = NULL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
err:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);
	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the new cm_id is cloned from the
 * listening one. The event contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->m_local_addr = iw_event->local_addr;
	cm_id->m_remote_addr = iw_event->remote_addr;
	cm_id->local_addr = listen_id_priv->id.local_addr;
	cm_id->remote_addr = iw_event->remote_addr;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the ESTABLISHED event
 * before posting its requests to the server. This event will wake up
 * anyone blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == 0) {
		cm_id_priv->id.m_local_addr = iw_event->local_addr;
		cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
		iw_event->local_addr = cm_id_priv->id.local_addr;
		iw_event->remote_addr = cm_id_priv->id.remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

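/* Dispatch a single provider event to the handler for its event type. */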
static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
			ret = process_event(cm_id_priv, &levent);
			if (ret)
				destroy_cm_id(&cm_id_priv->id);
		} else
			pr_debug("dropping event %d\n", levent.event);
		if (iwcm_deref_id(cm_id_priv))
			return;
		if (empty)
			return;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

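/*
 * QP attributes the CM requires for the INIT/RTR transitions: enable
 * remote read and write access while a connection is being set up.
 */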
static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

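/* The CM imposes no additional attributes for the RTS transition. */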
static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

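/*
 * Fill in the CM-required QP attributes for the requested QP state
 * transition (INIT/RTR or RTS) on behalf of the caller.
 */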
int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);

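/* Module init: create the ordered workqueue used to deliver CM events. */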
static int __init iw_cm_init(void)
{
	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
	if (!iwcm_wq)
		return -ENOMEM;

	return 0;
}

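/* Module teardown: flush and destroy the CM event workqueue. */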
static void __exit iw_cm_cleanup(void)
{
	destroy_workqueue(iwcm_wq);
}

module_init_order(iw_cm_init, SI_ORDER_FIRST);
module_exit_order(iw_cm_cleanup, SI_ORDER_FIRST);