/*
 * Kernel InfiniBand MAD (Management Datagram) support.
 */
35#include <linux/dma-mapping.h>
36#include <rdma/ib_cache.h>
37
38#include "mad_priv.h"
39#include "mad_rmpp.h"
40#include "smi.h"
41#include "agent.h"
42
43MODULE_LICENSE("Dual BSD/GPL");
44MODULE_DESCRIPTION("kernel IB MAD API");
45MODULE_AUTHOR("Hal Rosenstock");
46MODULE_AUTHOR("Sean Hefty");
47
static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;

/* Forward declarations */
58static int method_in_use(struct ib_mad_mgmt_method_table **method,
59 struct ib_mad_reg_req *mad_reg_req);
60static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
61static struct ib_mad_agent_private *find_mad_agent(
62 struct ib_mad_port_private *port_priv,
63 struct ib_mad *mad);
64static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
65 struct ib_mad_private *mad);
66static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
67static void timeout_sends(struct work_struct *work);
68static void local_completions(struct work_struct *work);
69static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
70 struct ib_mad_agent_private *agent_priv,
71 u8 mgmt_class);
72static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
73 struct ib_mad_agent_private *agent_priv);
74
/*
 * Returns a matching ib_mad_port_private structure or NULL if no
 * matching device is found.  The caller must hold ib_mad_port_list_lock.
 */
79static inline struct ib_mad_port_private *
80__ib_get_mad_port(struct ib_device *device, int port_num)
81{
82 struct ib_mad_port_private *entry;
83
84 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
85 if (entry->device == device && entry->port_num == port_num)
86 return entry;
87 }
88 return NULL;
89}
90
/*
 * Wrapper function to return an ib_mad_port_private structure or NULL;
 * this takes the port list lock around the lookup.
 */
95static inline struct ib_mad_port_private *
96ib_get_mad_port(struct ib_device *device, int port_num)
97{
98 struct ib_mad_port_private *entry;
99 unsigned long flags;
100
101 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
102 entry = __ib_get_mad_port(device, port_num);
103 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
104
105 return entry;
106}
107
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}
114
115static int get_spl_qp_index(enum ib_qp_type qp_type)
116{
117 switch (qp_type)
118 {
119 case IB_QPT_SMI:
120 return 0;
121 case IB_QPT_GSI:
122 return 1;
123 default:
124 return -1;
125 }
126}
127
128static int vendor_class_index(u8 mgmt_class)
129{
130 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
131}
132
133static int is_vendor_class(u8 mgmt_class)
134{
135 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
136 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
137 return 0;
138 return 1;
139}
140
141static int is_vendor_oui(char *oui)
142{
143 if (oui[0] || oui[1] || oui[2])
144 return 1;
145 return 0;
146}
147
148static int is_vendor_method_in_use(
149 struct ib_mad_mgmt_vendor_class *vendor_class,
150 struct ib_mad_reg_req *mad_reg_req)
151{
152 struct ib_mad_mgmt_method_table *method;
153 int i;
154
155 for (i = 0; i < MAX_MGMT_OUI; i++) {
156 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
157 method = vendor_class->method_table[i];
158 if (method) {
159 if (method_in_use(&method, mad_reg_req))
160 return 1;
161 else
162 break;
163 }
164 }
165 }
166 return 0;
167}
168
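/*
 * Return non-zero if the MAD is a response: the response bit is set in
 * the method, the method is TrapRepress, or it is a BM MAD with the
 * response bit set in the attribute modifier.
 */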
169int ib_response_mad(struct ib_mad *mad)
170{
171 return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
172 (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
173 ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
174 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
175}
176EXPORT_SYMBOL(ib_response_mad);
177
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
181struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
182 u8 port_num,
183 enum ib_qp_type qp_type,
184 struct ib_mad_reg_req *mad_reg_req,
185 u8 rmpp_version,
186 ib_mad_send_handler send_handler,
187 ib_mad_recv_handler recv_handler,
188 void *context)
189{
190 struct ib_mad_port_private *port_priv;
191 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
192 struct ib_mad_agent_private *mad_agent_priv;
193 struct ib_mad_reg_req *reg_req = NULL;
194 struct ib_mad_mgmt_class_table *class;
195 struct ib_mad_mgmt_vendor_class_table *vendor;
196 struct ib_mad_mgmt_vendor_class *vendor_class;
197 struct ib_mad_mgmt_method_table *method;
198 int ret2, qpn;
199 unsigned long flags;
200 u8 mgmt_class, vclass;
201
	/* Validate parameters */
203 qpn = get_spl_qp_index(qp_type);
204 if (qpn == -1)
205 goto error1;
206
207 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
208 goto error1;
209
	/* Validate MAD registration request if supplied */
211 if (mad_reg_req) {
212 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
213 goto error1;
214 if (!recv_handler)
215 goto error1;
216 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
221 if (mad_reg_req->mgmt_class !=
222 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
223 goto error1;
224 } else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
229 goto error1;
230 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
235 if (!is_vendor_oui(mad_reg_req->oui))
236 goto error1;
237 }
		/* Make sure class supplied is consistent with RMPP */
239 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
240 if (rmpp_version)
241 goto error1;
242 }
		/* Make sure class supplied is consistent with QP type */
244 if (qp_type == IB_QPT_SMI) {
245 if ((mad_reg_req->mgmt_class !=
246 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
247 (mad_reg_req->mgmt_class !=
248 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
249 goto error1;
250 } else {
251 if ((mad_reg_req->mgmt_class ==
252 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
253 (mad_reg_req->mgmt_class ==
254 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
255 goto error1;
256 }
257 } else {
		/* No registration request supplied */
259 if (!send_handler)
260 goto error1;
261 }
262
	/* Validate device and port */
264 port_priv = ib_get_mad_port(device, port_num);
265 if (!port_priv) {
266 ret = ERR_PTR(-ENODEV);
267 goto error1;
268 }
269
	/* Allocate structures */
271 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
272 if (!mad_agent_priv) {
273 ret = ERR_PTR(-ENOMEM);
274 goto error1;
275 }
276
277 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
278 IB_ACCESS_LOCAL_WRITE);
279 if (IS_ERR(mad_agent_priv->agent.mr)) {
280 ret = ERR_PTR(-ENOMEM);
281 goto error2;
282 }
283
284 if (mad_reg_req) {
285 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
286 if (!reg_req) {
287 ret = ERR_PTR(-ENOMEM);
288 goto error3;
289 }
290
291 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
292 }
293
	/* Now, fill in the various structures */
295 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
296 mad_agent_priv->reg_req = reg_req;
297 mad_agent_priv->agent.rmpp_version = rmpp_version;
298 mad_agent_priv->agent.device = device;
299 mad_agent_priv->agent.recv_handler = recv_handler;
300 mad_agent_priv->agent.send_handler = send_handler;
301 mad_agent_priv->agent.context = context;
302 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
303 mad_agent_priv->agent.port_num = port_num;
304
305 spin_lock_irqsave(&port_priv->reg_lock, flags);
306 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
312 if (mad_reg_req) {
313 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
314 if (!is_vendor_class(mgmt_class)) {
315 class = port_priv->version[mad_reg_req->
316 mgmt_class_version].class;
317 if (class) {
318 method = class->method_table[mgmt_class];
319 if (method) {
320 if (method_in_use(&method,
321 mad_reg_req))
322 goto error4;
323 }
324 }
325 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
326 mgmt_class);
327 } else {
328
329 vendor = port_priv->version[mad_reg_req->
330 mgmt_class_version].vendor;
331 if (vendor) {
332 vclass = vendor_class_index(mgmt_class);
333 vendor_class = vendor->vendor_class[vclass];
334 if (vendor_class) {
335 if (is_vendor_method_in_use(
336 vendor_class,
337 mad_reg_req))
338 goto error4;
339 }
340 }
341 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
342 }
343 if (ret2) {
344 ret = ERR_PTR(ret2);
345 goto error4;
346 }
347 }
348
	/* Add mad agent into port's agent list */
350 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
351 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
352
353 spin_lock_init(&mad_agent_priv->lock);
354 INIT_LIST_HEAD(&mad_agent_priv->send_list);
355 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
356 INIT_LIST_HEAD(&mad_agent_priv->done_list);
357 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
358 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
359 INIT_LIST_HEAD(&mad_agent_priv->local_list);
360 INIT_WORK(&mad_agent_priv->local_work, local_completions);
361 atomic_set(&mad_agent_priv->refcount, 1);
362 init_completion(&mad_agent_priv->comp);
363
364 return &mad_agent_priv->agent;
365
366error4:
367 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
368 kfree(reg_req);
369error3:
370 ib_dereg_mr(mad_agent_priv->agent.mr);
371error2:
372 kfree(mad_agent_priv);
373error1:
374 return ret;
375}
376EXPORT_SYMBOL(ib_register_mad_agent);
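/*
 * Example (sketch only): a send-only client on port 1 using the GSI QP
 * could register roughly as follows.  The handler and context names here
 * are illustrative and not defined in this file:
 *
 *	agent = ib_register_mad_agent(device, 1, IB_QPT_GSI, NULL, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */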
377
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags & IB_MAD_SNOOP_SEND_COMPLETIONS);
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags & IB_MAD_SNOOP_RECVS);
}
393
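/*
 * Find a free slot in the QP's snoop table (growing the table by one
 * entry if necessary), install the snoop agent, and return its index,
 * or a negative errno on allocation failure.
 */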
394static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
395 struct ib_mad_snoop_private *mad_snoop_priv)
396{
397 struct ib_mad_snoop_private **new_snoop_table;
398 unsigned long flags;
399 int i;
400
401 spin_lock_irqsave(&qp_info->snoop_lock, flags);
402
403 for (i = 0; i < qp_info->snoop_table_size; i++)
404 if (!qp_info->snoop_table[i])
405 break;
406
407 if (i == qp_info->snoop_table_size) {
		/* Grow table. */
409 new_snoop_table = krealloc(qp_info->snoop_table,
410 sizeof mad_snoop_priv *
411 (qp_info->snoop_table_size + 1),
412 GFP_ATOMIC);
413 if (!new_snoop_table) {
414 i = -ENOMEM;
415 goto out;
416 }
417
418 qp_info->snoop_table = new_snoop_table;
419 qp_info->snoop_table_size++;
420 }
421 qp_info->snoop_table[i] = mad_snoop_priv;
422 atomic_inc(&qp_info->snoop_count);
423out:
424 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
425 return i;
426}
427
428struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
429 u8 port_num,
430 enum ib_qp_type qp_type,
431 int mad_snoop_flags,
432 ib_mad_snoop_handler snoop_handler,
433 ib_mad_recv_handler recv_handler,
434 void *context)
435{
436 struct ib_mad_port_private *port_priv;
437 struct ib_mad_agent *ret;
438 struct ib_mad_snoop_private *mad_snoop_priv;
439 int qpn;
440
	/* Validate parameters */
442 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
443 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
444 ret = ERR_PTR(-EINVAL);
445 goto error1;
446 }
447 qpn = get_spl_qp_index(qp_type);
448 if (qpn == -1) {
449 ret = ERR_PTR(-EINVAL);
450 goto error1;
451 }
452 port_priv = ib_get_mad_port(device, port_num);
453 if (!port_priv) {
454 ret = ERR_PTR(-ENODEV);
455 goto error1;
456 }
457
458 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
459 if (!mad_snoop_priv) {
460 ret = ERR_PTR(-ENOMEM);
461 goto error1;
462 }
463
	/* Now, fill in the various structures */
465 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
466 mad_snoop_priv->agent.device = device;
467 mad_snoop_priv->agent.recv_handler = recv_handler;
468 mad_snoop_priv->agent.snoop_handler = snoop_handler;
469 mad_snoop_priv->agent.context = context;
470 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
471 mad_snoop_priv->agent.port_num = port_num;
472 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
473 init_completion(&mad_snoop_priv->comp);
474 mad_snoop_priv->snoop_index = register_snoop_agent(
475 &port_priv->qp_info[qpn],
476 mad_snoop_priv);
477 if (mad_snoop_priv->snoop_index < 0) {
478 ret = ERR_PTR(mad_snoop_priv->snoop_index);
479 goto error2;
480 }
481
482 atomic_set(&mad_snoop_priv->refcount, 1);
483 return &mad_snoop_priv->agent;
484
485error2:
486 kfree(mad_snoop_priv);
487error1:
488 return ret;
489}
490EXPORT_SYMBOL(ib_register_mad_snoop);
491
492static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
493{
494 if (atomic_dec_and_test(&mad_agent_priv->refcount))
495 complete(&mad_agent_priv->comp);
496}
497
498static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
499{
500 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
501 complete(&mad_snoop_priv->comp);
502}
503
504static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
505{
506 struct ib_mad_port_private *port_priv;
507 unsigned long flags;
508
	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
515 cancel_mads(mad_agent_priv);
516 port_priv = mad_agent_priv->qp_info->port_priv;
517 cancel_delayed_work(&mad_agent_priv->timed_work);
518
519 spin_lock_irqsave(&port_priv->reg_lock, flags);
520 remove_mad_reg_req(mad_agent_priv);
521 list_del(&mad_agent_priv->agent_list);
522 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
523
524 flush_workqueue(port_priv->wq);
525 ib_cancel_rmpp_recvs(mad_agent_priv);
526
527 deref_mad_agent(mad_agent_priv);
528 wait_for_completion(&mad_agent_priv->comp);
529
530 kfree(mad_agent_priv->reg_req);
531 ib_dereg_mr(mad_agent_priv->agent.mr);
532 kfree(mad_agent_priv);
533}
534
535static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
536{
537 struct ib_mad_qp_info *qp_info;
538 unsigned long flags;
539
540 qp_info = mad_snoop_priv->qp_info;
541 spin_lock_irqsave(&qp_info->snoop_lock, flags);
542 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
543 atomic_dec(&qp_info->snoop_count);
544 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
545
546 deref_snoop_agent(mad_snoop_priv);
547 wait_for_completion(&mad_snoop_priv->comp);
548
549 kfree(mad_snoop_priv);
550}
551
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
555int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
556{
557 struct ib_mad_agent_private *mad_agent_priv;
558 struct ib_mad_snoop_private *mad_snoop_priv;
559
	/* If the TID is zero, the agent can only snoop. */
561 if (mad_agent->hi_tid) {
562 mad_agent_priv = container_of(mad_agent,
563 struct ib_mad_agent_private,
564 agent);
565 unregister_mad_agent(mad_agent_priv);
566 } else {
567 mad_snoop_priv = container_of(mad_agent,
568 struct ib_mad_snoop_private,
569 agent);
570 unregister_mad_snoop(mad_snoop_priv);
571 }
572 return 0;
573}
574EXPORT_SYMBOL(ib_unregister_mad_agent);
575
576static void dequeue_mad(struct ib_mad_list_head *mad_list)
577{
578 struct ib_mad_queue *mad_queue;
579 unsigned long flags;
580
581 BUG_ON(!mad_list->mad_queue);
582 mad_queue = mad_list->mad_queue;
583 spin_lock_irqsave(&mad_queue->lock, flags);
584 list_del(&mad_list->list);
585 mad_queue->count--;
586 spin_unlock_irqrestore(&mad_queue->lock, flags);
587}
588
589static void snoop_send(struct ib_mad_qp_info *qp_info,
590 struct ib_mad_send_buf *send_buf,
591 struct ib_mad_send_wc *mad_send_wc,
592 int mad_snoop_flags)
593{
594 struct ib_mad_snoop_private *mad_snoop_priv;
595 unsigned long flags;
596 int i;
597
598 spin_lock_irqsave(&qp_info->snoop_lock, flags);
599 for (i = 0; i < qp_info->snoop_table_size; i++) {
600 mad_snoop_priv = qp_info->snoop_table[i];
601 if (!mad_snoop_priv ||
602 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
603 continue;
604
605 atomic_inc(&mad_snoop_priv->refcount);
606 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
607 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
608 send_buf, mad_send_wc);
609 deref_snoop_agent(mad_snoop_priv);
610 spin_lock_irqsave(&qp_info->snoop_lock, flags);
611 }
612 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
613}
614
615static void snoop_recv(struct ib_mad_qp_info *qp_info,
616 struct ib_mad_recv_wc *mad_recv_wc,
617 int mad_snoop_flags)
618{
619 struct ib_mad_snoop_private *mad_snoop_priv;
620 unsigned long flags;
621 int i;
622
623 spin_lock_irqsave(&qp_info->snoop_lock, flags);
624 for (i = 0; i < qp_info->snoop_table_size; i++) {
625 mad_snoop_priv = qp_info->snoop_table[i];
626 if (!mad_snoop_priv ||
627 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
628 continue;
629
630 atomic_inc(&mad_snoop_priv->refcount);
631 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
632 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
633 mad_recv_wc);
634 deref_snoop_agent(mad_snoop_priv);
635 spin_lock_irqsave(&qp_info->snoop_lock, flags);
636 }
637 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
638}
639
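/*
 * Build a synthetic receive work completion for a directed route SMP
 * that is being processed locally rather than posted to the QP.
 */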
640static void build_smp_wc(struct ib_qp *qp,
641 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
642 struct ib_wc *wc)
643{
644 memset(wc, 0, sizeof *wc);
645 wc->wr_id = wr_id;
646 wc->status = IB_WC_SUCCESS;
647 wc->opcode = IB_WC_RECV;
648 wc->pkey_index = pkey_index;
649 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
650 wc->src_qp = IB_QP0;
651 wc->qp = qp;
652 wc->slid = slid;
653 wc->sl = 0;
654 wc->dlid_path_bits = 0;
655 wc->port_num = port_num;
656}
657
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
663static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
664 struct ib_mad_send_wr_private *mad_send_wr)
665{
666 int ret = 0;
667 struct ib_smp *smp = mad_send_wr->send_buf.mad;
668 unsigned long flags;
669 struct ib_mad_local_private *local;
670 struct ib_mad_private *mad_priv;
671 struct ib_mad_port_private *port_priv;
672 struct ib_mad_agent_private *recv_mad_agent = NULL;
673 struct ib_device *device = mad_agent_priv->agent.device;
674 u8 port_num;
675 struct ib_wc mad_wc;
676 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
677
678 if (device->node_type == RDMA_NODE_IB_SWITCH &&
679 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
680 port_num = send_wr->wr.ud.port_num;
681 else
682 port_num = mad_agent_priv->agent.port_num;
683
	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
690 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
691 IB_LID_PERMISSIVE &&
692 smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
693 IB_SMI_DISCARD) {
694 ret = -EINVAL;
695 printk(KERN_ERR PFX "Invalid directed route\n");
696 goto out;
697 }
698
	/* Check to post send on QP or process locally */
700 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
701 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
702 goto out;
703
704 local = kmalloc(sizeof *local, GFP_ATOMIC);
705 if (!local) {
706 ret = -ENOMEM;
707 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
708 goto out;
709 }
710 local->mad_priv = NULL;
711 local->recv_mad_agent = NULL;
712 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
713 if (!mad_priv) {
714 ret = -ENOMEM;
715 printk(KERN_ERR PFX "No memory for local response MAD\n");
716 kfree(local);
717 goto out;
718 }
719
720 build_smp_wc(mad_agent_priv->agent.qp,
721 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
722 send_wr->wr.ud.pkey_index,
723 send_wr->wr.ud.port_num, &mad_wc);
724
	/* No GRH for DR SMP */
726 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
727 (struct ib_mad *)smp,
728 (struct ib_mad *)&mad_priv->mad);
729 switch (ret)
730 {
731 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
732 if (ib_response_mad(&mad_priv->mad.mad) &&
733 mad_agent_priv->agent.recv_handler) {
734 local->mad_priv = mad_priv;
735 local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
740 atomic_inc(&mad_agent_priv->refcount);
741 } else
742 kmem_cache_free(ib_mad_cache, mad_priv);
743 break;
744 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
745 kmem_cache_free(ib_mad_cache, mad_priv);
746 kfree(local);
747 ret = 1;
748 goto out;
749 case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
751 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
752 mad_agent_priv->agent.port_num);
753 if (port_priv) {
754 memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
755 recv_mad_agent = find_mad_agent(port_priv,
756 &mad_priv->mad.mad);
757 }
758 if (!port_priv || !recv_mad_agent) {
759 kmem_cache_free(ib_mad_cache, mad_priv);
760 kfree(local);
761 ret = 0;
762 goto out;
763 }
764 local->mad_priv = mad_priv;
765 local->recv_mad_agent = recv_mad_agent;
766 break;
767 default:
768 kmem_cache_free(ib_mad_cache, mad_priv);
769 kfree(local);
770 ret = -EINVAL;
771 goto out;
772 }
773
774 local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
776 atomic_inc(&mad_agent_priv->refcount);
777
778 spin_lock_irqsave(&mad_agent_priv->lock, flags);
779 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
780 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
781 queue_work(mad_agent_priv->qp_info->port_priv->wq,
782 &mad_agent_priv->local_work);
783 ret = 1;
784out:
785 return ret;
786}
787
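/*
 * Number of padding bytes needed to fill out the last data segment of a
 * MAD with the given header and data lengths.
 */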
788static int get_pad_size(int hdr_len, int data_len)
789{
790 int seg_size, pad;
791
792 seg_size = sizeof(struct ib_mad) - hdr_len;
793 if (data_len && seg_size) {
794 pad = seg_size - data_len % seg_size;
795 return pad == seg_size ? 0 : pad;
796 } else
797 return seg_size;
798}
799
800static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
801{
802 struct ib_rmpp_segment *s, *t;
803
804 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
805 list_del(&s->list);
806 kfree(s);
807 }
808}
809
810static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
811 gfp_t gfp_mask)
812{
813 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
814 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
815 struct ib_rmpp_segment *seg = NULL;
816 int left, seg_size, pad;
817
818 send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
819 seg_size = send_buf->seg_size;
820 pad = send_wr->pad;
821
	/* Allocate data segments. */
823 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
824 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
825 if (!seg) {
826 printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
827 "alloc failed for len %zd, gfp %#x\n",
828 sizeof (*seg) + seg_size, gfp_mask);
829 free_send_rmpp_list(send_wr);
830 return -ENOMEM;
831 }
832 seg->num = ++send_buf->seg_count;
833 list_add_tail(&seg->list, &send_wr->rmpp_list);
834 }
835
	/* Zero any padding */
837 if (pad)
838 memset(seg->data + seg_size - pad, 0, pad);
839
840 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
841 agent.rmpp_version;
842 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
843 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
844
845 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
846 struct ib_rmpp_segment, list);
847 send_wr->last_ack_seg = send_wr->cur_seg;
848 return 0;
849}
850
851struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
852 u32 remote_qpn, u16 pkey_index,
853 int rmpp_active,
854 int hdr_len, int data_len,
855 gfp_t gfp_mask)
856{
857 struct ib_mad_agent_private *mad_agent_priv;
858 struct ib_mad_send_wr_private *mad_send_wr;
859 int pad, message_size, ret, size;
860 void *buf;
861
862 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
863 agent);
864 pad = get_pad_size(hdr_len, data_len);
865 message_size = hdr_len + data_len + pad;
866
867 if ((!mad_agent->rmpp_version &&
868 (rmpp_active || message_size > sizeof(struct ib_mad))) ||
869 (!rmpp_active && message_size > sizeof(struct ib_mad)))
870 return ERR_PTR(-EINVAL);
871
872 size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
873 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
874 if (!buf)
875 return ERR_PTR(-ENOMEM);
876
877 mad_send_wr = buf + size;
878 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
879 mad_send_wr->send_buf.mad = buf;
880 mad_send_wr->send_buf.hdr_len = hdr_len;
881 mad_send_wr->send_buf.data_len = data_len;
882 mad_send_wr->pad = pad;
883
884 mad_send_wr->mad_agent_priv = mad_agent_priv;
885 mad_send_wr->sg_list[0].length = hdr_len;
886 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
887 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
888 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
889
890 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
891 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
892 mad_send_wr->send_wr.num_sge = 2;
893 mad_send_wr->send_wr.opcode = IB_WR_SEND;
894 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
895 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
896 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
897 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
898
899 if (rmpp_active) {
900 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
901 if (ret) {
902 kfree(buf);
903 return ERR_PTR(ret);
904 }
905 }
906
907 mad_send_wr->send_buf.mad_agent = mad_agent;
908 atomic_inc(&mad_agent_priv->refcount);
909 return &mad_send_wr->send_buf;
910}
911EXPORT_SYMBOL(ib_create_send_mad);
912
913int ib_get_mad_data_offset(u8 mgmt_class)
914{
915 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
916 return IB_MGMT_SA_HDR;
917 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
918 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
919 (mgmt_class == IB_MGMT_CLASS_BIS))
920 return IB_MGMT_DEVICE_HDR;
921 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
922 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
923 return IB_MGMT_VENDOR_HDR;
924 else
925 return IB_MGMT_MAD_HDR;
926}
927EXPORT_SYMBOL(ib_get_mad_data_offset);
928
929int ib_is_mad_class_rmpp(u8 mgmt_class)
930{
931 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
932 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
933 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
934 (mgmt_class == IB_MGMT_CLASS_BIS) ||
935 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
936 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
937 return 1;
938 return 0;
939}
940EXPORT_SYMBOL(ib_is_mad_class_rmpp);
941
942void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
943{
944 struct ib_mad_send_wr_private *mad_send_wr;
945 struct list_head *list;
946
947 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
948 send_buf);
949 list = &mad_send_wr->cur_seg->list;
950
951 if (mad_send_wr->cur_seg->num < seg_num) {
952 list_for_each_entry(mad_send_wr->cur_seg, list, list)
953 if (mad_send_wr->cur_seg->num == seg_num)
954 break;
955 } else if (mad_send_wr->cur_seg->num > seg_num) {
956 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
957 if (mad_send_wr->cur_seg->num == seg_num)
958 break;
959 }
960 return mad_send_wr->cur_seg->data;
961}
962EXPORT_SYMBOL(ib_get_rmpp_segment);
963
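/*
 * Return a pointer to the data payload to map for sending: the current
 * RMPP segment if the MAD is segmented, otherwise the data area that
 * follows the MAD header.
 */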
964static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
965{
966 if (mad_send_wr->send_buf.seg_count)
967 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
968 mad_send_wr->seg_num);
969 else
970 return mad_send_wr->send_buf.mad +
971 mad_send_wr->send_buf.hdr_len;
972}
973
974void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
975{
976 struct ib_mad_agent_private *mad_agent_priv;
977 struct ib_mad_send_wr_private *mad_send_wr;
978
979 mad_agent_priv = container_of(send_buf->mad_agent,
980 struct ib_mad_agent_private, agent);
981 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
982 send_buf);
983
984 free_send_rmpp_list(mad_send_wr);
985 kfree(send_buf->mad);
986 deref_mad_agent(mad_agent_priv);
987}
988EXPORT_SYMBOL(ib_free_send_mad);
989
990int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
991{
992 struct ib_mad_qp_info *qp_info;
993 struct list_head *list;
994 struct ib_send_wr *bad_send_wr;
995 struct ib_mad_agent *mad_agent;
996 struct ib_sge *sge;
997 unsigned long flags;
998 int ret;
999
	/* Set WR ID to find mad_send_wr upon completion */
1001 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1002 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1003 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1004
1005 mad_agent = mad_send_wr->send_buf.mad_agent;
1006 sge = mad_send_wr->sg_list;
1007 sge[0].addr = ib_dma_map_single(mad_agent->device,
1008 mad_send_wr->send_buf.mad,
1009 sge[0].length,
1010 DMA_TO_DEVICE);
1011 mad_send_wr->header_mapping = sge[0].addr;
1012
1013 sge[1].addr = ib_dma_map_single(mad_agent->device,
1014 ib_get_payload(mad_send_wr),
1015 sge[1].length,
1016 DMA_TO_DEVICE);
1017 mad_send_wr->payload_mapping = sge[1].addr;
1018
1019 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1020 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1021 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1022 &bad_send_wr);
1023 list = &qp_info->send_queue.list;
1024 } else {
1025 ret = 0;
1026 list = &qp_info->overflow_list;
1027 }
1028
1029 if (!ret) {
1030 qp_info->send_queue.count++;
1031 list_add_tail(&mad_send_wr->mad_list.list, list);
1032 }
1033 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1034 if (ret) {
1035 ib_dma_unmap_single(mad_agent->device,
1036 mad_send_wr->header_mapping,
1037 sge[0].length, DMA_TO_DEVICE);
1038 ib_dma_unmap_single(mad_agent->device,
1039 mad_send_wr->payload_mapping,
1040 sge[1].length, DMA_TO_DEVICE);
1041 }
1042 return ret;
1043}
1044
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
1049int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1050 struct ib_mad_send_buf **bad_send_buf)
1051{
1052 struct ib_mad_agent_private *mad_agent_priv;
1053 struct ib_mad_send_buf *next_send_buf;
1054 struct ib_mad_send_wr_private *mad_send_wr;
1055 unsigned long flags;
1056 int ret = -EINVAL;
1057
1058
1059 for (; send_buf; send_buf = next_send_buf) {
1060
1061 mad_send_wr = container_of(send_buf,
1062 struct ib_mad_send_wr_private,
1063 send_buf);
1064 mad_agent_priv = mad_send_wr->mad_agent_priv;
1065
1066 if (!send_buf->mad_agent->send_handler ||
1067 (send_buf->timeout_ms &&
1068 !send_buf->mad_agent->recv_handler)) {
1069 ret = -EINVAL;
1070 goto error;
1071 }
1072
1073 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1074 if (mad_agent_priv->agent.rmpp_version) {
1075 ret = -EINVAL;
1076 goto error;
1077 }
1078 }
1079
		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
1085 next_send_buf = send_buf->next;
1086 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1087
1088 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1089 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1090 ret = handle_outgoing_dr_smp(mad_agent_priv,
1091 mad_send_wr);
1092 if (ret < 0)
1093 goto error;
1094 else if (ret == 1)
1095 continue;
1096 }
1097
1098 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1099
1100 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1101 mad_send_wr->max_retries = send_buf->retries;
1102 mad_send_wr->retries_left = send_buf->retries;
1103 send_buf->retries = 0;
1104
1105 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1106 mad_send_wr->status = IB_WC_SUCCESS;
1107
		/* Reference MAD agent until send completes */
1109 atomic_inc(&mad_agent_priv->refcount);
1110 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1111 list_add_tail(&mad_send_wr->agent_list,
1112 &mad_agent_priv->send_list);
1113 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1114
1115 if (mad_agent_priv->agent.rmpp_version) {
1116 ret = ib_send_rmpp_mad(mad_send_wr);
1117 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1118 ret = ib_send_mad(mad_send_wr);
1119 } else
1120 ret = ib_send_mad(mad_send_wr);
1121 if (ret < 0) {
			/* Fail send request */
1123 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1124 list_del(&mad_send_wr->agent_list);
1125 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1126 atomic_dec(&mad_agent_priv->refcount);
1127 goto error;
1128 }
1129 }
1130 return 0;
1131error:
1132 if (bad_send_buf)
1133 *bad_send_buf = send_buf;
1134 return ret;
1135}
1136EXPORT_SYMBOL(ib_post_send_mad);
1137
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
1142void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1143{
1144 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1145 struct ib_mad_private_header *mad_priv_hdr;
1146 struct ib_mad_private *priv;
1147 struct list_head free_list;
1148
1149 INIT_LIST_HEAD(&free_list);
1150 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1151
1152 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1153 &free_list, list) {
1154 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1155 recv_buf);
1156 mad_priv_hdr = container_of(mad_recv_wc,
1157 struct ib_mad_private_header,
1158 recv_wc);
1159 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1160 header);
1161 kmem_cache_free(ib_mad_cache, priv);
1162 }
1163}
1164EXPORT_SYMBOL(ib_free_recv_mad);
1165
1166struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1167 u8 rmpp_version,
1168 ib_mad_send_handler send_handler,
1169 ib_mad_recv_handler recv_handler,
1170 void *context)
1171{
1172 return ERR_PTR(-EINVAL);
1173}
1174EXPORT_SYMBOL(ib_redirect_mad_qp);
1175
1176int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1177 struct ib_wc *wc)
1178{
1179 printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1180 return 0;
1181}
1182EXPORT_SYMBOL(ib_process_mad_wc);
1183
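/*
 * Check whether any method requested in mad_reg_req is already claimed
 * by another agent in the given method table.
 */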
1184static int method_in_use(struct ib_mad_mgmt_method_table **method,
1185 struct ib_mad_reg_req *mad_reg_req)
1186{
1187 int i;
1188
1189 for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1190 i < IB_MGMT_MAX_METHODS;
1191 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1192 1+i)) {
1193 if ((*method)->agent[i]) {
1194 printk(KERN_ERR PFX "Method %d already in use\n", i);
1195 return -EINVAL;
1196 }
1197 }
1198 return 0;
1199}
1200
1201static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1202{
	/* Allocate management method table */
1204 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1205 if (!*method) {
1206 printk(KERN_ERR PFX "No memory for "
1207 "ib_mad_mgmt_method_table\n");
1208 return -ENOMEM;
1209 }
1210
1211 return 0;
1212}
1213
/*
 * Check to see if there are any methods still in use
 */
1217static int check_method_table(struct ib_mad_mgmt_method_table *method)
1218{
1219 int i;
1220
1221 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1222 if (method->agent[i])
1223 return 1;
1224 return 0;
1225}
1226
/*
 * Check to see if there are any method tables for this class still in use
 */
1230static int check_class_table(struct ib_mad_mgmt_class_table *class)
1231{
1232 int i;
1233
1234 for (i = 0; i < MAX_MGMT_CLASS; i++)
1235 if (class->method_table[i])
1236 return 1;
1237 return 0;
1238}
1239
1240static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1241{
1242 int i;
1243
1244 for (i = 0; i < MAX_MGMT_OUI; i++)
1245 if (vendor_class->method_table[i])
1246 return 1;
1247 return 0;
1248}
1249
1250static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1251 char *oui)
1252{
1253 int i;
1254
1255 for (i = 0; i < MAX_MGMT_OUI; i++)
1256
1257 if (!memcmp(vendor_class->oui[i], oui, 3))
1258 return i;
1259
1260 return -1;
1261}
1262
1263static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1264{
1265 int i;
1266
1267 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1268 if (vendor->vendor_class[i])
1269 return 1;
1270
1271 return 0;
1272}
1273
1274static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1275 struct ib_mad_agent_private *agent)
1276{
1277 int i;
1278
	/* Remove any methods for this mad agent */
1280 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1281 if (method->agent[i] == agent) {
1282 method->agent[i] = NULL;
1283 }
1284 }
1285}
1286
1287static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1288 struct ib_mad_agent_private *agent_priv,
1289 u8 mgmt_class)
1290{
1291 struct ib_mad_port_private *port_priv;
1292 struct ib_mad_mgmt_class_table **class;
1293 struct ib_mad_mgmt_method_table **method;
1294 int i, ret;
1295
1296 port_priv = agent_priv->qp_info->port_priv;
1297 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1298 if (!*class) {
		/* Allocate management class table for "new" class version */
1300 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1301 if (!*class) {
1302 printk(KERN_ERR PFX "No memory for "
1303 "ib_mad_mgmt_class_table\n");
1304 ret = -ENOMEM;
1305 goto error1;
1306 }
1307
		/* Allocate method table for this management class */
1309 method = &(*class)->method_table[mgmt_class];
1310 if ((ret = allocate_method_table(method)))
1311 goto error2;
1312 } else {
1313 method = &(*class)->method_table[mgmt_class];
1314 if (!*method) {
1315
1316 if ((ret = allocate_method_table(method)))
1317 goto error1;
1318 }
1319 }
1320
1321
1322 if (method_in_use(method, mad_reg_req))
1323 goto error3;
1324
1325
1326 for (i = find_first_bit(mad_reg_req->method_mask,
1327 IB_MGMT_MAX_METHODS);
1328 i < IB_MGMT_MAX_METHODS;
1329 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1330 1+i)) {
1331 (*method)->agent[i] = agent_priv;
1332 }
1333 return 0;
1334
1335error3:
1336
1337 remove_methods_mad_agent(*method, agent_priv);
1338
1339 if (!check_method_table(*method)) {
1340
1341 kfree(*method);
1342 *method = NULL;
1343 }
1344 ret = -EINVAL;
1345 goto error1;
1346error2:
1347 kfree(*class);
1348 *class = NULL;
1349error1:
1350 return ret;
1351}
1352
1353static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1354 struct ib_mad_agent_private *agent_priv)
1355{
1356 struct ib_mad_port_private *port_priv;
1357 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1358 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1359 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1360 struct ib_mad_mgmt_method_table **method;
1361 int i, ret = -ENOMEM;
1362 u8 vclass;
1363
1364
1365 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1366 port_priv = agent_priv->qp_info->port_priv;
1367 vendor_table = &port_priv->version[
1368 mad_reg_req->mgmt_class_version].vendor;
1369 if (!*vendor_table) {
1370
1371 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1372 if (!vendor) {
1373 printk(KERN_ERR PFX "No memory for "
1374 "ib_mad_mgmt_vendor_class_table\n");
1375 goto error1;
1376 }
1377
1378 *vendor_table = vendor;
1379 }
1380 if (!(*vendor_table)->vendor_class[vclass]) {
1381
1382 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1383 if (!vendor_class) {
1384 printk(KERN_ERR PFX "No memory for "
1385 "ib_mad_mgmt_vendor_class\n");
1386 goto error2;
1387 }
1388
1389 (*vendor_table)->vendor_class[vclass] = vendor_class;
1390 }
1391 for (i = 0; i < MAX_MGMT_OUI; i++) {
1392
1393 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1394 mad_reg_req->oui, 3)) {
1395 method = &(*vendor_table)->vendor_class[
1396 vclass]->method_table[i];
1397 BUG_ON(!*method);
1398 goto check_in_use;
1399 }
1400 }
1401 for (i = 0; i < MAX_MGMT_OUI; i++) {
1402
1403 if (!is_vendor_oui((*vendor_table)->vendor_class[
1404 vclass]->oui[i])) {
1405 method = &(*vendor_table)->vendor_class[
1406 vclass]->method_table[i];
1407 BUG_ON(*method);
1408
1409 if ((ret = allocate_method_table(method)))
1410 goto error3;
1411 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1412 mad_reg_req->oui, 3);
1413 goto check_in_use;
1414 }
1415 }
1416 printk(KERN_ERR PFX "All OUI slots in use\n");
1417 goto error3;
1418
1419check_in_use:
1420
1421 if (method_in_use(method, mad_reg_req))
1422 goto error4;
1423
1424
1425 for (i = find_first_bit(mad_reg_req->method_mask,
1426 IB_MGMT_MAX_METHODS);
1427 i < IB_MGMT_MAX_METHODS;
1428 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1429 1+i)) {
1430 (*method)->agent[i] = agent_priv;
1431 }
1432 return 0;
1433
1434error4:
1435
1436 remove_methods_mad_agent(*method, agent_priv);
1437
1438 if (!check_method_table(*method)) {
1439
1440 kfree(*method);
1441 *method = NULL;
1442 }
1443 ret = -EINVAL;
1444error3:
1445 if (vendor_class) {
1446 (*vendor_table)->vendor_class[vclass] = NULL;
1447 kfree(vendor_class);
1448 }
1449error2:
1450 if (vendor) {
1451 *vendor_table = NULL;
1452 kfree(vendor);
1453 }
1454error1:
1455 return ret;
1456}
1457
1458static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1459{
1460 struct ib_mad_port_private *port_priv;
1461 struct ib_mad_mgmt_class_table *class;
1462 struct ib_mad_mgmt_method_table *method;
1463 struct ib_mad_mgmt_vendor_class_table *vendor;
1464 struct ib_mad_mgmt_vendor_class *vendor_class;
1465 int index;
1466 u8 mgmt_class;
1467
	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
1472 if (!agent_priv->reg_req) {
1473 goto out;
1474 }
1475
1476 port_priv = agent_priv->qp_info->port_priv;
1477 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1478 class = port_priv->version[
1479 agent_priv->reg_req->mgmt_class_version].class;
1480 if (!class)
1481 goto vendor_check;
1482
1483 method = class->method_table[mgmt_class];
1484 if (method) {
1485
1486 remove_methods_mad_agent(method, agent_priv);
1487
1488 if (!check_method_table(method)) {
1489
1490 kfree(method);
1491 class->method_table[mgmt_class] = NULL;
1492
1493 if (!check_class_table(class)) {
1494
1495 kfree(class);
1496 port_priv->version[
1497 agent_priv->reg_req->
1498 mgmt_class_version].class = NULL;
1499 }
1500 }
1501 }
1502
1503vendor_check:
1504 if (!is_vendor_class(mgmt_class))
1505 goto out;
1506
1507
1508 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1509 vendor = port_priv->version[
1510 agent_priv->reg_req->mgmt_class_version].vendor;
1511
1512 if (!vendor)
1513 goto out;
1514
1515 vendor_class = vendor->vendor_class[mgmt_class];
1516 if (vendor_class) {
1517 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1518 if (index < 0)
1519 goto out;
1520 method = vendor_class->method_table[index];
1521 if (method) {
1522
1523 remove_methods_mad_agent(method, agent_priv);
1524
1525
1526
1527
1528 if (!check_method_table(method)) {
1529
1530 kfree(method);
1531 vendor_class->method_table[index] = NULL;
1532 memset(vendor_class->oui[index], 0, 3);
1533
1534 if (!check_vendor_class(vendor_class)) {
1535
1536 kfree(vendor_class);
1537 vendor->vendor_class[mgmt_class] = NULL;
1538
1539 if (!check_vendor_table(vendor)) {
1540 kfree(vendor);
1541 port_priv->version[
1542 agent_priv->reg_req->
1543 mgmt_class_version].
1544 vendor = NULL;
1545 }
1546 }
1547 }
1548 }
1549 }
1550
1551out:
1552 return;
1553}
1554
1555static struct ib_mad_agent_private *
1556find_mad_agent(struct ib_mad_port_private *port_priv,
1557 struct ib_mad *mad)
1558{
1559 struct ib_mad_agent_private *mad_agent = NULL;
1560 unsigned long flags;
1561
1562 spin_lock_irqsave(&port_priv->reg_lock, flags);
1563 if (ib_response_mad(mad)) {
1564 u32 hi_tid;
1565 struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD
		 */
1571 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1572 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1573 if (entry->agent.hi_tid == hi_tid) {
1574 mad_agent = entry;
1575 break;
1576 }
1577 }
1578 } else {
1579 struct ib_mad_mgmt_class_table *class;
1580 struct ib_mad_mgmt_method_table *method;
1581 struct ib_mad_mgmt_vendor_class_table *vendor;
1582 struct ib_mad_mgmt_vendor_class *vendor_class;
1583 struct ib_vendor_mad *vendor_mad;
1584 int index;
1585
		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
1590 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1591 goto out;
1592 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1593 class = port_priv->version[
1594 mad->mad_hdr.class_version].class;
1595 if (!class)
1596 goto out;
1597 method = class->method_table[convert_mgmt_class(
1598 mad->mad_hdr.mgmt_class)];
1599 if (method)
1600 mad_agent = method->agent[mad->mad_hdr.method &
1601 ~IB_MGMT_METHOD_RESP];
1602 } else {
1603 vendor = port_priv->version[
1604 mad->mad_hdr.class_version].vendor;
1605 if (!vendor)
1606 goto out;
1607 vendor_class = vendor->vendor_class[vendor_class_index(
1608 mad->mad_hdr.mgmt_class)];
1609 if (!vendor_class)
1610 goto out;
1611
1612 vendor_mad = (struct ib_vendor_mad *)mad;
1613 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1614 if (index == -1)
1615 goto out;
1616 method = vendor_class->method_table[index];
1617 if (method) {
1618 mad_agent = method->agent[mad->mad_hdr.method &
1619 ~IB_MGMT_METHOD_RESP];
1620 }
1621 }
1622 }
1623
1624 if (mad_agent) {
1625 if (mad_agent->agent.recv_handler)
1626 atomic_inc(&mad_agent->refcount);
1627 else {
1628 printk(KERN_NOTICE PFX "No receive handler for client "
1629 "%p on port %d\n",
1630 &mad_agent->agent, port_priv->port_num);
1631 mad_agent = NULL;
1632 }
1633 }
1634out:
1635 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1636
1637 return mad_agent;
1638}
1639
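/*
 * Sanity check a received MAD: the base version must be understood and
 * SM classes must arrive on QP0 while all other classes arrive on QP1.
 */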
1640static int validate_mad(struct ib_mad *mad, u32 qp_num)
1641{
1642 int valid = 0;
1643
	/* Make sure MAD base version is understood */
1645 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1646 printk(KERN_ERR PFX "MAD received with unsupported base "
1647 "version %d\n", mad->mad_hdr.base_version);
1648 goto out;
1649 }
1650
	/* Filter SMI packets sent to other than QP 0 */
1652 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1653 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1654 if (qp_num == 0)
1655 valid = 1;
1656 } else {
		/* Filter GSI packets sent to QP0 */
1658 if (qp_num != 0)
1659 valid = 1;
1660 }
1661
1662out:
1663 return valid;
1664}
1665
1666static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1667 struct ib_mad_hdr *mad_hdr)
1668{
1669 struct ib_rmpp_mad *rmpp_mad;
1670
1671 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1672 return !mad_agent_priv->agent.rmpp_version ||
1673 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1674 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1675 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1676}
1677
1678static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1679 struct ib_mad_recv_wc *rwc)
1680{
1681 return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1682 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1683}
1684
1685static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1686 struct ib_mad_send_wr_private *wr,
1687 struct ib_mad_recv_wc *rwc )
1688{
1689 struct ib_ah_attr attr;
1690 u8 send_resp, rcv_resp;
1691 union ib_gid sgid;
1692 struct ib_device *device = mad_agent_priv->agent.device;
1693 u8 port_num = mad_agent_priv->agent.port_num;
1694 u8 lmc;
1695
1696 send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1697 rcv_resp = ib_response_mad(rwc->recv_buf.mad);
1698
1699 if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
1701 return 0;
1702
1703 if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
1705 return 0;
1706
1707 if (!!(attr.ah_flags & IB_AH_GRH) !=
1708 !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
1710 return 0;
1711
1712 if (!send_resp && rcv_resp) {
		/* is request/response. */
1714 if (!(attr.ah_flags & IB_AH_GRH)) {
1715 if (ib_get_cached_lmc(device, port_num, &lmc))
1716 return 0;
1717 return (!lmc || !((attr.src_path_bits ^
1718 rwc->wc->dlid_path_bits) &
1719 ((1 << lmc) - 1)));
1720 } else {
1721 if (ib_get_cached_gid(device, port_num,
1722 attr.grh.sgid_index, &sgid))
1723 return 0;
1724 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1725 16);
1726 }
1727 }
1728
1729 if (!(attr.ah_flags & IB_AH_GRH))
1730 return attr.dlid == rwc->wc->slid;
1731 else
1732 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1733 16);
1734}
1735
1736static inline int is_direct(u8 class)
1737{
1738 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1739}
1740
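/*
 * Find the send work request that a received MAD corresponds to: first
 * search requests already waiting for a response, then data MADs still
 * on the send list whose send completion has not been processed yet.
 */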
1741struct ib_mad_send_wr_private*
1742ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1743 struct ib_mad_recv_wc *wc)
1744{
1745 struct ib_mad_send_wr_private *wr;
1746 struct ib_mad *mad;
1747
1748 mad = (struct ib_mad *)wc->recv_buf.mad;
1749
1750 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1751 if ((wr->tid == mad->mad_hdr.tid) &&
1752 rcv_has_same_class(wr, wc) &&
1753
1754
1755
1756
1757 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1758 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1759 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1760 }
1761
	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
1766 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1767 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1768 wr->tid == mad->mad_hdr.tid &&
1769 wr->timeout &&
1770 rcv_has_same_class(wr, wc) &&
1771
1772
1773
1774
1775 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1776 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1777
1778 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1779 }
1780 return NULL;
1781}
1782
1783void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1784{
1785 mad_send_wr->timeout = 0;
1786 if (mad_send_wr->refcount == 1)
1787 list_move_tail(&mad_send_wr->agent_list,
1788 &mad_send_wr->mad_agent_priv->done_list);
1789}
1790
1791static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1792 struct ib_mad_recv_wc *mad_recv_wc)
1793{
1794 struct ib_mad_send_wr_private *mad_send_wr;
1795 struct ib_mad_send_wc mad_send_wc;
1796 unsigned long flags;
1797
1798 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1799 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1800 if (mad_agent_priv->agent.rmpp_version) {
1801 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1802 mad_recv_wc);
1803 if (!mad_recv_wc) {
1804 deref_mad_agent(mad_agent_priv);
1805 return;
1806 }
1807 }
1808
	/* Complete corresponding request */
1810 if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1811 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1812 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1813 if (!mad_send_wr) {
1814 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1815 ib_free_recv_mad(mad_recv_wc);
1816 deref_mad_agent(mad_agent_priv);
1817 return;
1818 }
1819 ib_mark_mad_done(mad_send_wr);
1820 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1821
		/* Defined behavior is to complete response before request */
1823 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1824 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1825 mad_recv_wc);
1826 atomic_dec(&mad_agent_priv->refcount);
1827
1828 mad_send_wc.status = IB_WC_SUCCESS;
1829 mad_send_wc.vendor_err = 0;
1830 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1831 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1832 } else {
1833 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1834 mad_recv_wc);
1835 deref_mad_agent(mad_agent_priv);
1836 }
1837}
1838
1839static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1840 struct ib_wc *wc)
1841{
1842 struct ib_mad_qp_info *qp_info;
1843 struct ib_mad_private_header *mad_priv_hdr;
1844 struct ib_mad_private *recv, *response = NULL;
1845 struct ib_mad_list_head *mad_list;
1846 struct ib_mad_agent_private *mad_agent;
1847 int port_num;
1848
1849 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1850 qp_info = mad_list->mad_queue->qp_info;
1851 dequeue_mad(mad_list);
1852
1853 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1854 mad_list);
1855 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1856 ib_dma_unmap_single(port_priv->device,
1857 recv->header.mapping,
1858 sizeof(struct ib_mad_private) -
1859 sizeof(struct ib_mad_private_header),
1860 DMA_FROM_DEVICE);
1861
	/* Setup MAD receive work completion from "normal" work completion */
1863 recv->header.wc = *wc;
1864 recv->header.recv_wc.wc = &recv->header.wc;
1865 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1866 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1867 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1868
1869 if (atomic_read(&qp_info->snoop_count))
1870 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1871
	/* Validate MAD */
1873 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1874 goto out;
1875
1876 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1877 if (!response) {
1878 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1879 "for response buffer\n");
1880 goto out;
1881 }
1882
1883 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1884 port_num = wc->port_num;
1885 else
1886 port_num = port_priv->port_num;
1887
1888 if (recv->mad.mad.mad_hdr.mgmt_class ==
1889 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1890 enum smi_forward_action retsmi;
1891
1892 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1893 port_priv->device->node_type,
1894 port_num,
1895 port_priv->device->phys_port_cnt) ==
1896 IB_SMI_DISCARD)
1897 goto out;
1898
1899 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1900 if (retsmi == IB_SMI_LOCAL)
1901 goto local;
1902
1903 if (retsmi == IB_SMI_SEND) {
1904 if (smi_handle_dr_smp_send(&recv->mad.smp,
1905 port_priv->device->node_type,
1906 port_num) == IB_SMI_DISCARD)
1907 goto out;
1908
1909 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1910 goto out;
1911 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
		/* forward case for switches */
1913 memcpy(response, recv, sizeof(*response));
1914 response->header.recv_wc.wc = &response->header.wc;
1915 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1916 response->header.recv_wc.recv_buf.grh = &response->grh;
1917
1918 agent_send_response(&response->mad.mad,
1919 &response->grh, wc,
1920 port_priv->device,
1921 smi_get_fwd_port(&recv->mad.smp),
1922 qp_info->qp->qp_num);
1923
1924 goto out;
1925 }
1926 }
1927
1928local:
	/* Give driver "right of first refusal" on incoming MAD */
1930 if (port_priv->device->process_mad) {
1931 int ret;
1932
1933 ret = port_priv->device->process_mad(port_priv->device, 0,
1934 port_priv->port_num,
1935 wc, &recv->grh,
1936 &recv->mad.mad,
1937 &response->mad.mad);
1938 if (ret & IB_MAD_RESULT_SUCCESS) {
1939 if (ret & IB_MAD_RESULT_CONSUMED)
1940 goto out;
1941 if (ret & IB_MAD_RESULT_REPLY) {
1942 agent_send_response(&response->mad.mad,
1943 &recv->grh, wc,
1944 port_priv->device,
1945 port_num,
1946 qp_info->qp->qp_num);
1947 goto out;
1948 }
1949 }
1950 }
1951
1952 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1953 if (mad_agent) {
1954 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
1959 recv = NULL;
1960 }
1961
1962out:
	/* Post another receive request for this QP */
1964 if (response) {
1965 ib_mad_post_receive_mads(qp_info, response);
1966 if (recv)
1967 kmem_cache_free(ib_mad_cache, recv);
1968 } else
1969 ib_mad_post_receive_mads(qp_info, recv);
1970}
1971
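/*
 * Re-arm (or cancel) the delayed timeout work based on the request at
 * the head of the agent's wait list.
 */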
1972static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1973{
1974 struct ib_mad_send_wr_private *mad_send_wr;
1975 unsigned long delay;
1976
1977 if (list_empty(&mad_agent_priv->wait_list)) {
1978 cancel_delayed_work(&mad_agent_priv->timed_work);
1979 } else {
1980 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1981 struct ib_mad_send_wr_private,
1982 agent_list);
1983
1984 if (time_after(mad_agent_priv->timeout,
1985 mad_send_wr->timeout)) {
1986 mad_agent_priv->timeout = mad_send_wr->timeout;
1987 cancel_delayed_work(&mad_agent_priv->timed_work);
1988 delay = mad_send_wr->timeout - jiffies;
1989 if ((long)delay <= 0)
1990 delay = 1;
1991 queue_delayed_work(mad_agent_priv->qp_info->
1992 port_priv->wq,
1993 &mad_agent_priv->timed_work, delay);
1994 }
1995 }
1996}
1997
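/*
 * Move a sent MAD onto the wait list in timeout order and reschedule
 * the timeout work if it is now the earliest request to expire.
 */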
1998static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1999{
2000 struct ib_mad_agent_private *mad_agent_priv;
2001 struct ib_mad_send_wr_private *temp_mad_send_wr;
2002 struct list_head *list_item;
2003 unsigned long delay;
2004
2005 mad_agent_priv = mad_send_wr->mad_agent_priv;
2006 list_del(&mad_send_wr->agent_list);
2007
2008 delay = mad_send_wr->timeout;
2009 mad_send_wr->timeout += jiffies;
2010
2011 if (delay) {
2012 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2013 temp_mad_send_wr = list_entry(list_item,
2014 struct ib_mad_send_wr_private,
2015 agent_list);
2016 if (time_after(mad_send_wr->timeout,
2017 temp_mad_send_wr->timeout))
2018 break;
2019 }
2020 }
2021 else
2022 list_item = &mad_agent_priv->wait_list;
2023 list_add(&mad_send_wr->agent_list, list_item);
2024
	/* Reschedule a work item if we have a shorter timeout */
2026 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2027 cancel_delayed_work(&mad_agent_priv->timed_work);
2028 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2029 &mad_agent_priv->timed_work, delay);
2030 }
2031}
2032
2033void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2034 int timeout_ms)
2035{
2036 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2037 wait_for_response(mad_send_wr);
2038}
2039
/*
 * Process a send work completion
 */
2043void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2044 struct ib_mad_send_wc *mad_send_wc)
2045{
2046 struct ib_mad_agent_private *mad_agent_priv;
2047 unsigned long flags;
2048 int ret;
2049
2050 mad_agent_priv = mad_send_wr->mad_agent_priv;
2051 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2052 if (mad_agent_priv->agent.rmpp_version) {
2053 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2054 if (ret == IB_RMPP_RESULT_CONSUMED)
2055 goto done;
2056 } else
2057 ret = IB_RMPP_RESULT_UNHANDLED;
2058
2059 if (mad_send_wc->status != IB_WC_SUCCESS &&
2060 mad_send_wr->status == IB_WC_SUCCESS) {
2061 mad_send_wr->status = mad_send_wc->status;
2062 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2063 }
2064
2065 if (--mad_send_wr->refcount > 0) {
2066 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2067 mad_send_wr->status == IB_WC_SUCCESS) {
2068 wait_for_response(mad_send_wr);
2069 }
2070 goto done;
2071 }
2072
	/* Remove send from MAD agent and notify client of completion */
2074 list_del(&mad_send_wr->agent_list);
2075 adjust_timeout(mad_agent_priv);
2076 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2077
2078 if (mad_send_wr->status != IB_WC_SUCCESS )
2079 mad_send_wc->status = mad_send_wr->status;
2080 if (ret == IB_RMPP_RESULT_INTERNAL)
2081 ib_rmpp_send_handler(mad_send_wc);
2082 else
2083 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2084 mad_send_wc);
2085
	/* Release reference on agent taken when sending */
2087 deref_mad_agent(mad_agent_priv);
2088 return;
2089done:
2090 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2091}
2092
2093static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2094 struct ib_wc *wc)
2095{
2096 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2097 struct ib_mad_list_head *mad_list;
2098 struct ib_mad_qp_info *qp_info;
2099 struct ib_mad_queue *send_queue;
2100 struct ib_send_wr *bad_send_wr;
2101 struct ib_mad_send_wc mad_send_wc;
2102 unsigned long flags;
2103 int ret;
2104
2105 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2106 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2107 mad_list);
2108 send_queue = mad_list->mad_queue;
2109 qp_info = send_queue->qp_info;
2110
2111retry:
2112 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2113 mad_send_wr->header_mapping,
2114 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2115 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2116 mad_send_wr->payload_mapping,
2117 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2118 queued_send_wr = NULL;
2119 spin_lock_irqsave(&send_queue->lock, flags);
2120 list_del(&mad_list->list);
2121
2122	/* Move a queued send from the overflow list to the send queue */
2123 if (send_queue->count-- > send_queue->max_active) {
2124 mad_list = container_of(qp_info->overflow_list.next,
2125 struct ib_mad_list_head, list);
2126 queued_send_wr = container_of(mad_list,
2127 struct ib_mad_send_wr_private,
2128 mad_list);
2129 list_move_tail(&mad_list->list, &send_queue->list);
2130 }
2131 spin_unlock_irqrestore(&send_queue->lock, flags);
2132
2133 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2134 mad_send_wc.status = wc->status;
2135 mad_send_wc.vendor_err = wc->vendor_err;
2136 if (atomic_read(&qp_info->snoop_count))
2137 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2138 IB_MAD_SNOOP_SEND_COMPLETIONS);
2139 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2140
2141 if (queued_send_wr) {
2142 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2143 &bad_send_wr);
2144 if (ret) {
2145 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2146 mad_send_wr = queued_send_wr;
2147 wc->status = IB_WC_LOC_QP_OP_ERR;
2148 goto retry;
2149 }
2150 }
2151}
2152
2153static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2154{
2155 struct ib_mad_send_wr_private *mad_send_wr;
2156 struct ib_mad_list_head *mad_list;
2157 unsigned long flags;
2158
2159 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2160 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2161 mad_send_wr = container_of(mad_list,
2162 struct ib_mad_send_wr_private,
2163 mad_list);
2164 mad_send_wr->retry = 1;
2165 }
2166 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2167}
2168
2169static void mad_error_handler(struct ib_mad_port_private *port_priv,
2170 struct ib_wc *wc)
2171{
2172 struct ib_mad_list_head *mad_list;
2173 struct ib_mad_qp_info *qp_info;
2174 struct ib_mad_send_wr_private *mad_send_wr;
2175 int ret;
2176
2177	/* Determine if failure was a send or receive */
2178 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2179 qp_info = mad_list->mad_queue->qp_info;
2180 if (mad_list->mad_queue == &qp_info->recv_queue)
2181		/*
2182		 * Receive errors indicate that the QP has entered the error
2183		 * state - error handling/shutdown code will cleanup
2184		 */
2185 return;
2186
2187	/*
2188	 * Send errors will transition the QP to SQE - move
2189	 * QP to RTS and repost flushed work requests
2190	 */
2191 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2192 mad_list);
2193 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2194 if (mad_send_wr->retry) {
2195			/* Repost send */
2196 struct ib_send_wr *bad_send_wr;
2197
2198 mad_send_wr->retry = 0;
2199 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2200 &bad_send_wr);
2201 if (ret)
2202 ib_mad_send_done_handler(port_priv, wc);
2203 } else
2204 ib_mad_send_done_handler(port_priv, wc);
2205 } else {
2206 struct ib_qp_attr *attr;
2207
2208		/* Transition QP to RTS and fail offending send */
2209 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2210 if (attr) {
2211 attr->qp_state = IB_QPS_RTS;
2212 attr->cur_qp_state = IB_QPS_SQE;
2213 ret = ib_modify_qp(qp_info->qp, attr,
2214 IB_QP_STATE | IB_QP_CUR_STATE);
2215 kfree(attr);
2216 if (ret)
2217 printk(KERN_ERR PFX "mad_error_handler - "
2218 "ib_modify_qp to RTS : %d\n", ret);
2219 else
2220 mark_sends_for_retry(qp_info);
2221 }
2222 ib_mad_send_done_handler(port_priv, wc);
2223 }
2224}
2225
2226/*
2227 * IB MAD completion callback
2228 */
2229static void ib_mad_completion_handler(struct work_struct *work)
2230{
2231 struct ib_mad_port_private *port_priv;
2232 struct ib_wc wc;
2233
2234 port_priv = container_of(work, struct ib_mad_port_private, work);
2235 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2236
2237 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2238 if (wc.status == IB_WC_SUCCESS) {
2239 switch (wc.opcode) {
2240 case IB_WC_SEND:
2241 ib_mad_send_done_handler(port_priv, &wc);
2242 break;
2243 case IB_WC_RECV:
2244 ib_mad_recv_done_handler(port_priv, &wc);
2245 break;
2246 default:
2247 BUG_ON(1);
2248 break;
2249 }
2250 } else
2251 mad_error_handler(port_priv, &wc);
2252 }
2253}
2254
2255static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2256{
2257 unsigned long flags;
2258 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2259 struct ib_mad_send_wc mad_send_wc;
2260 struct list_head cancel_list;
2261
2262 INIT_LIST_HEAD(&cancel_list);
2263
2264 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2265 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2266 &mad_agent_priv->send_list, agent_list) {
2267 if (mad_send_wr->status == IB_WC_SUCCESS) {
2268 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2269 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2270 }
2271 }
2272
2273	/* Empty wait list to prevent receives from finding a request */
2274 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2275 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2276
2277	/* Report all cancelled requests */
2278 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2279 mad_send_wc.vendor_err = 0;
2280
2281 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2282 &cancel_list, agent_list) {
2283 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2284 list_del(&mad_send_wr->agent_list);
2285 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2286 &mad_send_wc);
2287 atomic_dec(&mad_agent_priv->refcount);
2288 }
2289}
2290
2291static struct ib_mad_send_wr_private*
2292find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2293 struct ib_mad_send_buf *send_buf)
2294{
2295 struct ib_mad_send_wr_private *mad_send_wr;
2296
2297 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2298 agent_list) {
2299 if (&mad_send_wr->send_buf == send_buf)
2300 return mad_send_wr;
2301 }
2302
2303 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2304 agent_list) {
2305 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2306 &mad_send_wr->send_buf == send_buf)
2307 return mad_send_wr;
2308 }
2309 return NULL;
2310}
2311
2312int ib_modify_mad(struct ib_mad_agent *mad_agent,
2313 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2314{
2315 struct ib_mad_agent_private *mad_agent_priv;
2316 struct ib_mad_send_wr_private *mad_send_wr;
2317 unsigned long flags;
2318 int active;
2319
2320 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2321 agent);
2322 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2323 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2324 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2325 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2326 return -EINVAL;
2327 }
2328
2329 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2330 if (!timeout_ms) {
2331 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2332 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2333 }
2334
2335 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2336 if (active)
2337 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2338 else
2339 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2340
2341 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2342 return 0;
2343}
2344EXPORT_SYMBOL(ib_modify_mad);
2345
2346void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2347 struct ib_mad_send_buf *send_buf)
2348{
2349 ib_modify_mad(mad_agent, send_buf, 0);
2350}
2351EXPORT_SYMBOL(ib_cancel_mad);
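/*
 * Usage sketch (illustrative only, not part of this file): assuming a
 * client obtained "agent" from ib_register_mad_agent() and "msg" from
 * ib_create_send_mad()/ib_post_send_mad(), it can give up on a pending
 * request with:
 *
 *	ib_cancel_mad(agent, msg);
 *
 * Cancelling zeroes the timeout, so the request is flushed through the
 * normal completion path and reported to agent->send_handler() with
 * status IB_WC_WR_FLUSH_ERR.  To keep waiting instead, extend the
 * timeout (5000 ms here is an arbitrary example value); ib_modify_mad()
 * returns -EINVAL if the request is no longer outstanding:
 *
 *	if (ib_modify_mad(agent, msg, 5000))
 *		printk(KERN_ERR "MAD request already completed\n");
 */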
2352
2353static void local_completions(struct work_struct *work)
2354{
2355 struct ib_mad_agent_private *mad_agent_priv;
2356 struct ib_mad_local_private *local;
2357 struct ib_mad_agent_private *recv_mad_agent;
2358 unsigned long flags;
2359 int recv = 0;
2360 struct ib_wc wc;
2361 struct ib_mad_send_wc mad_send_wc;
2362
2363 mad_agent_priv =
2364 container_of(work, struct ib_mad_agent_private, local_work);
2365
2366 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2367 while (!list_empty(&mad_agent_priv->local_list)) {
2368 local = list_entry(mad_agent_priv->local_list.next,
2369 struct ib_mad_local_private,
2370 completion_list);
2371 list_del(&local->completion_list);
2372 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2373 if (local->mad_priv) {
2374 recv_mad_agent = local->recv_mad_agent;
2375 if (!recv_mad_agent) {
2376 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2377 goto local_send_completion;
2378 }
2379
2380 recv = 1;
2381			/*
2382			 * Defined behavior is to complete response
2383			 * before request
2384			 */
2385 build_smp_wc(recv_mad_agent->agent.qp,
2386 (unsigned long) local->mad_send_wr,
2387 be16_to_cpu(IB_LID_PERMISSIVE),
2388 0, recv_mad_agent->agent.port_num, &wc);
2389
2390 local->mad_priv->header.recv_wc.wc = &wc;
2391 local->mad_priv->header.recv_wc.mad_len =
2392 sizeof(struct ib_mad);
2393 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2394 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2395 &local->mad_priv->header.recv_wc.rmpp_list);
2396 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2397 local->mad_priv->header.recv_wc.recv_buf.mad =
2398 &local->mad_priv->mad.mad;
2399 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2400 snoop_recv(recv_mad_agent->qp_info,
2401 &local->mad_priv->header.recv_wc,
2402 IB_MAD_SNOOP_RECVS);
2403 recv_mad_agent->agent.recv_handler(
2404 &recv_mad_agent->agent,
2405 &local->mad_priv->header.recv_wc);
2406 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2407 atomic_dec(&recv_mad_agent->refcount);
2408 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2409 }
2410
2411local_send_completion:
2412		/* Complete send */
2413 mad_send_wc.status = IB_WC_SUCCESS;
2414 mad_send_wc.vendor_err = 0;
2415 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2416 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2417 snoop_send(mad_agent_priv->qp_info,
2418 &local->mad_send_wr->send_buf,
2419 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2420 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2421 &mad_send_wc);
2422
2423 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2424 atomic_dec(&mad_agent_priv->refcount);
2425 if (!recv)
2426 kmem_cache_free(ib_mad_cache, local->mad_priv);
2427 kfree(local);
2428 }
2429 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2430}
2431
2432static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2433{
2434 int ret;
2435
2436 if (!mad_send_wr->retries_left)
2437 return -ETIMEDOUT;
2438
2439 mad_send_wr->retries_left--;
2440 mad_send_wr->send_buf.retries++;
2441
2442 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2443
2444 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2445 ret = ib_retry_rmpp(mad_send_wr);
2446 switch (ret) {
2447 case IB_RMPP_RESULT_UNHANDLED:
2448 ret = ib_send_mad(mad_send_wr);
2449 break;
2450 case IB_RMPP_RESULT_CONSUMED:
2451 ret = 0;
2452 break;
2453 default:
2454 ret = -ECOMM;
2455 break;
2456 }
2457 } else
2458 ret = ib_send_mad(mad_send_wr);
2459
2460 if (!ret) {
2461 mad_send_wr->refcount++;
2462 list_add_tail(&mad_send_wr->agent_list,
2463 &mad_send_wr->mad_agent_priv->send_list);
2464 }
2465 return ret;
2466}
2467
2468static void timeout_sends(struct work_struct *work)
2469{
2470 struct ib_mad_agent_private *mad_agent_priv;
2471 struct ib_mad_send_wr_private *mad_send_wr;
2472 struct ib_mad_send_wc mad_send_wc;
2473 unsigned long flags, delay;
2474
2475 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2476 timed_work.work);
2477 mad_send_wc.vendor_err = 0;
2478
2479 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2480 while (!list_empty(&mad_agent_priv->wait_list)) {
2481 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2482 struct ib_mad_send_wr_private,
2483 agent_list);
2484
2485 if (time_after(mad_send_wr->timeout, jiffies)) {
2486 delay = mad_send_wr->timeout - jiffies;
2487 if ((long)delay <= 0)
2488 delay = 1;
2489 queue_delayed_work(mad_agent_priv->qp_info->
2490 port_priv->wq,
2491 &mad_agent_priv->timed_work, delay);
2492 break;
2493 }
2494
2495 list_del(&mad_send_wr->agent_list);
2496 if (mad_send_wr->status == IB_WC_SUCCESS &&
2497 !retry_send(mad_send_wr))
2498 continue;
2499
2500 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2501
2502 if (mad_send_wr->status == IB_WC_SUCCESS)
2503 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2504 else
2505 mad_send_wc.status = mad_send_wr->status;
2506 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2507 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2508 &mad_send_wc);
2509
2510 atomic_dec(&mad_agent_priv->refcount);
2511 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2512 }
2513 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2514}
2515
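/*
 * CQ event callback - may run in interrupt context, so defer all
 * completion processing to the port's single-threaded workqueue.
 */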
2516static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2517{
2518 struct ib_mad_port_private *port_priv = cq->cq_context;
2519 unsigned long flags;
2520
2521 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2522 if (!list_empty(&port_priv->port_list))
2523 queue_work(port_priv->wq, &port_priv->work);
2524 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2525}
2526
2527/*
2528 * Allocate receive MADs and post receive WRs for them
2529 */
2530static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2531 struct ib_mad_private *mad)
2532{
2533 unsigned long flags;
2534 int post, ret;
2535 struct ib_mad_private *mad_priv;
2536 struct ib_sge sg_list;
2537 struct ib_recv_wr recv_wr, *bad_recv_wr;
2538 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2539
2540	/* Initialize common scatter list fields */
2541 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2542 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2543
2544	/* Initialize common receive WR fields */
2545 recv_wr.next = NULL;
2546 recv_wr.sg_list = &sg_list;
2547 recv_wr.num_sge = 1;
2548
2549 do {
2550		/* Allocate and map receive buffer */
2551 if (mad) {
2552 mad_priv = mad;
2553 mad = NULL;
2554 } else {
2555 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2556 if (!mad_priv) {
2557 printk(KERN_ERR PFX "No memory for receive buffer\n");
2558 ret = -ENOMEM;
2559 break;
2560 }
2561 }
2562 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2563 &mad_priv->grh,
2564 sizeof *mad_priv -
2565 sizeof mad_priv->header,
2566 DMA_FROM_DEVICE);
2567 mad_priv->header.mapping = sg_list.addr;
2568 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2569 mad_priv->header.mad_list.mad_queue = recv_queue;
2570
2571		/* Post receive WR */
2572 spin_lock_irqsave(&recv_queue->lock, flags);
2573 post = (++recv_queue->count < recv_queue->max_active);
2574 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2575 spin_unlock_irqrestore(&recv_queue->lock, flags);
2576 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2577 if (ret) {
2578 spin_lock_irqsave(&recv_queue->lock, flags);
2579 list_del(&mad_priv->header.mad_list.list);
2580 recv_queue->count--;
2581 spin_unlock_irqrestore(&recv_queue->lock, flags);
2582 ib_dma_unmap_single(qp_info->port_priv->device,
2583 mad_priv->header.mapping,
2584 sizeof *mad_priv -
2585 sizeof mad_priv->header,
2586 DMA_FROM_DEVICE);
2587 kmem_cache_free(ib_mad_cache, mad_priv);
2588 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2589 break;
2590 }
2591 } while (post);
2592
2593 return ret;
2594}
2595
2596/*
2597 * Return all the posted receive MADs
2598 */
2599static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2600{
2601 struct ib_mad_private_header *mad_priv_hdr;
2602 struct ib_mad_private *recv;
2603 struct ib_mad_list_head *mad_list;
2604
2605 while (!list_empty(&qp_info->recv_queue.list)) {
2606
2607 mad_list = list_entry(qp_info->recv_queue.list.next,
2608 struct ib_mad_list_head, list);
2609 mad_priv_hdr = container_of(mad_list,
2610 struct ib_mad_private_header,
2611 mad_list);
2612 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2613 header);
2614
2615		/* Remove from posted receive MAD list */
2616 list_del(&mad_list->list);
2617
2618 ib_dma_unmap_single(qp_info->port_priv->device,
2619 recv->header.mapping,
2620 sizeof(struct ib_mad_private) -
2621 sizeof(struct ib_mad_private_header),
2622 DMA_FROM_DEVICE);
2623 kmem_cache_free(ib_mad_cache, recv);
2624 }
2625
2626 qp_info->recv_queue.count = 0;
2627}
2628
2629/*
2630 * Start the port
2631 */
2632static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2633{
2634 int ret, i;
2635 struct ib_qp_attr *attr;
2636 struct ib_qp *qp;
2637
2638 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2639 if (!attr) {
2640 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2641 return -ENOMEM;
2642 }
2643
2644 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2645 qp = port_priv->qp_info[i].qp;
2646		/*
2647		 * PKey index for QP1 is irrelevant but
2648		 * one is needed for the Reset to Init transition
2649		 */
2650 attr->qp_state = IB_QPS_INIT;
2651 attr->pkey_index = 0;
2652 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2653 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2654 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2655 if (ret) {
2656 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2657 "INIT: %d\n", i, ret);
2658 goto out;
2659 }
2660
2661 attr->qp_state = IB_QPS_RTR;
2662 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2663 if (ret) {
2664 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2665 "RTR: %d\n", i, ret);
2666 goto out;
2667 }
2668
2669 attr->qp_state = IB_QPS_RTS;
2670 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2671 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2672 if (ret) {
2673 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2674 "RTS: %d\n", i, ret);
2675 goto out;
2676 }
2677 }
2678
2679 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2680 if (ret) {
2681 printk(KERN_ERR PFX "Failed to request completion "
2682 "notification: %d\n", ret);
2683 goto out;
2684 }
2685
2686 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2687 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2688 if (ret) {
2689 printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2690 goto out;
2691 }
2692 }
2693out:
2694 kfree(attr);
2695 return ret;
2696}
2697
2698static void qp_event_handler(struct ib_event *event, void *qp_context)
2699{
2700 struct ib_mad_qp_info *qp_info = qp_context;
2701
2702	/* Fatal QP errors are not recoverable here; just report them */
2703 printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2704 event->event, qp_info->qp->qp_num);
2705}
2706
2707static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2708 struct ib_mad_queue *mad_queue)
2709{
2710 mad_queue->qp_info = qp_info;
2711 mad_queue->count = 0;
2712 spin_lock_init(&mad_queue->lock);
2713 INIT_LIST_HEAD(&mad_queue->list);
2714}
2715
2716static void init_mad_qp(struct ib_mad_port_private *port_priv,
2717 struct ib_mad_qp_info *qp_info)
2718{
2719 qp_info->port_priv = port_priv;
2720 init_mad_queue(qp_info, &qp_info->send_queue);
2721 init_mad_queue(qp_info, &qp_info->recv_queue);
2722 INIT_LIST_HEAD(&qp_info->overflow_list);
2723 spin_lock_init(&qp_info->snoop_lock);
2724 qp_info->snoop_table = NULL;
2725 qp_info->snoop_table_size = 0;
2726 atomic_set(&qp_info->snoop_count, 0);
2727}
2728
2729static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2730 enum ib_qp_type qp_type)
2731{
2732 struct ib_qp_init_attr qp_init_attr;
2733 int ret;
2734
2735 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2736 qp_init_attr.send_cq = qp_info->port_priv->cq;
2737 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2738 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2739 qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2740 qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2741 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2742 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2743 qp_init_attr.qp_type = qp_type;
2744 qp_init_attr.port_num = qp_info->port_priv->port_num;
2745 qp_init_attr.qp_context = qp_info;
2746 qp_init_attr.event_handler = qp_event_handler;
2747 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2748 if (IS_ERR(qp_info->qp)) {
2749 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2750 get_spl_qp_index(qp_type));
2751 ret = PTR_ERR(qp_info->qp);
2752 goto error;
2753 }
2754
2755 qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
2756 qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
2757 return 0;
2758
2759error:
2760 return ret;
2761}
2762
2763static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2764{
2765 ib_destroy_qp(qp_info->qp);
2766 kfree(qp_info->snoop_table);
2767}
2768
2769/*
2770 * Open the port
2771 * Create the QP, PD, MR, and CQ if needed
2772 */
2773static int ib_mad_port_open(struct ib_device *device,
2774 int port_num)
2775{
2776 int ret, cq_size;
2777 struct ib_mad_port_private *port_priv;
2778 unsigned long flags;
2779 char name[sizeof "ib_mad123"];
2780
2781	/* Create new device info */
2782 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2783 if (!port_priv) {
2784 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2785 return -ENOMEM;
2786 }
2787
2788 port_priv->device = device;
2789 port_priv->port_num = port_num;
2790 spin_lock_init(&port_priv->reg_lock);
2791 INIT_LIST_HEAD(&port_priv->agent_list);
2792 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2793 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2794
2795 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2796 port_priv->cq = ib_create_cq(port_priv->device,
2797 ib_mad_thread_completion_handler,
2798 NULL, port_priv, cq_size, 0);
2799 if (IS_ERR(port_priv->cq)) {
2800 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2801 ret = PTR_ERR(port_priv->cq);
2802 goto error3;
2803 }
2804
2805 port_priv->pd = ib_alloc_pd(device);
2806 if (IS_ERR(port_priv->pd)) {
2807 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2808 ret = PTR_ERR(port_priv->pd);
2809 goto error4;
2810 }
2811
2812 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2813 if (IS_ERR(port_priv->mr)) {
2814 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2815 ret = PTR_ERR(port_priv->mr);
2816 goto error5;
2817 }
2818
2819 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2820 if (ret)
2821 goto error6;
2822 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2823 if (ret)
2824 goto error7;
2825
2826 snprintf(name, sizeof name, "ib_mad%d", port_num);
2827 port_priv->wq = create_singlethread_workqueue(name);
2828 if (!port_priv->wq) {
2829 ret = -ENOMEM;
2830 goto error8;
2831 }
2832 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2833
2834 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2835 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2836 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2837
2838 ret = ib_mad_port_start(port_priv);
2839 if (ret) {
2840 printk(KERN_ERR PFX "Couldn't start port\n");
2841 goto error9;
2842 }
2843
2844 return 0;
2845
2846error9:
2847 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2848 list_del_init(&port_priv->port_list);
2849 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2850
2851 destroy_workqueue(port_priv->wq);
2852error8:
2853 destroy_mad_qp(&port_priv->qp_info[1]);
2854error7:
2855 destroy_mad_qp(&port_priv->qp_info[0]);
2856error6:
2857 ib_dereg_mr(port_priv->mr);
2858error5:
2859 ib_dealloc_pd(port_priv->pd);
2860error4:
2861 ib_destroy_cq(port_priv->cq);
2862 cleanup_recv_queue(&port_priv->qp_info[1]);
2863 cleanup_recv_queue(&port_priv->qp_info[0]);
2864error3:
2865 kfree(port_priv);
2866
2867 return ret;
2868}
2869
2870/*
2871 * Close the port
2872 * If there are no classes using the port, free the port
2873 * resources (CQ, MR, PD, QP) and remove the port's info structure
2874 */
2875static int ib_mad_port_close(struct ib_device *device, int port_num)
2876{
2877 struct ib_mad_port_private *port_priv;
2878 unsigned long flags;
2879
2880 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2881 port_priv = __ib_get_mad_port(device, port_num);
2882 if (port_priv == NULL) {
2883 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2884 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2885 return -ENODEV;
2886 }
2887 list_del_init(&port_priv->port_list);
2888 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2889
2890 destroy_workqueue(port_priv->wq);
2891 destroy_mad_qp(&port_priv->qp_info[1]);
2892 destroy_mad_qp(&port_priv->qp_info[0]);
2893 ib_dereg_mr(port_priv->mr);
2894 ib_dealloc_pd(port_priv->pd);
2895 ib_destroy_cq(port_priv->cq);
2896 cleanup_recv_queue(&port_priv->qp_info[1]);
2897 cleanup_recv_queue(&port_priv->qp_info[0]);
2898
2899
2900 kfree(port_priv);
2901
2902 return 0;
2903}
2904
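/*
 * Called by the IB core for each device added.  Open MAD and agent
 * services on every physical port (switches use port 0 only); on
 * failure, close whatever was already opened.
 */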
2905static void ib_mad_init_device(struct ib_device *device)
2906{
2907 int start, end, i;
2908
2909 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2910 return;
2911
2912 if (device->node_type == RDMA_NODE_IB_SWITCH) {
2913 start = 0;
2914 end = 0;
2915 } else {
2916 start = 1;
2917 end = device->phys_port_cnt;
2918 }
2919
2920 for (i = start; i <= end; i++) {
2921 if (ib_mad_port_open(device, i)) {
2922 printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2923 device->name, i);
2924 goto error;
2925 }
2926 if (ib_agent_port_open(device, i)) {
2927 printk(KERN_ERR PFX "Couldn't open %s port %d "
2928 "for agents\n",
2929 device->name, i);
2930 goto error_agent;
2931 }
2932 }
2933 return;
2934
2935error_agent:
2936 if (ib_mad_port_close(device, i))
2937 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2938 device->name, i);
2939
2940error:
2941 i--;
2942
2943 while (i >= start) {
2944 if (ib_agent_port_close(device, i))
2945 printk(KERN_ERR PFX "Couldn't close %s port %d "
2946 "for agents\n",
2947 device->name, i);
2948 if (ib_mad_port_close(device, i))
2949 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2950 device->name, i);
2951 i--;
2952 }
2953}
2954
2955static void ib_mad_remove_device(struct ib_device *device)
2956{
2957 int i, num_ports, cur_port;
2958
2959 if (device->node_type == RDMA_NODE_IB_SWITCH) {
2960 num_ports = 1;
2961 cur_port = 0;
2962 } else {
2963 num_ports = device->phys_port_cnt;
2964 cur_port = 1;
2965 }
2966 for (i = 0; i < num_ports; i++, cur_port++) {
2967 if (ib_agent_port_close(device, cur_port))
2968 printk(KERN_ERR PFX "Couldn't close %s port %d "
2969 "for agents\n",
2970 device->name, cur_port);
2971 if (ib_mad_port_close(device, cur_port))
2972 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2973 device->name, cur_port);
2974 }
2975}
2976
2977static struct ib_client mad_client = {
2978 .name = "mad",
2979 .add = ib_mad_init_device,
2980 .remove = ib_mad_remove_device
2981};
2982
2983static int __init ib_mad_init_module(void)
2984{
2985 int ret;
2986
2987 spin_lock_init(&ib_mad_port_list_lock);
2988
2989 ib_mad_cache = kmem_cache_create("ib_mad",
2990 sizeof(struct ib_mad_private),
2991 0,
2992 SLAB_HWCACHE_ALIGN,
2993 NULL);
2994 if (!ib_mad_cache) {
2995 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
2996 ret = -ENOMEM;
2997 goto error1;
2998 }
2999
3000 INIT_LIST_HEAD(&ib_mad_port_list);
3001
3002 if (ib_register_client(&mad_client)) {
3003 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
3004 ret = -EINVAL;
3005 goto error2;
3006 }
3007
3008 return 0;
3009
3010error2:
3011 kmem_cache_destroy(ib_mad_cache);
3012error1:
3013 return ret;
3014}
3015
3016static void __exit ib_mad_cleanup_module(void)
3017{
3018 ib_unregister_client(&mad_client);
3019 kmem_cache_destroy(ib_mad_cache);
3020}
3021
3022module_init(ib_mad_init_module);
3023module_exit(ib_mad_cleanup_module);
3024