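/*
 * QLogic InfiniPath driver (ib_ipath) core device support: PCI
 * probe/teardown, PIO send-buffer management, receive header queue
 * processing, and IB link-state control.
 */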
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"

static void ipath_update_pio_bufs(struct ipath_devdata *);

/* note: returns a pointer to a static buffer, so it is not reentrant */
const char *ipath_get_unit_name(int unit)
{
	static char iname[16];
	snprintf(iname, sizeof iname, "infinipath%u", unit);
	return iname;
}

#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "

const char ib_ipath_version[] = IPATH_IDSTR "\n";

static struct idr unit_table;
DEFINE_SPINLOCK(ipath_devs_lock);
LIST_HEAD(ipath_dev_list);

wait_queue_head_t ipath_state_wait;

unsigned ipath_debug = __IPATH_INFO;

module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "mask for debug prints");
EXPORT_SYMBOL_GPL(ipath_debug);

unsigned ipath_mtu4096 = 1;
module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");

static unsigned ipath_hol_timeout_ms = 13000;
module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
	"duration of user app suspension after link failure");

unsigned ipath_linkrecovery = 1;
module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic <support@qlogic.com>");
MODULE_DESCRIPTION("QLogic InfiniPath driver");
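
/*
 * Human-readable names for the IB link/link-training states reported
 * in kr_ibcstatus; indexed by the link-training-state field (see the
 * uses in ipath_wait_linkstate() and ipath_set_ib_lstate() below).
 */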
const char *ipath_ibcstatus_str[] = {
	"Disabled",
	"LinkUp",
	"PollActive",
	"PollQuiet",
	"SleepDelay",
	"SleepQuiet",
	"LState6",
	"LState7",
	"CfgDebounce",
	"CfgRcvfCfg",
	"CfgWaitRmt",
	"CfgIdle",
	"RecovRetrain",
	"CfgTxRevLane",
	"RecovWaitRmt",
	"RecovIdle",
	/* states 0x10 and above: extended link training, newer chips only */
	"CfgEnhanced",
	"CfgTest",
	"CfgWaitRmtTest",
	"CfgWaitCfgEnhanced",
	"SendTS_T",
	"SendTstIdles",
	"RcvTS_T",
	"SendTst_TS1s",
	"LTState18", "LTState19", "LTState1A", "LTState1B",
	"LTState1C", "LTState1D", "LTState1E", "LTState1F"
};

static void __devexit ipath_remove_one(struct pci_dev *);
static int __devinit ipath_init_one(struct pci_dev *,
				    const struct pci_device_id *);

/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_VENDOR_ID_QLOGIC 0x1077
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220

#define STATUS_TIMEOUT 60

static const struct pci_device_id ipath_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);

static struct pci_driver ipath_driver = {
	.name = IPATH_DRV_NAME,
	.probe = ipath_init_one,
	.remove = __devexit_p(ipath_remove_one),
	.id_table = ipath_pci_tbl,
	.driver = {
		.groups = ipath_driver_attr_groups,
	},
};

static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
			     u32 *bar0, u32 *bar1)
{
	int ret;

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
	if (ret)
		ipath_dev_err(dd, "failed to read bar0 before enable: "
			      "error %d\n", -ret);

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
	if (ret)
		ipath_dev_err(dd, "failed to read bar1 before enable: "
			      "error %d\n", -ret);

	ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
}

static void ipath_free_devdata(struct pci_dev *pdev,
			       struct ipath_devdata *dd)
{
	unsigned long flags;

	pci_set_drvdata(pdev, NULL);

	if (dd->ipath_unit != -1) {
		spin_lock_irqsave(&ipath_devs_lock, flags);
		idr_remove(&unit_table, dd->ipath_unit);
		list_del(&dd->ipath_list);
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
	}
	vfree(dd);
}
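
/*
 * Allocate per-device data, assign a unit number via the IDR, and add
 * the device to the global list, all under ipath_devs_lock.
 */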
static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
{
	unsigned long flags;
	struct ipath_devdata *dd;
	int ret;

	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dd = vmalloc(sizeof(*dd));
	if (!dd) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}
	memset(dd, 0, sizeof(*dd));
	dd->ipath_unit = -1;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate unit ID: error %d\n", -ret);
		ipath_free_devdata(pdev, dd);
		dd = ERR_PTR(ret);
		goto bail_unlock;
	}

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	list_add(&dd->ipath_list, &ipath_dev_list);

bail_unlock:
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

bail:
	return dd;
}

/* caller must hold ipath_devs_lock */
static inline struct ipath_devdata *__ipath_lookup(int unit)
{
	return idr_find(&unit_table, unit);
}

struct ipath_devdata *ipath_lookup(int unit)
{
	struct ipath_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&ipath_devs_lock, flags);
	dd = __ipath_lookup(unit);
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	return dd;
}
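
/**
 * ipath_count_units - count known units
 * @npresentp: if not NULL, receives the number of units present (mapped)
 * @nupp: if not NULL, receives the number of units with a live link
 * @maxportsp: if not NULL, receives the largest configured port count
 *
 * Returns the total number of units on the device list.
 */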
int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
{
	int nunits, npresent, nup;
	struct ipath_devdata *dd;
	unsigned long flags;
	int maxports;

	nunits = npresent = nup = maxports = 0;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
		nunits++;
		if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
			npresent++;
		if (dd->ipath_lid &&
		    !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
					 | IPATH_LINKUNK)))
			nup++;
		if (dd->ipath_cfgports > maxports)
			maxports = dd->ipath_cfgports;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;
	if (maxportsp)
		*maxportsp = maxports;

	return nunits;
}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
{
}
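
/*
 * Perform a simple PIO-buffer bandwidth check: time repeated copies to
 * a PIO buffer and complain if the rate looks too low, which often
 * indicates that write combining is not working as expected.
 */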
static void ipath_verify_pioperf(struct ipath_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = ipath_getpiobuf(dd, 0, &pbnum);
	if (!piobuf) {
		dev_info(&dd->pcidev->dev,
			"No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr) {
		dev_info(&dd->pcidev->dev,
			"Couldn't get memory for checking PIO perf,"
			" skipping\n");
		goto done;
	}

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until the msec counter ticks over */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	ipath_disable_armlaunch(dd);

	/*
	 * length 0, no dwords actually sent, and mark as VL15
	 * on chips where that may matter (due to IB flowcontrol)
	 */
	if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
		writeq(1UL << 63, piobuf);
	else
		writeq(0, piobuf);
	ipath_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt disabled
	 * we still take interrupts that could take a while.  Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		__iowrite32_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	/* 1 KiB per iteration, so lcnt/emsecs is roughly MiB/sec */
	if (lcnt < (emsecs * 1024U))
		ipath_dev_err(dd,
			"Performance problem: bandwidth to PIO buffers is "
			"only %u MiB/sec\n",
			lcnt / (u32) emsecs);
	else
		ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
			lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	ipath_disarm_piobufs(dd, pbnum, 1);
	ipath_enable_armlaunch(dd);
}
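
/*
 * PCI probe routine: set up a newly found InfiniPath device and make
 * it available to user processes, the verbs layer, and diagnostics.
 */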
static int __devinit ipath_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int ret, len, j;
	struct ipath_devdata *dd;
	unsigned long long addr;
	u32 bar0 = 0, bar1 = 0;
	u8 rev;

	dd = ipath_alloc_devdata(pdev);
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate devdata: error %d\n", -ret);
		goto bail;
	}

	ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);

	ret = pci_enable_device(pdev);
	if (ret) {
		/*
		 * This can happen if we did a chip reset and then failed
		 * to reprogram the BAR, or the chip reset itself; both
		 * cases leave the BAR in its initial (zero) state, which
		 * the rewrite logic below tries to recover from.
		 */
		ipath_dev_err(dd, "enable unit %d failed: error %d\n",
			      dd->ipath_unit, -ret);
		goto bail_devdata;
	}
	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
		   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
		   ent->device, ent->driver_data);

	read_bars(dd, pdev, &bar0, &bar1);

	if (!bar1 && !(bar0 & ~0xf)) {
		if (addr) {
			dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
				 "rewriting as %llx\n", addr);
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_0, addr);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR0 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_1, addr >> 32);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR1 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
		} else {
			ipath_dev_err(dd, "BAR is 0 (probable RESET), "
				      "not usable until reboot\n");
			ret = -ENODEV;
			goto bail_disable;
		}
	}

	ret = pci_request_regions(pdev, IPATH_DRV_NAME);
	if (ret) {
		dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
			 "err %d\n", dd->ipath_unit, -ret);
		goto bail_disable;
	}

	ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (ret) {
		/*
		 * If the 64 bit setup fails, try 32 bit.  Some systems
		 * do not setup 64 bit maps on systems with 2GB or less
		 * memory installed.
		 */
		ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (ret) {
			dev_info(&pdev->dev,
				 "Unable to set DMA mask for unit %u: %d\n",
				 dd->ipath_unit, ret);
			goto bail_regions;
		} else {
			ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
			ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (ret)
				dev_info(&pdev->dev,
					 "Unable to set DMA consistent mask "
					 "for unit %u: %d\n",
					 dd->ipath_unit, ret);
		}
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (ret)
			dev_info(&pdev->dev,
				 "Unable to set DMA consistent mask "
				 "for unit %u: %d\n",
				 dd->ipath_unit, ret);
	}

	pci_set_master(pdev);

	/*
	 * Save BARs to rewrite after device reset.  Save all 64 bits of
	 * BAR, just in case.
	 */
	dd->ipath_pcibar0 = addr;
	dd->ipath_pcibar1 = addr >> 32;
	dd->ipath_deviceid = ent->device;
	dd->ipath_vendorid = ent->vendor;

	/* setup the chip-specific functions, as early as possible */
	switch (ent->device) {
	case PCI_DEVICE_ID_INFINIPATH_HT:
#ifdef CONFIG_HT_IRQ
		ipath_init_iba6110_funcs(dd);
		break;
#else
		ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
			      "CONFIG_HT_IRQ is not enabled\n", ent->device);
		/* unwind PCI setup rather than leaking it */
		ret = -ENODEV;
		goto bail_regions;
#endif
	case PCI_DEVICE_ID_INFINIPATH_PE800:
#ifdef CONFIG_PCI_MSI
		ipath_init_iba6120_funcs(dd);
		break;
#else
		ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
			      "CONFIG_PCI_MSI is not enabled\n", ent->device);
		ret = -ENODEV;
		goto bail_regions;
#endif
	case PCI_DEVICE_ID_INFINIPATH_7220:
#ifndef CONFIG_PCI_MSI
		ipath_dbg("CONFIG_PCI_MSI is not enabled, "
			  "using INTx for unit %u\n", dd->ipath_unit);
#endif
		ipath_init_iba7220_funcs(dd);
		break;
	default:
		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
			      "failing\n", ent->device);
		ret = -ENODEV;
		goto bail_regions;
	}

	for (j = 0; j < 6; j++) {
		if (!pdev->resource[j].start)
			continue;
		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
			   j, (unsigned long long)pdev->resource[j].start,
			   (unsigned long long)pdev->resource[j].end,
			   (unsigned long long)pci_resource_len(pdev, j));
	}

	if (!addr) {
		ipath_dev_err(dd, "No valid address in BAR 0!\n");
		ret = -ENODEV;
		goto bail_regions;
	}

	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
	if (ret) {
		ipath_dev_err(dd, "Failed to read PCI revision ID unit "
			      "%u: err %d\n", dd->ipath_unit, -ret);
		goto bail_regions;
	}
	dd->ipath_pcirev = rev;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	dd->ipath_kregbase = __ioremap(addr, len,
				       (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
#else
	dd->ipath_kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->ipath_kregbase) {
		ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
			  addr);
		ret = -ENOMEM;
		goto bail_iounmap;
	}
	dd->ipath_kregend = (u64 __iomem *)
		((void __iomem *)dd->ipath_kregbase + len);
	dd->ipath_physaddr = addr;

	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
		   addr, dd->ipath_kregbase);

	if (dd->ipath_f_bus(dd, pdev))
		ipath_dev_err(dd, "Failed to setup config space; "
			      "continuing anyway\n");

	/*
	 * Set up our interrupt handler.  IRQF_SHARED probably isn't
	 * needed, since MSI interrupts shouldn't be shared, but it
	 * doesn't hurt for now.  Check for a zero irq only after the
	 * chip-specific bus setup, since that can affect it.
	 */
	if (!dd->ipath_irq)
		ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
			      "work\n");
	else {
		ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
				  IPATH_DRV_NAME, dd);
		if (ret) {
			ipath_dev_err(dd, "Couldn't setup irq handler, "
				      "irq=%d: %d\n", dd->ipath_irq, ret);
			goto bail_iounmap;
		}
	}

	ret = ipath_init_chip(dd, 0);
	if (ret)
		goto bail_irqsetup;

	ret = ipath_enable_wc(dd);
	if (ret) {
		ipath_dev_err(dd, "Write combining not enabled "
			      "(err %d): performance may be poor\n",
			      -ret);
		ret = 0;
	}

	ipath_verify_pioperf(dd);

	ipath_device_create_group(&pdev->dev, dd);
	ipathfs_add_device(dd);
	ipath_user_add(dd);
	ipath_diag_add(dd);
	ipath_register_ib_device(dd);

	goto bail;

bail_irqsetup:
	if (pdev->irq)
		free_irq(pdev->irq, dd);

bail_iounmap:
	iounmap((volatile void __iomem *) dd->ipath_kregbase);

bail_regions:
	pci_release_regions(pdev);

bail_disable:
	pci_disable_device(pdev);

bail_devdata:
	ipath_free_devdata(pdev, dd);

bail:
	return ret;
}
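
/*
 * Clean up and free all per-device data structures; called from
 * ipath_remove_one after the device has been shut down.
 */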
static void __devexit cleanup_device(struct ipath_devdata *dd)
{
	int port;

	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
		/* can't do anything more with chip; needs re-init */
		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
		if (dd->ipath_kregbase) {
			/*
			 * clear these so any register reads/writes "fail"
			 * until re-init
			 */
			dd->ipath_kregbase = NULL;
			dd->ipath_uregbase = 0;
			dd->ipath_sregbase = 0;
			dd->ipath_cregbase = 0;
			dd->ipath_kregsize = 0;
		}
		ipath_disable_wc(dd);
	}

	if (dd->ipath_spectriggerhit)
		dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
			 dd->ipath_spectriggerhit);

	if (dd->ipath_pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->ipath_pioavailregs_dma,
				  dd->ipath_pioavailregs_phys);
		dd->ipath_pioavailregs_dma = NULL;
	}
	if (dd->ipath_dummy_hdrq) {
		dma_free_coherent(&dd->pcidev->dev,
			dd->ipath_pd[0]->port_rcvhdrq_size,
			dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
		dd->ipath_dummy_hdrq = NULL;
	}

	if (dd->ipath_pageshadow) {
		struct page **tmpp = dd->ipath_pageshadow;
		dma_addr_t *tmpd = dd->ipath_physshadow;
		int i, cnt = 0;

		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
			   "locked\n");
		for (port = 0; port < dd->ipath_cfgports; port++) {
			int port_tidbase = port * dd->ipath_rcvtidcnt;
			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
			for (i = port_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				ipath_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}
		if (cnt) {
			ipath_stats.sps_pageunlocks += cnt;
			ipath_cdbg(VERBOSE, "There were still %u expTID "
				   "entries locked\n", cnt);
		}
		if (ipath_stats.sps_pagelocks ||
		    ipath_stats.sps_pageunlocks)
			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
				   "unlocked via ipath_m{un}lock\n",
				   (unsigned long long)
				   ipath_stats.sps_pagelocks,
				   (unsigned long long)
				   ipath_stats.sps_pageunlocks);

		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
			   dd->ipath_pageshadow);
		tmpp = dd->ipath_pageshadow;
		dd->ipath_pageshadow = NULL;
		vfree(tmpp);

		dd->ipath_egrtidbase = NULL;
	}

	/*
	 * free any resources still in use (usually just kernel ports)
	 * at unload; we do for portcnt, not cfgports, because cfgports
	 * could have changed while we were loaded.
	 */
	for (port = 0; port < dd->ipath_portcnt; port++) {
		struct ipath_portdata *pd = dd->ipath_pd[port];
		dd->ipath_pd[port] = NULL;
		ipath_free_pddata(dd, pd);
	}
	kfree(dd->ipath_pd);
	/*
	 * debuggability, in case some cleanup path tries to use it
	 * after this
	 */
	dd->ipath_pd = NULL;
}

static void __devexit ipath_remove_one(struct pci_dev *pdev)
{
	struct ipath_devdata *dd = pci_get_drvdata(pdev);

	ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);

	/*
	 * disable the IB link early, to be sure no new packets arrive,
	 * which complicates the shutdown process
	 */
	ipath_shutdown_device(dd);

	flush_scheduled_work();

	if (dd->verbs_dev)
		ipath_unregister_ib_device(dd->verbs_dev);

	ipath_diag_remove(dd);
	ipath_user_remove(dd);
	ipathfs_remove_device(dd);
	ipath_device_remove_group(&pdev->dev, dd);

	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
		   "unit %u\n", dd, (u32) dd->ipath_unit);

	cleanup_device(dd);

	/*
	 * free the irq only after the device data is cleaned up, so that
	 * no interrupt handler can run against partially-freed state
	 */
	if (dd->ipath_irq) {
		ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
			   dd->ipath_unit, dd->ipath_irq);
		dd->ipath_f_free_irq(dd);
	} else
		ipath_dbg("irq is 0, not doing free_irq "
			  "for unit %u\n", dd->ipath_unit);

	/*
	 * we check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->ipath_f_cleanup)
		/* clean up chip-specific stuff */
		dd->ipath_f_cleanup(dd);

	ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
	iounmap((volatile void __iomem *) dd->ipath_kregbase);
	pci_release_regions(pdev);
	ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
	pci_disable_device(pdev);

	ipath_free_devdata(pdev, dd);
}

/* general driver use */
DEFINE_MUTEX(ipath_mutex);

static DEFINE_SPINLOCK(ipath_pioavail_lock);
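
/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Used to cancel buffers that might be armed but not triggered, e.g.
 * at init to ensure known buffer state, when a user process closes,
 * and after send errors.
 */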
void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
			  unsigned cnt)
{
	unsigned i, last = first + cnt;
	unsigned long flags;

	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
	for (i = first; i < last; i++) {
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		/*
		 * The disarm-related bits are write-only, so it
		 * is ok to OR them in with our copy of sendctrl
		 * while we hold the lock.
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl | INFINIPATH_S_DISARM |
			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
		/* can't wait for kreg update before using */
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}
	/* on some older chips, update may not happen after cancel */
	ipath_force_pio_avail_update(dd);
}
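
/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the state (flag bits) to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to @msecs milliseconds for the link to reach a state whose
 * flag is set in @state (multiple flags may be set, for any of several
 * transitions).  Returns 0 if a wanted state was reached, otherwise
 * -ETIMEDOUT.
 */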
int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
{
	dd->ipath_state_wanted = state;
	wait_event_interruptible_timeout(ipath_state_wait,
					 (dd->ipath_flags & state),
					 msecs_to_jiffies(msecs));
	dd->ipath_state_wanted = 0;

	if (!(dd->ipath_flags & state)) {
		u64 val;
		ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
			   " ms\n",
			   /* test INIT ahead of DOWN, both can be set */
			   (state & IPATH_LINKINIT) ? "INIT" :
			   ((state & IPATH_LINKDOWN) ? "DOWN" :
			    ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
			   msecs);
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
		ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
			   (unsigned long long) ipath_read_kreg64(
				   dd, dd->ipath_kregs->kr_ibcctrl),
			   (unsigned long long) val,
			   ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
	}
	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}

static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
			     char *buf, size_t blen)
{
	static const struct {
		ipath_err_t err;
		const char *msg;
	} errs[] = {
		{ INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
		{ INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
		{ INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
		{ INFINIPATH_E_SDMABASE, "SDmaBase" },
		{ INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
		{ INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
		{ INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
		{ INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
		{ INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
		{ INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
		{ INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
		{ INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
	};
	int i;
	int expected;
	size_t bidx = 0;

	for (i = 0; i < ARRAY_SIZE(errs); i++) {
		/* SDmaDisabled is expected while aborting, so skip it then */
		expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
			test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
		if ((err & errs[i].err) && !expected)
			bidx += snprintf(buf + bidx, blen - bidx,
					 "%s ", errs[i].msg);
	}
}
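
/*
 * Decode the error status into strings, deciding whether to always
 * print it or not, depending on "normal packet errors" vs everything
 * else.  Returns 1 if "real" errors, otherwise 0 if only packet
 * errors, so the caller can decide what to print with the string.
 */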
int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
	ipath_err_t err)
{
	int iserr = 1;
	*buf = '\0';
	if (err & INFINIPATH_E_PKTERRS) {
		if (!(err & ~INFINIPATH_E_PKTERRS))
			iserr = 0;	/* only "normal" packet errors */
		if (ipath_debug & __IPATH_ERRPKTDBG) {
			if (err & INFINIPATH_E_REBP)
				strlcat(buf, "EBP ", blen);
			if (err & INFINIPATH_E_RVCRC)
				strlcat(buf, "VCRC ", blen);
			if (err & INFINIPATH_E_RICRC) {
				strlcat(buf, "CRC ", blen);
				/* clear for check below, so only once */
				err &= INFINIPATH_E_RICRC;
			}
			if (err & INFINIPATH_E_RSHORTPKTLEN)
				strlcat(buf, "rshortpktlen ", blen);
			if (err & INFINIPATH_E_SDROPPEDDATAPKT)
				strlcat(buf, "sdroppeddatapkt ", blen);
			if (err & INFINIPATH_E_SPKTLEN)
				strlcat(buf, "spktlen ", blen);
		}
		if ((err & INFINIPATH_E_RICRC) &&
			!(err & (INFINIPATH_E_RVCRC | INFINIPATH_E_REBP)))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & INFINIPATH_E_RHDRLEN)
		strlcat(buf, "rhdrlen ", blen);
	if (err & INFINIPATH_E_RBADTID)
		strlcat(buf, "rbadtid ", blen);
	if (err & INFINIPATH_E_RBADVERSION)
		strlcat(buf, "rbadversion ", blen);
	if (err & INFINIPATH_E_RHDR)
		strlcat(buf, "rhdr ", blen);
	if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
		strlcat(buf, "sendspecialtrigger ", blen);
	if (err & INFINIPATH_E_RLONGPKTLEN)
		strlcat(buf, "rlongpktlen ", blen);
	if (err & INFINIPATH_E_RMAXPKTLEN)
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & INFINIPATH_E_RMINPKTLEN)
		strlcat(buf, "rminpktlen ", blen);
	if (err & INFINIPATH_E_SMINPKTLEN)
		strlcat(buf, "sminpktlen ", blen);
	if (err & INFINIPATH_E_RFORMATERR)
		strlcat(buf, "rformaterr ", blen);
	if (err & INFINIPATH_E_RUNSUPVL)
		strlcat(buf, "runsupvl ", blen);
	if (err & INFINIPATH_E_RUNEXPCHAR)
		strlcat(buf, "runexpchar ", blen);
	if (err & INFINIPATH_E_RIBFLOW)
		strlcat(buf, "ribflow ", blen);
	if (err & INFINIPATH_E_SUNDERRUN)
		strlcat(buf, "sunderrun ", blen);
	if (err & INFINIPATH_E_SPIOARMLAUNCH)
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & INFINIPATH_E_SDROPPEDSMPPKT)
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & INFINIPATH_E_SMAXPKTLEN)
		strlcat(buf, "smaxpktlen ", blen);
	if (err & INFINIPATH_E_SUNSUPVL)
		strlcat(buf, "sunsupVL ", blen);
	if (err & INFINIPATH_E_INVALIDADDR)
		strlcat(buf, "invalidaddr ", blen);
	if (err & INFINIPATH_E_RRCVEGRFULL)
		strlcat(buf, "rcvegrfull ", blen);
	if (err & INFINIPATH_E_RRCVHDRFULL)
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & INFINIPATH_E_IBSTATUSCHANGED)
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & INFINIPATH_E_RIBLOSTLINK)
		strlcat(buf, "riblostlink ", blen);
	if (err & INFINIPATH_E_HARDWARE)
		strlcat(buf, "hardware ", blen);
	if (err & INFINIPATH_E_RESET)
		strlcat(buf, "reset ", blen);
	if (err & INFINIPATH_E_SDMAERRS)
		decode_sdma_errs(dd, err, buf, blen);
	if (err & INFINIPATH_E_INVALIDEEPCMD)
		strlcat(buf, "invalideepromcmd ", blen);
done:
	return iserr;
}
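
/**
 * get_rhf_errstring - decode receive header flag (RHF) errors
 * @err: the error flags from the receive header
 * @msg: the output message buffer
 * @len: the length of the output message buffer
 */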
static void get_rhf_errstring(u32 err, char *msg, size_t len)
{
	/* if no errors, nothing is appended */
	*msg = '\0';

	if (err & INFINIPATH_RHF_H_ICRCERR)
		strlcat(msg, "icrcerr ", len);
	if (err & INFINIPATH_RHF_H_VCRCERR)
		strlcat(msg, "vcrcerr ", len);
	if (err & INFINIPATH_RHF_H_PARITYERR)
		strlcat(msg, "parityerr ", len);
	if (err & INFINIPATH_RHF_H_LENERR)
		strlcat(msg, "lenerr ", len);
	if (err & INFINIPATH_RHF_H_MTUERR)
		strlcat(msg, "mtuerr ", len);
	if (err & INFINIPATH_RHF_H_IHDRERR)
		/* infinipath hdr checksum error */
		strlcat(msg, "ipathhdrerr ", len);
	if (err & INFINIPATH_RHF_H_TIDERR)
		strlcat(msg, "tiderr ", len);
	if (err & INFINIPATH_RHF_H_MKERR)
		/* bad port, offset, etc. */
		strlcat(msg, "invalid ipathhdr ", len);
	if (err & INFINIPATH_RHF_H_IBERR)
		strlcat(msg, "iberr ", len);
	if (err & INFINIPATH_RHF_L_SWA)
		strlcat(msg, "swA ", len);
	if (err & INFINIPATH_RHF_L_SWB)
		strlcat(msg, "swB ", len);
}
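
/*
 * ipath_get_egrbuf - get an eager buffer by index.  Returns the data
 * pointer of the port-0 eager skb, or NULL if the skb array isn't
 * allocated.
 */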
static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
{
	return dd->ipath_port0_skbinfo ?
		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
}
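
/**
 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
 * @dd: the infinipath device
 * @gfp_mask: the sk_buff allocation mask
 */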
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
				gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 len;

	/*
	 * Allocate 4 extra bytes and reserve them, so the payload stays
	 * dword aligned after the skb_reserve(4) below.
	 */
	len = dd->ipath_ibmaxlen + 4;

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		/* We need a 2KB multiple alignment, and there is no way
		 * to do it except to allocate extra and then skb_reserve
		 * enough to bring it up to the right alignment.
		 */
		len += 2047;
	}

	skb = __dev_alloc_skb(len, gfp_mask);
	if (!skb) {
		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
			      len);
		goto bail;
	}

	skb_reserve(skb, 4);

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		u32 una = (unsigned long)skb->data & 2047;
		if (una)
			skb_reserve(skb, 2048 - una);
	}

bail:
	return skb;
}

static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
			     u32 eflags,
			     u32 l,
			     u32 etail,
			     __le32 *rhf_addr,
			     struct ipath_message_header *hdr)
{
	char emsg[128];

	get_rhf_errstring(eflags, emsg, sizeof emsg);
	ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
		   "tlen=%x opcode=%x egridx=%x: %s\n",
		   eflags, l,
		   ipath_hdrget_rcv_type(rhf_addr),
		   ipath_hdrget_length_in_bytes(rhf_addr),
		   be32_to_cpu(hdr->bth[0]) >> 24,
		   etail, emsg);

	/* Count local link integrity errors. */
	if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
		u8 n = (dd->ipath_ibcctrl >>
			INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
			INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;

		if (++dd->ipath_lli_counter > n) {
			dd->ipath_lli_counter = 0;
			dd->ipath_lli_errors++;
		}
	}
}
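
/*
 * ipath_kreceive - receive a packet
 * @pd: the infinipath port
 *
 * Process all available packets in the port's receive header queue.
 */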
void ipath_kreceive(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->ipath_rcvhdrentsize;
	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;
	u32 etail = -1, l, hdrqtail;
	struct ipath_message_header *hdr;
	u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
	static u64 totcalls;
	int last;

	l = pd->port_head;
	rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
		u32 seq = ipath_hdrget_seq(rhf_addr);

		if (seq != pd->port_seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = ipath_get_rcvhdrtail(pd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();	/* prevent speculative reads of dma'ed hdrq */
	}

reloop:
	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
		eflags = ipath_hdrget_err_flags(rhf_addr);
		etype = ipath_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = ipath_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
		    ipath_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			/*
			 * It turns out that the chip uses an eager buffer
			 * for all non-expected packets, whether it "needs"
			 * one or not.  So always get the index, but don't
			 * set ebuf (so we try to copy data) unless the
			 * length requires it.
			 */
			etail = ipath_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype == RCVHQ_RCV_TYPE_NON_KD)
				ebuf = ipath_get_egrbuf(dd, etail);
		}

		/*
		 * both tiderr and ipathhdrerr are set for all plain IB
		 * packets; only ipathhdrerr should be set.
		 */
		if (etype != RCVHQ_RCV_TYPE_NON_KD &&
		    etype != RCVHQ_RCV_TYPE_ERROR &&
		    ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
		    IPS_PROTO_VERSION)
			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
				   "%x\n", etype);

		if (unlikely(eflags))
			ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
			if (dd->ipath_lli_counter)
				dd->ipath_lli_counter--;
		} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
			u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
			u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
				   "qp=%x), len %x; ignored\n",
				   etype, opcode, qp, tlen);
		} else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
				  be32_to_cpu(hdr->bth[0]) >> 24);
		else {
			/*
			 * error packet, but type of error unknown.
			 * Probably type 3, but we don't know, so don't
			 * even try to print the opcode, etc.
			 * Usually caused by a "bad packet", that has no
			 * BTH, when the LRH says it should.
			 */
			ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
				   " %x, len %x hdrq+%x rhf: %Lx\n",
				   etail, tlen, l, (unsigned long long)
				   le64_to_cpu(*(__le64 *) rhf_addr));
			if (ipath_debug & __IPATH_ERRPKTDBG) {
				u32 j, *d, dw = rsize - 2;
				if (rsize > (tlen >> 2))
					dw = tlen >> 2;
				d = (u32 *)hdr;
				printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
				       dw);
				for (j = 0; j < dw; j++)
					printk(KERN_DEBUG "%8x%s", d[j],
					       (j % 8) == 7 ? "\n" : " ");
				printk(KERN_DEBUG ".\n");
			}
		}
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		rhf_addr = (__le32 *) pd->port_rcvhdrq +
			l + dd->ipath_rhf_offset;
		if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
			u32 seq = ipath_hdrget_seq(rhf_addr);

			if (++pd->port_seq_cnt > 13)
				pd->port_seq_cnt = 1;
			if (seq != pd->port_seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
		/*
		 * update head regs on last packet, and every 16 packets.
		 * Reduce bus traffic, while still trying to prevent
		 * rcvhdrq overflows, for when the queue is nearly full
		 */
		if (last || !(i & 0xf)) {
			u64 lval = l;

			/* request IBA6120 and 7220 interrupt only on last */
			if (last)
				lval |= dd->ipath_rhdrhead_intr_off;
			ipath_write_ureg(dd, ur_rcvhdrhead, lval,
					 pd->port_port);
			if (updegr) {
				ipath_write_ureg(dd, ur_rcvegrindexhead,
						 etail, pd->port_port);
				updegr = 0;
			}
		}
	}

	if (!dd->ipath_rhdrhead_intr_off && !reloop &&
	    !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
		/* IBA6110 workaround; we can have a race clearing chip
		 * interrupt with another interrupt about to be delivered,
		 * and can clear it before it is delivered on the GPIO
		 * workaround.  By doing the extra check here for the
		 * in-memory tail register updating while we were doing
		 * earlier packets, we "almost" guarantee we have covered
		 * that case.
		 */
		u32 hqtail = ipath_get_rcvhdrtail(pd);
		if (hqtail != hdrqtail) {
			hdrqtail = hqtail;
			reloop = 1;	/* loop 1 extra time at most */
			goto reloop;
		}
	}

	pkttot += i;

	pd->port_head = l;

	if (pkttot > ipath_stats.sps_maxpkts_call)
		ipath_stats.sps_maxpkts_call = pkttot;
	ipath_stats.sps_port0pkts += pkttot;
	ipath_stats.sps_avgpkts_call =
		ipath_stats.sps_port0pkts / ++totcalls;

bail:;
}
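
/*
 * ipath_update_pio_bufs - refresh the shadow copy of the PIO
 * availability registers from the chip's DMA'ed copy
 * @dd: the infinipath device
 *
 * Typically called when the shadow indicates no send buffers are free.
 */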
static void ipath_update_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long flags;
	int i;
	const unsigned piobregs = (unsigned)dd->ipath_pioavregs;

	/* If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomically, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple cpu's simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow.
	 */
	if (!dd->ipath_pioavailregs_dma) {
		ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
		return;
	}
	if (ipath_debug & __IPATH_VERBDBG) {
		/* only if packet debug and verbose */
		volatile __le64 *dma = dd->ipath_pioavailregs_dma;
		unsigned long *shadow = dd->ipath_pioavailshadow;

		ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
			   "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
			   "s3=%lx\n",
			   (unsigned long long) le64_to_cpu(dma[0]),
			   shadow[0],
			   (unsigned long long) le64_to_cpu(dma[1]),
			   shadow[1],
			   (unsigned long long) le64_to_cpu(dma[2]),
			   shadow[2],
			   (unsigned long long) le64_to_cpu(dma[3]),
			   shadow[3]);
		if (piobregs > 4)
			ipath_cdbg(
				PKT, "2nd group, dma4=%llx shad4=%lx, "
				"d5=%llx s5=%lx, d6=%llx s6=%lx, "
				"d7=%llx s7=%lx\n",
				(unsigned long long) le64_to_cpu(dma[4]),
				shadow[4],
				(unsigned long long) le64_to_cpu(dma[5]),
				shadow[5],
				(unsigned long long) le64_to_cpu(dma[6]),
				shadow[6],
				(unsigned long long) le64_to_cpu(dma[7]),
				shadow[7]);
	}
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

		/* on some chips, even and odd qwords above 3 are swapped */
		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
		else
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
		pchg = dd->ipath_pioavailkernel[i] &
			~(dd->ipath_pioavailshadow[i] ^ piov);
		pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
			pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->ipath_pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}
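
/*
 * Force the pioavail shadow back into sync with the chip's DMA'ed
 * copy, busying out any buffers not available to the kernel; used
 * when we have repeatedly failed to get a PIO buffer.  Done lazily,
 * because it's safer (known to not be writing pio buffers).
 */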
static void ipath_reset_availshadow(struct ipath_devdata *dd)
{
	int i, im;
	unsigned long flags;

	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < dd->ipath_pioavregs; i++) {
		u64 val, oldval;

		/* deal with chips that swap even/odd qwords above 3 */
		im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
			i ^ 1 : i;
		val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
		/*
		 * busy out the buffers not in the kernel avail list,
		 * without changing the generation bits.
		 */
		oldval = dd->ipath_pioavailshadow[i];
		dd->ipath_pioavailshadow[i] = val |
			((~dd->ipath_pioavailkernel[i] <<
			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
			0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
		if (oldval != dd->ipath_pioavailshadow[i])
			ipath_dbg("shadow[%d] was %Lx, now %lx\n",
				  i, (unsigned long long) oldval,
				  dd->ipath_pioavailshadow[i]);
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}
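
/**
 * ipath_setrcvhdrsize - set the receive header size
 * @dd: the infinipath device
 * @rhdrsize: the receive header size
 *
 * The size may only be set once per device; subsequent callers must
 * request the same size, and the maximum is bounded by the header
 * queue entry size.
 */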
int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
{
	int ret = 0;

	if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
		if (dd->ipath_rcvhdrsize != rhdrsize) {
			dev_info(&dd->pcidev->dev,
				 "Error: can't set protocol header "
				 "size %u, already %u\n",
				 rhdrsize, dd->ipath_rcvhdrsize);
			ret = -EAGAIN;
		} else
			ipath_cdbg(VERBOSE, "Reuse same protocol header "
				   "size %u\n", dd->ipath_rcvhdrsize);
	} else if (rhdrsize > (dd->ipath_rcvhdrentsize -
			       (sizeof(u64) / sizeof(u32)))) {
		ipath_dbg("Error: can't set protocol header size %u "
			  "(> max %u)\n", rhdrsize,
			  dd->ipath_rcvhdrentsize -
			  (u32) (sizeof(u64) / sizeof(u32)));
		ret = -EOVERFLOW;
	} else {
		dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
		dd->ipath_rcvhdrsize = rhdrsize;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
				 dd->ipath_rcvhdrsize);
		ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
			   dd->ipath_rcvhdrsize);
	}
	return ret;
}
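
/*
 * debugging code and stats updates if no pio buffers available.
 */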
static noinline void no_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long *shadow = dd->ipath_pioavailshadow;
	__le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;

	dd->ipath_upd_pio_shadow = 1;

	/*
	 * not atomic, but if we lose a stat count in a while, that's OK
	 */
	ipath_stats.sps_nopiobufs++;
	if (!(++dd->ipath_consec_nopiobuf % 100000)) {
		ipath_force_pio_avail_update(dd);
		ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
			"%llx %llx %llx %llx\n"
			"ipath shadow: %lx %lx %lx %lx\n",
			dd->ipath_consec_nopiobuf,
			(unsigned long)get_cycles(),
			(unsigned long long) le64_to_cpu(dma[0]),
			(unsigned long long) le64_to_cpu(dma[1]),
			(unsigned long long) le64_to_cpu(dma[2]),
			(unsigned long long) le64_to_cpu(dma[3]),
			shadow[0], shadow[1], shadow[2], shadow[3]);
		/*
		 * 2 bits per buffer means 4 buffers per byte; the first
		 * 4 shadow words cover 128 buffers, so only print the
		 * second group if there are more than that
		 */
		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
		    (sizeof(shadow[0]) * 4 * 4))
			ipath_dbg("2nd group: dmacopy: "
				  "%llx %llx %llx %llx\n"
				  "ipath shadow: %lx %lx %lx %lx\n",
				  (unsigned long long)le64_to_cpu(dma[4]),
				  (unsigned long long)le64_to_cpu(dma[5]),
				  (unsigned long long)le64_to_cpu(dma[6]),
				  (unsigned long long)le64_to_cpu(dma[7]),
				  shadow[4], shadow[5], shadow[6], shadow[7]);

		/* at end, so update likely happened */
		ipath_reset_availshadow(dd);
	}
}
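
/*
 * Common code for PIO buffer allocation.  Scans [first, last),
 * starting at firsti, for a free buffer; marks the chosen buffer busy
 * in the shadow and returns its mapped address (NULL if none found),
 * with the buffer number in *pbufnum.
 */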
static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
	u32 *pbufnum, u32 first, u32 last, u32 firsti)
{
	int i, j, updated = 0;
	unsigned piobcnt;
	unsigned long flags;
	unsigned long *shadow = dd->ipath_pioavailshadow;
	u32 __iomem *buf;

	piobcnt = last - first;
	if (dd->ipath_upd_pio_shadow) {
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do scan even
		 * if no buffers were updated, to be paranoid
		 */
		ipath_update_pio_bufs(dd);
		updated++;
		i = first;
	} else
		i = firsti;
rescan:
	/*
	 * while test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (j = 0; j < piobcnt; j++, i++) {
		if (i >= last)
			i = first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		break;
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	if (j == piobcnt) {
		if (!updated) {
			/*
			 * first time through; shadow exhausted, but may be
			 * buffers available, try an update and then rescan.
			 */
			ipath_update_pio_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		} else if (updated == 1 && piobcnt <=
			((dd->ipath_sendctrl
			>> INFINIPATH_S_UPDTHRESH_SHIFT) &
			INFINIPATH_S_UPDTHRESH_MASK)) {
			/*
			 * for chips supporting and using the update
			 * threshold we need to force an update of the
			 * in-memory copy if the count is less than the
			 * threshold, then check one more time.
			 */
			ipath_force_pio_avail_update(dd);
			ipath_update_pio_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		}

		no_pio_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->ipath_piobcnt2k)
			buf = (u32 __iomem *) (dd->ipath_pio2kbase +
					       i * dd->ipath_palign);
		else
			buf = (u32 __iomem *)
				(dd->ipath_pio4kbase +
				 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
		if (pbufnum)
			*pbufnum = i;
	}

	return buf;
}
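
/**
 * ipath_getpiobuf - find an available pio buffer
 * @dd: the infinipath device
 * @plen: the size of the PIO buffer needed in 32-bit words
 * @pbufnum: the buffer number is placed here
 */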
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
{
	u32 __iomem *buf;
	u32 pnum, nbufs;
	u32 first, lasti;

	if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
		first = dd->ipath_piobcnt2k;
		lasti = dd->ipath_lastpioindexl;
	} else {
		first = 0;
		lasti = dd->ipath_lastpioindex;
	}
	nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);

	if (buf) {
		/*
		 * Set next starting place.  It's just an optimization,
		 * it doesn't matter who wins on this, so no locking
		 */
		if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
			dd->ipath_lastpioindexl = pnum + 1;
		else
			dd->ipath_lastpioindex = pnum + 1;
		if (dd->ipath_upd_pio_shadow)
			dd->ipath_upd_pio_shadow = 0;
		if (dd->ipath_consec_nopiobuf)
			dd->ipath_consec_nopiobuf = 0;
		ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
			   pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
		if (pbufnum)
			*pbufnum = pnum;
	}
	return buf;
}
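
/**
 * ipath_chg_pioavailkernel - change which send buffers are available
 * for kernel use
 * @dd: the infinipath device
 * @start: the first send buffer to change
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 */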
void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
			      unsigned len, int avail)
{
	unsigned long flags;
	unsigned end, cnt = 0, next;

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&ipath_pioavail_lock, flags);

	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i, im;
			/*
			 * the BUSY bit will never be set, because we disarm
			 * the user buffers before we hand them back to the
			 * kernel.  We do have to make sure the generation
			 * bit is set correctly in shadow, since it could
			 * have changed many times while allocated to user.
			 * We can't use the bitmap functions on the full
			 * dma array because it is always little-endian, so
			 * we have to flip to host-order first.
			 */
			i = start / BITS_PER_LONG;
			im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
				i ^ 1 : i;
			__clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
				+ start, dd->ipath_pioavailshadow);
			dma = (unsigned long) le64_to_cpu(
				dd->ipath_pioavailregs_dma[im]);
			if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
				+ start) % BITS_PER_LONG, &dma))
				__set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
					+ start, dd->ipath_pioavailshadow);
			else
				__clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
					+ start, dd->ipath_pioavailshadow);
			__set_bit(start, dd->ipath_pioavailkernel);
		} else {
			__set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
				dd->ipath_pioavailshadow);
			__clear_bit(start, dd->ipath_pioavailkernel);
		}
		start += 2;
	}

	if (dd->ipath_pioupd_thresh) {
		end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
		next = find_first_bit(dd->ipath_pioavailkernel, end);
		while (next < end) {
			cnt++;
			next = find_next_bit(dd->ipath_pioavailkernel, end,
					next + 1);
		}
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	/*
	 * When moving buffers from kernel to user, if the number left
	 * to the kernel drops below the pio update threshold, and the
	 * threshold is supported (cnt was computed > 0), drop the
	 * threshold so the chip still updates the in-memory copy often
	 * enough for the smaller pool.
	 */
	if (!avail && len < cnt)
		cnt = len;
	if (cnt < dd->ipath_pioupd_thresh) {
		dd->ipath_pioupd_thresh = cnt;
		ipath_dbg("Decreased pio update threshold to %u\n",
			dd->ipath_pioupd_thresh);
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
			<< INFINIPATH_S_UPDTHRESH_SHIFT);
		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
			<< INFINIPATH_S_UPDTHRESH_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}
}
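
/**
 * ipath_create_rcvhdrq - create a receive header queue
 * @dd: the infinipath device
 * @pd: the port data
 *
 * This must be contiguous memory (from an i/o perspective), and must
 * be DMA'able (which means for some systems, it will go through an
 * IOMMU, or be forced into a low address range).
 */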
int ipath_create_rcvhdrq(struct ipath_devdata *dd,
			 struct ipath_portdata *pd)
{
	int ret = 0;

	if (!pd->port_rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags = GFP_USER | __GFP_COMP;
		int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
				sizeof(u32), PAGE_SIZE);

		pd->port_rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
			gfp_flags);

		if (!pd->port_rcvhdrq) {
			ipath_dev_err(dd, "attempt to allocate %d bytes "
				      "for port %u rcvhdrq failed\n",
				      amt, pd->port_port);
			ret = -ENOMEM;
			goto bail;
		}

		if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
			pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				GFP_KERNEL);
			if (!pd->port_rcvhdrtail_kvaddr) {
				ipath_dev_err(dd, "attempt to allocate 1 page "
					"for port %u rcvhdrqtailaddr "
					"failed\n", pd->port_port);
				ret = -ENOMEM;
				dma_free_coherent(&dd->pcidev->dev, amt,
					pd->port_rcvhdrq,
					pd->port_rcvhdrq_phys);
				pd->port_rcvhdrq = NULL;
				goto bail;
			}
			pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
			ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
				   "physical\n", pd->port_port,
				   (unsigned long long) phys_hdrqtail);
		}

		pd->port_rcvhdrq_size = amt;

		ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
			   "for port %u rcvhdr Q\n",
			   amt >> PAGE_SHIFT, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_phys,
			   (unsigned long) pd->port_rcvhdrq_size,
			   pd->port_port);
	} else
		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
			   "hdrtailaddr@%p %llx physical\n",
			   pd->port_port, pd->port_rcvhdrq,
			   (unsigned long long) pd->port_rcvhdrq_phys,
			   pd->port_rcvhdrtail_kvaddr, (unsigned long long)
			   pd->port_rcvhdrqtailaddr_phys);

	/* clear for security and sanity on each use */
	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
	if (pd->port_rcvhdrtail_kvaddr)
		memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);

	/*
	 * tell chip each time we init it, even if we are re-using previous
	 * memory (we zero the register at init, but may not change it)
	 */
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
			      pd->port_port, pd->port_rcvhdrqtailaddr_phys);
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			      pd->port_port, pd->port_rcvhdrq_phys);

bail:
	return ret;
}
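
/*
 * Flush all sends that might be in the ready to send state, as well as
 * any that are in the process of being sent.  Used whenever we need to
 * be sure the send side is idle.  Cleans up all buffer state by
 * canceling all pio buffers, and issuing an abort, which cleans up
 * anything in the launch fifo.  The cancel is superfluous on some
 * chips, but it's safer to always do it.
 * PIOAvail bits are updated by the chip as if a normal send had
 * happened.
 */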
void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
{
	unsigned long flags;

	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
		ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
		goto bail;
	}
	/*
	 * If we have SDMA, and it's not disabled, we have to kick off the
	 * abort state machine, provided we aren't already aborting.
	 * If we are in the process of aborting SDMA (!DISABLED, but
	 * ABORTING), we skip the rest of this routine; it is already
	 * "in progress".
	 */
	if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
		int skip_cancel;
		unsigned long *statp = &dd->ipath_sdma_status;

		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		skip_cancel =
			test_and_set_bit(IPATH_SDMA_ABORTING, statp)
			&& !test_bit(IPATH_SDMA_DISABLED, statp);
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
		if (skip_cancel)
			goto bail;
	}

	ipath_dbg("Cancelling all in-progress send buffers\n");

	/* skip armlaunch errs for a while */
	dd->ipath_lastcancel = jiffies + HZ / 2;

	/*
	 * The abort bit is auto-clearing.  We also don't want pioavail
	 * update happening during this, and we don't want any other
	 * sends going out, so turn those off for the duration.  We read
	 * the scratch register to be sure that cancels and the abort
	 * have taken effect in the chip.  Otherwise it's possible that
	 * a late abort could "stick" for some packet in the ring.
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
		| INFINIPATH_S_PIOENABLE);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
		dd->ipath_sendctrl | INFINIPATH_S_ABORT);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	/* disarm all send buffers */
	ipath_disarm_piobufs(dd, 0,
		dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
		set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);

	if (restore_sendctrl) {
		/* else done by caller later if needed */
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
			INFINIPATH_S_PIOENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		/* and again, be sure all have hit the chip */
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}

	if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
	    !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
	    test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		/* only wait so long for intr */
		dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
		dd->ipath_sdma_reset_wait = 200;
		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	}
bail:;
}
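
/*
 * Force an update of the in-memory copy of the pioavail registers,
 * when needed for any of a variety of reasons.  We read the scratch
 * register to make it highly likely that the update will have happened
 * by the time we return.  If updates are already off, this routine is
 * a nop, on the assumption that the caller will "do the right thing".
 */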
void ipath_force_pio_avail_update(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	}
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
}

static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
				int linitcmd)
{
	u64 mod_wd;
	static const char *what[4] = {
		[0] = "NOP",
		[INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
	};

	if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 */
		preempt_disable();
		dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
		preempt_enable();
	} else if (linitcmd) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		preempt_disable();
		dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
		preempt_enable();
	}

	mod_wd = (linkcmd << dd->ibcc_lc_shift) |
		(linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
	ipath_cdbg(VERBOSE,
		"Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
		dd->ipath_unit, what[linkcmd], linitcmd,
		ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
			ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl | mod_wd);
	/* read from chip so write is flushed */
	(void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
}

int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN_ONLY:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
					INFINIPATH_IBCC_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
					INFINIPATH_IBCC_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
					INFINIPATH_IBCC_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);

		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
		lstate = IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINK_LOOPBACK:
		dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);

		/* turn heartbeat off, as it causes loopback to fail */
		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_OFF);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINK_EXTERNAL:
		dev_info(&dd->pcidev->dev,
			 "Disabling IB local loopback (normal)\n");
		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_ON);
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		/* don't wait */
		ret = 0;
		goto bail;

	/*
	 * Heartbeat can be explicitly enabled by the user, and if
	 * disabled, trying to enable here will have no effect.
	 * Implicit changes (heartbeat off when loopback on, and vice
	 * versa) are included to ease testing.
	 */
	case IPATH_IB_LINK_HRTBT:
		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
			IPATH_IB_HRTBT_ON);
		goto bail;

	case IPATH_IB_LINK_NO_HRTBT:
		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
			IPATH_IB_HRTBT_OFF);
		goto bail;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}
	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}
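
/**
 * ipath_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 */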
int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    (arg != 4096 || !ipath_mtu4096)) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		/* same as current */
		ret = 0;
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
				piosize = dd->ipath_init_ibmaxlen;
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		u64 ibc = dd->ipath_ibcctrl, ibdw;
		/*
		 * update our housekeeping variables, and set the IBC
		 * max packet size accordingly (in dwords)
		 */
		dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
		ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 dd->ibcc_mpl_shift);
		ibc |= ibdw << dd->ibcc_mpl_shift;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}

int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
{
	dd->ipath_lid = lid;
	dd->ipath_lmc = lmc;

	dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
		(~((1U << lmc) - 1)) << 16);

	dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);

	return 0;
}
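
/**
 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
 * @dd: the infinipath device
 * @regno: the register number to write
 * @port: the port containing the register
 * @value: the value to write
 *
 * Only the per-port rcvhdraddr and rcvhdrtailaddr registers are
 * handled; anything else is redirected to an invalid offset.
 */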
void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
			   unsigned port, u64 value)
{
	u16 where;

	if (port < dd->ipath_portcnt &&
	    (regno == dd->ipath_kregs->kr_rcvhdraddr ||
	     regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
		where = regno + port;
	else
		where = -1;

	ipath_write_kreg(dd, where, value);
}

/*
 * The following deal with the "obviously simple" task of overriding the
 * state of the LEDs, which normally indicate link physical and logical
 * status.  The complications arise in dealing with different hardware
 * mappings and the board-dependent routine being called from interrupts.
 * And then there's the requirement to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

static void ipath_run_led_override(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
	int timeoff;
	int pidx;
	u64 lstate, ltstate, val;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	pidx = dd->ipath_led_override_phase++ & 1;
	dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
	timeoff = dd->ipath_led_override_timeoff;

	/*
	 * read the current link status and hand it, with the override
	 * phase, to the board-specific LED routine
	 */
	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
	ltstate = ipath_ib_linktrstate(dd, val);
	lstate = ipath_ib_linkstate(dd, val);

	dd->ipath_f_setextled(dd, lstate, ltstate);
	mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
}

void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
{
	int timeoff, freq;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = val & 0xF;
	}
	dd->ipath_led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our
	 * request.
	 */
	if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&dd->ipath_led_override_timer);
		dd->ipath_led_override_timer.function =
			ipath_run_led_override;
		dd->ipath_led_override_timer.data = (unsigned long) dd;
		dd->ipath_led_override_timer.expires = jiffies + 1;
		add_timer(&dd->ipath_led_override_timer);
	} else
		atomic_dec(&dd->ipath_led_override_timer_active);
}
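
/**
 * ipath_shutdown_device - shut down a device
 * @dd: the infinipath device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures; everything it
 * does has to be set up again before the device can be used.
 */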
void ipath_shutdown_device(struct ipath_devdata *dd)
{
	unsigned long flags;

	ipath_dbg("Shutting down the device\n");

	ipath_hol_up(dd); /* make sure user processes aren't suspended */

	dd->ipath_flags |= IPATH_LINKUNK;
	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
			     IPATH_LINKINIT | IPATH_LINKARMED |
			     IPATH_LINKACTIVE);
	*dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
				IPATH_STATUS_IB_READY);

	/* mask interrupts, but not errors */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);

	dd->ipath_rcvctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
		teardown_sdma(dd);

	/*
	 * gracefully stop all sends allowing any in progress to trickle
	 * out first.
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	/* flush it */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	/*
	 * enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(5);

	dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */

	ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
	ipath_cancel_sends(dd, 0);

	/*
	 * we are shutting down, so tell components that care.  We don't do
	 * this on just a link state change, much like ethernet, a cable
	 * unplug, etc. doesn't change driver state
	 */
	signal_ib_event(dd, IB_EVENT_PORT_ERR);

	/* disable IBC */
	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control | INFINIPATH_C_FREEZEMODE);

	/*
	 * clear SerdesEnable and turn the leds off; do this here because
	 * we are unloading, so don't count on interrupts to move along
	 */
	dd->ipath_f_quiet_serdes(dd);

	/* stop all the timers that might still be running */
	del_timer_sync(&dd->ipath_hol_timer);
	if (dd->ipath_stats_timer_active) {
		del_timer_sync(&dd->ipath_stats_timer);
		dd->ipath_stats_timer_active = 0;
	}
	if (dd->ipath_intrchk_timer.data) {
		del_timer_sync(&dd->ipath_intrchk_timer);
		dd->ipath_intrchk_timer.data = 0;
	}
	if (atomic_read(&dd->ipath_led_override_timer_active)) {
		del_timer_sync(&dd->ipath_led_override_timer);
		atomic_set(&dd->ipath_led_override_timer_active, 0);
	}

	/*
	 * clear all interrupts and errors, so that the next time the driver
	 * is loaded or device is enabled, we know that whatever is set
	 * happened while we were unloaded
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
			 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);

	ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
	ipath_update_eeprom_log(dd);
}
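
/**
 * ipath_free_pddata - free a port's allocated data
 * @dd: the infinipath device
 * @pd: the portdata structure
 *
 * Free any allocated data for a port.  This must not touch anything that
 * would affect a simultaneous re-allocation of port data, and it never
 * changes chip state; the only global driver state it touches is freeing
 * the port 0 skb info.
 */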
void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
{
	if (!pd)
		return;

	if (pd->port_rcvhdrq) {
		ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
			   "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_size);
		dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
				  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
		pd->port_rcvhdrq = NULL;
		if (pd->port_rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  pd->port_rcvhdrtail_kvaddr,
					  pd->port_rcvhdrqtailaddr_phys);
			pd->port_rcvhdrtail_kvaddr = NULL;
		}
	}
	if (pd->port_port && pd->port_rcvegrbuf) {
		/* user ports get DMA-coherent eager-buffer chunks */
		unsigned e;

		for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
			void *base = pd->port_rcvegrbuf[e];
			size_t size = pd->port_rcvegrbuf_size;

			ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
				   "chunk %u/%u\n", base,
				   (unsigned long) size,
				   e, pd->port_rcvegrbuf_chunks);
			dma_free_coherent(&dd->pcidev->dev, size,
					  base, pd->port_rcvegrbuf_phys[e]);
		}
		kfree(pd->port_rcvegrbuf);
		pd->port_rcvegrbuf = NULL;
		kfree(pd->port_rcvegrbuf_phys);
		pd->port_rcvegrbuf_phys = NULL;
		pd->port_rcvegrbuf_chunks = 0;
	} else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
		/* port 0 receives into mapped skbs; unmap and free those */
		unsigned e;
		struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;

		dd->ipath_port0_skbinfo = NULL;
		ipath_cdbg(VERBOSE, "free closed port %d "
			   "ipath_port0_skbinfo @ %p\n", pd->port_port,
			   skbinfo);
		for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
			if (skbinfo[e].skb) {
				pci_unmap_single(dd->pcidev, skbinfo[e].phys,
						 dd->ipath_ibmaxlen,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(skbinfo[e].skb);
			}
		vfree(skbinfo);
	}
	kfree(pd->port_tid_pg_list);
	vfree(pd->subport_uregbase);
	vfree(pd->subport_rcvegrbuf);
	vfree(pd->subport_rcvhdr_base);
	kfree(pd);
}

static int __init infinipath_init(void)
{
	int ret;

	if (ipath_debug & __IPATH_DBG)
		printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&unit_table);
	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
		ret = -ENOMEM;
		goto bail;
	}

	ret = pci_register_driver(&ipath_driver);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Unable to register driver: error %d\n", -ret);
		goto bail_unit;
	}

	ret = ipath_init_ipathfs();
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
		       "ipathfs: error %d\n", -ret);
		goto bail_pci;
	}

	goto bail;

bail_pci:
	pci_unregister_driver(&ipath_driver);

bail_unit:
	idr_destroy(&unit_table);

bail:
	return ret;
}

static void __exit infinipath_cleanup(void)
{
	ipath_exit_ipathfs();

	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
	pci_unregister_driver(&ipath_driver);

	idr_destroy(&unit_table);
}
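
/**
 * ipath_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not the reset succeeds, we attempt to re-initialize the chip
 * (much like a driver unload/reload).  The INITTED flag is cleared so the
 * various entry points fail until we reinitialize.  For now, this is only
 * allowed if no user ports are open that use chip resources.
 */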
int ipath_reset_device(int unit)
{
	int ret, i;
	struct ipath_devdata *dd = ipath_lookup(unit);

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (atomic_read(&dd->ipath_led_override_timer_active)) {
		/* Need to stop LED timer, _then_ shut off LEDs */
		del_timer_sync(&dd->ipath_led_override_timer);
		atomic_set(&dd->ipath_led_override_timer_active, 0);
	}

	/* Shut off LEDs after we are sure timer is not running */
	dd->ipath_led_override = LED_OVER_BOTH_OFF;
	dd->ipath_f_setextled(dd, 0, 0);

	dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);

	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
		dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
			 "not initialized or not present\n", unit);
		ret = -ENXIO;
		goto bail;
	}

	if (dd->ipath_pd)
		for (i = 1; i < dd->ipath_cfgports; i++) {
			if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
				ipath_dbg("unit %u port %d is in use "
					  "(PID %u cmd %s), can't reset\n",
					  unit, i,
					  pid_nr(dd->ipath_pd[i]->port_pid),
					  dd->ipath_pd[i]->port_comm);
				ret = -EBUSY;
				goto bail;
			}
		}

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
		teardown_sdma(dd);

	dd->ipath_flags &= ~IPATH_INITTED;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
	ret = dd->ipath_f_reset(dd);
	if (ret == 1) {
		ipath_dbg("Reinitializing unit %u after reset attempt\n",
			  unit);
		ret = ipath_init_chip(dd, 1);
	} else
		ret = -EAGAIN;
	if (ret)
		ipath_dev_err(dd, "Reinitialize unit %u after "
			      "reset failed with %d\n", unit, ret);
	else
		dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
			 "resetting\n", unit);

bail:
	return ret;
}
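
/*
 * Send a signal to all the processes that have the driver open through
 * the normal interfaces (i.e. everything other than the diags interface).
 * Returns the number of signalled processes.
 */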
static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
{
	int i, sub, any = 0;
	struct pid *pid;

	if (!dd->ipath_pd)
		return 0;
	for (i = 1; i < dd->ipath_cfgports; i++) {
		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
			continue;
		pid = dd->ipath_pd[i]->port_pid;
		if (!pid)
			continue;

		dev_info(&dd->pcidev->dev, "context %d in use "
			 "(PID %u), sending signal %d\n",
			 i, pid_nr(pid), sig);
		kill_pid(pid, sig, 1);
		any++;
		for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
			pid = dd->ipath_pd[i]->port_subpid[sub];
			if (!pid)
				continue;
			dev_info(&dd->pcidev->dev, "sub-context "
				 "%d:%d in use (PID %u), sending "
				 "signal %d\n", i, sub, pid_nr(pid), sig);
			kill_pid(pid, sig, 1);
			any++;
		}
	}
	return any;
}

static void ipath_hol_signal_down(struct ipath_devdata *dd)
{
	if (ipath_signal_procs(dd, SIGSTOP))
		ipath_dbg("Stopped some processes\n");
	ipath_cancel_sends(dd, 1);
}

static void ipath_hol_signal_up(struct ipath_devdata *dd)
{
	if (ipath_signal_procs(dd, SIGCONT))
		ipath_dbg("Continued some processes\n");
}
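
/*
 * The link is down: stop any user processes and flush pending sends to
 * prevent HoL blocking, then start the HoL timer, which periodically
 * continues and re-stops the processes so they can detect the link-down
 * condition if they want and react to it.  The timer may already be
 * running, so use mod_timer, not add_timer.
 */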
void ipath_hol_down(struct ipath_devdata *dd)
{
	dd->ipath_hol_state = IPATH_HOL_DOWN;
	ipath_hol_signal_down(dd);
	dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
	mod_timer(&dd->ipath_hol_timer, jiffies +
		  msecs_to_jiffies(ipath_hol_timeout_ms));
}
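
/*
 * The link is up: continue any suspended user processes.  Any running
 * HoL timer is left alone; it becomes a nop once it sees the link is up.
 */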
void ipath_hol_up(struct ipath_devdata *dd)
{
	ipath_hol_signal_up(dd);
	dd->ipath_hol_state = IPATH_HOL_UP;
}
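
/*
 * Toggle the running/not-running state of user processes to prevent HoL
 * blocking on chip resources, while still letting them handle the
 * link-down special case.  Should only be called via the HoL timer.
 */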
void ipath_hol_event(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
		&& dd->ipath_hol_state != IPATH_HOL_UP) {
		dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
		ipath_dbg("Stopping processes\n");
		ipath_hol_signal_down(dd);
	} else { /* Continue, then stop again, if still down */
		dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
		ipath_dbg("Continuing processes\n");
		ipath_hol_signal_up(dd);
	}
	if (dd->ipath_hol_state == IPATH_HOL_UP)
		ipath_dbg("link's up, don't resched timer\n");
	else
		mod_timer(&dd->ipath_hol_timer, jiffies +
			  msecs_to_jiffies(ipath_hol_timeout_ms));
}
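
/*
 * Update the receive-polarity-inversion field of the XGXS config register
 * with a read-modify-write; returns -1 if the requested value does not
 * fit in the field.
 */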
int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
{
	u64 val;

	if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
		return -1;
	if (dd->ipath_rx_pol_inv != new_pol_inv) {
		dd->ipath_rx_pol_inv = new_pol_inv;
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
			 INFINIPATH_XGXS_RX_POL_SHIFT);
		val |= ((u64)dd->ipath_rx_pol_inv) <<
			INFINIPATH_XGXS_RX_POL_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
	}
	return 0;
}
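
/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
 * on the 7220, which is count-based rather than trigger-based.  Safe for
 * the driver check, since it runs at init time.  Not completely safe for
 * user-mode checking, since some error checking can be lost, but that is
 * not particularly risky and only misbehaves with very buggy user code.
 * There is no reference counting, which is also fine for the intended use.
 */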
void ipath_enable_armlaunch(struct ipath_devdata *dd)
{
	dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
			 INFINIPATH_E_SPIOARMLAUNCH);
	dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
			 dd->ipath_errormask);
}

void ipath_disable_armlaunch(struct ipath_devdata *dd)
{
	/* so don't re-enable if already masked */
	dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
	dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
			 dd->ipath_errormask);
}

module_init(infinipath_init);
module_exit(infinipath_cleanup);