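/*
 * s2io.c: Linux network driver for Neterion (formerly S2io) Xframe I and
 * Xframe II 10 Gigabit Ethernet server adapters.
 *
 * Released under the GPL; see MODULE_LICENSE/MODULE_VERSION below.
 */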
57#include <linux/module.h>
58#include <linux/types.h>
59#include <linux/errno.h>
60#include <linux/ioport.h>
61#include <linux/pci.h>
62#include <linux/dma-mapping.h>
63#include <linux/kernel.h>
64#include <linux/netdevice.h>
65#include <linux/etherdevice.h>
66#include <linux/skbuff.h>
67#include <linux/init.h>
68#include <linux/delay.h>
69#include <linux/stddef.h>
70#include <linux/ioctl.h>
71#include <linux/timex.h>
72#include <linux/ethtool.h>
73#include <linux/workqueue.h>
74#include <linux/if_vlan.h>
75#include <linux/ip.h>
76#include <linux/tcp.h>
77#include <net/tcp.h>
78
79#include <asm/system.h>
80#include <asm/uaccess.h>
81#include <asm/io.h>
82#include <asm/div64.h>
83#include <asm/irq.h>
84
85
86#include "s2io.h"
87#include "s2io-regs.h"
88
89#define DRV_VERSION "2.0.26.25"
90
91
92static char s2io_driver_name[] = "Neterion";
93static char s2io_driver_version[] = DRV_VERSION;
94
95static int rxd_size[2] = {32,48};
96static int rxd_count[2] = {127,85};
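
/*
 * Returns non-zero when an RxD has been handed back to the host: the
 * ownership bit is no longer set and the marker field in Control_2 has
 * been overwritten by the hardware.
 */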
98static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
99{
100 int ret;
101
102 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105 return ret;
106}
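
/*
 * Xframe I cards with the listed PCI subsystem IDs have faulty link
 * indicators and need special handling when reporting link state.
 * LINK_IS_UP() checks the RMAC local/remote fault bits in the adapter
 * status register.
 */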
113#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
114 (dev_type == XFRAME_I_DEVICE) ? \
115 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
116 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
117
118#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
119 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
120
121static inline int is_s2io_card_up(const struct s2io_nic * sp)
122{
123 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
124}
125
126
127static char s2io_gstrings[][ETH_GSTRING_LEN] = {
128 "Register test\t(offline)",
129 "Eeprom test\t(offline)",
130 "Link test\t(online)",
131 "RLDRAM test\t(offline)",
132 "BIST Test\t(offline)"
133};
134
135static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
136 {"tmac_frms"},
137 {"tmac_data_octets"},
138 {"tmac_drop_frms"},
139 {"tmac_mcst_frms"},
140 {"tmac_bcst_frms"},
141 {"tmac_pause_ctrl_frms"},
142 {"tmac_ttl_octets"},
143 {"tmac_ucst_frms"},
144 {"tmac_nucst_frms"},
145 {"tmac_any_err_frms"},
146 {"tmac_ttl_less_fb_octets"},
147 {"tmac_vld_ip_octets"},
148 {"tmac_vld_ip"},
149 {"tmac_drop_ip"},
150 {"tmac_icmp"},
151 {"tmac_rst_tcp"},
152 {"tmac_tcp"},
153 {"tmac_udp"},
154 {"rmac_vld_frms"},
155 {"rmac_data_octets"},
156 {"rmac_fcs_err_frms"},
157 {"rmac_drop_frms"},
158 {"rmac_vld_mcst_frms"},
159 {"rmac_vld_bcst_frms"},
160 {"rmac_in_rng_len_err_frms"},
161 {"rmac_out_rng_len_err_frms"},
162 {"rmac_long_frms"},
163 {"rmac_pause_ctrl_frms"},
164 {"rmac_unsup_ctrl_frms"},
165 {"rmac_ttl_octets"},
166 {"rmac_accepted_ucst_frms"},
167 {"rmac_accepted_nucst_frms"},
168 {"rmac_discarded_frms"},
169 {"rmac_drop_events"},
170 {"rmac_ttl_less_fb_octets"},
171 {"rmac_ttl_frms"},
172 {"rmac_usized_frms"},
173 {"rmac_osized_frms"},
174 {"rmac_frag_frms"},
175 {"rmac_jabber_frms"},
176 {"rmac_ttl_64_frms"},
177 {"rmac_ttl_65_127_frms"},
178 {"rmac_ttl_128_255_frms"},
179 {"rmac_ttl_256_511_frms"},
180 {"rmac_ttl_512_1023_frms"},
181 {"rmac_ttl_1024_1518_frms"},
182 {"rmac_ip"},
183 {"rmac_ip_octets"},
184 {"rmac_hdr_err_ip"},
185 {"rmac_drop_ip"},
186 {"rmac_icmp"},
187 {"rmac_tcp"},
188 {"rmac_udp"},
189 {"rmac_err_drp_udp"},
190 {"rmac_xgmii_err_sym"},
191 {"rmac_frms_q0"},
192 {"rmac_frms_q1"},
193 {"rmac_frms_q2"},
194 {"rmac_frms_q3"},
195 {"rmac_frms_q4"},
196 {"rmac_frms_q5"},
197 {"rmac_frms_q6"},
198 {"rmac_frms_q7"},
199 {"rmac_full_q0"},
200 {"rmac_full_q1"},
201 {"rmac_full_q2"},
202 {"rmac_full_q3"},
203 {"rmac_full_q4"},
204 {"rmac_full_q5"},
205 {"rmac_full_q6"},
206 {"rmac_full_q7"},
207 {"rmac_pause_cnt"},
208 {"rmac_xgmii_data_err_cnt"},
209 {"rmac_xgmii_ctrl_err_cnt"},
210 {"rmac_accepted_ip"},
211 {"rmac_err_tcp"},
212 {"rd_req_cnt"},
213 {"new_rd_req_cnt"},
214 {"new_rd_req_rtry_cnt"},
215 {"rd_rtry_cnt"},
216 {"wr_rtry_rd_ack_cnt"},
217 {"wr_req_cnt"},
218 {"new_wr_req_cnt"},
219 {"new_wr_req_rtry_cnt"},
220 {"wr_rtry_cnt"},
221 {"wr_disc_cnt"},
222 {"rd_rtry_wr_ack_cnt"},
223 {"txp_wr_cnt"},
224 {"txd_rd_cnt"},
225 {"txd_wr_cnt"},
226 {"rxd_rd_cnt"},
227 {"rxd_wr_cnt"},
228 {"txf_rd_cnt"},
229 {"rxf_wr_cnt"}
230};
231
232static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
233 {"rmac_ttl_1519_4095_frms"},
234 {"rmac_ttl_4096_8191_frms"},
235 {"rmac_ttl_8192_max_frms"},
236 {"rmac_ttl_gt_max_frms"},
237 {"rmac_osized_alt_frms"},
238 {"rmac_jabber_alt_frms"},
239 {"rmac_gt_max_alt_frms"},
240 {"rmac_vlan_frms"},
241 {"rmac_len_discard"},
242 {"rmac_fcs_discard"},
243 {"rmac_pf_discard"},
244 {"rmac_da_discard"},
245 {"rmac_red_discard"},
246 {"rmac_rts_discard"},
247 {"rmac_ingm_full_discard"},
248 {"link_fault_cnt"}
249};
250
251static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
252 {"\n DRIVER STATISTICS"},
253 {"single_bit_ecc_errs"},
254 {"double_bit_ecc_errs"},
255 {"parity_err_cnt"},
256 {"serious_err_cnt"},
257 {"soft_reset_cnt"},
258 {"fifo_full_cnt"},
259 {"ring_0_full_cnt"},
260 {"ring_1_full_cnt"},
261 {"ring_2_full_cnt"},
262 {"ring_3_full_cnt"},
263 {"ring_4_full_cnt"},
264 {"ring_5_full_cnt"},
265 {"ring_6_full_cnt"},
266 {"ring_7_full_cnt"},
267 {"alarm_transceiver_temp_high"},
268 {"alarm_transceiver_temp_low"},
269 {"alarm_laser_bias_current_high"},
270 {"alarm_laser_bias_current_low"},
271 {"alarm_laser_output_power_high"},
272 {"alarm_laser_output_power_low"},
273 {"warn_transceiver_temp_high"},
274 {"warn_transceiver_temp_low"},
275 {"warn_laser_bias_current_high"},
276 {"warn_laser_bias_current_low"},
277 {"warn_laser_output_power_high"},
278 {"warn_laser_output_power_low"},
279 {"lro_aggregated_pkts"},
280 {"lro_flush_both_count"},
281 {"lro_out_of_sequence_pkts"},
282 {"lro_flush_due_to_max_pkts"},
283 {"lro_avg_aggr_pkts"},
284 {"mem_alloc_fail_cnt"},
285 {"pci_map_fail_cnt"},
286 {"watchdog_timer_cnt"},
287 {"mem_allocated"},
288 {"mem_freed"},
289 {"link_up_cnt"},
290 {"link_down_cnt"},
291 {"link_up_time"},
292 {"link_down_time"},
293 {"tx_tcode_buf_abort_cnt"},
294 {"tx_tcode_desc_abort_cnt"},
295 {"tx_tcode_parity_err_cnt"},
296 {"tx_tcode_link_loss_cnt"},
297 {"tx_tcode_list_proc_err_cnt"},
298 {"rx_tcode_parity_err_cnt"},
299 {"rx_tcode_abort_cnt"},
300 {"rx_tcode_parity_abort_cnt"},
301 {"rx_tcode_rda_fail_cnt"},
302 {"rx_tcode_unkn_prot_cnt"},
303 {"rx_tcode_fcs_err_cnt"},
304 {"rx_tcode_buf_size_err_cnt"},
305 {"rx_tcode_rxd_corrupt_cnt"},
306 {"rx_tcode_unkn_err_cnt"},
307 {"tda_err_cnt"},
308 {"pfc_err_cnt"},
309 {"pcc_err_cnt"},
310 {"tti_err_cnt"},
311 {"tpa_err_cnt"},
312 {"sm_err_cnt"},
313 {"lso_err_cnt"},
314 {"mac_tmac_err_cnt"},
315 {"mac_rmac_err_cnt"},
316 {"xgxs_txgxs_err_cnt"},
317 {"xgxs_rxgxs_err_cnt"},
318 {"rc_err_cnt"},
319 {"prc_pcix_err_cnt"},
320 {"rpa_err_cnt"},
321 {"rda_err_cnt"},
322 {"rti_err_cnt"},
323 {"mc_err_cnt"}
324};
325
326#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
327#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
328#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
329
330#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
331#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
332
333#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
334#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
335
336#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
338
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	init_timer(&timer);				\
	timer.function = handle;			\
	timer.data = (unsigned long) arg;		\
	mod_timer(&timer, (jiffies + exp))
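
/*
 * Store a MAC address, passed packed into the low 48 bits of a u64, into
 * the def_mac_addr table at the given offset (byte 0 is the most
 * significant byte of the address).
 */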
346static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
347{
348 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354}
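
/*
 * VLAN group registration and VID removal (see s2io_vlan_rx_kill_vid
 * below). Both run with every Tx FIFO lock held so the transmit path
 * never sees a half-updated vlgrp.
 */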
356static void s2io_vlan_rx_register(struct net_device *dev,
357 struct vlan_group *grp)
358{
359 int i;
360 struct s2io_nic *nic = dev->priv;
361 unsigned long flags[MAX_TX_FIFOS];
362 struct mac_info *mac_control = &nic->mac_control;
363 struct config_param *config = &nic->config;
364
365 for (i = 0; i < config->tx_fifo_num; i++)
366 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
367
368 nic->vlgrp = grp;
369 for (i = config->tx_fifo_num - 1; i >= 0; i--)
370 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
371 flags[i]);
372}
373
374
375static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
376{
377 int i;
378 struct s2io_nic *nic = dev->priv;
379 unsigned long flags[MAX_TX_FIFOS];
380 struct mac_info *mac_control = &nic->mac_control;
381 struct config_param *config = &nic->config;
382
383 for (i = 0; i < config->tx_fifo_num; i++)
384 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
385
386 if (nic->vlgrp)
387 vlan_group_set_device(nic->vlgrp, vid, NULL);
388
389 for (i = config->tx_fifo_num - 1; i >= 0; i--)
390 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
391 flags[i]);
392}
393
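
/*
 * Transceiver/XAUI (DTX) initialization sequences. Each value is written
 * in order to the dtx_control register during init_nic(); END_SIGN marks
 * the end of a table. herc_act_dtx_cfg is used on Xframe II (Herc),
 * xena_dtx_cfg on Xframe I (Xena).
 */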
#define END_SIGN 0x0

static const u64 herc_act_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
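
/*
 * gpio_control write sequence replayed by fix_mac_address() as a
 * MAC-address related hardware workaround.
 */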
441static const u64 fix_mac[] = {
442 0x0060000000000000ULL, 0x0060600000000000ULL,
443 0x0040600000000000ULL, 0x0000600000000000ULL,
444 0x0020600000000000ULL, 0x0060600000000000ULL,
445 0x0020600000000000ULL, 0x0060600000000000ULL,
446 0x0020600000000000ULL, 0x0060600000000000ULL,
447 0x0020600000000000ULL, 0x0060600000000000ULL,
448 0x0020600000000000ULL, 0x0060600000000000ULL,
449 0x0020600000000000ULL, 0x0060600000000000ULL,
450 0x0020600000000000ULL, 0x0060600000000000ULL,
451 0x0020600000000000ULL, 0x0060600000000000ULL,
452 0x0020600000000000ULL, 0x0060600000000000ULL,
453 0x0020600000000000ULL, 0x0060600000000000ULL,
454 0x0020600000000000ULL, 0x0000600000000000ULL,
455 0x0040600000000000ULL, 0x0060600000000000ULL,
456 END_SIGN
457};
458
459MODULE_LICENSE("GPL");
460MODULE_VERSION(DRV_VERSION);
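
/*
 * S2IO_PARM_INT declares an unsigned int module parameter with a default
 * value. The exact definition lives alongside these declarations in the
 * original source; the sketch below is an assumed equivalent, shown only
 * for reference:
 *
 *	#define S2IO_PARM_INT(X, def_val) \
 *		static unsigned int X = def_val; \
 *		module_param(X, uint, 0);
 */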
464S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
465S2IO_PARM_INT(rx_ring_num, 1);
466S2IO_PARM_INT(multiq, 0);
467S2IO_PARM_INT(rx_ring_mode, 1);
468S2IO_PARM_INT(use_continuous_tx_intrs, 1);
469S2IO_PARM_INT(rmac_pause_time, 0x100);
470S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
471S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
472S2IO_PARM_INT(shared_splits, 0);
473S2IO_PARM_INT(tmac_util_period, 5);
474S2IO_PARM_INT(rmac_util_period, 5);
475S2IO_PARM_INT(l3l4hdr_size, 128);
476
477S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
478
479S2IO_PARM_INT(rxsync_frequency, 3);
480
481S2IO_PARM_INT(intr_type, 2);
482
483static unsigned int lro_enable;
484module_param_named(lro, lro_enable, uint, 0);
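
/*
 * lro_max_pkts bounds the number of packets LRO will aggregate before a
 * flush (see the lro_flush_due_to_max_pkts driver statistic).
 */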
489S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
490S2IO_PARM_INT(indicate_max_pkts, 0);
491
492S2IO_PARM_INT(napi, 1);
493S2IO_PARM_INT(ufo, 0);
494S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
495
496static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
497 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
498static unsigned int rx_ring_sz[MAX_RX_RINGS] =
499 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
500static unsigned int rts_frm_len[MAX_RX_RINGS] =
501 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
502
503module_param_array(tx_fifo_len, uint, NULL, 0);
504module_param_array(rx_ring_sz, uint, NULL, 0);
505module_param_array(rts_frm_len, uint, NULL, 0);
506
507
508
509
510
511static struct pci_device_id s2io_tbl[] __devinitdata = {
512 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
513 PCI_ANY_ID, PCI_ANY_ID},
514 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
515 PCI_ANY_ID, PCI_ANY_ID},
516 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
517 PCI_ANY_ID, PCI_ANY_ID},
518 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
519 PCI_ANY_ID, PCI_ANY_ID},
520 {0,}
521};
522
523MODULE_DEVICE_TABLE(pci, s2io_tbl);
524
525static struct pci_error_handlers s2io_err_handler = {
526 .error_detected = s2io_io_error_detected,
527 .slot_reset = s2io_io_slot_reset,
528 .resume = s2io_io_resume,
529};
530
531static struct pci_driver s2io_driver = {
532 .name = "S2IO",
533 .id_table = s2io_tbl,
534 .probe = s2io_init_nic,
535 .remove = __devexit_p(s2io_rem_nic),
536 .err_handler = &s2io_err_handler,
537};
538
539
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
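
/*
 * Tx queue control helpers. When multiqueue support is disabled the
 * driver keeps a software queue_state per FIFO and drives the single
 * netdev queue from it; with multiqueue enabled the per-FIFO netdev
 * subqueues are used directly.
 */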
543static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
544{
545 if (!sp->config.multiq) {
546 int i;
547
548 for (i = 0; i < sp->config.tx_fifo_num; i++)
549 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
550 }
551 netif_tx_stop_all_queues(sp->dev);
552}
553
554static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
555{
556 if (!sp->config.multiq)
557 sp->mac_control.fifos[fifo_no].queue_state =
558 FIFO_QUEUE_STOP;
559
560 netif_tx_stop_all_queues(sp->dev);
561}
562
563static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
564{
565 if (!sp->config.multiq) {
566 int i;
567
568 for (i = 0; i < sp->config.tx_fifo_num; i++)
569 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
570 }
571 netif_tx_start_all_queues(sp->dev);
572}
573
574static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
575{
576 if (!sp->config.multiq)
577 sp->mac_control.fifos[fifo_no].queue_state =
578 FIFO_QUEUE_START;
579
580 netif_tx_start_all_queues(sp->dev);
581}
582
583static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
584{
585 if (!sp->config.multiq) {
586 int i;
587
588 for (i = 0; i < sp->config.tx_fifo_num; i++)
589 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
590 }
591 netif_tx_wake_all_queues(sp->dev);
592}
593
594static inline void s2io_wake_tx_queue(
595 struct fifo_info *fifo, int cnt, u8 multiq)
596{
597
598 if (multiq) {
599 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
600 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
601 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
602 if (netif_queue_stopped(fifo->dev)) {
603 fifo->queue_state = FIFO_QUEUE_START;
604 netif_wake_queue(fifo->dev);
605 }
606 }
607}
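
/*
 * init_shared_mem - allocate the memory shared between host and adapter:
 * per-FIFO TxDL list pages, per-ring RxD blocks (linked into a circular
 * chain), 2-buffer-mode buffer bookkeeping and the statistics block.
 * Returns SUCCESS, or -EINVAL/-ENOMEM/FAILURE on error.
 */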
617static int init_shared_mem(struct s2io_nic *nic)
618{
619 u32 size;
620 void *tmp_v_addr, *tmp_v_addr_next;
621 dma_addr_t tmp_p_addr, tmp_p_addr_next;
622 struct RxD_block *pre_rxd_blk = NULL;
623 int i, j, blk_cnt;
624 int lst_size, lst_per_page;
625 struct net_device *dev = nic->dev;
626 unsigned long tmp;
627 struct buffAdd *ba;
628
629 struct mac_info *mac_control;
630 struct config_param *config;
631 unsigned long long mem_allocated = 0;
632
633 mac_control = &nic->mac_control;
634 config = &nic->config;
635
636
637
638 size = 0;
639 for (i = 0; i < config->tx_fifo_num; i++) {
640 size += config->tx_cfg[i].fifo_len;
641 }
642 if (size > MAX_AVAILABLE_TXDS) {
643 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
644 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
645 return -EINVAL;
646 }
647
648 size = 0;
649 for (i = 0; i < config->tx_fifo_num; i++) {
650 size = config->tx_cfg[i].fifo_len;
651
652
653
654 if (size < 2) {
655 DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
656 DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
657 DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
658 "are 2 to 8192\n");
659 return -EINVAL;
660 }
661 }
662
663 lst_size = (sizeof(struct TxD) * config->max_txds);
664 lst_per_page = PAGE_SIZE / lst_size;
665
666 for (i = 0; i < config->tx_fifo_num; i++) {
667 int fifo_len = config->tx_cfg[i].fifo_len;
668 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
669 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
670 GFP_KERNEL);
671 if (!mac_control->fifos[i].list_info) {
672 DBG_PRINT(INFO_DBG,
673 "Malloc failed for list_info\n");
674 return -ENOMEM;
675 }
676 mem_allocated += list_holder_size;
677 }
678 for (i = 0; i < config->tx_fifo_num; i++) {
679 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
680 lst_per_page);
681 mac_control->fifos[i].tx_curr_put_info.offset = 0;
682 mac_control->fifos[i].tx_curr_put_info.fifo_len =
683 config->tx_cfg[i].fifo_len - 1;
684 mac_control->fifos[i].tx_curr_get_info.offset = 0;
685 mac_control->fifos[i].tx_curr_get_info.fifo_len =
686 config->tx_cfg[i].fifo_len - 1;
687 mac_control->fifos[i].fifo_no = i;
688 mac_control->fifos[i].nic = nic;
689 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
690 mac_control->fifos[i].dev = dev;
691
692 for (j = 0; j < page_num; j++) {
693 int k = 0;
694 dma_addr_t tmp_p;
695 void *tmp_v;
696 tmp_v = pci_alloc_consistent(nic->pdev,
697 PAGE_SIZE, &tmp_p);
698 if (!tmp_v) {
699 DBG_PRINT(INFO_DBG,
700 "pci_alloc_consistent ");
701 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
702 return -ENOMEM;
703 }
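
			/*
			 * A DMA address of 0 is legal but cannot be told
			 * apart from "no address" elsewhere in the driver,
			 * so keep that page aside (zerodma_virt_addr) and
			 * allocate another page for actual use.
			 */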
709 if (!tmp_p) {
710 mac_control->zerodma_virt_addr = tmp_v;
711 DBG_PRINT(INIT_DBG,
712 "%s: Zero DMA address for TxDL. ", dev->name);
713 DBG_PRINT(INIT_DBG,
714 "Virtual address %p\n", tmp_v);
715 tmp_v = pci_alloc_consistent(nic->pdev,
716 PAGE_SIZE, &tmp_p);
717 if (!tmp_v) {
718 DBG_PRINT(INFO_DBG,
719 "pci_alloc_consistent ");
720 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
721 return -ENOMEM;
722 }
723 mem_allocated += PAGE_SIZE;
724 }
725 while (k < lst_per_page) {
726 int l = (j * lst_per_page) + k;
727 if (l == config->tx_cfg[i].fifo_len)
728 break;
729 mac_control->fifos[i].list_info[l].list_virt_addr =
730 tmp_v + (k * lst_size);
731 mac_control->fifos[i].list_info[l].list_phy_addr =
732 tmp_p + (k * lst_size);
733 k++;
734 }
735 }
736 }
737
738 for (i = 0; i < config->tx_fifo_num; i++) {
739 size = config->tx_cfg[i].fifo_len;
740 mac_control->fifos[i].ufo_in_band_v
741 = kcalloc(size, sizeof(u64), GFP_KERNEL);
742 if (!mac_control->fifos[i].ufo_in_band_v)
743 return -ENOMEM;
744 mem_allocated += (size * sizeof(u64));
745 }
746
747
748 size = 0;
749 for (i = 0; i < config->rx_ring_num; i++) {
750 if (config->rx_cfg[i].num_rxd %
751 (rxd_count[nic->rxd_mode] + 1)) {
752 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
753 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
754 i);
755 DBG_PRINT(ERR_DBG, "RxDs per Block");
756 return FAILURE;
757 }
758 size += config->rx_cfg[i].num_rxd;
759 mac_control->rings[i].block_count =
760 config->rx_cfg[i].num_rxd /
761 (rxd_count[nic->rxd_mode] + 1 );
762 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
763 mac_control->rings[i].block_count;
764 }
765 if (nic->rxd_mode == RXD_MODE_1)
766 size = (size * (sizeof(struct RxD1)));
767 else
768 size = (size * (sizeof(struct RxD3)));
769
770 for (i = 0; i < config->rx_ring_num; i++) {
771 mac_control->rings[i].rx_curr_get_info.block_index = 0;
772 mac_control->rings[i].rx_curr_get_info.offset = 0;
773 mac_control->rings[i].rx_curr_get_info.ring_len =
774 config->rx_cfg[i].num_rxd - 1;
775 mac_control->rings[i].rx_curr_put_info.block_index = 0;
776 mac_control->rings[i].rx_curr_put_info.offset = 0;
777 mac_control->rings[i].rx_curr_put_info.ring_len =
778 config->rx_cfg[i].num_rxd - 1;
779 mac_control->rings[i].nic = nic;
780 mac_control->rings[i].ring_no = i;
781 mac_control->rings[i].lro = lro_enable;
782
783 blk_cnt = config->rx_cfg[i].num_rxd /
784 (rxd_count[nic->rxd_mode] + 1);
785
786 for (j = 0; j < blk_cnt; j++) {
787 struct rx_block_info *rx_blocks;
788 int l;
789
790 rx_blocks = &mac_control->rings[i].rx_blocks[j];
791 size = SIZE_OF_BLOCK;
792 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
793 &tmp_p_addr);
794 if (tmp_v_addr == NULL) {
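
				/*
				 * Record the NULL pointer so that
				 * free_shared_mem() knows where the Rx block
				 * allocation stopped, then bail out; the
				 * caller frees everything allocated so far.
				 */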
801 rx_blocks->block_virt_addr = tmp_v_addr;
802 return -ENOMEM;
803 }
804 mem_allocated += size;
805 memset(tmp_v_addr, 0, size);
806 rx_blocks->block_virt_addr = tmp_v_addr;
807 rx_blocks->block_dma_addr = tmp_p_addr;
808 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
809 rxd_count[nic->rxd_mode],
810 GFP_KERNEL);
811 if (!rx_blocks->rxds)
812 return -ENOMEM;
813 mem_allocated +=
814 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
815 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
816 rx_blocks->rxds[l].virt_addr =
817 rx_blocks->block_virt_addr +
818 (rxd_size[nic->rxd_mode] * l);
819 rx_blocks->rxds[l].dma_addr =
820 rx_blocks->block_dma_addr +
821 (rxd_size[nic->rxd_mode] * l);
822 }
823 }
824
825 for (j = 0; j < blk_cnt; j++) {
826 tmp_v_addr =
827 mac_control->rings[i].rx_blocks[j].block_virt_addr;
828 tmp_v_addr_next =
829 mac_control->rings[i].rx_blocks[(j + 1) %
830 blk_cnt].block_virt_addr;
831 tmp_p_addr =
832 mac_control->rings[i].rx_blocks[j].block_dma_addr;
833 tmp_p_addr_next =
834 mac_control->rings[i].rx_blocks[(j + 1) %
835 blk_cnt].block_dma_addr;
836
837 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
838 pre_rxd_blk->reserved_2_pNext_RxD_block =
839 (unsigned long) tmp_v_addr_next;
840 pre_rxd_blk->pNext_RxD_Blk_physical =
841 (u64) tmp_p_addr_next;
842 }
843 }
844 if (nic->rxd_mode == RXD_MODE_3B) {
845
846
847
848
849 for (i = 0; i < config->rx_ring_num; i++) {
850 blk_cnt = config->rx_cfg[i].num_rxd /
851 (rxd_count[nic->rxd_mode]+ 1);
852 mac_control->rings[i].ba =
853 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
854 GFP_KERNEL);
855 if (!mac_control->rings[i].ba)
856 return -ENOMEM;
857 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
858 for (j = 0; j < blk_cnt; j++) {
859 int k = 0;
860 mac_control->rings[i].ba[j] =
861 kmalloc((sizeof(struct buffAdd) *
862 (rxd_count[nic->rxd_mode] + 1)),
863 GFP_KERNEL);
864 if (!mac_control->rings[i].ba[j])
865 return -ENOMEM;
866 mem_allocated += (sizeof(struct buffAdd) * \
867 (rxd_count[nic->rxd_mode] + 1));
868 while (k != rxd_count[nic->rxd_mode]) {
869 ba = &mac_control->rings[i].ba[j][k];
870
871 ba->ba_0_org = (void *) kmalloc
872 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
873 if (!ba->ba_0_org)
874 return -ENOMEM;
875 mem_allocated +=
876 (BUF0_LEN + ALIGN_SIZE);
877 tmp = (unsigned long)ba->ba_0_org;
878 tmp += ALIGN_SIZE;
879 tmp &= ~((unsigned long) ALIGN_SIZE);
880 ba->ba_0 = (void *) tmp;
881
882 ba->ba_1_org = (void *) kmalloc
883 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
884 if (!ba->ba_1_org)
885 return -ENOMEM;
886 mem_allocated
887 += (BUF1_LEN + ALIGN_SIZE);
888 tmp = (unsigned long) ba->ba_1_org;
889 tmp += ALIGN_SIZE;
890 tmp &= ~((unsigned long) ALIGN_SIZE);
891 ba->ba_1 = (void *) tmp;
892 k++;
893 }
894 }
895 }
896 }
897
898
899 size = sizeof(struct stat_block);
900 mac_control->stats_mem = pci_alloc_consistent
901 (nic->pdev, size, &mac_control->stats_mem_phy);
902
903 if (!mac_control->stats_mem) {
904
905
906
907
908
909 return -ENOMEM;
910 }
911 mem_allocated += size;
912 mac_control->stats_mem_sz = size;
913
914 tmp_v_addr = mac_control->stats_mem;
915 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
916 memset(tmp_v_addr, 0, size);
917 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
918 (unsigned long long) tmp_p_addr);
919 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
920 return SUCCESS;
921}
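
/*
 * free_shared_mem - undo init_shared_mem(): release the TxDL pages, RxD
 * blocks, buffer-address bookkeeping and the statistics block, updating
 * the mem_freed counter along the way.
 */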
930static void free_shared_mem(struct s2io_nic *nic)
931{
932 int i, j, blk_cnt, size;
933 void *tmp_v_addr;
934 dma_addr_t tmp_p_addr;
935 struct mac_info *mac_control;
936 struct config_param *config;
937 int lst_size, lst_per_page;
938 struct net_device *dev;
939 int page_num = 0;
940
941 if (!nic)
942 return;
943
944 dev = nic->dev;
945
946 mac_control = &nic->mac_control;
947 config = &nic->config;
948
949 lst_size = (sizeof(struct TxD) * config->max_txds);
950 lst_per_page = PAGE_SIZE / lst_size;
951
952 for (i = 0; i < config->tx_fifo_num; i++) {
953 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
954 lst_per_page);
955 for (j = 0; j < page_num; j++) {
956 int mem_blks = (j * lst_per_page);
957 if (!mac_control->fifos[i].list_info)
958 return;
959 if (!mac_control->fifos[i].list_info[mem_blks].
960 list_virt_addr)
961 break;
962 pci_free_consistent(nic->pdev, PAGE_SIZE,
963 mac_control->fifos[i].
964 list_info[mem_blks].
965 list_virt_addr,
966 mac_control->fifos[i].
967 list_info[mem_blks].
968 list_phy_addr);
969 nic->mac_control.stats_info->sw_stat.mem_freed
970 += PAGE_SIZE;
971 }
972
973
974
975 if (mac_control->zerodma_virt_addr) {
976 pci_free_consistent(nic->pdev, PAGE_SIZE,
977 mac_control->zerodma_virt_addr,
978 (dma_addr_t)0);
979 DBG_PRINT(INIT_DBG,
980 "%s: Freeing TxDL with zero DMA addr. ",
981 dev->name);
982 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
983 mac_control->zerodma_virt_addr);
984 nic->mac_control.stats_info->sw_stat.mem_freed
985 += PAGE_SIZE;
986 }
987 kfree(mac_control->fifos[i].list_info);
988 nic->mac_control.stats_info->sw_stat.mem_freed +=
989 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
990 }
991
992 size = SIZE_OF_BLOCK;
993 for (i = 0; i < config->rx_ring_num; i++) {
994 blk_cnt = mac_control->rings[i].block_count;
995 for (j = 0; j < blk_cnt; j++) {
996 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
997 block_virt_addr;
998 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
999 block_dma_addr;
1000 if (tmp_v_addr == NULL)
1001 break;
1002 pci_free_consistent(nic->pdev, size,
1003 tmp_v_addr, tmp_p_addr);
1004 nic->mac_control.stats_info->sw_stat.mem_freed += size;
1005 kfree(mac_control->rings[i].rx_blocks[j].rxds);
1006 nic->mac_control.stats_info->sw_stat.mem_freed +=
1007 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
1008 }
1009 }
1010
1011 if (nic->rxd_mode == RXD_MODE_3B) {
1012
1013 for (i = 0; i < config->rx_ring_num; i++) {
1014 blk_cnt = config->rx_cfg[i].num_rxd /
1015 (rxd_count[nic->rxd_mode] + 1);
1016 for (j = 0; j < blk_cnt; j++) {
1017 int k = 0;
1018 if (!mac_control->rings[i].ba[j])
1019 continue;
1020 while (k != rxd_count[nic->rxd_mode]) {
1021 struct buffAdd *ba =
1022 &mac_control->rings[i].ba[j][k];
1023 kfree(ba->ba_0_org);
1024 nic->mac_control.stats_info->sw_stat.\
1025 mem_freed += (BUF0_LEN + ALIGN_SIZE);
1026 kfree(ba->ba_1_org);
1027 nic->mac_control.stats_info->sw_stat.\
1028 mem_freed += (BUF1_LEN + ALIGN_SIZE);
1029 k++;
1030 }
1031 kfree(mac_control->rings[i].ba[j]);
1032 nic->mac_control.stats_info->sw_stat.mem_freed +=
1033 (sizeof(struct buffAdd) *
1034 (rxd_count[nic->rxd_mode] + 1));
1035 }
1036 kfree(mac_control->rings[i].ba);
1037 nic->mac_control.stats_info->sw_stat.mem_freed +=
1038 (sizeof(struct buffAdd *) * blk_cnt);
1039 }
1040 }
1041
1042 for (i = 0; i < nic->config.tx_fifo_num; i++) {
1043 if (mac_control->fifos[i].ufo_in_band_v) {
1044 nic->mac_control.stats_info->sw_stat.mem_freed
1045 += (config->tx_cfg[i].fifo_len * sizeof(u64));
1046 kfree(mac_control->fifos[i].ufo_in_band_v);
1047 }
1048 }
1049
1050 if (mac_control->stats_mem) {
1051 nic->mac_control.stats_info->sw_stat.mem_freed +=
1052 mac_control->stats_mem_sz;
1053 pci_free_consistent(nic->pdev,
1054 mac_control->stats_mem_sz,
1055 mac_control->stats_mem,
1056 mac_control->stats_mem_phy);
1057 }
1058}
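
/*
 * Read the negotiated PCI/PCI-X mode from the pci_mode register; returns
 * the mode, or -1 if the adapter reports an unknown mode.
 */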
1064static int s2io_verify_pci_mode(struct s2io_nic *nic)
1065{
1066 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1067 register u64 val64 = 0;
1068 int mode;
1069
1070 val64 = readq(&bar0->pci_mode);
1071 mode = (u8)GET_PCI_MODE(val64);
1072
1073 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1074 return -1;
1075 return mode;
1076}
1077
1078#define NEC_VENID 0x1033
1079#define NEC_DEVID 0x0125
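/*
 * Returns 1 if the adapter sits directly behind the NEC bridge identified
 * by NEC_VENID/NEC_DEVID, i.e. the card is connected through a PCI
 * Express slot via that bridge.
 */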
1080static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1081{
1082 struct pci_dev *tdev = NULL;
1083 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1084 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1085 if (tdev->bus == s2io_pdev->bus->parent) {
1086 pci_dev_put(tdev);
1087 return 1;
1088 }
1089 }
1090 }
1091 return 0;
1092}
1093
1094static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
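
/*
 * s2io_print_pci_mode - log the bus type and speed the adapter negotiated
 * and cache the bus speed in the device configuration; returns the mode,
 * or -1 if it is unknown.
 */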
1098static int s2io_print_pci_mode(struct s2io_nic *nic)
1099{
1100 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1101 register u64 val64 = 0;
1102 int mode;
1103 struct config_param *config = &nic->config;
1104
1105 val64 = readq(&bar0->pci_mode);
1106 mode = (u8)GET_PCI_MODE(val64);
1107
1108 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1109 return -1;
1110
1111 config->bus_speed = bus_speed[mode];
1112
1113 if (s2io_on_nec_bridge(nic->pdev)) {
1114 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1115 nic->dev->name);
1116 return mode;
1117 }
1118
1119 if (val64 & PCI_MODE_32_BITS) {
1120 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1121 } else {
1122 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1123 }
1124
1125 switch(mode) {
1126 case PCI_MODE_PCI_33:
1127 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1128 break;
1129 case PCI_MODE_PCI_66:
1130 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1131 break;
1132 case PCI_MODE_PCIX_M1_66:
1133 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1134 break;
1135 case PCI_MODE_PCIX_M1_100:
1136 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1137 break;
1138 case PCI_MODE_PCIX_M1_133:
1139 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1140 break;
1141 case PCI_MODE_PCIX_M2_66:
1142 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1143 break;
1144 case PCI_MODE_PCIX_M2_100:
1145 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1146 break;
1147 case PCI_MODE_PCIX_M2_133:
1148 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1149 break;
1150 default:
1151 return -1;
1152 }
1153
1154 return mode;
1155}
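
/*
 * init_tti - program the Transmit Traffic Interrupt (TTI) timer,
 * utilization ranges and frame-count thresholds for every Tx FIFO. The
 * timer value scales with the bus speed on Xframe II, and the UDP FIFOs
 * get larger thresholds when default Tx steering is used without MSI-X.
 */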
1167static int init_tti(struct s2io_nic *nic, int link)
1168{
1169 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1170 register u64 val64 = 0;
1171 int i;
1172 struct config_param *config;
1173
1174 config = &nic->config;
1175
1176 for (i = 0; i < config->tx_fifo_num; i++) {
1177
1178
1179
1180
1181
1182 if (nic->device_type == XFRAME_II_DEVICE) {
1183 int count = (nic->config.bus_speed * 125)/2;
1184 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1185 } else
1186 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1187
1188 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1189 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1190 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1191 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1192 if (i == 0)
1193 if (use_continuous_tx_intrs && (link == LINK_UP))
1194 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1195 writeq(val64, &bar0->tti_data1_mem);
1196
1197 if (nic->config.intr_type == MSI_X) {
1198 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1199 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1200 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1201 TTI_DATA2_MEM_TX_UFC_D(0x300);
1202 } else {
1203 if ((nic->config.tx_steering_type ==
1204 TX_DEFAULT_STEERING) &&
1205 (config->tx_fifo_num > 1) &&
1206 (i >= nic->udp_fifo_idx) &&
1207 (i < (nic->udp_fifo_idx +
1208 nic->total_udp_fifos)))
1209 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1210 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1211 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1212 TTI_DATA2_MEM_TX_UFC_D(0x120);
1213 else
1214 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1215 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1216 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1217 TTI_DATA2_MEM_TX_UFC_D(0x80);
1218 }
1219
1220 writeq(val64, &bar0->tti_data2_mem);
1221
1222 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
1223 TTI_CMD_MEM_OFFSET(i);
1224 writeq(val64, &bar0->tti_command_mem);
1225
1226 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1227 TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
1228 return FAILURE;
1229 }
1230
1231 return SUCCESS;
1232}
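
/*
 * init_nic - one-time hardware initialization: program the swapper and
 * DTX sequences, partition the Tx FIFOs, size and prioritize the Rx
 * queues, set up weighted round-robin steering, frame lengths, statistics
 * DMA, link utilization periods, Tx/Rx interrupt timers and pause
 * parameters. Returns SUCCESS or a negative error code.
 */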
1243static int init_nic(struct s2io_nic *nic)
1244{
1245 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1246 struct net_device *dev = nic->dev;
1247 register u64 val64 = 0;
1248 void __iomem *add;
1249 u32 time;
1250 int i, j;
1251 struct mac_info *mac_control;
1252 struct config_param *config;
1253 int dtx_cnt = 0;
1254 unsigned long long mem_share;
1255 int mem_size;
1256
1257 mac_control = &nic->mac_control;
1258 config = &nic->config;
1259
1260
1261 if(s2io_set_swapper(nic)) {
1262 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1263 return -EIO;
1264 }
1265
1266
1267
1268
1269 if (nic->device_type & XFRAME_II_DEVICE) {
1270 val64 = 0xA500000000ULL;
1271 writeq(val64, &bar0->sw_reset);
1272 msleep(500);
1273 val64 = readq(&bar0->sw_reset);
1274 }
1275
1276
1277 val64 = 0;
1278 writeq(val64, &bar0->sw_reset);
1279 msleep(500);
1280 val64 = readq(&bar0->sw_reset);
1281
1282
1283
1284
1285 if (nic->device_type == XFRAME_II_DEVICE) {
1286 for (i = 0; i < 50; i++) {
1287 val64 = readq(&bar0->adapter_status);
1288 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1289 break;
1290 msleep(10);
1291 }
1292 if (i == 50)
1293 return -ENODEV;
1294 }
1295
1296
1297 add = &bar0->mac_cfg;
1298 val64 = readq(&bar0->mac_cfg);
1299 val64 |= MAC_RMAC_BCAST_ENABLE;
1300 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1301 writel((u32) val64, add);
1302 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1303 writel((u32) (val64 >> 32), (add + 4));
1304
1305
1306 val64 = readq(&bar0->mac_int_mask);
1307 val64 = readq(&bar0->mc_int_mask);
1308 val64 = readq(&bar0->xgxs_int_mask);
1309
1310
1311 val64 = dev->mtu;
1312 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1313
1314 if (nic->device_type & XFRAME_II_DEVICE) {
1315 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1316 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1317 &bar0->dtx_control, UF);
1318 if (dtx_cnt & 0x1)
1319 msleep(1);
1320 dtx_cnt++;
1321 }
1322 } else {
1323 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1324 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1325 &bar0->dtx_control, UF);
1326 val64 = readq(&bar0->dtx_control);
1327 dtx_cnt++;
1328 }
1329 }
1330
1331
1332 val64 = 0;
1333 writeq(val64, &bar0->tx_fifo_partition_0);
1334 writeq(val64, &bar0->tx_fifo_partition_1);
1335 writeq(val64, &bar0->tx_fifo_partition_2);
1336 writeq(val64, &bar0->tx_fifo_partition_3);
1337
1338
1339 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1340 val64 |=
1341 vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1342 13) | vBIT(config->tx_cfg[i].fifo_priority,
1343 ((j * 32) + 5), 3);
1344
1345 if (i == (config->tx_fifo_num - 1)) {
1346 if (i % 2 == 0)
1347 i++;
1348 }
1349
1350 switch (i) {
1351 case 1:
1352 writeq(val64, &bar0->tx_fifo_partition_0);
1353 val64 = 0;
1354 j = 0;
1355 break;
1356 case 3:
1357 writeq(val64, &bar0->tx_fifo_partition_1);
1358 val64 = 0;
1359 j = 0;
1360 break;
1361 case 5:
1362 writeq(val64, &bar0->tx_fifo_partition_2);
1363 val64 = 0;
1364 j = 0;
1365 break;
1366 case 7:
1367 writeq(val64, &bar0->tx_fifo_partition_3);
1368 val64 = 0;
1369 j = 0;
1370 break;
1371 default:
1372 j++;
1373 break;
1374 }
1375 }
1376
1377
1378
1379
1380
1381 if ((nic->device_type == XFRAME_I_DEVICE) &&
1382 (nic->pdev->revision < 4))
1383 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1384
1385 val64 = readq(&bar0->tx_fifo_partition_0);
1386 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1387 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1388
1389
1390
1391
1392
1393 val64 = readq(&bar0->tx_pa_cfg);
1394 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1395 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1396 writeq(val64, &bar0->tx_pa_cfg);
1397
1398
1399 val64 = 0;
1400 for (i = 0; i < config->rx_ring_num; i++) {
1401 val64 |=
1402 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1403 3);
1404 }
1405 writeq(val64, &bar0->rx_queue_priority);
1406
1407
1408
1409
1410
1411 val64 = 0;
1412 if (nic->device_type & XFRAME_II_DEVICE)
1413 mem_size = 32;
1414 else
1415 mem_size = 64;
1416
1417 for (i = 0; i < config->rx_ring_num; i++) {
1418 switch (i) {
1419 case 0:
1420 mem_share = (mem_size / config->rx_ring_num +
1421 mem_size % config->rx_ring_num);
1422 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1423 continue;
1424 case 1:
1425 mem_share = (mem_size / config->rx_ring_num);
1426 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1427 continue;
1428 case 2:
1429 mem_share = (mem_size / config->rx_ring_num);
1430 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1431 continue;
1432 case 3:
1433 mem_share = (mem_size / config->rx_ring_num);
1434 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1435 continue;
1436 case 4:
1437 mem_share = (mem_size / config->rx_ring_num);
1438 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1439 continue;
1440 case 5:
1441 mem_share = (mem_size / config->rx_ring_num);
1442 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1443 continue;
1444 case 6:
1445 mem_share = (mem_size / config->rx_ring_num);
1446 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1447 continue;
1448 case 7:
1449 mem_share = (mem_size / config->rx_ring_num);
1450 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1451 continue;
1452 }
1453 }
1454 writeq(val64, &bar0->rx_queue_cfg);
1455
1456
1457
1458
1459
1460 switch (config->tx_fifo_num) {
1461 case 1:
1462 val64 = 0x0;
1463 writeq(val64, &bar0->tx_w_round_robin_0);
1464 writeq(val64, &bar0->tx_w_round_robin_1);
1465 writeq(val64, &bar0->tx_w_round_robin_2);
1466 writeq(val64, &bar0->tx_w_round_robin_3);
1467 writeq(val64, &bar0->tx_w_round_robin_4);
1468 break;
1469 case 2:
1470 val64 = 0x0001000100010001ULL;
1471 writeq(val64, &bar0->tx_w_round_robin_0);
1472 writeq(val64, &bar0->tx_w_round_robin_1);
1473 writeq(val64, &bar0->tx_w_round_robin_2);
1474 writeq(val64, &bar0->tx_w_round_robin_3);
1475 val64 = 0x0001000100000000ULL;
1476 writeq(val64, &bar0->tx_w_round_robin_4);
1477 break;
1478 case 3:
1479 val64 = 0x0001020001020001ULL;
1480 writeq(val64, &bar0->tx_w_round_robin_0);
1481 val64 = 0x0200010200010200ULL;
1482 writeq(val64, &bar0->tx_w_round_robin_1);
1483 val64 = 0x0102000102000102ULL;
1484 writeq(val64, &bar0->tx_w_round_robin_2);
1485 val64 = 0x0001020001020001ULL;
1486 writeq(val64, &bar0->tx_w_round_robin_3);
1487 val64 = 0x0200010200000000ULL;
1488 writeq(val64, &bar0->tx_w_round_robin_4);
1489 break;
1490 case 4:
1491 val64 = 0x0001020300010203ULL;
1492 writeq(val64, &bar0->tx_w_round_robin_0);
1493 writeq(val64, &bar0->tx_w_round_robin_1);
1494 writeq(val64, &bar0->tx_w_round_robin_2);
1495 writeq(val64, &bar0->tx_w_round_robin_3);
1496 val64 = 0x0001020300000000ULL;
1497 writeq(val64, &bar0->tx_w_round_robin_4);
1498 break;
1499 case 5:
1500 val64 = 0x0001020304000102ULL;
1501 writeq(val64, &bar0->tx_w_round_robin_0);
1502 val64 = 0x0304000102030400ULL;
1503 writeq(val64, &bar0->tx_w_round_robin_1);
1504 val64 = 0x0102030400010203ULL;
1505 writeq(val64, &bar0->tx_w_round_robin_2);
1506 val64 = 0x0400010203040001ULL;
1507 writeq(val64, &bar0->tx_w_round_robin_3);
1508 val64 = 0x0203040000000000ULL;
1509 writeq(val64, &bar0->tx_w_round_robin_4);
1510 break;
1511 case 6:
1512 val64 = 0x0001020304050001ULL;
1513 writeq(val64, &bar0->tx_w_round_robin_0);
1514 val64 = 0x0203040500010203ULL;
1515 writeq(val64, &bar0->tx_w_round_robin_1);
1516 val64 = 0x0405000102030405ULL;
1517 writeq(val64, &bar0->tx_w_round_robin_2);
1518 val64 = 0x0001020304050001ULL;
1519 writeq(val64, &bar0->tx_w_round_robin_3);
1520 val64 = 0x0203040500000000ULL;
1521 writeq(val64, &bar0->tx_w_round_robin_4);
1522 break;
1523 case 7:
1524 val64 = 0x0001020304050600ULL;
1525 writeq(val64, &bar0->tx_w_round_robin_0);
1526 val64 = 0x0102030405060001ULL;
1527 writeq(val64, &bar0->tx_w_round_robin_1);
1528 val64 = 0x0203040506000102ULL;
1529 writeq(val64, &bar0->tx_w_round_robin_2);
1530 val64 = 0x0304050600010203ULL;
1531 writeq(val64, &bar0->tx_w_round_robin_3);
1532 val64 = 0x0405060000000000ULL;
1533 writeq(val64, &bar0->tx_w_round_robin_4);
1534 break;
1535 case 8:
1536 val64 = 0x0001020304050607ULL;
1537 writeq(val64, &bar0->tx_w_round_robin_0);
1538 writeq(val64, &bar0->tx_w_round_robin_1);
1539 writeq(val64, &bar0->tx_w_round_robin_2);
1540 writeq(val64, &bar0->tx_w_round_robin_3);
1541 val64 = 0x0001020300000000ULL;
1542 writeq(val64, &bar0->tx_w_round_robin_4);
1543 break;
1544 }
1545
1546
1547 val64 = readq(&bar0->tx_fifo_partition_0);
1548 val64 |= (TX_FIFO_PARTITION_EN);
1549 writeq(val64, &bar0->tx_fifo_partition_0);
1550
1551
1552
1553
1554
1555 switch (config->rx_ring_num) {
1556 case 1:
1557 val64 = 0x0;
1558 writeq(val64, &bar0->rx_w_round_robin_0);
1559 writeq(val64, &bar0->rx_w_round_robin_1);
1560 writeq(val64, &bar0->rx_w_round_robin_2);
1561 writeq(val64, &bar0->rx_w_round_robin_3);
1562 writeq(val64, &bar0->rx_w_round_robin_4);
1563
1564 val64 = 0x8080808080808080ULL;
1565 writeq(val64, &bar0->rts_qos_steering);
1566 break;
1567 case 2:
1568 val64 = 0x0001000100010001ULL;
1569 writeq(val64, &bar0->rx_w_round_robin_0);
1570 writeq(val64, &bar0->rx_w_round_robin_1);
1571 writeq(val64, &bar0->rx_w_round_robin_2);
1572 writeq(val64, &bar0->rx_w_round_robin_3);
1573 val64 = 0x0001000100000000ULL;
1574 writeq(val64, &bar0->rx_w_round_robin_4);
1575
1576 val64 = 0x8080808040404040ULL;
1577 writeq(val64, &bar0->rts_qos_steering);
1578 break;
1579 case 3:
1580 val64 = 0x0001020001020001ULL;
1581 writeq(val64, &bar0->rx_w_round_robin_0);
1582 val64 = 0x0200010200010200ULL;
1583 writeq(val64, &bar0->rx_w_round_robin_1);
1584 val64 = 0x0102000102000102ULL;
1585 writeq(val64, &bar0->rx_w_round_robin_2);
1586 val64 = 0x0001020001020001ULL;
1587 writeq(val64, &bar0->rx_w_round_robin_3);
1588 val64 = 0x0200010200000000ULL;
1589 writeq(val64, &bar0->rx_w_round_robin_4);
1590
1591 val64 = 0x8080804040402020ULL;
1592 writeq(val64, &bar0->rts_qos_steering);
1593 break;
1594 case 4:
1595 val64 = 0x0001020300010203ULL;
1596 writeq(val64, &bar0->rx_w_round_robin_0);
1597 writeq(val64, &bar0->rx_w_round_robin_1);
1598 writeq(val64, &bar0->rx_w_round_robin_2);
1599 writeq(val64, &bar0->rx_w_round_robin_3);
1600 val64 = 0x0001020300000000ULL;
1601 writeq(val64, &bar0->rx_w_round_robin_4);
1602
1603 val64 = 0x8080404020201010ULL;
1604 writeq(val64, &bar0->rts_qos_steering);
1605 break;
1606 case 5:
1607 val64 = 0x0001020304000102ULL;
1608 writeq(val64, &bar0->rx_w_round_robin_0);
1609 val64 = 0x0304000102030400ULL;
1610 writeq(val64, &bar0->rx_w_round_robin_1);
1611 val64 = 0x0102030400010203ULL;
1612 writeq(val64, &bar0->rx_w_round_robin_2);
1613 val64 = 0x0400010203040001ULL;
1614 writeq(val64, &bar0->rx_w_round_robin_3);
1615 val64 = 0x0203040000000000ULL;
1616 writeq(val64, &bar0->rx_w_round_robin_4);
1617
1618 val64 = 0x8080404020201008ULL;
1619 writeq(val64, &bar0->rts_qos_steering);
1620 break;
1621 case 6:
1622 val64 = 0x0001020304050001ULL;
1623 writeq(val64, &bar0->rx_w_round_robin_0);
1624 val64 = 0x0203040500010203ULL;
1625 writeq(val64, &bar0->rx_w_round_robin_1);
1626 val64 = 0x0405000102030405ULL;
1627 writeq(val64, &bar0->rx_w_round_robin_2);
1628 val64 = 0x0001020304050001ULL;
1629 writeq(val64, &bar0->rx_w_round_robin_3);
1630 val64 = 0x0203040500000000ULL;
1631 writeq(val64, &bar0->rx_w_round_robin_4);
1632
1633 val64 = 0x8080404020100804ULL;
1634 writeq(val64, &bar0->rts_qos_steering);
1635 break;
1636 case 7:
1637 val64 = 0x0001020304050600ULL;
1638 writeq(val64, &bar0->rx_w_round_robin_0);
1639 val64 = 0x0102030405060001ULL;
1640 writeq(val64, &bar0->rx_w_round_robin_1);
1641 val64 = 0x0203040506000102ULL;
1642 writeq(val64, &bar0->rx_w_round_robin_2);
1643 val64 = 0x0304050600010203ULL;
1644 writeq(val64, &bar0->rx_w_round_robin_3);
1645 val64 = 0x0405060000000000ULL;
1646 writeq(val64, &bar0->rx_w_round_robin_4);
1647
1648 val64 = 0x8080402010080402ULL;
1649 writeq(val64, &bar0->rts_qos_steering);
1650 break;
1651 case 8:
1652 val64 = 0x0001020304050607ULL;
1653 writeq(val64, &bar0->rx_w_round_robin_0);
1654 writeq(val64, &bar0->rx_w_round_robin_1);
1655 writeq(val64, &bar0->rx_w_round_robin_2);
1656 writeq(val64, &bar0->rx_w_round_robin_3);
1657 val64 = 0x0001020300000000ULL;
1658 writeq(val64, &bar0->rx_w_round_robin_4);
1659
1660 val64 = 0x8040201008040201ULL;
1661 writeq(val64, &bar0->rts_qos_steering);
1662 break;
1663 }
1664
1665
1666 val64 = 0;
1667 for (i = 0; i < 8; i++)
1668 writeq(val64, &bar0->rts_frm_len_n[i]);
1669
1670
1671 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1672 for (i = 0 ; i < config->rx_ring_num ; i++)
1673 writeq(val64, &bar0->rts_frm_len_n[i]);
1674
1675
1676
1677
1678 for (i = 0; i < config->rx_ring_num; i++) {
1679
1680
1681
1682
1683
1684
1685 if (rts_frm_len[i] != 0) {
1686 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1687 &bar0->rts_frm_len_n[i]);
1688 }
1689 }
1690
1691
1692 for (i = 0; i < 64; i++) {
1693 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1694 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1695 dev->name);
1696 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1697 return -ENODEV;
1698 }
1699 }
1700
1701
1702 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1703
1704 if (nic->device_type == XFRAME_II_DEVICE) {
1705 val64 = STAT_BC(0x320);
1706 writeq(val64, &bar0->stat_byte_cnt);
1707 }
1708
1709
1710
1711
1712
1713 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1714 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1715 writeq(val64, &bar0->mac_link_util);
1716
1717
1718
1719
1720
1721
1722
1723 if (SUCCESS != init_tti(nic, nic->last_link_state))
1724 return -ENODEV;
1725
1726
1727 if (nic->device_type == XFRAME_II_DEVICE) {
1728
1729
1730
1731
1732 int count = (nic->config.bus_speed * 125)/4;
1733 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1734 } else
1735 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1736 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1737 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1738 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1739
1740 writeq(val64, &bar0->rti_data1_mem);
1741
1742 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1743 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1744 if (nic->config.intr_type == MSI_X)
1745 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1746 RTI_DATA2_MEM_RX_UFC_D(0x40));
1747 else
1748 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1749 RTI_DATA2_MEM_RX_UFC_D(0x80));
1750 writeq(val64, &bar0->rti_data2_mem);
1751
1752 for (i = 0; i < config->rx_ring_num; i++) {
1753 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1754 | RTI_CMD_MEM_OFFSET(i);
1755 writeq(val64, &bar0->rti_command_mem);
1756
1757
1758
1759
1760
1761
1762
1763
1764 time = 0;
1765 while (TRUE) {
1766 val64 = readq(&bar0->rti_command_mem);
1767 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1768 break;
1769
1770 if (time > 10) {
1771 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1772 dev->name);
1773 return -ENODEV;
1774 }
1775 time++;
1776 msleep(50);
1777 }
1778 }
1779
1780
1781
1782
1783
1784 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1785 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1786
1787
1788 add = &bar0->mac_cfg;
1789 val64 = readq(&bar0->mac_cfg);
1790 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1791 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1792 writel((u32) (val64), add);
1793 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1794 writel((u32) (val64 >> 32), (add + 4));
1795 val64 = readq(&bar0->mac_cfg);
1796
1797
1798 add = &bar0->mac_cfg;
1799 val64 = readq(&bar0->mac_cfg);
1800 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1801 if (nic->device_type == XFRAME_II_DEVICE)
1802 writeq(val64, &bar0->mac_cfg);
1803 else {
1804 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1805 writel((u32) (val64), add);
1806 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1807 writel((u32) (val64 >> 32), (add + 4));
1808 }
1809
1810
1811
1812
1813
1814 val64 = readq(&bar0->rmac_pause_cfg);
1815 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1816 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1817 writeq(val64, &bar0->rmac_pause_cfg);
1818
1819
1820
1821
1822
1823
1824
1825 val64 = 0;
1826 for (i = 0; i < 4; i++) {
1827 val64 |=
1828 (((u64) 0xFF00 | nic->mac_control.
1829 mc_pause_threshold_q0q3)
1830 << (i * 2 * 8));
1831 }
1832 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1833
1834 val64 = 0;
1835 for (i = 0; i < 4; i++) {
1836 val64 |=
1837 (((u64) 0xFF00 | nic->mac_control.
1838 mc_pause_threshold_q4q7)
1839 << (i * 2 * 8));
1840 }
1841 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1842
1843
1844
1845
1846
1847 val64 = readq(&bar0->pic_control);
1848 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1849 writeq(val64, &bar0->pic_control);
1850
1851 if (nic->config.bus_speed == 266) {
1852 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1853 writeq(0x0, &bar0->read_retry_delay);
1854 writeq(0x0, &bar0->write_retry_delay);
1855 }
1856
1857
1858
1859
1860
1861 if (nic->device_type == XFRAME_II_DEVICE) {
1862 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1863 MISC_LINK_STABILITY_PRD(3);
1864 writeq(val64, &bar0->misc_control);
1865 val64 = readq(&bar0->pic_control2);
1866 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1867 writeq(val64, &bar0->pic_control2);
1868 }
1869 if (strstr(nic->product_name, "CX4")) {
1870 val64 = TMAC_AVG_IPG(0x17);
1871 writeq(val64, &bar0->tmac_avg_ipg);
1872 }
1873
1874 return SUCCESS;
1875}
1876#define LINK_UP_DOWN_INTERRUPT 1
1877#define MAC_RMAC_ERR_TIMER 2
1878
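/*
 * Xframe II signals link faults through the GPIO link up/down interrupt;
 * Xframe I is handled by polling via the RMAC error timer.
 */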
1879static int s2io_link_fault_indication(struct s2io_nic *nic)
1880{
1881 if (nic->device_type == XFRAME_II_DEVICE)
1882 return LINK_UP_DOWN_INTERRUPT;
1883 else
1884 return MAC_RMAC_ERR_TIMER;
1885}
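
/*
 * Set or clear the given bits in an interrupt mask register: for
 * ENABLE_INTRS the bits are cleared (unmasked), otherwise they are set
 * (masked).
 */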
1896static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1897{
1898 u64 temp64;
1899
1900 temp64 = readq(addr);
1901
1902 if(flag == ENABLE_INTRS)
1903 temp64 &= ~((u64) value);
1904 else
1905 temp64 |= ((u64) value);
1906 writeq(temp64, addr);
1907}
1908
1909static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1910{
1911 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1912 register u64 gen_int_mask = 0;
1913 u64 interruptible;
1914
1915 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1916 if (mask & TX_DMA_INTR) {
1917
1918 gen_int_mask |= TXDMA_INT_M;
1919
1920 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1921 TXDMA_PCC_INT | TXDMA_TTI_INT |
1922 TXDMA_LSO_INT | TXDMA_TPA_INT |
1923 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1924
1925 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1926 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1927 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1928 &bar0->pfc_err_mask);
1929
1930 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1931 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1932 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1933
1934 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1935 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1936 PCC_N_SERR | PCC_6_COF_OV_ERR |
1937 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1938 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1939 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1940
1941 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1942 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1943
1944 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1945 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1946 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1947 flag, &bar0->lso_err_mask);
1948
1949 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1950 flag, &bar0->tpa_err_mask);
1951
1952 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1953
1954 }
1955
1956 if (mask & TX_MAC_INTR) {
1957 gen_int_mask |= TXMAC_INT_M;
1958 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1959 &bar0->mac_int_mask);
1960 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1961 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1962 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1963 flag, &bar0->mac_tmac_err_mask);
1964 }
1965
1966 if (mask & TX_XGXS_INTR) {
1967 gen_int_mask |= TXXGXS_INT_M;
1968 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1969 &bar0->xgxs_int_mask);
1970 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1971 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1972 flag, &bar0->xgxs_txgxs_err_mask);
1973 }
1974
1975 if (mask & RX_DMA_INTR) {
1976 gen_int_mask |= RXDMA_INT_M;
1977 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1978 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1979 flag, &bar0->rxdma_int_mask);
1980 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1981 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1982 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1983 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1984 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1985 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1986 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1987 &bar0->prc_pcix_err_mask);
1988 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1989 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1990 &bar0->rpa_err_mask);
1991 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1992 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1993 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1994 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
1995 flag, &bar0->rda_err_mask);
1996 do_s2io_write_bits(RTI_SM_ERR_ALARM |
1997 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1998 flag, &bar0->rti_err_mask);
1999 }
2000
2001 if (mask & RX_MAC_INTR) {
2002 gen_int_mask |= RXMAC_INT_M;
2003 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2004 &bar0->mac_int_mask);
2005 interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2006 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2007 RMAC_DOUBLE_ECC_ERR;
2008 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
2009 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
2010 do_s2io_write_bits(interruptible,
2011 flag, &bar0->mac_rmac_err_mask);
2012 }
2013
2014 if (mask & RX_XGXS_INTR)
2015 {
2016 gen_int_mask |= RXXGXS_INT_M;
2017 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2018 &bar0->xgxs_int_mask);
2019 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2020 &bar0->xgxs_rxgxs_err_mask);
2021 }
2022
2023 if (mask & MC_INTR) {
2024 gen_int_mask |= MC_INT_M;
2025 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2026 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2027 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2028 &bar0->mc_err_mask);
2029 }
2030 nic->general_int_mask = gen_int_mask;
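
	/*
	 * The alarm masks computed above are not applied yet: the general
	 * mask is cleared here. Remove this override once alarm interrupts
	 * are enabled.
	 */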
2033 nic->general_int_mask = 0;
2034}
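
/*
 * en_dis_able_nic_intrs - enable or disable the selected groups of
 * "normal" interrupts (PIC/link, Tx traffic, Rx traffic), update the
 * general interrupt mask register accordingly and cache its value.
 */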
2046static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2047{
2048 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2049 register u64 temp64 = 0, intr_mask = 0;
2050
2051 intr_mask = nic->general_int_mask;
2052
2053
2054
2055 if (mask & TX_PIC_INTR) {
2056
2057 intr_mask |= TXPIC_INT_M;
2058 if (flag == ENABLE_INTRS) {
2059
2060
2061
2062
2063
2064
2065 if (s2io_link_fault_indication(nic) ==
2066 LINK_UP_DOWN_INTERRUPT ) {
2067 do_s2io_write_bits(PIC_INT_GPIO, flag,
2068 &bar0->pic_int_mask);
2069 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2070 &bar0->gpio_int_mask);
2071 } else
2072 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2073 } else if (flag == DISABLE_INTRS) {
2074
2075
2076
2077
2078 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2079 }
2080 }
2081
2082
2083 if (mask & TX_TRAFFIC_INTR) {
2084 intr_mask |= TXTRAFFIC_INT_M;
2085 if (flag == ENABLE_INTRS) {
2086
2087
2088
2089
2090 writeq(0x0, &bar0->tx_traffic_mask);
2091 } else if (flag == DISABLE_INTRS) {
2092
2093
2094
2095
2096 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2097 }
2098 }
2099
2100
2101 if (mask & RX_TRAFFIC_INTR) {
2102 intr_mask |= RXTRAFFIC_INT_M;
2103 if (flag == ENABLE_INTRS) {
2104
2105 writeq(0x0, &bar0->rx_traffic_mask);
2106 } else if (flag == DISABLE_INTRS) {
2107
2108
2109
2110
2111 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2112 }
2113 }
2114
2115 temp64 = readq(&bar0->general_int_mask);
2116 if (flag == ENABLE_INTRS)
2117 temp64 &= ~((u64) intr_mask);
2118 else
2119 temp64 = DISABLE_ALL_INTRS;
2120 writeq(temp64, &bar0->general_int_mask);
2121
2122 nic->general_int_mask = readq(&bar0->general_int_mask);
2123}
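
/*
 * Returns 1 when the RMAC PCC idle bits match the expected state: all
 * idle when 'flag' is TRUE, not idle when FALSE. Xframe I revisions below
 * 4 only expose four PCCs and are checked against the FOUR_IDLE mask.
 */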
2130static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2131{
2132 int ret = 0, herc;
2133 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2134 u64 val64 = readq(&bar0->adapter_status);
2135
2136 herc = (sp->device_type == XFRAME_II_DEVICE);
2137
2138 if (flag == FALSE) {
2139 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2140 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2141 ret = 1;
2142 } else {
2143 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2144 ret = 1;
2145 }
2146 } else {
2147 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2148 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2149 ADAPTER_STATUS_RMAC_PCC_IDLE))
2150 ret = 1;
2151 } else {
2152 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2153 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2154 ret = 1;
2155 }
2156 }
2157
2158 return ret;
2159}
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
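/**
 * verify_xena_quiescence - Checks whether the adapter is quiescent
 * @sp : private member of the device structure.
 * Description: Reads the adapter status register and verifies that TDMA,
 * RDMA, PFC, TMAC, PIC, MC DRAM/queues, the PLLs and the RC/PRC blocks all
 * report a ready or quiescent state.
 * Return Value: 1 if quiescent, 0 otherwise.
 */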
2170static int verify_xena_quiescence(struct s2io_nic *sp)
2171{
2172 int mode;
2173 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2174 u64 val64 = readq(&bar0->adapter_status);
2175 mode = s2io_verify_pci_mode(sp);
2176
2177 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2178 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2179 return 0;
2180 }
2181 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2182 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2183 return 0;
2184 }
2185 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2186 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2187 return 0;
2188 }
2189 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2190 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2191 return 0;
2192 }
2193 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2194 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2195 return 0;
2196 }
2197 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2198 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2199 return 0;
2200 }
2201 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2202 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2203 return 0;
2204 }
2205 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2206 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2207 return 0;
2208 }
2209
2210
2211
2212
2213
2214
2215 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2216 sp->device_type == XFRAME_II_DEVICE && mode !=
2217 PCI_MODE_PCI_33) {
2218 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2219 return 0;
2220 }
2221 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2222 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2223 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2224 return 0;
2225 }
2226 return 1;
2227}
2228
2229
2230
2231
2232
2233
2234
2235
2236
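/*
 * fix_mac_address - Replays the fix_mac[] write sequence through the
 * gpio_control register (until END_SIGN) to work around a MAC address
 * problem on some cards.
 */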
2237static void fix_mac_address(struct s2io_nic * sp)
2238{
2239 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2240 u64 val64;
2241 int i = 0;
2242
2243 while (fix_mac[i] != END_SIGN) {
2244 writeq(fix_mac[i++], &bar0->gpio_control);
2245 udelay(10);
2246 val64 = readq(&bar0->gpio_control);
2247 }
2248}
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
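/**
 * start_nic - Starts the NIC
 * @nic : device private variable.
 * Description: Programs the PRC registers for every Rx ring, sets up VLAN
 * stripping and 3-buffer mode options, enables RLDRAM queueing, verifies
 * that the adapter is quiescent and finally turns transmit on.
 * Return Value: SUCCESS on success, FAILURE if the device is not ready.
 */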
2263static int start_nic(struct s2io_nic *nic)
2264{
2265 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2266 struct net_device *dev = nic->dev;
2267 register u64 val64 = 0;
2268 u16 subid, i;
2269 struct mac_info *mac_control;
2270 struct config_param *config;
2271
2272 mac_control = &nic->mac_control;
2273 config = &nic->config;
2274
2275
2276 for (i = 0; i < config->rx_ring_num; i++) {
2277 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2278 &bar0->prc_rxd0_n[i]);
2279
2280 val64 = readq(&bar0->prc_ctrl_n[i]);
2281 if (nic->rxd_mode == RXD_MODE_1)
2282 val64 |= PRC_CTRL_RC_ENABLED;
2283 else
2284 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2285 if (nic->device_type == XFRAME_II_DEVICE)
2286 val64 |= PRC_CTRL_GROUP_READS;
2287 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2288 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2289 writeq(val64, &bar0->prc_ctrl_n[i]);
2290 }
2291
2292 if (nic->rxd_mode == RXD_MODE_3B) {
2293
2294 val64 = readq(&bar0->rx_pa_cfg);
2295 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2296 writeq(val64, &bar0->rx_pa_cfg);
2297 }
2298
2299 if (vlan_tag_strip == 0) {
2300 val64 = readq(&bar0->rx_pa_cfg);
2301 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2302 writeq(val64, &bar0->rx_pa_cfg);
2303 nic->vlan_strip_flag = 0;
2304 }
2305
2306
2307
2308
2309
2310
2311 val64 = readq(&bar0->mc_rldram_mrs);
2312 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2313 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2314 val64 = readq(&bar0->mc_rldram_mrs);
2315
2316 msleep(100);
2317
2318
2319 val64 = readq(&bar0->adapter_control);
2320 val64 &= ~ADAPTER_ECC_EN;
2321 writeq(val64, &bar0->adapter_control);
2322
2323
2324
2325
2326
2327 val64 = readq(&bar0->adapter_status);
2328 if (!verify_xena_quiescence(nic)) {
2329 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2330 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2331 (unsigned long long) val64);
2332 return FAILURE;
2333 }
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344 val64 = readq(&bar0->adapter_control);
2345 val64 |= ADAPTER_EOI_TX_ON;
2346 writeq(val64, &bar0->adapter_control);
2347
2348 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2349
2350
2351
2352
2353 schedule_work(&nic->set_link_task);
2354 }
2355
2356 subid = nic->pdev->subsystem_device;
2357 if (((subid & 0xFF) >= 0x07) &&
2358 (nic->device_type == XFRAME_I_DEVICE)) {
2359 val64 = readq(&bar0->gpio_control);
2360 val64 |= 0x0000800000000000ULL;
2361 writeq(val64, &bar0->gpio_control);
2362 val64 = 0x0411040400000000ULL;
2363 writeq(val64, (void __iomem *)bar0 + 0x2700);
2364 }
2365
2366 return SUCCESS;
2367}
2368
2369
2370
2371 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2372 struct TxD *txdlp, int get_off)
2373{
2374 struct s2io_nic *nic = fifo_data->nic;
2375 struct sk_buff *skb;
2376 struct TxD *txds;
2377 u16 j, frg_cnt;
2378
2379 txds = txdlp;
2380 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2381 pci_unmap_single(nic->pdev, (dma_addr_t)
2382 txds->Buffer_Pointer, sizeof(u64),
2383 PCI_DMA_TODEVICE);
2384 txds++;
2385 }
2386
2387 skb = (struct sk_buff *) ((unsigned long)
2388 txds->Host_Control);
2389 if (!skb) {
2390 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2391 return NULL;
2392 }
2393 pci_unmap_single(nic->pdev, (dma_addr_t)
2394 txds->Buffer_Pointer,
2395 skb->len - skb->data_len,
2396 PCI_DMA_TODEVICE);
2397 frg_cnt = skb_shinfo(skb)->nr_frags;
2398 if (frg_cnt) {
2399 txds++;
2400 for (j = 0; j < frg_cnt; j++, txds++) {
2401 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2402 if (!txds->Buffer_Pointer)
2403 break;
2404 pci_unmap_page(nic->pdev, (dma_addr_t)
2405 txds->Buffer_Pointer,
2406 frag->size, PCI_DMA_TODEVICE);
2407 }
2408 }
2409 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2410 return(skb);
2411}
2412
2413
2414
2415
2416
2417
2418
2419
2420
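/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description: Walks every Tx FIFO, frees any skb still attached to a
 * descriptor and resets the per-FIFO get/put offsets.
 * Return Value: void
 */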
2421static void free_tx_buffers(struct s2io_nic *nic)
2422{
2423 struct net_device *dev = nic->dev;
2424 struct sk_buff *skb;
2425 struct TxD *txdp;
2426 int i, j;
2427 struct mac_info *mac_control;
2428 struct config_param *config;
2429 int cnt = 0;
2430
2431 mac_control = &nic->mac_control;
2432 config = &nic->config;
2433
2434 for (i = 0; i < config->tx_fifo_num; i++) {
2435 unsigned long flags;
2436 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2437 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2438 txdp = (struct TxD *)
2439 mac_control->fifos[i].list_info[j].list_virt_addr;
2440 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2441 if (skb) {
2442 nic->mac_control.stats_info->sw_stat.mem_freed
2443 += skb->truesize;
2444 dev_kfree_skb(skb);
2445 cnt++;
2446 }
2447 }
2448 DBG_PRINT(INTR_DBG,
2449 "%s:forcibly freeing %d skbs on FIFO%d\n",
2450 dev->name, cnt, i);
2451 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2452 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2453 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2454 }
2455}
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
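/**
 * stop_nic - To stop the nic
 * @nic : device private variable.
 * Description: Disables all error alarms and interrupts and clears the
 * adapter enable bit so the device stops processing traffic.
 * Return Value: void.
 */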
2467static void stop_nic(struct s2io_nic *nic)
2468{
2469 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2470 register u64 val64 = 0;
2471 u16 interruptible;
2472 struct mac_info *mac_control;
2473 struct config_param *config;
2474
2475 mac_control = &nic->mac_control;
2476 config = &nic->config;
2477
2478
2479 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2480 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2481 interruptible |= TX_PIC_INTR;
2482 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2483
2484
2485 val64 = readq(&bar0->adapter_control);
2486 val64 &= ~(ADAPTER_CNTL_EN);
2487 writeq(val64, &bar0->adapter_control);
2488}
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
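/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic : device private variable.
 * @ring : ring whose descriptors are to be replenished.
 * @from_card_up : set on the card-up path; the fixed Buffer0/Buffer1 areas
 * are then freshly DMA mapped instead of only synced for the device.
 * Description: Allocates and maps skbs for the configured receive mode
 * (1-buffer or 3-buffer) and hands the descriptors back to the NIC by
 * setting the ownership bit.
 * Return Value: SUCCESS, or -ENOMEM on allocation/DMA-mapping failure.
 */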
2512static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2513 int from_card_up)
2514{
2515 struct sk_buff *skb;
2516 struct RxD_t *rxdp;
2517 int off, size, block_no, block_no1;
2518 u32 alloc_tab = 0;
2519 u32 alloc_cnt;
2520 u64 tmp;
2521 struct buffAdd *ba;
2522 struct RxD_t *first_rxdp = NULL;
2523 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2524 int rxd_index = 0;
2525 struct RxD1 *rxdp1;
2526 struct RxD3 *rxdp3;
2527 struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
2528
2529 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2530
2531 block_no1 = ring->rx_curr_get_info.block_index;
2532 while (alloc_tab < alloc_cnt) {
2533 block_no = ring->rx_curr_put_info.block_index;
2534
2535 off = ring->rx_curr_put_info.offset;
2536
2537 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2538
2539 rxd_index = off + 1;
2540 if (block_no)
2541 rxd_index += (block_no * ring->rxd_count);
2542
2543 if ((block_no == block_no1) &&
2544 (off == ring->rx_curr_get_info.offset) &&
2545 (rxdp->Host_Control)) {
2546 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2547 ring->dev->name);
2548 DBG_PRINT(INTR_DBG, " info equated\n");
2549 goto end;
2550 }
2551 if (off && (off == ring->rxd_count)) {
2552 ring->rx_curr_put_info.block_index++;
2553 if (ring->rx_curr_put_info.block_index ==
2554 ring->block_count)
2555 ring->rx_curr_put_info.block_index = 0;
2556 block_no = ring->rx_curr_put_info.block_index;
2557 off = 0;
2558 ring->rx_curr_put_info.offset = off;
2559 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2560 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2561 ring->dev->name, rxdp);
2562
2563 }
2564
2565 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2566 ((ring->rxd_mode == RXD_MODE_3B) &&
2567 (rxdp->Control_2 & s2BIT(0)))) {
2568 ring->rx_curr_put_info.offset = off;
2569 goto end;
2570 }
2571
2572 size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2573 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2574 if (ring->rxd_mode == RXD_MODE_1)
2575 size += NET_IP_ALIGN;
2576 else
2577 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2578
2579
2580 skb = dev_alloc_skb(size);
2581 if(!skb) {
2582 DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
2583 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2584 if (first_rxdp) {
2585 wmb();
2586 first_rxdp->Control_1 |= RXD_OWN_XENA;
2587 }
2588 stats->mem_alloc_fail_cnt++;
2589
2590 return -ENOMEM;
2591 }
2592 stats->mem_allocated += skb->truesize;
2593
2594 if (ring->rxd_mode == RXD_MODE_1) {
2595
2596 rxdp1 = (struct RxD1*)rxdp;
2597 memset(rxdp, 0, sizeof(struct RxD1));
2598 skb_reserve(skb, NET_IP_ALIGN);
2599 rxdp1->Buffer0_ptr = pci_map_single
2600 (ring->pdev, skb->data, size - NET_IP_ALIGN,
2601 PCI_DMA_FROMDEVICE);
2602 if (pci_dma_mapping_error(nic->pdev,
2603 rxdp1->Buffer0_ptr))
2604 goto pci_map_failed;
2605
2606 rxdp->Control_2 =
2607 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2608 rxdp->Host_Control = (unsigned long) (skb);
2609 } else if (ring->rxd_mode == RXD_MODE_3B) {
2610
2611
2612
2613
2614
2615
2616 rxdp3 = (struct RxD3*)rxdp;
2617
2618 Buffer0_ptr = rxdp3->Buffer0_ptr;
2619 Buffer1_ptr = rxdp3->Buffer1_ptr;
2620 memset(rxdp, 0, sizeof(struct RxD3));
2621
2622 rxdp3->Buffer0_ptr = Buffer0_ptr;
2623 rxdp3->Buffer1_ptr = Buffer1_ptr;
2624
2625 ba = &ring->ba[block_no][off];
2626 skb_reserve(skb, BUF0_LEN);
2627 tmp = (u64)(unsigned long) skb->data;
2628 tmp += ALIGN_SIZE;
2629 tmp &= ~ALIGN_SIZE;
2630 skb->data = (void *) (unsigned long)tmp;
2631 skb_reset_tail_pointer(skb);
2632
2633 if (from_card_up) {
2634 rxdp3->Buffer0_ptr =
2635 pci_map_single(ring->pdev, ba->ba_0,
2636 BUF0_LEN, PCI_DMA_FROMDEVICE);
2637 if (pci_dma_mapping_error(nic->pdev,
2638 rxdp3->Buffer0_ptr))
2639 goto pci_map_failed;
2640 } else
2641 pci_dma_sync_single_for_device(ring->pdev,
2642 (dma_addr_t) rxdp3->Buffer0_ptr,
2643 BUF0_LEN, PCI_DMA_FROMDEVICE);
2644
2645 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2646 if (ring->rxd_mode == RXD_MODE_3B) {
2647
2648
2649
2650
2651
2652
2653 rxdp3->Buffer2_ptr = pci_map_single
2654 (ring->pdev, skb->data, ring->mtu + 4,
2655 PCI_DMA_FROMDEVICE);
2656
2657 if (pci_dma_mapping_error(nic->pdev,
2658 rxdp3->Buffer2_ptr))
2659 goto pci_map_failed;
2660
2661 if (from_card_up) {
2662 rxdp3->Buffer1_ptr =
2663 pci_map_single(ring->pdev,
2664 ba->ba_1, BUF1_LEN,
2665 PCI_DMA_FROMDEVICE);
2666
2667 if (pci_dma_mapping_error(nic->pdev,
2668 rxdp3->Buffer1_ptr)) {
2669 pci_unmap_single
2670 (ring->pdev,
2671 (dma_addr_t)(unsigned long)
2672 skb->data,
2673 ring->mtu + 4,
2674 PCI_DMA_FROMDEVICE);
2675 goto pci_map_failed;
2676 }
2677 }
2678 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2679 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2680 (ring->mtu + 4);
2681 }
2682 rxdp->Control_2 |= s2BIT(0);
2683 rxdp->Host_Control = (unsigned long) (skb);
2684 }
2685 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2686 rxdp->Control_1 |= RXD_OWN_XENA;
2687 off++;
2688 if (off == (ring->rxd_count + 1))
2689 off = 0;
2690 ring->rx_curr_put_info.offset = off;
2691
2692 rxdp->Control_2 |= SET_RXD_MARKER;
2693 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2694 if (first_rxdp) {
2695 wmb();
2696 first_rxdp->Control_1 |= RXD_OWN_XENA;
2697 }
2698 first_rxdp = rxdp;
2699 }
2700 ring->rx_bufs_left += 1;
2701 alloc_tab++;
2702 }
2703
2704 end:
2705
2706
2707
2708
2709 if (first_rxdp) {
2710 wmb();
2711 first_rxdp->Control_1 |= RXD_OWN_XENA;
2712 }
2713
2714 return SUCCESS;
2715pci_map_failed:
2716 stats->pci_map_fail_cnt++;
2717 stats->mem_freed += skb->truesize;
2718 dev_kfree_skb_irq(skb);
2719 return -ENOMEM;
2720}
2721
2722static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2723{
2724 struct net_device *dev = sp->dev;
2725 int j;
2726 struct sk_buff *skb;
2727 struct RxD_t *rxdp;
2728 struct mac_info *mac_control;
2729 struct buffAdd *ba;
2730 struct RxD1 *rxdp1;
2731 struct RxD3 *rxdp3;
2732
2733 mac_control = &sp->mac_control;
2734 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2735 rxdp = mac_control->rings[ring_no].
2736 rx_blocks[blk].rxds[j].virt_addr;
2737 skb = (struct sk_buff *)
2738 ((unsigned long) rxdp->Host_Control);
2739 if (!skb) {
2740 continue;
2741 }
2742 if (sp->rxd_mode == RXD_MODE_1) {
2743 rxdp1 = (struct RxD1*)rxdp;
2744 pci_unmap_single(sp->pdev, (dma_addr_t)
2745 rxdp1->Buffer0_ptr,
2746 dev->mtu +
2747 HEADER_ETHERNET_II_802_3_SIZE
2748 + HEADER_802_2_SIZE +
2749 HEADER_SNAP_SIZE,
2750 PCI_DMA_FROMDEVICE);
2751 memset(rxdp, 0, sizeof(struct RxD1));
2752 } else if(sp->rxd_mode == RXD_MODE_3B) {
2753 rxdp3 = (struct RxD3*)rxdp;
2754 ba = &mac_control->rings[ring_no].
2755 ba[blk][j];
2756 pci_unmap_single(sp->pdev, (dma_addr_t)
2757 rxdp3->Buffer0_ptr,
2758 BUF0_LEN,
2759 PCI_DMA_FROMDEVICE);
2760 pci_unmap_single(sp->pdev, (dma_addr_t)
2761 rxdp3->Buffer1_ptr,
2762 BUF1_LEN,
2763 PCI_DMA_FROMDEVICE);
2764 pci_unmap_single(sp->pdev, (dma_addr_t)
2765 rxdp3->Buffer2_ptr,
2766 dev->mtu + 4,
2767 PCI_DMA_FROMDEVICE);
2768 memset(rxdp, 0, sizeof(struct RxD3));
2769 }
2770 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2771 dev_kfree_skb(skb);
2772 mac_control->rings[ring_no].rx_bufs_left -= 1;
2773 }
2774}
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
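/**
 * free_rx_buffers - Frees all Rx buffers
 * @sp : device private variable.
 * Description: Unmaps and frees every Rx skb on all rings and resets the
 * per-ring block indexes, offsets and buffer counts.
 * Return Value: NONE.
 */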
2785static void free_rx_buffers(struct s2io_nic *sp)
2786{
2787 struct net_device *dev = sp->dev;
2788 int i, blk = 0, buf_cnt = 0;
2789 struct mac_info *mac_control;
2790 struct config_param *config;
2791
2792 mac_control = &sp->mac_control;
2793 config = &sp->config;
2794
2795 for (i = 0; i < config->rx_ring_num; i++) {
2796 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2797 free_rxd_blk(sp,i,blk);
2798
2799 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2800 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2801 mac_control->rings[i].rx_curr_put_info.offset = 0;
2802 mac_control->rings[i].rx_curr_get_info.offset = 0;
2803 mac_control->rings[i].rx_bufs_left = 0;
2804 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2805 dev->name, buf_cnt, i);
2806 }
2807}
2808
2809static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2810{
2811 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2812 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2813 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2814 }
2815 return 0;
2816}
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
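/**
 * s2io_poll_msix - Rx NAPI poll routine (one MSI-X vector per ring)
 * @napi : NAPI context embedded in the ring.
 * @budget : maximum number of packets to process in this poll.
 * Description: Processes received frames on the owning ring, replenishes
 * its Rx buffers and, if the budget was not exhausted, completes NAPI and
 * re-enables the ring's vector in the xmsi mask register.
 * Return Value: number of packets processed.
 */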
2831static int s2io_poll_msix(struct napi_struct *napi, int budget)
2832{
2833 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2834 struct net_device *dev = ring->dev;
2835 struct config_param *config;
2836 struct mac_info *mac_control;
2837 int pkts_processed = 0;
2838 u8 __iomem *addr = NULL;
2839 u8 val8 = 0;
2840 struct s2io_nic *nic = dev->priv;
2841 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2842 int budget_org = budget;
2843
2844 config = &nic->config;
2845 mac_control = &nic->mac_control;
2846
2847 if (unlikely(!is_s2io_card_up(nic)))
2848 return 0;
2849
2850 pkts_processed = rx_intr_handler(ring, budget);
2851 s2io_chk_rx_buffers(nic, ring);
2852
2853 if (pkts_processed < budget_org) {
2854 netif_rx_complete(dev, napi);
2855
2856 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2857 addr += 7 - ring->ring_no;
2858 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2859 writeb(val8, addr);
2860 val8 = readb(addr);
2861 }
2862 return pkts_processed;
2863}
2864static int s2io_poll_inta(struct napi_struct *napi, int budget)
2865{
2866 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2867 struct ring_info *ring;
2868 struct net_device *dev = nic->dev;
2869 struct config_param *config;
2870 struct mac_info *mac_control;
2871 int pkts_processed = 0;
2872 int ring_pkts_processed, i;
2873 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2874 int budget_org = budget;
2875
2876 config = &nic->config;
2877 mac_control = &nic->mac_control;
2878
2879 if (unlikely(!is_s2io_card_up(nic)))
2880 return 0;
2881
2882 for (i = 0; i < config->rx_ring_num; i++) {
2883 ring = &mac_control->rings[i];
2884 ring_pkts_processed = rx_intr_handler(ring, budget);
2885 s2io_chk_rx_buffers(nic, ring);
2886 pkts_processed += ring_pkts_processed;
2887 budget -= ring_pkts_processed;
2888 if (budget <= 0)
2889 break;
2890 }
2891 if (pkts_processed < budget_org) {
2892 netif_rx_complete(dev, napi);
2893
2894 writeq(0, &bar0->rx_traffic_mask);
2895 readl(&bar0->rx_traffic_mask);
2896 }
2897 return pkts_processed;
2898}
2899
2900#ifdef CONFIG_NET_POLL_CONTROLLER
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910static void s2io_netpoll(struct net_device *dev)
2911{
2912 struct s2io_nic *nic = dev->priv;
2913 struct mac_info *mac_control;
2914 struct config_param *config;
2915 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2916 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2917 int i;
2918
2919 if (pci_channel_offline(nic->pdev))
2920 return;
2921
2922 disable_irq(dev->irq);
2923
2924 mac_control = &nic->mac_control;
2925 config = &nic->config;
2926
2927 writeq(val64, &bar0->rx_traffic_int);
2928 writeq(val64, &bar0->tx_traffic_int);
2929
2930
2931
2932
2933
2934 for (i = 0; i < config->tx_fifo_num; i++)
2935 tx_intr_handler(&mac_control->fifos[i]);
2936
2937
2938 for (i = 0; i < config->rx_ring_num; i++)
2939 rx_intr_handler(&mac_control->rings[i], 0);
2940
2941 for (i = 0; i < config->rx_ring_num; i++) {
2942 if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
2943 -ENOMEM) {
2944 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2945 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2946 break;
2947 }
2948 }
2949 enable_irq(dev->irq);
2950 return;
2951}
2952#endif
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
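/**
 * rx_intr_handler - Rx interrupt handler
 * @ring_data : per-ring control structure.
 * @budget : NAPI budget (ignored when NAPI is not in use).
 * Description: Walks the receive descriptors owned by the host, unmaps or
 * syncs the buffers, hands each frame to rx_osm_handler() and finally
 * flushes any in-use LRO sessions.
 * Return Value: number of packets processed under NAPI.
 */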
2967static int rx_intr_handler(struct ring_info *ring_data, int budget)
2968{
2969 int get_block, put_block;
2970 struct rx_curr_get_info get_info, put_info;
2971 struct RxD_t *rxdp;
2972 struct sk_buff *skb;
2973 int pkt_cnt = 0, napi_pkts = 0;
2974 int i;
2975 struct RxD1* rxdp1;
2976 struct RxD3* rxdp3;
2977
2978 get_info = ring_data->rx_curr_get_info;
2979 get_block = get_info.block_index;
2980 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2981 put_block = put_info.block_index;
2982 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2983
2984 while (RXD_IS_UP2DT(rxdp)) {
2985
2986
2987
2988
2989 if ((get_block == put_block) &&
2990 (get_info.offset + 1) == put_info.offset) {
2991 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2992 ring_data->dev->name);
2993 break;
2994 }
2995 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2996 if (skb == NULL) {
2997 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2998 ring_data->dev->name);
2999 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
3000 return 0;
3001 }
3002 if (ring_data->rxd_mode == RXD_MODE_1) {
3003 rxdp1 = (struct RxD1*)rxdp;
3004 pci_unmap_single(ring_data->pdev, (dma_addr_t)
3005 rxdp1->Buffer0_ptr,
3006 ring_data->mtu +
3007 HEADER_ETHERNET_II_802_3_SIZE +
3008 HEADER_802_2_SIZE +
3009 HEADER_SNAP_SIZE,
3010 PCI_DMA_FROMDEVICE);
3011 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
3012 rxdp3 = (struct RxD3*)rxdp;
3013 pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
3014 rxdp3->Buffer0_ptr,
3015 BUF0_LEN, PCI_DMA_FROMDEVICE);
3016 pci_unmap_single(ring_data->pdev, (dma_addr_t)
3017 rxdp3->Buffer2_ptr,
3018 ring_data->mtu + 4,
3019 PCI_DMA_FROMDEVICE);
3020 }
3021 prefetch(skb->data);
3022 rx_osm_handler(ring_data, rxdp);
3023 get_info.offset++;
3024 ring_data->rx_curr_get_info.offset = get_info.offset;
3025 rxdp = ring_data->rx_blocks[get_block].
3026 rxds[get_info.offset].virt_addr;
3027 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3028 get_info.offset = 0;
3029 ring_data->rx_curr_get_info.offset = get_info.offset;
3030 get_block++;
3031 if (get_block == ring_data->block_count)
3032 get_block = 0;
3033 ring_data->rx_curr_get_info.block_index = get_block;
3034 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3035 }
3036
3037 if (ring_data->nic->config.napi) {
3038 budget--;
3039 napi_pkts++;
3040 if (!budget)
3041 break;
3042 }
3043 pkt_cnt++;
3044 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3045 break;
3046 }
3047 if (ring_data->lro) {
3048
3049 for (i=0; i<MAX_LRO_SESSIONS; i++) {
3050 struct lro *lro = &ring_data->lro0_n[i];
3051 if (lro->in_use) {
3052 update_L3L4_header(ring_data->nic, lro);
3053 queue_rx_frame(lro->parent, lro->vlan_tag);
3054 clear_lro_session(lro);
3055 }
3056 }
3057 }
3058 return(napi_pkts);
3059}
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
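/**
 * tx_intr_handler - Transmit interrupt handler
 * @fifo_data : per-FIFO control structure.
 * Description: Reclaims completed Tx descriptors, accounts the transfer
 * error codes, frees the associated skbs and wakes the Tx queue if it was
 * stopped.
 * Return Value: NONE
 */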
3073static void tx_intr_handler(struct fifo_info *fifo_data)
3074{
3075 struct s2io_nic *nic = fifo_data->nic;
3076 struct tx_curr_get_info get_info, put_info;
3077 struct sk_buff *skb = NULL;
3078 struct TxD *txdlp;
3079 int pkt_cnt = 0;
3080 unsigned long flags = 0;
3081 u8 err_mask;
3082
3083 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3084 return;
3085
3086 get_info = fifo_data->tx_curr_get_info;
3087 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3088 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
3089 list_virt_addr;
3090 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3091 (get_info.offset != put_info.offset) &&
3092 (txdlp->Host_Control)) {
3093
3094 if (txdlp->Control_1 & TXD_T_CODE) {
3095 unsigned long long err;
3096 err = txdlp->Control_1 & TXD_T_CODE;
3097 if (err & 0x1) {
3098 nic->mac_control.stats_info->sw_stat.
3099 parity_err_cnt++;
3100 }
3101
3102
3103 err_mask = err >> 48;
3104 switch(err_mask) {
3105 case 2:
3106 nic->mac_control.stats_info->sw_stat.
3107 tx_buf_abort_cnt++;
3108 break;
3109
3110 case 3:
3111 nic->mac_control.stats_info->sw_stat.
3112 tx_desc_abort_cnt++;
3113 break;
3114
3115 case 7:
3116 nic->mac_control.stats_info->sw_stat.
3117 tx_parity_err_cnt++;
3118 break;
3119
3120 case 10:
3121 nic->mac_control.stats_info->sw_stat.
3122 tx_link_loss_cnt++;
3123 break;
3124
3125 case 15:
3126 nic->mac_control.stats_info->sw_stat.
3127 tx_list_proc_err_cnt++;
3128 break;
3129 }
3130 }
3131
3132 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3133 if (skb == NULL) {
3134 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3135 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3136 __func__);
3137 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3138 return;
3139 }
3140 pkt_cnt++;
3141
3142
3143 nic->dev->stats.tx_bytes += skb->len;
3144 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3145 dev_kfree_skb_irq(skb);
3146
3147 get_info.offset++;
3148 if (get_info.offset == get_info.fifo_len + 1)
3149 get_info.offset = 0;
3150 txdlp = (struct TxD *) fifo_data->list_info
3151 [get_info.offset].list_virt_addr;
3152 fifo_data->tx_curr_get_info.offset =
3153 get_info.offset;
3154 }
3155
3156 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3157
3158 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3159}
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
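/**
 * s2io_mdio_write - Writes a value to an MDIO (PHY/XPAK) register
 * @mmd_type : MMD device address of the register.
 * @addr : register address within the MMD.
 * @value : value to be written.
 * @dev : pointer to the net_device structure.
 * Description: Issues the address, write and read-back transactions on the
 * MDIO bus through the mdio_control register.
 */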
3171static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3172{
3173 u64 val64 = 0x0;
3174 struct s2io_nic *sp = dev->priv;
3175 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3176
3177
3178 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3179 | MDIO_MMD_DEV_ADDR(mmd_type)
3180 | MDIO_MMS_PRT_ADDR(0x0);
3181 writeq(val64, &bar0->mdio_control);
3182 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3183 writeq(val64, &bar0->mdio_control);
3184 udelay(100);
3185
3186
3187 val64 = 0x0;
3188 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3189 | MDIO_MMD_DEV_ADDR(mmd_type)
3190 | MDIO_MMS_PRT_ADDR(0x0)
3191 | MDIO_MDIO_DATA(value)
3192 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3193 writeq(val64, &bar0->mdio_control);
3194 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3195 writeq(val64, &bar0->mdio_control);
3196 udelay(100);
3197
3198 val64 = 0x0;
3199 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3200 | MDIO_MMD_DEV_ADDR(mmd_type)
3201 | MDIO_MMS_PRT_ADDR(0x0)
3202 | MDIO_OP(MDIO_OP_READ_TRANS);
3203 writeq(val64, &bar0->mdio_control);
3204 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3205 writeq(val64, &bar0->mdio_control);
3206 udelay(100);
3207
3208}
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
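/**
 * s2io_mdio_read - Reads an MDIO (PHY/XPAK) register
 * @mmd_type : MMD device address of the register.
 * @addr : register address within the MMD.
 * @dev : pointer to the net_device structure.
 * Description: Issues the address and read transactions on the MDIO bus.
 * Return Value: the 16-bit register contents.
 */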
3219static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3220{
3221 u64 val64 = 0x0;
3222 u64 rval64 = 0x0;
3223 struct s2io_nic *sp = dev->priv;
3224 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3225
3226
3227 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3228 | MDIO_MMD_DEV_ADDR(mmd_type)
3229 | MDIO_MMS_PRT_ADDR(0x0);
3230 writeq(val64, &bar0->mdio_control);
3231 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3232 writeq(val64, &bar0->mdio_control);
3233 udelay(100);
3234
3235
3236 val64 = 0x0;
3237 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3238 | MDIO_MMD_DEV_ADDR(mmd_type)
3239 | MDIO_MMS_PRT_ADDR(0x0)
3240 | MDIO_OP(MDIO_OP_READ_TRANS);
3241 writeq(val64, &bar0->mdio_control);
3242 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3243 writeq(val64, &bar0->mdio_control);
3244 udelay(100);
3245
3246
3247 rval64 = readq(&bar0->mdio_control);
3248 rval64 = rval64 & 0xFFFF0000;
3249 rval64 = rval64 >> 16;
3250 return rval64;
3251}
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
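/**
 * s2io_chk_xpak_counter - Updates an XPAK alarm counter
 * @counter : alarm counter to increment while the alarm is set.
 * @regs_stat : packed 2-bit persistence fields, one per alarm.
 * @index : index of the 2-bit field tracking this alarm.
 * @flag : non-zero when the alarm bit is currently asserted.
 * @type : 1 - temperature, 2 - laser bias current, 3 - laser output power.
 * Description: When an alarm stays asserted for three consecutive polls a
 * warning is printed and the persistence field is cleared.
 */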
3262static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3263{
3264 u64 mask = 0x3;
3265 u64 val64;
3266 int i;
3267 for (i = 0; i < index; i++)
3268 mask = mask << 0x2;
3269
3270 if(flag > 0)
3271 {
3272 *counter = *counter + 1;
3273 val64 = *regs_stat & mask;
3274 val64 = val64 >> (index * 0x2);
3275 val64 = val64 + 1;
3276 if(val64 == 3)
3277 {
3278 switch(type)
3279 {
3280 case 1:
3281 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3282 "service. Excessive temperatures may "
3283 "result in premature transceiver "
3284 "failure \n");
3285 break;
3286 case 2:
3287 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3288 "service Excessive bias currents may "
3289 "indicate imminent laser diode "
3290 "failure \n");
3291 break;
3292 case 3:
3293 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3294 "service Excessive laser output "
3295 "power may saturate far-end "
3296 "receiver\n");
3297 break;
3298 default:
3299 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3300 "type \n");
3301 }
3302 val64 = 0x0;
3303 }
3304 val64 = val64 << (index * 0x2);
3305 *regs_stat = (*regs_stat & (~mask)) | (val64);
3306
3307 } else {
3308 *regs_stat = *regs_stat & (~mask);
3309 }
3310}
3311
3312
3313
3314
3315
3316
3317
3318
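/**
 * s2io_updt_xpak_counter - Updates the XPAK alarm and warning counters
 * @dev : pointer to the net_device structure.
 * Description: Verifies MDIO access to the PMA device, then reads the XPAK
 * alarm and warning flag registers and updates the corresponding software
 * statistics counters.
 */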
3319static void s2io_updt_xpak_counter(struct net_device *dev)
3320{
3321 u16 flag = 0x0;
3322 u16 type = 0x0;
3323 u16 val16 = 0x0;
3324 u64 val64 = 0x0;
3325 u64 addr = 0x0;
3326
3327 struct s2io_nic *sp = dev->priv;
3328 struct stat_block *stat_info = sp->mac_control.stats_info;
3329
3330
3331 addr = 0x0000;
3332 val64 = 0x0;
3333 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3334 if((val64 == 0xFFFF) || (val64 == 0x0000))
3335 {
3336 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3337 "Returned %llx\n", (unsigned long long)val64);
3338 return;
3339 }
3340
3341
3342 if(val64 != 0x2040)
3343 {
3344 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3345 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3346 (unsigned long long)val64);
3347 return;
3348 }
3349
3350
3351 addr = 0xA100;
3352 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3353 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3354
3355
3356 addr = 0xA070;
3357 val64 = 0x0;
3358 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3359
3360 flag = CHECKBIT(val64, 0x7);
3361 type = 1;
3362 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3363 &stat_info->xpak_stat.xpak_regs_stat,
3364 0x0, flag, type);
3365
3366 if(CHECKBIT(val64, 0x6))
3367 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3368
3369 flag = CHECKBIT(val64, 0x3);
3370 type = 2;
3371 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3372 &stat_info->xpak_stat.xpak_regs_stat,
3373 0x2, flag, type);
3374
3375 if(CHECKBIT(val64, 0x2))
3376 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3377
3378 flag = CHECKBIT(val64, 0x1);
3379 type = 3;
3380 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3381 &stat_info->xpak_stat.xpak_regs_stat,
3382 0x4, flag, type);
3383
3384 if(CHECKBIT(val64, 0x0))
3385 stat_info->xpak_stat.alarm_laser_output_power_low++;
3386
3387
3388 addr = 0xA074;
3389 val64 = 0x0;
3390 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3391
3392 if(CHECKBIT(val64, 0x7))
3393 stat_info->xpak_stat.warn_transceiver_temp_high++;
3394
3395 if(CHECKBIT(val64, 0x6))
3396 stat_info->xpak_stat.warn_transceiver_temp_low++;
3397
3398 if(CHECKBIT(val64, 0x3))
3399 stat_info->xpak_stat.warn_laser_bias_current_high++;
3400
3401 if(CHECKBIT(val64, 0x2))
3402 stat_info->xpak_stat.warn_laser_bias_current_low++;
3403
3404 if(CHECKBIT(val64, 0x1))
3405 stat_info->xpak_stat.warn_laser_output_power_high++;
3406
3407 if(CHECKBIT(val64, 0x0))
3408 stat_info->xpak_stat.warn_laser_output_power_low++;
3409}
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
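/**
 * wait_for_cmd_complete - Polls a register until a busy bit reaches a state
 * @addr : register to poll.
 * @busy_bit : bit to test.
 * @bit_state : S2IO_BIT_RESET to wait for the bit to clear,
 * S2IO_BIT_SET to wait for the bit to become set.
 * Return Value: SUCCESS when the requested state is reached, FAILURE on
 * timeout or an invalid @bit_state.
 */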
3422static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3423 int bit_state)
3424{
3425 int ret = FAILURE, cnt = 0, delay = 1;
3426 u64 val64;
3427
3428 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3429 return FAILURE;
3430
3431 do {
3432 val64 = readq(addr);
3433 if (bit_state == S2IO_BIT_RESET) {
3434 if (!(val64 & busy_bit)) {
3435 ret = SUCCESS;
3436 break;
3437 }
3438 } else {
3439 if (val64 & busy_bit) {
3440 ret = SUCCESS;
3441 break;
3442 }
3443 }
3444
3445 if(in_interrupt())
3446 mdelay(delay);
3447 else
3448 msleep(delay);
3449
3450 if (++cnt >= 10)
3451 delay = 50;
3452 } while (cnt < 20);
3453 return ret;
3454}
3455
3456
3457
3458
3459
3460
3461static u16 check_pci_device_id(u16 id)
3462{
3463 switch (id) {
3464 case PCI_DEVICE_ID_HERC_WIN:
3465 case PCI_DEVICE_ID_HERC_UNI:
3466 return XFRAME_II_DEVICE;
3467 case PCI_DEVICE_ID_S2IO_UNI:
3468 case PCI_DEVICE_ID_S2IO_WIN:
3469 return XFRAME_I_DEVICE;
3470 default:
3471 return PCI_ANY_ID;
3472 }
3473}
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
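/**
 * s2io_reset - Resets the card
 * @sp : private member of the device structure.
 * Description: Issues a software reset, restores the PCI configuration
 * space, swapper settings, unicast/multicast entries and MSI-X data, and
 * preserves the persistent software statistics across the reset.
 * Return value: void.
 */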
3485static void s2io_reset(struct s2io_nic * sp)
3486{
3487 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3488 u64 val64;
3489 u16 subid, pci_cmd;
3490 int i;
3491 u16 val16;
3492 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3493 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3494
3495 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3496 __func__, sp->dev->name);
3497
3498
3499 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3500
3501 val64 = SW_RESET_ALL;
3502 writeq(val64, &bar0->sw_reset);
3503 if (strstr(sp->product_name, "CX4")) {
3504 msleep(750);
3505 }
3506 msleep(250);
3507 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3508
3509
3510 pci_restore_state(sp->pdev);
3511 pci_read_config_word(sp->pdev, 0x2, &val16);
3512 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3513 break;
3514 msleep(200);
3515 }
3516
3517 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3518 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__);
3519 }
3520
3521 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3522
3523 s2io_init_pci(sp);
3524
3525
3526 s2io_set_swapper(sp);
3527
3528
3529 do_s2io_restore_unicast_mc(sp);
3530
3531
3532 restore_xmsi_data(sp);
3533
3534
3535 if (sp->device_type == XFRAME_II_DEVICE) {
3536
3537 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3538
3539
3540 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3541
3542
3543 writeq(s2BIT(62), &bar0->txpic_int_reg);
3544 }
3545
3546
3547 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3548
3549 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3550 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3551 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3552 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3553 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3554 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3555 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3556 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3557
3558 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3559
3560 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3561 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3562 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3563 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3564 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3565 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3566 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3567 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3568
3569
3570 subid = sp->pdev->subsystem_device;
3571 if (((subid & 0xFF) >= 0x07) &&
3572 (sp->device_type == XFRAME_I_DEVICE)) {
3573 val64 = readq(&bar0->gpio_control);
3574 val64 |= 0x0000800000000000ULL;
3575 writeq(val64, &bar0->gpio_control);
3576 val64 = 0x0411040400000000ULL;
3577 writeq(val64, (void __iomem *)bar0 + 0x2700);
3578 }
3579
3580
3581
3582
3583
3584 if (sp->device_type == XFRAME_II_DEVICE) {
3585 val64 = readq(&bar0->pcc_err_reg);
3586 writeq(val64, &bar0->pcc_err_reg);
3587 }
3588
3589 sp->device_enabled_once = FALSE;
3590}
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
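/**
 * s2io_set_swapper - Sets the swapper control register
 * @sp : private member of the device structure.
 * Description: Probes read and write swapper settings until the feedback
 * register returns the expected pattern, then programs swapper_ctrl for the
 * host endianness.
 * Return value: SUCCESS on success, FAILURE if the settings cannot be fixed.
 */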
3602static int s2io_set_swapper(struct s2io_nic * sp)
3603{
3604 struct net_device *dev = sp->dev;
3605 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3606 u64 val64, valt, valr;
3607
3608
3609
3610
3611
3612
3613 val64 = readq(&bar0->pif_rd_swapper_fb);
3614 if (val64 != 0x0123456789ABCDEFULL) {
3615 int i = 0;
3616 u64 value[] = { 0xC30000C3C30000C3ULL,
3617 0x8100008181000081ULL,
3618 0x4200004242000042ULL,
3619 0};
3620
3621 while(i<4) {
3622 writeq(value[i], &bar0->swapper_ctrl);
3623 val64 = readq(&bar0->pif_rd_swapper_fb);
3624 if (val64 == 0x0123456789ABCDEFULL)
3625 break;
3626 i++;
3627 }
3628 if (i == 4) {
3629 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3630 dev->name);
3631 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3632 (unsigned long long) val64);
3633 return FAILURE;
3634 }
3635 valr = value[i];
3636 } else {
3637 valr = readq(&bar0->swapper_ctrl);
3638 }
3639
3640 valt = 0x0123456789ABCDEFULL;
3641 writeq(valt, &bar0->xmsi_address);
3642 val64 = readq(&bar0->xmsi_address);
3643
3644 if(val64 != valt) {
3645 int i = 0;
3646 u64 value[] = { 0x00C3C30000C3C300ULL,
3647 0x0081810000818100ULL,
3648 0x0042420000424200ULL,
3649 0};
3650
3651 while(i<4) {
3652 writeq((value[i] | valr), &bar0->swapper_ctrl);
3653 writeq(valt, &bar0->xmsi_address);
3654 val64 = readq(&bar0->xmsi_address);
3655 if(val64 == valt)
3656 break;
3657 i++;
3658 }
3659 if(i == 4) {
3660 unsigned long long x = val64;
3661 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3662 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3663 return FAILURE;
3664 }
3665 }
3666 val64 = readq(&bar0->swapper_ctrl);
3667 val64 &= 0xFFFF000000000000ULL;
3668
3669#ifdef __BIG_ENDIAN
3670
3671
3672
3673
3674 val64 |= (SWAPPER_CTRL_TXP_FE |
3675 SWAPPER_CTRL_TXP_SE |
3676 SWAPPER_CTRL_TXD_R_FE |
3677 SWAPPER_CTRL_TXD_W_FE |
3678 SWAPPER_CTRL_TXF_R_FE |
3679 SWAPPER_CTRL_RXD_R_FE |
3680 SWAPPER_CTRL_RXD_W_FE |
3681 SWAPPER_CTRL_RXF_W_FE |
3682 SWAPPER_CTRL_XMSI_FE |
3683 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3684 if (sp->config.intr_type == INTA)
3685 val64 |= SWAPPER_CTRL_XMSI_SE;
3686 writeq(val64, &bar0->swapper_ctrl);
3687#else
3688
3689
3690
3691
3692
3693 val64 |= (SWAPPER_CTRL_TXP_FE |
3694 SWAPPER_CTRL_TXP_SE |
3695 SWAPPER_CTRL_TXD_R_FE |
3696 SWAPPER_CTRL_TXD_R_SE |
3697 SWAPPER_CTRL_TXD_W_FE |
3698 SWAPPER_CTRL_TXD_W_SE |
3699 SWAPPER_CTRL_TXF_R_FE |
3700 SWAPPER_CTRL_RXD_R_FE |
3701 SWAPPER_CTRL_RXD_R_SE |
3702 SWAPPER_CTRL_RXD_W_FE |
3703 SWAPPER_CTRL_RXD_W_SE |
3704 SWAPPER_CTRL_RXF_W_FE |
3705 SWAPPER_CTRL_XMSI_FE |
3706 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3707 if (sp->config.intr_type == INTA)
3708 val64 |= SWAPPER_CTRL_XMSI_SE;
3709 writeq(val64, &bar0->swapper_ctrl);
3710#endif
3711 val64 = readq(&bar0->swapper_ctrl);
3712
3713
3714
3715
3716
3717 val64 = readq(&bar0->pif_rd_swapper_fb);
3718 if (val64 != 0x0123456789ABCDEFULL) {
3719
3720 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3721 dev->name);
3722 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3723 (unsigned long long) val64);
3724 return FAILURE;
3725 }
3726
3727 return SUCCESS;
3728}
3729
3730static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3731{
3732 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3733 u64 val64;
3734 int ret = 0, cnt = 0;
3735
3736 do {
3737 val64 = readq(&bar0->xmsi_access);
3738 if (!(val64 & s2BIT(15)))
3739 break;
3740 mdelay(1);
3741 cnt++;
3742 } while(cnt < 5);
3743 if (cnt == 5) {
3744 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3745 ret = 1;
3746 }
3747
3748 return ret;
3749}
3750
3751static void restore_xmsi_data(struct s2io_nic *nic)
3752{
3753 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3754 u64 val64;
3755 int i, msix_index;
3756
3757
3758 if (nic->device_type == XFRAME_I_DEVICE)
3759 return;
3760
3761 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3762 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3763 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3764 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3765 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3766 writeq(val64, &bar0->xmsi_access);
3767 if (wait_for_msix_trans(nic, msix_index)) {
3768 DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3769 continue;
3770 }
3771 }
3772}
3773
3774static void store_xmsi_data(struct s2io_nic *nic)
3775{
3776 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3777 u64 val64, addr, data;
3778 int i, msix_index;
3779
3780 if (nic->device_type == XFRAME_I_DEVICE)
3781 return;
3782
3783
3784 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3785 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3786 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3787 writeq(val64, &bar0->xmsi_access);
3788 if (wait_for_msix_trans(nic, msix_index)) {
3789 DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3790 continue;
3791 }
3792 addr = readq(&bar0->xmsi_address);
3793 data = readq(&bar0->xmsi_data);
3794 if (addr && data) {
3795 nic->msix_info[i].addr = addr;
3796 nic->msix_info[i].data = data;
3797 }
3798 }
3799}
3800
3801static int s2io_enable_msi_x(struct s2io_nic *nic)
3802{
3803 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3804 u64 rx_mat;
3805 u16 msi_control;
3806 int ret, i, j, msix_indx = 1;
3807
3808 nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3809 GFP_KERNEL);
3810 if (!nic->entries) {
3811 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3812 __func__);
3813 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3814 return -ENOMEM;
3815 }
3816 nic->mac_control.stats_info->sw_stat.mem_allocated
3817 += (nic->num_entries * sizeof(struct msix_entry));
3818
3819 memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3820
3821 nic->s2io_entries =
3822 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3823 GFP_KERNEL);
3824 if (!nic->s2io_entries) {
3825 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3826 __func__);
3827 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3828 kfree(nic->entries);
3829 nic->mac_control.stats_info->sw_stat.mem_freed
3830 += (nic->num_entries * sizeof(struct msix_entry));
3831 return -ENOMEM;
3832 }
3833 nic->mac_control.stats_info->sw_stat.mem_allocated
3834 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3835 memset(nic->s2io_entries, 0,
3836 nic->num_entries * sizeof(struct s2io_msix_entry));
3837
3838 nic->entries[0].entry = 0;
3839 nic->s2io_entries[0].entry = 0;
3840 nic->s2io_entries[0].in_use = MSIX_FLG;
3841 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3842 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3843
3844 for (i = 1; i < nic->num_entries; i++) {
3845 nic->entries[i].entry = ((i - 1) * 8) + 1;
3846 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3847 nic->s2io_entries[i].arg = NULL;
3848 nic->s2io_entries[i].in_use = 0;
3849 }
3850
3851 rx_mat = readq(&bar0->rx_mat);
3852 for (j = 0; j < nic->config.rx_ring_num; j++) {
3853 rx_mat |= RX_MAT_SET(j, msix_indx);
3854 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3855 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3856 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3857 msix_indx += 8;
3858 }
3859 writeq(rx_mat, &bar0->rx_mat);
3860 readq(&bar0->rx_mat);
3861
3862 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3863
3864 if (ret) {
3865 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3866 kfree(nic->entries);
3867 nic->mac_control.stats_info->sw_stat.mem_freed
3868 += (nic->num_entries * sizeof(struct msix_entry));
3869 kfree(nic->s2io_entries);
3870 nic->mac_control.stats_info->sw_stat.mem_freed
3871 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3872 nic->entries = NULL;
3873 nic->s2io_entries = NULL;
3874 return -ENOMEM;
3875 }
3876
3877
3878
3879
3880
3881 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3882 msi_control |= 0x1;
3883 pci_write_config_word(nic->pdev, 0x42, msi_control);
3884
3885 return 0;
3886}
3887
3888
3889static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3890{
3891 struct s2io_nic *sp = dev_id;
3892
3893 sp->msi_detected = 1;
3894 wake_up(&sp->msi_wait);
3895
3896 return IRQ_HANDLED;
3897}
3898
3899
3900static int s2io_test_msi(struct s2io_nic *sp)
3901{
3902 struct pci_dev *pdev = sp->pdev;
3903 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3904 int err;
3905 u64 val64, saved64;
3906
3907 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3908 sp->name, sp);
3909 if (err) {
3910 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3911 sp->dev->name, pci_name(pdev), pdev->irq);
3912 return err;
3913 }
3914
3915 init_waitqueue_head (&sp->msi_wait);
3916 sp->msi_detected = 0;
3917
3918 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3919 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3920 val64 |= SCHED_INT_CTRL_TIMER_EN;
3921 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3922 writeq(val64, &bar0->scheduled_int_ctrl);
3923
3924 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3925
3926 if (!sp->msi_detected) {
3927
3928 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3929 "using MSI(X) during test\n", sp->dev->name,
3930 pci_name(pdev));
3931
3932 err = -EOPNOTSUPP;
3933 }
3934
3935 free_irq(sp->entries[1].vector, sp);
3936
3937 writeq(saved64, &bar0->scheduled_int_ctrl);
3938
3939 return err;
3940}
3941
3942static void remove_msix_isr(struct s2io_nic *sp)
3943{
3944 int i;
3945 u16 msi_control;
3946
3947 for (i = 0; i < sp->num_entries; i++) {
3948 if (sp->s2io_entries[i].in_use ==
3949 MSIX_REGISTERED_SUCCESS) {
3950 int vector = sp->entries[i].vector;
3951 void *arg = sp->s2io_entries[i].arg;
3952 free_irq(vector, arg);
3953 }
3954 }
3955
3956 kfree(sp->entries);
3957 kfree(sp->s2io_entries);
3958 sp->entries = NULL;
3959 sp->s2io_entries = NULL;
3960
3961 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3962 msi_control &= 0xFFFE;
3963 pci_write_config_word(sp->pdev, 0x42, msi_control);
3964
3965 pci_disable_msix(sp->pdev);
3966}
3967
3968static void remove_inta_isr(struct s2io_nic *sp)
3969{
3970 struct net_device *dev = sp->dev;
3971
3972 free_irq(sp->pdev->irq, dev);
3973}
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
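/**
 * s2io_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description: Initializes the hardware via s2io_card_up(), programs the
 * unicast MAC address and starts the transmit queues.
 * Return value: 0 on success, a negative errno on failure.
 */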
3991static int s2io_open(struct net_device *dev)
3992{
3993 struct s2io_nic *sp = dev->priv;
3994 int err = 0;
3995
3996
3997
3998
3999
4000 netif_carrier_off(dev);
4001 sp->last_link_state = 0;
4002
4003
4004 err = s2io_card_up(sp);
4005 if (err) {
4006 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4007 dev->name);
4008 goto hw_init_failed;
4009 }
4010
4011 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4012 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4013 s2io_card_down(sp);
4014 err = -ENODEV;
4015 goto hw_init_failed;
4016 }
4017 s2io_start_all_tx_queue(sp);
4018 return 0;
4019
4020hw_init_failed:
4021 if (sp->config.intr_type == MSI_X) {
4022 if (sp->entries) {
4023 kfree(sp->entries);
4024 sp->mac_control.stats_info->sw_stat.mem_freed
4025 += (sp->num_entries * sizeof(struct msix_entry));
4026 }
4027 if (sp->s2io_entries) {
4028 kfree(sp->s2io_entries);
4029 sp->mac_control.stats_info->sw_stat.mem_freed
4030 += (sp->num_entries * sizeof(struct s2io_msix_entry));
4031 }
4032 }
4033 return err;
4034}
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
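/**
 * s2io_close - close entry point of the driver
 * @dev : device pointer.
 * Description: Stops the transmit queues, removes the programmed unicast
 * and multicast address entries and brings the card down.
 * Return value: 0 on success.
 */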
4049static int s2io_close(struct net_device *dev)
4050{
4051 struct s2io_nic *sp = dev->priv;
4052 struct config_param *config = &sp->config;
4053 u64 tmp64;
4054 int offset;
4055
4056
4057
4058
4059 if (!is_s2io_card_up(sp))
4060 return 0;
4061
4062 s2io_stop_all_tx_queue(sp);
4063
4064 for (offset = 1; offset < config->max_mc_addr; offset++) {
4065 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4066 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4067 do_s2io_delete_unicast_mc(sp, tmp64);
4068 }
4069
4070 s2io_card_down(sp);
4071
4072 return 0;
4073}
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
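/**
 * s2io_xmit - Tx entry point of the driver
 * @skb : the socket buffer containing the payload to transmit.
 * @dev : device pointer.
 * Description: Selects a Tx FIFO (by protocol or priority steering), builds
 * the TxD list with LSO/checksum/VLAN/UFO attributes, maps the fragments
 * for DMA and rings the FIFO doorbell.
 * Return value: 0 on success, NETDEV_TX_LOCKED/NETDEV_TX_BUSY when the
 * FIFO cannot accept the frame right now.
 */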
4088static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4089{
4090 struct s2io_nic *sp = dev->priv;
4091 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4092 register u64 val64;
4093 struct TxD *txdp;
4094 struct TxFIFO_element __iomem *tx_fifo;
4095 unsigned long flags = 0;
4096 u16 vlan_tag = 0;
4097 struct fifo_info *fifo = NULL;
4098 struct mac_info *mac_control;
4099 struct config_param *config;
4100 int do_spin_lock = 1;
4101 int offload_type;
4102 int enable_per_list_interrupt = 0;
4103 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4104
4105 mac_control = &sp->mac_control;
4106 config = &sp->config;
4107
4108 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4109
4110 if (unlikely(skb->len <= 0)) {
4111 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4112 dev_kfree_skb_any(skb);
4113 return 0;
4114 }
4115
4116 if (!is_s2io_card_up(sp)) {
4117 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4118 dev->name);
4119 dev_kfree_skb(skb);
4120 return 0;
4121 }
4122
4123 queue = 0;
4124 if (sp->vlgrp && vlan_tx_tag_present(skb))
4125 vlan_tag = vlan_tx_tag_get(skb);
4126 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4127 if (skb->protocol == htons(ETH_P_IP)) {
4128 struct iphdr *ip;
4129 struct tcphdr *th;
4130 ip = ip_hdr(skb);
4131
4132 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4133 th = (struct tcphdr *)(((unsigned char *)ip) +
4134 ip->ihl*4);
4135
4136 if (ip->protocol == IPPROTO_TCP) {
4137 queue_len = sp->total_tcp_fifos;
4138 queue = (ntohs(th->source) +
4139 ntohs(th->dest)) &
4140 sp->fifo_selector[queue_len - 1];
4141 if (queue >= queue_len)
4142 queue = queue_len - 1;
4143 } else if (ip->protocol == IPPROTO_UDP) {
4144 queue_len = sp->total_udp_fifos;
4145 queue = (ntohs(th->source) +
4146 ntohs(th->dest)) &
4147 sp->fifo_selector[queue_len - 1];
4148 if (queue >= queue_len)
4149 queue = queue_len - 1;
4150 queue += sp->udp_fifo_idx;
4151 if (skb->len > 1024)
4152 enable_per_list_interrupt = 1;
4153 do_spin_lock = 0;
4154 }
4155 }
4156 }
4157 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4158
4159 queue = config->fifo_mapping
4160 [skb->priority & (MAX_TX_FIFOS - 1)];
4161 fifo = &mac_control->fifos[queue];
4162
4163 if (do_spin_lock)
4164 spin_lock_irqsave(&fifo->tx_lock, flags);
4165 else {
4166 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4167 return NETDEV_TX_LOCKED;
4168 }
4169
4170 if (sp->config.multiq) {
4171 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4172 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4173 return NETDEV_TX_BUSY;
4174 }
4175 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4176 if (netif_queue_stopped(dev)) {
4177 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4178 return NETDEV_TX_BUSY;
4179 }
4180 }
4181
4182 put_off = (u16) fifo->tx_curr_put_info.offset;
4183 get_off = (u16) fifo->tx_curr_get_info.offset;
4184 txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
4185
4186 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4187
4188 if (txdp->Host_Control ||
4189 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4190 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4191 s2io_stop_tx_queue(sp, fifo->fifo_no);
4192 dev_kfree_skb(skb);
4193 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4194 return 0;
4195 }
4196
4197 offload_type = s2io_offload_type(skb);
4198 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4199 txdp->Control_1 |= TXD_TCP_LSO_EN;
4200 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4201 }
4202 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4203 txdp->Control_2 |=
4204 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4205 TXD_TX_CKO_UDP_EN);
4206 }
4207 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4208 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4209 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4210 if (enable_per_list_interrupt)
4211 if (put_off & (queue_len >> 5))
4212 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4213 if (vlan_tag) {
4214 txdp->Control_2 |= TXD_VLAN_ENABLE;
4215 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4216 }
4217
4218 frg_len = skb->len - skb->data_len;
4219 if (offload_type == SKB_GSO_UDP) {
4220 int ufo_size;
4221
4222 ufo_size = s2io_udp_mss(skb);
4223 ufo_size &= ~7;
4224 txdp->Control_1 |= TXD_UFO_EN;
4225 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4226 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4227#ifdef __BIG_ENDIAN
4228
4229 fifo->ufo_in_band_v[put_off] =
4230 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4231#else
4232 fifo->ufo_in_band_v[put_off] =
4233 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4234#endif
4235 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4236 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4237 fifo->ufo_in_band_v,
4238 sizeof(u64), PCI_DMA_TODEVICE);
4239 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4240 goto pci_map_failed;
4241 txdp++;
4242 }
4243
4244 txdp->Buffer_Pointer = pci_map_single
4245 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4246 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4247 goto pci_map_failed;
4248
4249 txdp->Host_Control = (unsigned long) skb;
4250 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4251 if (offload_type == SKB_GSO_UDP)
4252 txdp->Control_1 |= TXD_UFO_EN;
4253
4254 frg_cnt = skb_shinfo(skb)->nr_frags;
4255
4256 for (i = 0; i < frg_cnt; i++) {
4257 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4258
4259 if (!frag->size)
4260 continue;
4261 txdp++;
4262 txdp->Buffer_Pointer = (u64) pci_map_page
4263 (sp->pdev, frag->page, frag->page_offset,
4264 frag->size, PCI_DMA_TODEVICE);
4265 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4266 if (offload_type == SKB_GSO_UDP)
4267 txdp->Control_1 |= TXD_UFO_EN;
4268 }
4269 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4270
4271 if (offload_type == SKB_GSO_UDP)
4272 frg_cnt++;
4273
4274 tx_fifo = mac_control->tx_FIFO_start[queue];
4275 val64 = fifo->list_info[put_off].list_phy_addr;
4276 writeq(val64, &tx_fifo->TxDL_Pointer);
4277
4278 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4279 TX_FIFO_LAST_LIST);
4280 if (offload_type)
4281 val64 |= TX_FIFO_SPECIAL_FUNC;
4282
4283 writeq(val64, &tx_fifo->List_Control);
4284
4285 mmiowb();
4286
4287 put_off++;
4288 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4289 put_off = 0;
4290 fifo->tx_curr_put_info.offset = put_off;
4291
4292
4293 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4294 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4295 DBG_PRINT(TX_DBG,
4296 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4297 put_off, get_off);
4298 s2io_stop_tx_queue(sp, fifo->fifo_no);
4299 }
4300 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4301 dev->trans_start = jiffies;
4302 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4303
4304 if (sp->config.intr_type == MSI_X)
4305 tx_intr_handler(fifo);
4306
4307 return 0;
4308pci_map_failed:
4309 stats->pci_map_fail_cnt++;
4310 s2io_stop_tx_queue(sp, fifo->fifo_no);
4311 stats->mem_freed += skb->truesize;
4312 dev_kfree_skb(skb);
4313 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4314 return 0;
4315}
4316
4317static void
4318s2io_alarm_handle(unsigned long data)
4319{
4320 struct s2io_nic *sp = (struct s2io_nic *)data;
4321 struct net_device *dev = sp->dev;
4322
4323 s2io_handle_errors(dev);
4324 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4325}
4326
4327static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4328{
4329 struct ring_info *ring = (struct ring_info *)dev_id;
4330 struct s2io_nic *sp = ring->nic;
4331 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4332 struct net_device *dev = sp->dev;
4333
4334 if (unlikely(!is_s2io_card_up(sp)))
4335 return IRQ_HANDLED;
4336
4337 if (sp->config.napi) {
4338 u8 __iomem *addr = NULL;
4339 u8 val8 = 0;
4340
4341 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4342 addr += (7 - ring->ring_no);
4343 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4344 writeb(val8, addr);
4345 val8 = readb(addr);
4346 netif_rx_schedule(dev, &ring->napi);
4347 } else {
4348 rx_intr_handler(ring, 0);
4349 s2io_chk_rx_buffers(sp, ring);
4350 }
4351
4352 return IRQ_HANDLED;
4353}
4354
4355static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4356{
4357 int i;
4358 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4359 struct s2io_nic *sp = fifos->nic;
4360 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4361 struct config_param *config = &sp->config;
4362 u64 reason;
4363
4364 if (unlikely(!is_s2io_card_up(sp)))
4365 return IRQ_NONE;
4366
4367 reason = readq(&bar0->general_int_status);
4368 if (unlikely(reason == S2IO_MINUS_ONE))
4369
4370 return IRQ_HANDLED;
4371
4372 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4373 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4374
4375 if (reason & GEN_INTR_TXPIC)
4376 s2io_txpic_intr_handle(sp);
4377
4378 if (reason & GEN_INTR_TXTRAFFIC)
4379 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4380
4381 for (i = 0; i < config->tx_fifo_num; i++)
4382 tx_intr_handler(&fifos[i]);
4383
4384 writeq(sp->general_int_mask, &bar0->general_int_mask);
4385 readl(&bar0->general_int_status);
4386 return IRQ_HANDLED;
4387 }
4388
4389 return IRQ_NONE;
4390}
4391
4392static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4393{
4394 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4395 u64 val64;
4396
4397 val64 = readq(&bar0->pic_int_status);
4398 if (val64 & PIC_INT_GPIO) {
4399 val64 = readq(&bar0->gpio_int_reg);
4400 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4401 (val64 & GPIO_INT_REG_LINK_UP)) {
4402
4403
4404
4405
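 /*
  * Both link-up and link-down are flagged: the link state is
  * unstable.  Acknowledge both interrupts and unmask them so the
  * state is re-evaluated on the next transition.
  */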
4406 val64 |= GPIO_INT_REG_LINK_DOWN;
4407 val64 |= GPIO_INT_REG_LINK_UP;
4408 writeq(val64, &bar0->gpio_int_reg);
4409 val64 = readq(&bar0->gpio_int_mask);
4410 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4411 GPIO_INT_MASK_LINK_DOWN);
4412 writeq(val64, &bar0->gpio_int_mask);
4413 } else if (val64 & GPIO_INT_REG_LINK_UP) {
4415 val64 = readq(&bar0->adapter_status);
4416
4417 val64 = readq(&bar0->adapter_control);
4418 val64 |= ADAPTER_CNTL_EN;
4419 writeq(val64, &bar0->adapter_control);
4420 val64 |= ADAPTER_LED_ON;
4421 writeq(val64, &bar0->adapter_control);
4422 if (!sp->device_enabled_once)
4423 sp->device_enabled_once = 1;
4424
4425 s2io_link(sp, LINK_UP);
4426
4427
4428
4429
4430 val64 = readq(&bar0->gpio_int_mask);
4431 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4432 val64 |= GPIO_INT_MASK_LINK_UP;
4433 writeq(val64, &bar0->gpio_int_mask);
4434
4435 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4436 val64 = readq(&bar0->adapter_status);
4437 s2io_link(sp, LINK_DOWN);
4438
4439 val64 = readq(&bar0->gpio_int_mask);
4440 val64 &= ~GPIO_INT_MASK_LINK_UP;
4441 val64 |= GPIO_INT_MASK_LINK_DOWN;
4442 writeq(val64, &bar0->gpio_int_mask);
4443
4444
4445 val64 = readq(&bar0->adapter_control);
4446 val64 &= ~ADAPTER_LED_ON;
4447 writeq(val64, &bar0->adapter_control);
4448 }
4449 }
4450 val64 = readq(&bar0->gpio_int_mask);
4451}
4452
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
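/*
 * do_s2io_chk_alarm_bit - checks an alarm register against a bit mask.
 * If any bit in @value is set in the register at @addr, the register is
 * written back to acknowledge the alarm, the counter at @cnt is
 * incremented and 1 is returned so the caller can decide whether a
 * reset is needed; otherwise 0 is returned.
 */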
4463static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4464 unsigned long long *cnt)
4465{
4466 u64 val64;
4467 val64 = readq(addr);
4468 if (val64 & value) {
4469 writeq(val64, addr);
4470 (*cnt)++;
4471 return 1;
4472 }
4473 return 0;
4474
4475}
4476
4477
4478
4479
4480
4481
4482
4483
4484
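/*
 * s2io_handle_errors - polls the adapter alarm/error registers.
 * Called from the alarm timer.  Updates the XPAK counters, checks the
 * serr, parity, ring-bump, TxDMA, RxDMA, MAC, XGXS and memory
 * controller error registers, accumulates software statistics and
 * schedules a soft reset for fatal conditions.
 */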
4485static void s2io_handle_errors(void * dev_id)
4486{
4487 struct net_device *dev = (struct net_device *) dev_id;
4488 struct s2io_nic *sp = dev->priv;
4489 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4490 u64 temp64 = 0, val64 = 0;
4491 int i = 0;
4492
4493 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4494 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4495
4496 if (!is_s2io_card_up(sp))
4497 return;
4498
4499 if (pci_channel_offline(sp->pdev))
4500 return;
4501
4502 memset(&sw_stat->ring_full_cnt, 0,
4503 sizeof(sw_stat->ring_full_cnt));
4504
4505
4506 if (stats->xpak_timer_count < 72000) {
4507
4508 stats->xpak_timer_count++;
4509 } else {
4510 s2io_updt_xpak_counter(dev);
4511
4512 stats->xpak_timer_count = 0;
4513 }
4514
4515
4516 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4517 val64 = readq(&bar0->mac_rmac_err_reg);
4518 writeq(val64, &bar0->mac_rmac_err_reg);
4519 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4520 schedule_work(&sp->set_link_task);
4521 }
4522
4523
4524 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4525 &sw_stat->serious_err_cnt))
4526 goto reset;
4527
4528
4529 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4530 &sw_stat->parity_err_cnt))
4531 goto reset;
4532
4533
4534 if (sp->device_type == XFRAME_II_DEVICE) {
4535 val64 = readq(&bar0->ring_bump_counter1);
4536 for (i = 0; i < 4; i++) {
4537 temp64 = (val64 & vBIT(0xFFFF, (i * 16), 16));
4538 temp64 >>= 64 - ((i + 1) * 16);
4539 sw_stat->ring_full_cnt[i] += temp64;
4540 }
4541
4542 val64 = readq(&bar0->ring_bump_counter2);
4543 for (i = 0; i < 4; i++) {
4544 temp64 = (val64 & vBIT(0xFFFF, (i * 16), 16));
4545 temp64 >>= 64 - ((i + 1) * 16);
4546 sw_stat->ring_full_cnt[i+4] += temp64;
4547 }
4548 }
4549
4550 val64 = readq(&bar0->txdma_int_status);
4551
4552 if (val64 & TXDMA_PFC_INT) {
4553 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4554 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4555 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4556 &sw_stat->pfc_err_cnt))
4557 goto reset;
4558 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4559 &sw_stat->pfc_err_cnt);
4560 }
4561
4562
4563 if (val64 & TXDMA_TDA_INT) {
4564 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4565 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4566 &sw_stat->tda_err_cnt))
4567 goto reset;
4568 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4569 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4570 }
4571
4572 if (val64 & TXDMA_PCC_INT) {
4573 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4574 | PCC_N_SERR | PCC_6_COF_OV_ERR
4575 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4576 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4577 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4578 &sw_stat->pcc_err_cnt))
4579 goto reset;
4580 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4581 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4582 }
4583
4584
4585 if (val64 & TXDMA_TTI_INT) {
4586 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4587 &sw_stat->tti_err_cnt))
4588 goto reset;
4589 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4590 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4591 }
4592
4593
4594 if (val64 & TXDMA_LSO_INT) {
4595 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4596 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4597 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4598 goto reset;
4599 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4600 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4601 }
4602
4603
4604 if (val64 & TXDMA_TPA_INT) {
4605 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4606 &sw_stat->tpa_err_cnt))
4607 goto reset;
4608 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4609 &sw_stat->tpa_err_cnt);
4610 }
4611
4612
4613 if (val64 & TXDMA_SM_INT) {
4614 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4615 &sw_stat->sm_err_cnt))
4616 goto reset;
4617 }
4618
4619 val64 = readq(&bar0->mac_int_status);
4620 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4621 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4622 &bar0->mac_tmac_err_reg,
4623 &sw_stat->mac_tmac_err_cnt))
4624 goto reset;
4625 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4626 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4627 &bar0->mac_tmac_err_reg,
4628 &sw_stat->mac_tmac_err_cnt);
4629 }
4630
4631 val64 = readq(&bar0->xgxs_int_status);
4632 if (val64 & XGXS_INT_STATUS_TXGXS) {
4633 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4634 &bar0->xgxs_txgxs_err_reg,
4635 &sw_stat->xgxs_txgxs_err_cnt))
4636 goto reset;
4637 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4638 &bar0->xgxs_txgxs_err_reg,
4639 &sw_stat->xgxs_txgxs_err_cnt);
4640 }
4641
4642 val64 = readq(&bar0->rxdma_int_status);
4643 if (val64 & RXDMA_INT_RC_INT_M) {
4644 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4645 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4646 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4647 goto reset;
4648 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4649 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4650 &sw_stat->rc_err_cnt);
4651 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4652 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4653 &sw_stat->prc_pcix_err_cnt))
4654 goto reset;
4655 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4656 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4657 &sw_stat->prc_pcix_err_cnt);
4658 }
4659
4660 if (val64 & RXDMA_INT_RPA_INT_M) {
4661 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4662 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4663 goto reset;
4664 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4665 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4666 }
4667
4668 if (val64 & RXDMA_INT_RDA_INT_M) {
4669 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4670 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4671 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4672 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4673 goto reset;
4674 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4675 | RDA_MISC_ERR | RDA_PCIX_ERR,
4676 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4677 }
4678
4679 if (val64 & RXDMA_INT_RTI_INT_M) {
4680 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4681 &sw_stat->rti_err_cnt))
4682 goto reset;
4683 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4684 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4685 }
4686
4687 val64 = readq(&bar0->mac_int_status);
4688 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4689 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4690 &bar0->mac_rmac_err_reg,
4691 &sw_stat->mac_rmac_err_cnt))
4692 goto reset;
4693 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4694 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4695 &sw_stat->mac_rmac_err_cnt);
4696 }
4697
4698 val64 = readq(&bar0->xgxs_int_status);
4699 if (val64 & XGXS_INT_STATUS_RXGXS) {
4700 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4701 &bar0->xgxs_rxgxs_err_reg,
4702 &sw_stat->xgxs_rxgxs_err_cnt))
4703 goto reset;
4704 }
4705
4706 val64 = readq(&bar0->mc_int_status);
4707 if (val64 & MC_INT_STATUS_MC_INT) {
4708 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4709 &sw_stat->mc_err_cnt))
4710 goto reset;
4711
4712
4713 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4714 writeq(val64, &bar0->mc_err_reg);
4715 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4716 sw_stat->double_ecc_errs++;
4717 if (sp->device_type != XFRAME_II_DEVICE) {
4718
4719
4720
4721 if (val64 &
4722 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4723 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4724 goto reset;
4725 }
4726 } else
4727 sw_stat->single_ecc_errs++;
4728 }
4729 }
4730 return;
4731
4732reset:
4733 s2io_stop_all_tx_queue(sp);
4734 schedule_work(&sp->rst_timer_task);
4735 sw_stat->soft_reset_cnt++;
4736 return;
4737}
4738
4739
4740
4741
4742
4743
4744
4745
4746
4747
4748
4749
4750
4751
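/**
 *  s2io_isr - ISR handler of the device.
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the net_device structure of the NIC.
 *  Description: Identifies the reason for the interrupt (Rx/Tx traffic
 *  or PIC alarms), masks interrupts while servicing, processes the Rx
 *  rings and Tx FIFOs and restores the interrupt mask on exit.
 *  Return value: IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise.
 */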
4752static irqreturn_t s2io_isr(int irq, void *dev_id)
4753{
4754 struct net_device *dev = (struct net_device *) dev_id;
4755 struct s2io_nic *sp = dev->priv;
4756 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4757 int i;
4758 u64 reason = 0;
4759 struct mac_info *mac_control;
4760 struct config_param *config;
4761
4762
4763 if (pci_channel_offline(sp->pdev))
4764 return IRQ_NONE;
4765
4766 if (!is_s2io_card_up(sp))
4767 return IRQ_NONE;
4768
4769 mac_control = &sp->mac_control;
4770 config = &sp->config;
4771
4772
4773
4774
4775
4776
4777
4778
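 /*
  * Identify the cause of the interrupt and call the appropriate
  * handlers from the interrupt context itself.
  */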
4779 reason = readq(&bar0->general_int_status);
4780
4781 if (unlikely(reason == S2IO_MINUS_ONE)) {
4782
4783 return IRQ_HANDLED;
4784 }
4785
4786 if (reason & (GEN_INTR_RXTRAFFIC |
4787 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4788 {
4789 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4790
4791 if (config->napi) {
4792 if (reason & GEN_INTR_RXTRAFFIC) {
4793 netif_rx_schedule(dev, &sp->napi);
4794 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4795 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4796 readl(&bar0->rx_traffic_int);
4797 }
4798 } else {
4799
4800
4801
4802
4803
4804 if (reason & GEN_INTR_RXTRAFFIC)
4805 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4806
4807 for (i = 0; i < config->rx_ring_num; i++)
4808 rx_intr_handler(&mac_control->rings[i], 0);
4809 }
4810
4811
4812
4813
4814
4815
4816 if (reason & GEN_INTR_TXTRAFFIC)
4817 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4818
4819 for (i = 0; i < config->tx_fifo_num; i++)
4820 tx_intr_handler(&mac_control->fifos[i]);
4821
4822 if (reason & GEN_INTR_TXPIC)
4823 s2io_txpic_intr_handle(sp);
4824
4825
4826
4827
4828 if (!config->napi) {
4829 for (i = 0; i < config->rx_ring_num; i++)
4830 s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
4831 }
4832 writeq(sp->general_int_mask, &bar0->general_int_mask);
4833 readl(&bar0->general_int_status);
4834
4835 return IRQ_HANDLED;
4836
4837 } else if (!reason) {
4839
4840 return IRQ_NONE;
4841 }
4842
4843 return IRQ_HANDLED;
4844}
4845
4846
4847
4848
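/*
 * s2io_updt_stats - kicks off a one-shot DMA of the hardware statistics
 * block and polls briefly (bounded retries) for the strobe bit to clear.
 */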
4849static void s2io_updt_stats(struct s2io_nic *sp)
4850{
4851 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4852 u64 val64;
4853 int cnt = 0;
4854
4855 if (is_s2io_card_up(sp)) {
4856
4857 val64 = SET_UPDT_CLICKS(10) |
4858 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4859 writeq(val64, &bar0->stat_cfg);
4860 do {
4861 udelay(100);
4862 val64 = readq(&bar0->stat_cfg);
4863 if (!(val64 & s2BIT(0)))
4864 break;
4865 cnt++;
4866 if (cnt == 5)
4867 break;
4868 } while (1);
4869 }
4870}
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
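/**
 *  s2io_get_stats - Updates the device statistics structure.
 *  @dev: pointer to the device structure.
 *  Description: Refreshes the hardware statistics block and folds the
 *  MAC counters into the net_device_stats structure maintained by the
 *  driver.
 *  Return value: pointer to the updated net_device_stats structure.
 */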
4882static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4883{
4884 struct s2io_nic *sp = dev->priv;
4885 struct mac_info *mac_control;
4886 struct config_param *config;
4887 int i;
4888
4889
4890 mac_control = &sp->mac_control;
4891 config = &sp->config;
4892
4893
4894 s2io_updt_stats(sp);
4895
4896
4897
4898 dev->stats.tx_packets +=
4899 le32_to_cpu(mac_control->stats_info->tmac_frms) -
4900 sp->stats.tx_packets;
4901 sp->stats.tx_packets =
4902 le32_to_cpu(mac_control->stats_info->tmac_frms);
4903 dev->stats.tx_errors +=
4904 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms) -
4905 sp->stats.tx_errors;
4906 sp->stats.tx_errors =
4907 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4908 dev->stats.rx_errors +=
4909 le64_to_cpu(mac_control->stats_info->rmac_drop_frms) -
4910 sp->stats.rx_errors;
4911 sp->stats.rx_errors =
4912 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4913 dev->stats.multicast =
4914 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) -
4915 sp->stats.multicast;
4916 sp->stats.multicast =
4917 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4918 dev->stats.rx_length_errors =
4919 le64_to_cpu(mac_control->stats_info->rmac_long_frms) -
4920 sp->stats.rx_length_errors;
4921 sp->stats.rx_length_errors =
4922 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4923
4924
4925 dev->stats.rx_packets = dev->stats.rx_bytes = 0;
4926 for (i = 0; i < config->rx_ring_num; i++) {
4927 dev->stats.rx_packets += mac_control->rings[i].rx_packets;
4928 dev->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4929 }
4930
4931 return (&dev->stats);
4932}
4933
4934
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
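/**
 *  s2io_set_multicast - entry point for multicast address enable/disable.
 *  @dev: pointer to the device structure.
 *  Description: Programs the RMAC address CAM: enables or disables the
 *  all-multicast entry, toggles promiscuous mode (and VLAN stripping)
 *  and installs the multicast addresses currently attached to the device.
 */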
4947static void s2io_set_multicast(struct net_device *dev)
4948{
4949 int i, j, prev_cnt;
4950 struct dev_mc_list *mclist;
4951 struct s2io_nic *sp = dev->priv;
4952 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4953 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4954 0xfeffffffffffULL;
4955 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4956 void __iomem *add;
4957 struct config_param *config = &sp->config;
4958
4959 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4960
4961 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4962 &bar0->rmac_addr_data0_mem);
4963 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4964 &bar0->rmac_addr_data1_mem);
4965 val64 = RMAC_ADDR_CMD_MEM_WE |
4966 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4967 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4968 writeq(val64, &bar0->rmac_addr_cmd_mem);
4969
4970 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4971 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4972 S2IO_BIT_RESET);
4973
4974 sp->m_cast_flg = 1;
4975 sp->all_multi_pos = config->max_mc_addr - 1;
4976 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4977
4978 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4979 &bar0->rmac_addr_data0_mem);
4980 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4981 &bar0->rmac_addr_data1_mem);
4982 val64 = RMAC_ADDR_CMD_MEM_WE |
4983 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4984 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4985 writeq(val64, &bar0->rmac_addr_cmd_mem);
4986
4987 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4988 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4989 S2IO_BIT_RESET);
4990
4991 sp->m_cast_flg = 0;
4992 sp->all_multi_pos = 0;
4993 }
4994
4995 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4996
4997 add = &bar0->mac_cfg;
4998 val64 = readq(&bar0->mac_cfg);
4999 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
5000
5001 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5002 writel((u32) val64, add);
5003 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5004 writel((u32) (val64 >> 32), (add + 4));
5005
5006 if (vlan_tag_strip != 1) {
5007 val64 = readq(&bar0->rx_pa_cfg);
5008 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5009 writeq(val64, &bar0->rx_pa_cfg);
5010 sp->vlan_strip_flag = 0;
5011 }
5012
5013 val64 = readq(&bar0->mac_cfg);
5014 sp->promisc_flg = 1;
5015 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5016 dev->name);
5017 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5018
5019 add = &bar0->mac_cfg;
5020 val64 = readq(&bar0->mac_cfg);
5021 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5022
5023 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5024 writel((u32) val64, add);
5025 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5026 writel((u32) (val64 >> 32), (add + 4));
5027
5028 if (vlan_tag_strip != 0) {
5029 val64 = readq(&bar0->rx_pa_cfg);
5030 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5031 writeq(val64, &bar0->rx_pa_cfg);
5032 sp->vlan_strip_flag = 1;
5033 }
5034
5035 val64 = readq(&bar0->mac_cfg);
5036 sp->promisc_flg = 0;
5037 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5038 dev->name);
5039 }
5040
5041
5042 if ((!sp->m_cast_flg) && dev->mc_count) {
5043 if (dev->mc_count >
5044 (config->max_mc_addr - config->max_mac_addr)) {
5045 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5046 dev->name);
5047 DBG_PRINT(ERR_DBG, "can be added, please enable ");
5048 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5049 return;
5050 }
5051
5052 prev_cnt = sp->mc_addr_count;
5053 sp->mc_addr_count = dev->mc_count;
5054
5055
5056 for (i = 0; i < prev_cnt; i++) {
5057 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5058 &bar0->rmac_addr_data0_mem);
5059 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5060 &bar0->rmac_addr_data1_mem);
5061 val64 = RMAC_ADDR_CMD_MEM_WE |
5062 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5063 RMAC_ADDR_CMD_MEM_OFFSET
5064 (config->mc_start_offset + i);
5065 writeq(val64, &bar0->rmac_addr_cmd_mem);
5066
5067
5068 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5069 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5070 S2IO_BIT_RESET)) {
5071 DBG_PRINT(ERR_DBG, "%s: Adding ",
5072 dev->name);
5073 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5074 return;
5075 }
5076 }
5077
5078
5079 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5080 i++, mclist = mclist->next) {
5081 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5082 ETH_ALEN);
5083 mac_addr = 0;
5084 for (j = 0; j < ETH_ALEN; j++) {
5085 mac_addr |= mclist->dmi_addr[j];
5086 mac_addr <<= 8;
5087 }
5088 mac_addr >>= 8;
5089 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5090 &bar0->rmac_addr_data0_mem);
5091 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5092 &bar0->rmac_addr_data1_mem);
5093 val64 = RMAC_ADDR_CMD_MEM_WE |
5094 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5095 RMAC_ADDR_CMD_MEM_OFFSET
5096 (i + config->mc_start_offset);
5097 writeq(val64, &bar0->rmac_addr_cmd_mem);
5098
5099
5100 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5101 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5102 S2IO_BIT_RESET)) {
5103 DBG_PRINT(ERR_DBG, "%s: Adding ",
5104 dev->name);
5105 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5106 return;
5107 }
5108 }
5109 }
5110}
5111
5112
5113
5114
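/* Reads every CAM entry into the driver's software copy (def_mac_addr)
 * so the addresses can be restored after a reset. */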
5115void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5116{
5117 int offset;
5118 u64 mac_addr = 0x0;
5119 struct config_param *config = &sp->config;
5120
5121
5122 for (offset = 0; offset < config->max_mc_addr; offset++) {
5123 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5124
5125 if (mac_addr == FAILURE)
5126 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5127 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5128 }
5129}
5130
5131
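/* Re-programs all stored unicast and multicast addresses back into the
 * CAM, typically after an adapter reset. */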
5132static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5133{
5134 int offset;
5135 struct config_param *config = &sp->config;
5136
5137 for (offset = 0; offset < config->max_mac_addr; offset++)
5138 do_s2io_prog_unicast(sp->dev,
5139 sp->def_mac_addr[offset].mac_addr);
5140
5141
5142 for (offset = config->mc_start_offset;
5143 offset < config->max_mc_addr; offset++)
5144 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5145}
5146
5147
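/* Adds a multicast address to the first unused CAM entry in the
 * multicast region; returns SUCCESS if the address is already present,
 * FAILURE if the CAM is full. */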
5148static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5149{
5150 int i;
5151 u64 mac_addr = 0;
5152 struct config_param *config = &sp->config;
5153
5154 for (i = 0; i < ETH_ALEN; i++) {
5155 mac_addr <<= 8;
5156 mac_addr |= addr[i];
5157 }
5158 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5159 return SUCCESS;
5160
5161
5162 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5163 u64 tmp64;
5164 tmp64 = do_s2io_read_unicast_mc(sp, i);
5165 if (tmp64 == S2IO_DISABLE_MAC_ENTRY)
5166 break;
5167
5168 if (tmp64 == mac_addr)
5169 return SUCCESS;
5170 }
5171 if (i == config->max_mc_addr) {
5172 DBG_PRINT(ERR_DBG,
5173 "CAM full no space left for multicast MAC\n");
5174 return FAILURE;
5175 }
5176
5177 do_s2io_copy_mac_addr(sp, i, mac_addr);
5178
5179 return (do_s2io_add_mac(sp, mac_addr, i));
5180}
5181
5182
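/* Writes a MAC address into the CAM entry at offset @off and waits for
 * the command to complete. */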
5183static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5184{
5185 u64 val64;
5186 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5187
5188 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5189 &bar0->rmac_addr_data0_mem);
5190
5191 val64 =
5192 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5193 RMAC_ADDR_CMD_MEM_OFFSET(off);
5194 writeq(val64, &bar0->rmac_addr_cmd_mem);
5195
5196
5197 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5198 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5199 S2IO_BIT_RESET)) {
5200 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5201 return FAILURE;
5202 }
5203 return SUCCESS;
5204}
5205
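/* Searches the CAM for @addr and overwrites the matching entry with the
 * disabled-entry pattern; returns FAILURE if the address is not found. */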
5206static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5207{
5208 int offset;
5209 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5210 struct config_param *config = &sp->config;
5211
5212 for (offset = 1;
5213 offset < config->max_mc_addr; offset++) {
5214 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5215 if (tmp64 == addr) {
5216
5217 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5218 return FAILURE;
5219
5220 do_s2io_store_unicast_mc(sp);
5221 return SUCCESS;
5222 }
5223 }
5224 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5225 (unsigned long long)addr);
5226 return FAILURE;
5227}
5228
5229
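/* Reads back the MAC address stored in the CAM entry at @offset;
 * returns FAILURE if the read command times out. */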
5230static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5231{
5232 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5233 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5234
5235
5236 val64 =
5237 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5238 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5239 writeq(val64, &bar0->rmac_addr_cmd_mem);
5240
5241
5242 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5243 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5244 S2IO_BIT_RESET)) {
5245 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5246 return FAILURE;
5247 }
5248 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5249 return (tmp64 >> 16);
5250}
5251
5252
5253
5254
5255
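/* net_device entry point for changing the interface MAC address:
 * validates the address, copies it to dev->dev_addr and programs it
 * into the CAM via do_s2io_prog_unicast(). */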
5256static int s2io_set_mac_addr(struct net_device *dev, void *p)
5257{
5258 struct sockaddr *addr = p;
5259
5260 if (!is_valid_ether_addr(addr->sa_data))
5261 return -EINVAL;
5262
5263 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5264
5265
5266 return (do_s2io_prog_unicast(dev, dev->dev_addr));
5267}
5268
5269
5270
5271
5272
5273
5274
5275
5276
5277
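/**
 *  do_s2io_prog_unicast - Programs the Xframe MAC address.
 *  @dev: pointer to the device structure.
 *  @addr: the new unicast MAC address.
 *  Description: Adds the address to the first free unicast CAM entry,
 *  unless it matches the permanent address or is already present.
 *  Return value: SUCCESS on success, FAILURE when the CAM is full or
 *  the write fails.
 */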
5278static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5279{
5280 struct s2io_nic *sp = dev->priv;
5281 register u64 mac_addr = 0, perm_addr = 0;
5282 int i;
5283 u64 tmp64;
5284 struct config_param *config = &sp->config;
5285
5286
5287
5288
5289
5290
5291 for (i = 0; i < ETH_ALEN; i++) {
5292 mac_addr <<= 8;
5293 mac_addr |= addr[i];
5294 perm_addr <<= 8;
5295 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5296 }
5297
5298
5299 if (mac_addr == perm_addr)
5300 return SUCCESS;
5301
5302
5303 for (i = 1; i < config->max_mac_addr; i++) {
5304 tmp64 = do_s2io_read_unicast_mc(sp, i);
5305 if (tmp64 == S2IO_DISABLE_MAC_ENTRY)
5306 break;
5307
5308 if (tmp64 == mac_addr) {
5309 DBG_PRINT(INFO_DBG,
5310 "MAC addr:0x%llx already present in CAM\n",
5311 (unsigned long long)mac_addr);
5312 return SUCCESS;
5313 }
5314 }
5315 if (i == config->max_mac_addr) {
5316 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5317 return FAILURE;
5318 }
5319
5320 do_s2io_copy_mac_addr(sp, i, mac_addr);
5321 return (do_s2io_add_mac(sp, mac_addr, i));
5322}
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
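/**
 * s2io_ethtool_sset - Sets different link parameters.
 * @dev: pointer to the device structure.
 * @info: parameters supplied by ethtool.
 * Description: The adapter supports only 10 Gbps full duplex with
 * autonegotiation disabled, so any other request is rejected; a valid
 * request simply bounces the interface (close/open).
 * Return value: 0 on success, -EINVAL otherwise.
 */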
5336static int s2io_ethtool_sset(struct net_device *dev,
5337 struct ethtool_cmd *info)
5338{
5339 struct s2io_nic *sp = dev->priv;
5340 if ((info->autoneg == AUTONEG_ENABLE) ||
5341 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5342 return -EINVAL;
5343
5344 s2io_close(sp->dev);
5345 s2io_open(sp->dev);
5347
5348 return 0;
5349}
5350
5351
5352
5353
5354
5355
5356
5357
5358
5359
5360
5361
5362
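/**
 * s2io_ethtool_gset - Returns the current link settings: fixed
 * 10 Gbps full duplex over fibre when the carrier is up, unknown (-1)
 * speed/duplex otherwise.
 */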
5363static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5364{
5365 struct s2io_nic *sp = dev->priv;
5366 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5367 info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
5368 info->port = PORT_FIBRE;
5369
5370
5371 info->transceiver = XCVR_EXTERNAL;
5372
5373 if (netif_carrier_ok(sp->dev)) {
5374 info->speed = 10000;
5375 info->duplex = DUPLEX_FULL;
5376 } else {
5377 info->speed = -1;
5378 info->duplex = -1;
5379 }
5380
5381 info->autoneg = AUTONEG_DISABLE;
5382 return 0;
5383}
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
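/**
 * s2io_ethtool_gdrvinfo - Returns driver specific information:
 * driver name, version, bus info and register/EEPROM dump lengths.
 */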
5397static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5398 struct ethtool_drvinfo *info)
5399{
5400 struct s2io_nic *sp = dev->priv;
5401
5402 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5403 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5404 strncpy(info->fw_version, "", sizeof(info->fw_version));
5405 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5406 info->regdump_len = XENA_REG_SPACE;
5407 info->eedump_len = XENA_EEPROM_SPACE;
5408}
5409
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419
5420
5421
5422
5423
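/**
 * s2io_ethtool_gregs - Dumps the entire register space of the adapter
 * (XENA_REG_SPACE bytes) into the user supplied buffer, 8 bytes at a time.
 */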
5424static void s2io_ethtool_gregs(struct net_device *dev,
5425 struct ethtool_regs *regs, void *space)
5426{
5427 int i;
5428 u64 reg;
5429 u8 *reg_space = (u8 *) space;
5430 struct s2io_nic *sp = dev->priv;
5431
5432 regs->len = XENA_REG_SPACE;
5433 regs->version = sp->pdev->subsystem_device;
5434
5435 for (i = 0; i < regs->len; i += 8) {
5436 reg = readq(sp->bar0 + i);
5437 memcpy((reg_space + i), &reg, 8);
5438 }
5439}
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
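/*
 * s2io_phy_id - timer callback used by the LED identify operation;
 * toggles GPIO 0 (Xframe II and later Xframe I revisions) or the
 * adapter LED every half second.
 */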
5450static void s2io_phy_id(unsigned long data)
5451{
5452 struct s2io_nic *sp = (struct s2io_nic *) data;
5453 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5454 u64 val64 = 0;
5455 u16 subid;
5456
5457 subid = sp->pdev->subsystem_device;
5458 if ((sp->device_type == XFRAME_II_DEVICE) ||
5459 ((subid & 0xFF) >= 0x07)) {
5460 val64 = readq(&bar0->gpio_control);
5461 val64 ^= GPIO_CTRL_GPIO_0;
5462 writeq(val64, &bar0->gpio_control);
5463 } else {
5464 val64 = readq(&bar0->adapter_control);
5465 val64 ^= ADAPTER_LED_ON;
5466 writeq(val64, &bar0->adapter_control);
5467 }
5468
5469 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5470}
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480
5481
5482
5483
5484
5485
5486
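/**
 * s2io_ethtool_idnic - ethtool "identify NIC" handler.  Blinks the
 * adapter LED for @data seconds (or a default period when @data is 0)
 * so the physical port can be located, then restores the GPIO state on
 * adapters with faulty link indicators.
 */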
5487static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5488{
5489 u64 val64 = 0, last_gpio_ctrl_val;
5490 struct s2io_nic *sp = dev->priv;
5491 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5492 u16 subid;
5493
5494 subid = sp->pdev->subsystem_device;
5495 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5496 if ((sp->device_type == XFRAME_I_DEVICE) &&
5497 ((subid & 0xFF) < 0x07)) {
5498 val64 = readq(&bar0->adapter_control);
5499 if (!(val64 & ADAPTER_CNTL_EN)) {
5500 printk(KERN_ERR
5501 "Adapter Link down, cannot blink LED\n");
5502 return -EFAULT;
5503 }
5504 }
5505 if (sp->id_timer.function == NULL) {
5506 init_timer(&sp->id_timer);
5507 sp->id_timer.function = s2io_phy_id;
5508 sp->id_timer.data = (unsigned long) sp;
5509 }
5510 mod_timer(&sp->id_timer, jiffies);
5511 if (data)
5512 msleep_interruptible(data * HZ);
5513 else
5514 msleep_interruptible(MAX_FLICKER_TIME);
5515 del_timer_sync(&sp->id_timer);
5516
5517 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5518 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5519 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5520 }
5521
5522 return 0;
5523}
5524
5525static void s2io_ethtool_gringparam(struct net_device *dev,
5526 struct ethtool_ringparam *ering)
5527{
5528 struct s2io_nic *sp = dev->priv;
5529 int i, tx_desc_count = 0, rx_desc_count = 0;
5530
5531 if (sp->rxd_mode == RXD_MODE_1)
5532 ering->rx_max_pending = MAX_RX_DESC_1;
5533 else if (sp->rxd_mode == RXD_MODE_3B)
5534 ering->rx_max_pending = MAX_RX_DESC_2;
5535
5536 ering->tx_max_pending = MAX_TX_DESC;
5537 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5538 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5539
5540 DBG_PRINT(INFO_DBG, "\nmax txds : %d\n", sp->config.max_txds);
5541 ering->tx_pending = tx_desc_count;
5542 rx_desc_count = 0;
5543 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5544 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5545
5546 ering->rx_pending = rx_desc_count;
5547
5548 ering->rx_mini_max_pending = 0;
5549 ering->rx_mini_pending = 0;
5550 if (sp->rxd_mode == RXD_MODE_1)
5551 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5552 else if (sp->rxd_mode == RXD_MODE_3B)
5553 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5554 ering->rx_jumbo_pending = rx_desc_count;
5555}
5556
5557
5558
5559
5560
5561
5562
5563
5564
5565
5566
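/**
 * s2io_ethtool_getpause_data - reports whether pause frame generation
 * (Tx) and reception (Rx) are currently enabled in the RMAC.
 */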
5567static void s2io_ethtool_getpause_data(struct net_device *dev,
5568 struct ethtool_pauseparam *ep)
5569{
5570 u64 val64;
5571 struct s2io_nic *sp = dev->priv;
5572 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5573
5574 val64 = readq(&bar0->rmac_pause_cfg);
5575 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5576 ep->tx_pause = TRUE;
5577 if (val64 & RMAC_PAUSE_RX_ENABLE)
5578 ep->rx_pause = TRUE;
5579 ep->autoneg = FALSE;
5580}
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590
5591
5592
5593
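/**
 * s2io_ethtool_setpause_data - enables or disables pause frame
 * generation and reception in the RMAC pause configuration register.
 */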
5594static int s2io_ethtool_setpause_data(struct net_device *dev,
5595 struct ethtool_pauseparam *ep)
5596{
5597 u64 val64;
5598 struct s2io_nic *sp = dev->priv;
5599 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5600
5601 val64 = readq(&bar0->rmac_pause_cfg);
5602 if (ep->tx_pause)
5603 val64 |= RMAC_PAUSE_GEN_ENABLE;
5604 else
5605 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5606 if (ep->rx_pause)
5607 val64 |= RMAC_PAUSE_RX_ENABLE;
5608 else
5609 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5610 writeq(val64, &bar0->rmac_pause_cfg);
5611 return 0;
5612}
5613
5614
5615
5616
5617
5618
5619
5620
5621
5622
5623
5624
5625
5626
5627
5628
5629
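/*
 * read_eeprom - reads EEPROM data at the given offset, using the I2C
 * interface on Xframe I and the SPI interface on Xframe II.  Polls with
 * a bounded retry count for completion and returns 0 on success,
 * non-zero on timeout or NACK.
 */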
5630#define S2IO_DEV_ID 5
5631static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5632{
5633 int ret = -1;
5634 u32 exit_cnt = 0;
5635 u64 val64;
5636 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5637
5638 if (sp->device_type == XFRAME_I_DEVICE) {
5639 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5640 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5641 I2C_CONTROL_CNTL_START;
5642 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5643
5644 while (exit_cnt < 5) {
5645 val64 = readq(&bar0->i2c_control);
5646 if (I2C_CONTROL_CNTL_END(val64)) {
5647 *data = I2C_CONTROL_GET_DATA(val64);
5648 ret = 0;
5649 break;
5650 }
5651 msleep(50);
5652 exit_cnt++;
5653 }
5654 }
5655
5656 if (sp->device_type == XFRAME_II_DEVICE) {
5657 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5658 SPI_CONTROL_BYTECNT(0x3) |
5659 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5660 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5661 val64 |= SPI_CONTROL_REQ;
5662 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5663 while (exit_cnt < 5) {
5664 val64 = readq(&bar0->spi_control);
5665 if (val64 & SPI_CONTROL_NACK) {
5666 ret = 1;
5667 break;
5668 } else if (val64 & SPI_CONTROL_DONE) {
5669 *data = readq(&bar0->spi_data);
5670 *data &= 0xffffff;
5671 ret = 0;
5672 break;
5673 }
5674 msleep(50);
5675 exit_cnt++;
5676 }
5677 }
5678 return ret;
5679}
5680
5681
5682
5683
5684
5685
5686
5687
5688
5689
5690
5691
5692
5693
5694
5695
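/*
 * write_eeprom - writes @cnt bytes of @data at EEPROM offset @off,
 * using the I2C interface on Xframe I and the SPI interface on
 * Xframe II.  Returns 0 on success, non-zero on timeout or NACK.
 */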
5696static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5697{
5698 int exit_cnt = 0, ret = -1;
5699 u64 val64;
5700 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5701
5702 if (sp->device_type == XFRAME_I_DEVICE) {
5703 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5704 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5705 I2C_CONTROL_CNTL_START;
5706 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5707
5708 while (exit_cnt < 5) {
5709 val64 = readq(&bar0->i2c_control);
5710 if (I2C_CONTROL_CNTL_END(val64)) {
5711 if (!(val64 & I2C_CONTROL_NACK))
5712 ret = 0;
5713 break;
5714 }
5715 msleep(50);
5716 exit_cnt++;
5717 }
5718 }
5719
5720 if (sp->device_type == XFRAME_II_DEVICE) {
5721 int write_cnt = (cnt == 8) ? 0 : cnt;
5722 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5723
5724 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5725 SPI_CONTROL_BYTECNT(write_cnt) |
5726 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5727 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5728 val64 |= SPI_CONTROL_REQ;
5729 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5730 while (exit_cnt < 5) {
5731 val64 = readq(&bar0->spi_control);
5732 if (val64 & SPI_CONTROL_NACK) {
5733 ret = 1;
5734 break;
5735 } else if (val64 & SPI_CONTROL_DONE) {
5736 ret = 0;
5737 break;
5738 }
5739 msleep(50);
5740 exit_cnt++;
5741 }
5742 }
5743 return ret;
5744}
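/*
 * s2io_vpd_read - reads the Vital Product Data area through PCI config
 * space to obtain the adapter's product name and serial number.
 */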
5745static void s2io_vpd_read(struct s2io_nic *nic)
5746{
5747 u8 *vpd_data;
5748 u8 data;
5749 int i = 0, cnt, fail = 0;
5750 int vpd_addr = 0x80;
5751
5752 if (nic->device_type == XFRAME_II_DEVICE) {
5753 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5754 vpd_addr = 0x80;
5755 } else {
5757 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5758 vpd_addr = 0x50;
5759 }
5760 strcpy(nic->serial_num, "NOT AVAILABLE");
5761
5762 vpd_data = kmalloc(256, GFP_KERNEL);
5763 if (!vpd_data) {
5764 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5765 return;
5766 }
5767 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5768
5769 for (i = 0; i < 256; i += 4) {
5770 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5771 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5772 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5773 for (cnt = 0; cnt < 5; cnt++) {
5774 msleep(2);
5775 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5776 if (data == 0x80)
5777 break;
5778 }
5779 if (cnt >= 5) {
5780 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5781 fail = 1;
5782 break;
5783 }
5784 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5785 (u32 *)&vpd_data[i]);
5786 }
5787
5788 if (!fail) {
5789
5790 for (cnt = 0; cnt < 256; cnt++) {
5791 if ((vpd_data[cnt] == 'S') &&
5792 (vpd_data[cnt+1] == 'N') &&
5793 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5794 memset(nic->serial_num, 0, VPD_STRING_LEN);
5795 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5796 vpd_data[cnt+2]);
5797 break;
5798 }
5799 }
5800 }
5801
5802 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5803 memset(nic->product_name, 0, vpd_data[1]);
5804 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5805 }
5806 kfree(vpd_data);
5807 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5808}
5809
5810
5811
5812
5813
5814
5815
5816
5817
5818
5819
5820
5821
5822
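/**
 * s2io_ethtool_geeprom - reads the requested EEPROM region (clamped to
 * XENA_EEPROM_SPACE) into the buffer supplied by ethtool, 4 bytes at a
 * time.
 */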
5823static int s2io_ethtool_geeprom(struct net_device *dev,
5824 struct ethtool_eeprom *eeprom, u8 * data_buf)
5825{
5826 u32 i, valid;
5827 u64 data;
5828 struct s2io_nic *sp = dev->priv;
5829
5830 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5831
5832 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5833 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5834
5835 for (i = 0; i < eeprom->len; i += 4) {
5836 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5837 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5838 return -EFAULT;
5839 }
5840 valid = INV(data);
5841 memcpy((data_buf + i), &valid, 4);
5842 }
5843 return 0;
5844}
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858
5859
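/**
 * s2io_ethtool_seeprom - writes the user supplied bytes into the
 * EEPROM after validating the ethtool magic value.
 */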
5860static int s2io_ethtool_seeprom(struct net_device *dev,
5861 struct ethtool_eeprom *eeprom,
5862 u8 * data_buf)
5863{
5864 int len = eeprom->len, cnt = 0;
5865 u64 valid = 0, data;
5866 struct s2io_nic *sp = dev->priv;
5867
5868 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5869 DBG_PRINT(ERR_DBG,
5870 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5871 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5872 eeprom->magic);
5873 return -EFAULT;
5874 }
5875
5876 while (len) {
5877 data = (u32) data_buf[cnt] & 0x000000FF;
5878 if (data) {
5879 valid = (u32) (data << 24);
5880 } else
5881 valid = data;
5882
5883 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5884 DBG_PRINT(ERR_DBG,
5885 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5886 DBG_PRINT(ERR_DBG,
5887 "write into the specified offset\n");
5888 return -EFAULT;
5889 }
5890 cnt++;
5891 len--;
5892 }
5893
5894 return 0;
5895}
5896
5897
5898
5899
5900
5901
5902
5903
5904
5905
5906
5907
5908
5909
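/**
 * s2io_register_test - reads and writes registers in several clock
 * domains of the adapter to verify that they are accessible.
 * Return value: 0 on success, 1 on failure (also stored in *data).
 */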
5910static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5911{
5912 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5913 u64 val64 = 0, exp_val;
5914 int fail = 0;
5915
5916 val64 = readq(&bar0->pif_rd_swapper_fb);
5917 if (val64 != 0x123456789abcdefULL) {
5918 fail = 1;
5919 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5920 }
5921
5922 val64 = readq(&bar0->rmac_pause_cfg);
5923 if (val64 != 0xc000ffff00000000ULL) {
5924 fail = 1;
5925 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5926 }
5927
5928 val64 = readq(&bar0->rx_queue_cfg);
5929 if (sp->device_type == XFRAME_II_DEVICE)
5930 exp_val = 0x0404040404040404ULL;
5931 else
5932 exp_val = 0x0808080808080808ULL;
5933 if (val64 != exp_val) {
5934 fail = 1;
5935 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5936 }
5937
5938 val64 = readq(&bar0->xgxs_efifo_cfg);
5939 if (val64 != 0x000000001923141EULL) {
5940 fail = 1;
5941 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5942 }
5943
5944 val64 = 0x5A5A5A5A5A5A5A5AULL;
5945 writeq(val64, &bar0->xmsi_data);
5946 val64 = readq(&bar0->xmsi_data);
5947 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5948 fail = 1;
5949 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5950 }
5951
5952 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5953 writeq(val64, &bar0->xmsi_data);
5954 val64 = readq(&bar0->xmsi_data);
5955 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5956 fail = 1;
5957 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5958 }
5959
5960 *data = fail;
5961 return fail;
5962}
5963
5964
5965
5966
5967
5968
5969
5970
5971
5972
5973
5974
5975
5976
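/**
 * s2io_eeprom_test - writes test patterns to a couple of EEPROM
 * locations, reads them back to verify, and restores the original
 * contents.  *data is set to 1 on any mismatch.
 */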
5977static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5978{
5979 int fail = 0;
5980 u64 ret_data, org_4F0, org_7F0;
5981 u8 saved_4F0 = 0, saved_7F0 = 0;
5982 struct net_device *dev = sp->dev;
5983
5984
5985
5986
5987
5988 if (sp->device_type == XFRAME_I_DEVICE)
5989 if (!write_eeprom(sp, 0, 0, 3))
5990 fail = 1;
5991
5992
5993 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5994 saved_4F0 = 1;
5995 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5996 saved_7F0 = 1;
5997
5998
5999 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
6000 fail = 1;
6001 if (read_eeprom(sp, 0x4F0, &ret_data))
6002 fail = 1;
6003
6004 if (ret_data != 0x012345) {
6005 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
6006 "Data written %llx Data read %llx\n",
6007 dev->name, (unsigned long long)0x12345,
6008 (unsigned long long)ret_data);
6009 fail = 1;
6010 }
6011
6012
6013 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6014
6015
6016 if (sp->device_type == XFRAME_I_DEVICE)
6017 if (!write_eeprom(sp, 0x07C, 0, 3))
6018 fail = 1;
6019
6020
6021 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6022 fail = 1;
6023 if (read_eeprom(sp, 0x7F0, &ret_data))
6024 fail = 1;
6025
6026 if (ret_data != 0x012345) {
6027 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6028 "Data written %llx Data read %llx\n",
6029 dev->name, (unsigned long long)0x12345,
6030 (unsigned long long)ret_data);
6031 fail = 1;
6032 }
6033
6034
6035 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6036
6037 if (sp->device_type == XFRAME_I_DEVICE) {
6038
6039 if (!write_eeprom(sp, 0x080, 0, 3))
6040 fail = 1;
6041
6042
6043 if (!write_eeprom(sp, 0x0FC, 0, 3))
6044 fail = 1;
6045
6046
6047 if (!write_eeprom(sp, 0x100, 0, 3))
6048 fail = 1;
6049
6050
6051 if (!write_eeprom(sp, 0x4EC, 0, 3))
6052 fail = 1;
6053 }
6054
6055
6056 if (saved_4F0)
6057 write_eeprom(sp, 0x4F0, org_4F0, 3);
6058 if (saved_7F0)
6059 write_eeprom(sp, 0x7F0, org_7F0, 3);
6060
6061 *data = fail;
6062 return fail;
6063}
6064
6065
6066
6067
6068
6069
6070
6071
6072
6073
6074
6075
6076
6077
6078
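/**
 * s2io_bist_test - starts the PCI built-in self test and polls for
 * completion; *data holds the resulting BIST code.
 * Return value: 0 if the test completed, -1 on timeout.
 */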
6079static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6080{
6081 u8 bist = 0;
6082 int cnt = 0, ret = -1;
6083
6084 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6085 bist |= PCI_BIST_START;
6086 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
6087
6088 while (cnt < 20) {
6089 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6090 if (!(bist & PCI_BIST_START)) {
6091 *data = (bist & PCI_BIST_CODE_MASK);
6092 ret = 0;
6093 break;
6094 }
6095 msleep(100);
6096 cnt++;
6097 }
6098
6099 return ret;
6100}
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
6113
6114
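/**
 * s2io_link_test - checks the adapter status register for RMAC
 * local/remote faults; *data is set to 1 when the link is down.
 */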
6115static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6116{
6117 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6118 u64 val64;
6119
6120 val64 = readq(&bar0->adapter_status);
6121 if (!LINK_IS_UP(val64))
6122 *data = 1;
6123 else
6124 *data = 0;
6125
6126 return *data;
6127}
6128
6129
6130
6131
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
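/**
 * s2io_rldram_test - performs a write/read pattern test of the
 * on-board RLDRAM using the memory controller test registers.
 * Return value: 0 on success, 1 on failure (also stored in *data).
 */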
6142static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
6143{
6144 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6145 u64 val64;
6146 int cnt, iteration = 0, test_fail = 0;
6147
6148 val64 = readq(&bar0->adapter_control);
6149 val64 &= ~ADAPTER_ECC_EN;
6150 writeq(val64, &bar0->adapter_control);
6151
6152 val64 = readq(&bar0->mc_rldram_test_ctrl);
6153 val64 |= MC_RLDRAM_TEST_MODE;
6154 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6155
6156 val64 = readq(&bar0->mc_rldram_mrs);
6157 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6158 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6159
6160 val64 |= MC_RLDRAM_MRS_ENABLE;
6161 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6162
6163 while (iteration < 2) {
6164 val64 = 0x55555555aaaa0000ULL;
6165 if (iteration == 1) {
6166 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6167 }
6168 writeq(val64, &bar0->mc_rldram_test_d0);
6169
6170 val64 = 0xaaaa5a5555550000ULL;
6171 if (iteration == 1) {
6172 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6173 }
6174 writeq(val64, &bar0->mc_rldram_test_d1);
6175
6176 val64 = 0x55aaaaaaaa5a0000ULL;
6177 if (iteration == 1) {
6178 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6179 }
6180 writeq(val64, &bar0->mc_rldram_test_d2);
6181
6182 val64 = 0x0000003ffffe0100ULL;
6183 writeq(val64, &bar0->mc_rldram_test_add);
6184
6185 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
6186 MC_RLDRAM_TEST_GO;
6187 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6188
6189 for (cnt = 0; cnt < 5; cnt++) {
6190 val64 = readq(&bar0->mc_rldram_test_ctrl);
6191 if (val64 & MC_RLDRAM_TEST_DONE)
6192 break;
6193 msleep(200);
6194 }
6195
6196 if (cnt == 5)
6197 break;
6198
6199 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6200 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6201
6202 for (cnt = 0; cnt < 5; cnt++) {
6203 val64 = readq(&bar0->mc_rldram_test_ctrl);
6204 if (val64 & MC_RLDRAM_TEST_DONE)
6205 break;
6206 msleep(500);
6207 }
6208
6209 if (cnt == 5)
6210 break;
6211
6212 val64 = readq(&bar0->mc_rldram_test_ctrl);
6213 if (!(val64 & MC_RLDRAM_TEST_PASS))
6214 test_fail = 1;
6215
6216 iteration++;
6217 }
6218
6219 *data = test_fail;
6220
6221
6222 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6223
6224 return test_fail;
6225}
6226
6227
6228
6229
6230
6231
6232
6233
6234
6235
6236
6237
6238
6239
6240
6241
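/**
 * s2io_ethtool_test - ethtool self-test entry point.  Runs the offline
 * tests (register, RLDRAM, EEPROM, BIST) with the interface brought
 * down, or only the link test when invoked online.
 */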
6242static void s2io_ethtool_test(struct net_device *dev,
6243 struct ethtool_test *ethtest,
6244 uint64_t * data)
6245{
6246 struct s2io_nic *sp = dev->priv;
6247 int orig_state = netif_running(sp->dev);
6248
6249 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6250
6251 if (orig_state)
6252 s2io_close(sp->dev);
6253
6254 if (s2io_register_test(sp, &data[0]))
6255 ethtest->flags |= ETH_TEST_FL_FAILED;
6256
6257 s2io_reset(sp);
6258
6259 if (s2io_rldram_test(sp, &data[3]))
6260 ethtest->flags |= ETH_TEST_FL_FAILED;
6261
6262 s2io_reset(sp);
6263
6264 if (s2io_eeprom_test(sp, &data[1]))
6265 ethtest->flags |= ETH_TEST_FL_FAILED;
6266
6267 if (s2io_bist_test(sp, &data[4]))
6268 ethtest->flags |= ETH_TEST_FL_FAILED;
6269
6270 if (orig_state)
6271 s2io_open(sp->dev);
6272
6273 data[2] = 0;
6274 } else {
6275
6276 if (!orig_state) {
6277 DBG_PRINT(ERR_DBG,
6278 "%s: is not up, cannot run test\n",
6279 dev->name);
6280 data[0] = -1;
6281 data[1] = -1;
6282 data[2] = -1;
6283 data[3] = -1;
6284 data[4] = -1;
6285 }
6286
6287 if (s2io_link_test(sp, &data[2]))
6288 ethtest->flags |= ETH_TEST_FL_FAILED;
6289
6290 data[0] = 0;
6291 data[1] = 0;
6292 data[3] = 0;
6293 data[4] = 0;
6294 }
6295}
6296
6297static void s2io_get_ethtool_stats(struct net_device *dev,
6298 struct ethtool_stats *estats,
6299 u64 * tmp_stats)
6300{
6301 int i = 0, k;
6302 struct s2io_nic *sp = dev->priv;
6303 struct stat_block *stat_info = sp->mac_control.stats_info;
6304
6305 s2io_updt_stats(sp);
6306 tmp_stats[i++] =
6307 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
6308 le32_to_cpu(stat_info->tmac_frms);
6309 tmp_stats[i++] =
6310 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6311 le32_to_cpu(stat_info->tmac_data_octets);
6312 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6313 tmp_stats[i++] =
6314 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6315 le32_to_cpu(stat_info->tmac_mcst_frms);
6316 tmp_stats[i++] =
6317 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6318 le32_to_cpu(stat_info->tmac_bcst_frms);
6319 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6320 tmp_stats[i++] =
6321 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6322 le32_to_cpu(stat_info->tmac_ttl_octets);
6323 tmp_stats[i++] =
6324 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6325 le32_to_cpu(stat_info->tmac_ucst_frms);
6326 tmp_stats[i++] =
6327 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6328 le32_to_cpu(stat_info->tmac_nucst_frms);
6329 tmp_stats[i++] =
6330 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6331 le32_to_cpu(stat_info->tmac_any_err_frms);
6332 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6333 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6334 tmp_stats[i++] =
6335 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6336 le32_to_cpu(stat_info->tmac_vld_ip);
6337 tmp_stats[i++] =
6338 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6339 le32_to_cpu(stat_info->tmac_drop_ip);
6340 tmp_stats[i++] =
6341 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6342 le32_to_cpu(stat_info->tmac_icmp);
6343 tmp_stats[i++] =
6344 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6345 le32_to_cpu(stat_info->tmac_rst_tcp);
6346 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6347 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6348 le32_to_cpu(stat_info->tmac_udp);
6349 tmp_stats[i++] =
6350 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6351 le32_to_cpu(stat_info->rmac_vld_frms);
6352 tmp_stats[i++] =
6353 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6354 le32_to_cpu(stat_info->rmac_data_octets);
6355 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6356 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6357 tmp_stats[i++] =
6358 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6359 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6360 tmp_stats[i++] =
6361 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6362 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6363 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6364 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6365 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6366 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6367 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6368 tmp_stats[i++] =
6369 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6370 le32_to_cpu(stat_info->rmac_ttl_octets);
6371 tmp_stats[i++] =
6372 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6373 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6374 tmp_stats[i++] =
6375 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6376 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6377 tmp_stats[i++] =
6378 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6379 le32_to_cpu(stat_info->rmac_discarded_frms);
6380 tmp_stats[i++] =
6381 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6382 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6383 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6384 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6385 tmp_stats[i++] =
6386 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6387 le32_to_cpu(stat_info->rmac_usized_frms);
6388 tmp_stats[i++] =
6389 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6390 le32_to_cpu(stat_info->rmac_osized_frms);
6391 tmp_stats[i++] =
6392 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6393 le32_to_cpu(stat_info->rmac_frag_frms);
6394 tmp_stats[i++] =
6395 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6396 le32_to_cpu(stat_info->rmac_jabber_frms);
6397 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6398 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6399 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6400 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6401 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6402 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6403 tmp_stats[i++] =
6404 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6405 le32_to_cpu(stat_info->rmac_ip);
6406 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6407 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6408 tmp_stats[i++] =
6409 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6410 le32_to_cpu(stat_info->rmac_drop_ip);
6411 tmp_stats[i++] =
6412 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6413 le32_to_cpu(stat_info->rmac_icmp);
6414 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6415 tmp_stats[i++] =
6416 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6417 le32_to_cpu(stat_info->rmac_udp);
6418 tmp_stats[i++] =
6419 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6420 le32_to_cpu(stat_info->rmac_err_drp_udp);
6421 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6422 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6423 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6424 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6425 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6426 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6427 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6428 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6429 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6430 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6431 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6432 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6433 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6434 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6435 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6436 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6437 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6438 tmp_stats[i++] =
6439 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6440 le32_to_cpu(stat_info->rmac_pause_cnt);
6441 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6442 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6443 tmp_stats[i++] =
6444 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6445 le32_to_cpu(stat_info->rmac_accepted_ip);
6446 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6447 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6448 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6449 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6450 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6451 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6452 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6453 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6454 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6455 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6456 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6457 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6458 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6459 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6460 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6461 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6462 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6463 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6464 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6465
6466
6467 if (sp->device_type == XFRAME_II_DEVICE) {
6468 tmp_stats[i++] =
6469 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6470 tmp_stats[i++] =
6471 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6472 tmp_stats[i++] =
6473 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6474 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6475 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6476 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6477 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6478 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6479 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6480 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6481 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6482 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6483 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6484 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6485 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6486 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6487 }
6488
6489 tmp_stats[i++] = 0;
6490 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6491 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6492 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6493 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6494 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6495 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6496 for (k = 0; k < MAX_RX_RINGS; k++)
6497 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6498 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6499 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6500 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6501 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6502 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6503 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6504 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6505 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6506 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6507 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6508 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6509 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6510 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6511 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6512 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6513 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6514 if (stat_info->sw_stat.num_aggregations) {
6515 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6516 int count = 0;
6517
6518
6519
6520
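/*
 * Compute the average number of packets aggregated per LRO session.
 * The quotient is obtained by repeated subtraction, which avoids a
 * 64-bit divide.
 */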
6521 while (tmp >= stat_info->sw_stat.num_aggregations) {
6522 tmp -= stat_info->sw_stat.num_aggregations;
6523 count++;
6524 }
6525 tmp_stats[i++] = count;
6526 }
6527 else
6528 tmp_stats[i++] = 0;
6529 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6530 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6531 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6532 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6533 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6534 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6535 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6536 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6537 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6538
6539 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6540 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6541 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6542 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6543 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6544
6545 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6546 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6547 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6548 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6549 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6550 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6551 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6552 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6553 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6554 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6555 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6556 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6557 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6558 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6559 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6560 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6561 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6562 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6563 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6564 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6565 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6566 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6567 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6568 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6569 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6570 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6571}
6572
6573static int s2io_ethtool_get_regs_len(struct net_device *dev)
6574{
6575 return (XENA_REG_SPACE);
6576}
6577
6578
6579static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6580{
6581 struct s2io_nic *sp = dev->priv;
6582
6583 return (sp->rx_csum);
6584}
6585
6586static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6587{
6588 struct s2io_nic *sp = dev->priv;
6589
6590 if (data)
6591 sp->rx_csum = 1;
6592 else
6593 sp->rx_csum = 0;
6594
6595 return 0;
6596}
6597
6598static int s2io_get_eeprom_len(struct net_device *dev)
6599{
6600 return (XENA_EEPROM_SPACE);
6601}
6602
6603static int s2io_get_sset_count(struct net_device *dev, int sset)
6604{
6605 struct s2io_nic *sp = dev->priv;
6606
6607 switch (sset) {
6608 case ETH_SS_TEST:
6609 return S2IO_TEST_LEN;
6610 case ETH_SS_STATS:
6611 switch(sp->device_type) {
6612 case XFRAME_I_DEVICE:
6613 return XFRAME_I_STAT_LEN;
6614 case XFRAME_II_DEVICE:
6615 return XFRAME_II_STAT_LEN;
6616 default:
6617 return 0;
6618 }
6619 default:
6620 return -EOPNOTSUPP;
6621 }
6622}
6623
6624static void s2io_ethtool_get_strings(struct net_device *dev,
6625 u32 stringset, u8 * data)
6626{
6627 int stat_size = 0;
6628 struct s2io_nic *sp = dev->priv;
6629
6630 switch (stringset) {
6631 case ETH_SS_TEST:
6632 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6633 break;
6634 case ETH_SS_STATS:
6635 stat_size = sizeof(ethtool_xena_stats_keys);
6636 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6637 if (sp->device_type == XFRAME_II_DEVICE) {
6638 memcpy(data + stat_size,
6639 &ethtool_enhanced_stats_keys,
6640 sizeof(ethtool_enhanced_stats_keys));
6641 stat_size += sizeof(ethtool_enhanced_stats_keys);
6642 }
6643
6644 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6645 sizeof(ethtool_driver_stats_keys));
6646 }
6647}
6648
6649static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6650{
6651 if (data)
6652 dev->features |= NETIF_F_IP_CSUM;
6653 else
6654 dev->features &= ~NETIF_F_IP_CSUM;
6655
6656 return 0;
6657}
6658
6659static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6660{
6661 return (dev->features & NETIF_F_TSO) != 0;
6662}
6663static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6664{
6665 if (data)
6666 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6667 else
6668 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6669
6670 return 0;
6671}
6672
6673static const struct ethtool_ops netdev_ethtool_ops = {
6674 .get_settings = s2io_ethtool_gset,
6675 .set_settings = s2io_ethtool_sset,
6676 .get_drvinfo = s2io_ethtool_gdrvinfo,
6677 .get_regs_len = s2io_ethtool_get_regs_len,
6678 .get_regs = s2io_ethtool_gregs,
6679 .get_link = ethtool_op_get_link,
6680 .get_eeprom_len = s2io_get_eeprom_len,
6681 .get_eeprom = s2io_ethtool_geeprom,
6682 .set_eeprom = s2io_ethtool_seeprom,
6683 .get_ringparam = s2io_ethtool_gringparam,
6684 .get_pauseparam = s2io_ethtool_getpause_data,
6685 .set_pauseparam = s2io_ethtool_setpause_data,
6686 .get_rx_csum = s2io_ethtool_get_rx_csum,
6687 .set_rx_csum = s2io_ethtool_set_rx_csum,
6688 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6689 .set_sg = ethtool_op_set_sg,
6690 .get_tso = s2io_ethtool_op_get_tso,
6691 .set_tso = s2io_ethtool_op_set_tso,
6692 .set_ufo = ethtool_op_set_ufo,
6693 .self_test = s2io_ethtool_test,
6694 .get_strings = s2io_ethtool_get_strings,
6695 .phys_id = s2io_ethtool_idnic,
6696 .get_ethtool_stats = s2io_get_ethtool_stats,
6697 .get_sset_count = s2io_get_sset_count,
6698};
6699
6700
6701
6702
6703
6704
6705
6706
6707
6708
6709
6710
6711
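/**
 * s2io_ioctl - entry point for the driver's ioctl handler.
 * @dev: device pointer.
 * @rq: ioctl-specific request structure.
 * @cmd: command to be executed.
 *
 * Description: No private ioctls are currently supported, so this
 * always returns -EOPNOTSUPP.
 */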
6712static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6713{
6714 return -EOPNOTSUPP;
6715}
6716
6717
6718
6719
6720
6721
6722
6723
6724
6725
6726
6727
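/**
 * s2io_change_mtu - entry point to change the MTU of the device.
 * @dev: device pointer.
 * @new_mtu: requested MTU.
 *
 * Description: Validates the new MTU against MIN_MTU and S2IO_JUMBO_SIZE.
 * If the interface is running, the card is brought down and back up so the
 * new size takes effect; otherwise only the RMAC maximum payload length
 * register is updated.
 *
 * Return value: 0 on success, a negative errno on failure.
 */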
6728static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6729{
6730 struct s2io_nic *sp = dev->priv;
6731 int ret = 0;
6732
6733 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6734 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6735 dev->name);
6736 return -EPERM;
6737 }
6738
6739 dev->mtu = new_mtu;
6740 if (netif_running(dev)) {
6741 s2io_stop_all_tx_queue(sp);
6742 s2io_card_down(sp);
6743 ret = s2io_card_up(sp);
6744 if (ret) {
6745 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6746 __func__);
6747 return ret;
6748 }
6749 s2io_wake_all_tx_queue(sp);
6750 } else {
6751 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6752 u64 val64 = new_mtu;
6753
6754 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6755 }
6756
6757 return ret;
6758}
6759
6760
6761
6762
6763
6764
6765
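/**
 * s2io_set_link - work queue function to bring the link state in sync.
 * @work: work struct embedded in the s2io_nic structure.
 *
 * Description: Reads the adapter status register and, depending on the
 * link state, enables the adapter and turns the link LED (or the
 * GPIO-based indicator on cards with faulty link indicators) on or off,
 * before reporting the new state through s2io_link().
 */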
6766static void s2io_set_link(struct work_struct *work)
6767{
6768 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6769 struct net_device *dev = nic->dev;
6770 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6771 register u64 val64;
6772 u16 subid;
6773
6774 rtnl_lock();
6775
6776 if (!netif_running(dev))
6777 goto out_unlock;
6778
6779 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6780
6781 goto out_unlock;
6782 }
6783
6784 subid = nic->pdev->subsystem_device;
6785 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6786
6787
6788
6789
6790 msleep(100);
6791 }
6792
6793 val64 = readq(&bar0->adapter_status);
6794 if (LINK_IS_UP(val64)) {
6795 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6796 if (verify_xena_quiescence(nic)) {
6797 val64 = readq(&bar0->adapter_control);
6798 val64 |= ADAPTER_CNTL_EN;
6799 writeq(val64, &bar0->adapter_control);
6800 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6801 nic->device_type, subid)) {
6802 val64 = readq(&bar0->gpio_control);
6803 val64 |= GPIO_CTRL_GPIO_0;
6804 writeq(val64, &bar0->gpio_control);
6805 val64 = readq(&bar0->gpio_control);
6806 } else {
6807 val64 |= ADAPTER_LED_ON;
6808 writeq(val64, &bar0->adapter_control);
6809 }
6810 nic->device_enabled_once = TRUE;
6811 } else {
6812 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6813 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6814 s2io_stop_all_tx_queue(nic);
6815 }
6816 }
6817 val64 = readq(&bar0->adapter_control);
6818 val64 |= ADAPTER_LED_ON;
6819 writeq(val64, &bar0->adapter_control);
6820 s2io_link(nic, LINK_UP);
6821 } else {
6822 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6823 subid)) {
6824 val64 = readq(&bar0->gpio_control);
6825 val64 &= ~GPIO_CTRL_GPIO_0;
6826 writeq(val64, &bar0->gpio_control);
6827 val64 = readq(&bar0->gpio_control);
6828 }
6829
6830 val64 = readq(&bar0->adapter_control);
6831 val64 &= ~ADAPTER_LED_ON;
6832 writeq(val64, &bar0->adapter_control);
6833 s2io_link(nic, LINK_DOWN);
6834 }
6835 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6836
6837out_unlock:
6838 rtnl_unlock();
6839}
6840
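/*
 * set_rxd_buffer_pointer - (re)populate the buffer pointers of a receive
 * descriptor.  In 1-buffer mode a single skb is allocated and DMA-mapped;
 * in 2-buffer mode the skb data, the ba_0 and the ba_1 areas are mapped
 * separately.  Returns 0 on success and -ENOMEM if allocation or DMA
 * mapping fails.
 */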
6841static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6842 struct buffAdd *ba,
6843 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6844 u64 *temp2, int size)
6845{
6846 struct net_device *dev = sp->dev;
6847 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6848
6849 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6850 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6851
6852 if (*skb) {
6853 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6854
6855
6856
6857
6858
6859 rxdp1->Buffer0_ptr = *temp0;
6860 } else {
6861 *skb = dev_alloc_skb(size);
6862 if (!(*skb)) {
6863 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6864 DBG_PRINT(INFO_DBG, "memory to allocate ");
6865 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6866 sp->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
6868 return -ENOMEM ;
6869 }
6870 sp->mac_control.stats_info->sw_stat.mem_allocated
6871 += (*skb)->truesize;
6872
6873
6874
6875
6876 rxdp1->Buffer0_ptr = *temp0 =
6877 pci_map_single( sp->pdev, (*skb)->data,
6878 size - NET_IP_ALIGN,
6879 PCI_DMA_FROMDEVICE);
6880 if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6881 goto memalloc_failed;
6882 rxdp->Host_Control = (unsigned long) (*skb);
6883 }
6884 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6885 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6886
6887 if (*skb) {
6888 rxdp3->Buffer2_ptr = *temp2;
6889 rxdp3->Buffer0_ptr = *temp0;
6890 rxdp3->Buffer1_ptr = *temp1;
6891 } else {
6892 *skb = dev_alloc_skb(size);
6893 if (!(*skb)) {
6894 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6895 DBG_PRINT(INFO_DBG, "memory to allocate ");
6896 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6897 sp->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
6899 return -ENOMEM;
6900 }
6901 sp->mac_control.stats_info->sw_stat.mem_allocated
6902 += (*skb)->truesize;
6903 rxdp3->Buffer2_ptr = *temp2 =
6904 pci_map_single(sp->pdev, (*skb)->data,
6905 dev->mtu + 4,
6906 PCI_DMA_FROMDEVICE);
6907 if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6908 goto memalloc_failed;
6909 rxdp3->Buffer0_ptr = *temp0 =
6910 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6911 PCI_DMA_FROMDEVICE);
6912 if (pci_dma_mapping_error(sp->pdev,
6913 rxdp3->Buffer0_ptr)) {
6914 pci_unmap_single (sp->pdev,
6915 (dma_addr_t)rxdp3->Buffer2_ptr,
6916 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6917 goto memalloc_failed;
6918 }
6919 rxdp->Host_Control = (unsigned long) (*skb);
6920
6921
6922 rxdp3->Buffer1_ptr = *temp1 =
6923 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6924 PCI_DMA_FROMDEVICE);
6925 if (pci_dma_mapping_error(sp->pdev,
6926 rxdp3->Buffer1_ptr)) {
6927 pci_unmap_single (sp->pdev,
6928 (dma_addr_t)rxdp3->Buffer0_ptr,
6929 BUF0_LEN, PCI_DMA_FROMDEVICE);
6930 pci_unmap_single (sp->pdev,
6931 (dma_addr_t)rxdp3->Buffer2_ptr,
6932 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6933 goto memalloc_failed;
6934 }
6935 }
6936 }
6937 return 0;
6938 memalloc_failed:
6939 stats->pci_map_fail_cnt++;
6940 stats->mem_freed += (*skb)->truesize;
6941 dev_kfree_skb(*skb);
6942 return -ENOMEM;
6943}
6944
6945static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6946 int size)
6947{
6948 struct net_device *dev = sp->dev;
6949 if (sp->rxd_mode == RXD_MODE_1) {
6950 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6951 } else if (sp->rxd_mode == RXD_MODE_3B) {
6952 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6953 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6954 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6955 }
6956}
6957
6958static int rxd_owner_bit_reset(struct s2io_nic *sp)
6959{
6960 int i, j, k, blk_cnt = 0, size;
6961 struct mac_info * mac_control = &sp->mac_control;
6962 struct config_param *config = &sp->config;
6963 struct net_device *dev = sp->dev;
6964 struct RxD_t *rxdp = NULL;
6965 struct sk_buff *skb = NULL;
6966 struct buffAdd *ba = NULL;
6967 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6968
6969
6970 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6971 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6972 if (sp->rxd_mode == RXD_MODE_1)
6973 size += NET_IP_ALIGN;
6974 else if (sp->rxd_mode == RXD_MODE_3B)
6975 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6976
6977 for (i = 0; i < config->rx_ring_num; i++) {
6978 blk_cnt = config->rx_cfg[i].num_rxd /
6979 (rxd_count[sp->rxd_mode] +1);
6980
6981 for (j = 0; j < blk_cnt; j++) {
6982 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6983 rxdp = mac_control->rings[i].
6984 rx_blocks[j].rxds[k].virt_addr;
6985 if(sp->rxd_mode == RXD_MODE_3B)
6986 ba = &mac_control->rings[i].ba[j][k];
6987 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6988 &skb,(u64 *)&temp0_64,
6989 (u64 *)&temp1_64,
6990 (u64 *)&temp2_64,
6991 size) == -ENOMEM) {
6992 return 0;
6993 }
6994
6995 set_rxd_buffer_size(sp, rxdp, size);
6996 wmb();
6997
6998 rxdp->Control_1 |= RXD_OWN_XENA;
6999 }
7000 }
7001 }
7002 return 0;
7003
7004}
7005
7006static int s2io_add_isr(struct s2io_nic * sp)
7007{
7008 int ret = 0;
7009 struct net_device *dev = sp->dev;
7010 int err = 0;
7011
7012 if (sp->config.intr_type == MSI_X)
7013 ret = s2io_enable_msi_x(sp);
7014 if (ret) {
7015 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
7016 sp->config.intr_type = INTA;
7017 }
7018
7019
7020 store_xmsi_data(sp);
7021
7022
7023 if (sp->config.intr_type == MSI_X) {
7024 int i, msix_rx_cnt = 0;
7025
7026 for (i = 0; i < sp->num_entries; i++) {
7027 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
7028 if (sp->s2io_entries[i].type ==
7029 MSIX_RING_TYPE) {
7030 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7031 dev->name, i);
7032 err = request_irq(sp->entries[i].vector,
7033 s2io_msix_ring_handle, 0,
7034 sp->desc[i],
7035 sp->s2io_entries[i].arg);
7036 } else if (sp->s2io_entries[i].type ==
7037 MSIX_ALARM_TYPE) {
7038 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7039 dev->name, i);
7040 err = request_irq(sp->entries[i].vector,
7041 s2io_msix_fifo_handle, 0,
7042 sp->desc[i],
7043 sp->s2io_entries[i].arg);
7044
7045 }
7046
7047 if (!(sp->msix_info[i].addr &&
7048 sp->msix_info[i].data)) {
7049 DBG_PRINT(ERR_DBG,
7050 "%s @Addr:0x%llx Data:0x%llx\n",
7051 sp->desc[i],
7052 (unsigned long long)
7053 sp->msix_info[i].addr,
7054 (unsigned long long)
7055 ntohl(sp->msix_info[i].data));
7056 } else
7057 msix_rx_cnt++;
7058 if (err) {
7059 remove_msix_isr(sp);
7060
7061 DBG_PRINT(ERR_DBG,
7062 "%s:MSI-X-%d registration "
7063 "failed\n", dev->name, i);
7064
7065 DBG_PRINT(ERR_DBG,
7066 "%s: Defaulting to INTA\n",
7067 dev->name);
7068 sp->config.intr_type = INTA;
7069 break;
7070 }
7071 sp->s2io_entries[i].in_use =
7072 MSIX_REGISTERED_SUCCESS;
7073 }
7074 }
7075 if (!err) {
7076 printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
7077 --msix_rx_cnt);
7078 DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
7079 " through alarm vector\n");
7080 }
7081 }
7082 if (sp->config.intr_type == INTA) {
7083 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
7084 sp->name, dev);
7085 if (err) {
7086 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7087 dev->name);
7088 return -1;
7089 }
7090 }
7091 return 0;
7092}
7093static void s2io_rem_isr(struct s2io_nic * sp)
7094{
7095 if (sp->config.intr_type == MSI_X)
7096 remove_msix_isr(sp);
7097 else
7098 remove_inta_isr(sp);
7099}
7100
7101static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7102{
7103 int cnt = 0;
7104 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7105 register u64 val64 = 0;
7106 struct config_param *config;
7107 config = &sp->config;
7108
7109 if (!is_s2io_card_up(sp))
7110 return;
7111
7112 del_timer_sync(&sp->alarm_timer);
7113
7114 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
7115 msleep(50);
7116 }
7117 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7118
7119
7120 if (sp->config.napi) {
7121 int off = 0;
7122 if (config->intr_type == MSI_X) {
7123 for (; off < sp->config.rx_ring_num; off++)
7124 napi_disable(&sp->mac_control.rings[off].napi);
7125 }
7126 else
7127 napi_disable(&sp->napi);
7128 }
7129
7130
7131 if (do_io)
7132 stop_nic(sp);
7133
7134 s2io_rem_isr(sp);
7135
7136
7137 s2io_link(sp, LINK_DOWN);
7138
7139
7140 while(do_io) {
7141
7142
7143
7144
7145
7146
7147
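/*
 * Reset the RxD ownership bits back to the NIC and wait (up to ten
 * 50 ms intervals) for the adapter to become quiescent before it is
 * reset.
 */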
7148 rxd_owner_bit_reset(sp);
7149
7150 val64 = readq(&bar0->adapter_status);
7151 if (verify_xena_quiescence(sp)) {
7152 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
7153 break;
7154 }
7155
7156 msleep(50);
7157 cnt++;
7158 if (cnt == 10) {
7159 DBG_PRINT(ERR_DBG,
7160 "s2io_close: Device not Quiescent ");
7161 DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
7162 (unsigned long long) val64);
7163 break;
7164 }
7165 }
7166 if (do_io)
7167 s2io_reset(sp);
7168
7169
7170 free_tx_buffers(sp);
7171
7172
7173 free_rx_buffers(sp);
7174
7175 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7176}
7177
7178static void s2io_card_down(struct s2io_nic * sp)
7179{
7180 do_s2io_card_down(sp, 1);
7181}
7182
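/*
 * s2io_card_up - initialises the adapter and brings the interface up:
 * programs the hardware, fills the receive rings, enables NAPI, restores
 * the multicast state, starts the NIC, registers the interrupt handler(s)
 * and finally enables interrupts.  Returns 0 on success or a negative
 * errno on failure.
 */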
7183static int s2io_card_up(struct s2io_nic * sp)
7184{
7185 int i, ret = 0;
7186 struct mac_info *mac_control;
7187 struct config_param *config;
7188 struct net_device *dev = (struct net_device *) sp->dev;
7189 u16 interruptible;
7190
7191
7192 ret = init_nic(sp);
7193 if (ret != 0) {
7194 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7195 dev->name);
7196 if (ret != -EIO)
7197 s2io_reset(sp);
7198 return ret;
7199 }
7200
7201
7202
7203
7204
7205 mac_control = &sp->mac_control;
7206 config = &sp->config;
7207
7208 for (i = 0; i < config->rx_ring_num; i++) {
7209 mac_control->rings[i].mtu = dev->mtu;
7210 ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
7211 if (ret) {
7212 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7213 dev->name);
7214 s2io_reset(sp);
7215 free_rx_buffers(sp);
7216 return -ENOMEM;
7217 }
7218 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7219 mac_control->rings[i].rx_bufs_left);
7220 }
7221
7222
7223 if (config->napi) {
7224 int i;
7225 if (config->intr_type == MSI_X) {
7226 for (i = 0; i < sp->config.rx_ring_num; i++)
7227 napi_enable(&sp->mac_control.rings[i].napi);
7228 } else {
7229 napi_enable(&sp->napi);
7230 }
7231 }
7232
7233
7234 if (sp->promisc_flg)
7235 sp->promisc_flg = 0;
7236 if (sp->m_cast_flg) {
7237 sp->m_cast_flg = 0;
7238 sp->all_multi_pos= 0;
7239 }
7240
7241
7242 s2io_set_multicast(dev);
7243
7244 if (sp->lro) {
7245
7246 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7247
7248 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7249 sp->lro_max_aggr_per_sess = lro_max_pkts;
7250 }
7251
7252
7253 if (start_nic(sp)) {
7254 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7255 s2io_reset(sp);
7256 free_rx_buffers(sp);
7257 return -ENODEV;
7258 }
7259
7260
7261 if (s2io_add_isr(sp) != 0) {
7262 if (sp->config.intr_type == MSI_X)
7263 s2io_rem_isr(sp);
7264 s2io_reset(sp);
7265 free_rx_buffers(sp);
7266 return -ENODEV;
7267 }
7268
7269 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7270
7271 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7272
7273
7274 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7275 if (sp->config.intr_type != INTA) {
7276 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7277 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7278 } else {
7279 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7280 interruptible |= TX_PIC_INTR;
7281 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7282 }
7283
7284 return 0;
7285}
7286
7287
7288
7289
7290
7291
7292
7293
7294
7295
7296
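/**
 * s2io_restart_nic - reset the adapter from process context.
 * @work: work struct embedded in the s2io_nic structure.
 *
 * Description: Scheduled by the Tx watchdog; brings the card down and
 * back up under rtnl_lock to recover from a hung transmitter.
 */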
7297static void s2io_restart_nic(struct work_struct *work)
7298{
7299 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7300 struct net_device *dev = sp->dev;
7301
7302 rtnl_lock();
7303
7304 if (!netif_running(dev))
7305 goto out_unlock;
7306
7307 s2io_card_down(sp);
7308 if (s2io_card_up(sp)) {
7309 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7310 dev->name);
7311 }
7312 s2io_wake_all_tx_queue(sp);
7313 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7314 dev->name);
7315out_unlock:
7316 rtnl_unlock();
7317}
7318
7319
7320
7321
7322
7323
7324
7325
7326
7327
7328
7329
7330
7331
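/**
 * s2io_tx_watchdog - watchdog invoked when a Tx queue stalls.
 * @dev: device pointer.
 *
 * Description: Called by the networking core when transmission has not
 * completed within dev->watchdog_timeo.  If the carrier is up, the reset
 * task is scheduled and the watchdog and soft-reset counters are
 * incremented.
 */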
7332static void s2io_tx_watchdog(struct net_device *dev)
7333{
7334 struct s2io_nic *sp = dev->priv;
7335
7336 if (netif_carrier_ok(dev)) {
7337 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7338 schedule_work(&sp->rst_timer_task);
7339 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7340 }
7341}
7342
7343
7344
7345
7346
7347
7348
7349
7350
7351
7352
7353
7354
7355
7356
7357
7358
7359
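/**
 * rx_osm_handler - per-descriptor receive processing.
 * @ring_data: ring that received the frame.
 * @rxdp: descriptor describing the received frame.
 *
 * Description: Checks the transfer code for receive errors, accounts the
 * frame, verifies the L3/L4 checksums and either hands the skb to the LRO
 * engine for aggregation or passes it straight up the stack.
 */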
7360static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7361{
7362 struct s2io_nic *sp = ring_data->nic;
7363 struct net_device *dev = (struct net_device *) ring_data->dev;
7364 struct sk_buff *skb = (struct sk_buff *)
7365 ((unsigned long) rxdp->Host_Control);
7366 int ring_no = ring_data->ring_no;
7367 u16 l3_csum, l4_csum;
7368 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7369 struct lro *lro;
7370 u8 err_mask;
7371
7372 skb->dev = dev;
7373
7374 if (err) {
7375
7376 if (err & 0x1) {
7377 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7378 }
7379 err_mask = err >> 48;
7380 switch(err_mask) {
7381 case 1:
7382 sp->mac_control.stats_info->sw_stat.
7383 rx_parity_err_cnt++;
7384 break;
7385
7386 case 2:
7387 sp->mac_control.stats_info->sw_stat.
7388 rx_abort_cnt++;
7389 break;
7390
7391 case 3:
7392 sp->mac_control.stats_info->sw_stat.
7393 rx_parity_abort_cnt++;
7394 break;
7395
7396 case 4:
7397 sp->mac_control.stats_info->sw_stat.
7398 rx_rda_fail_cnt++;
7399 break;
7400
7401 case 5:
7402 sp->mac_control.stats_info->sw_stat.
7403 rx_unkn_prot_cnt++;
7404 break;
7405
7406 case 6:
7407 sp->mac_control.stats_info->sw_stat.
7408 rx_fcs_err_cnt++;
7409 break;
7410
7411 case 7:
7412 sp->mac_control.stats_info->sw_stat.
7413 rx_buf_size_err_cnt++;
7414 break;
7415
7416 case 8:
7417 sp->mac_control.stats_info->sw_stat.
7418 rx_rxd_corrupt_cnt++;
7419 break;
7420
7421 case 15:
7422 sp->mac_control.stats_info->sw_stat.
7423 rx_unkn_err_cnt++;
7424 break;
7425 }
7426
7427
7428
7429
7430
7431
7432
7433 if (err_mask != 0x5) {
7434 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7435 dev->name, err_mask);
7436 dev->stats.rx_crc_errors++;
7437 sp->mac_control.stats_info->sw_stat.mem_freed
7438 += skb->truesize;
7439 dev_kfree_skb(skb);
7440 ring_data->rx_bufs_left -= 1;
7441 rxdp->Host_Control = 0;
7442 return 0;
7443 }
7444 }
7445
7446
7447 ring_data->rx_packets++;
7448 rxdp->Host_Control = 0;
7449 if (sp->rxd_mode == RXD_MODE_1) {
7450 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7451
7452 ring_data->rx_bytes += len;
7453 skb_put(skb, len);
7454
7455 } else if (sp->rxd_mode == RXD_MODE_3B) {
7456 int get_block = ring_data->rx_curr_get_info.block_index;
7457 int get_off = ring_data->rx_curr_get_info.offset;
7458 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7459 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7460 unsigned char *buff = skb_push(skb, buf0_len);
7461
7462 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7463 ring_data->rx_bytes += buf0_len + buf2_len;
7464 memcpy(buff, ba->ba_0, buf0_len);
7465 skb_put(skb, buf2_len);
7466 }
7467
7468 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
7469 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7470 (sp->rx_csum)) {
7471 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7472 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7473 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7474
7475
7476
7477
7478
7479 skb->ip_summed = CHECKSUM_UNNECESSARY;
7480 if (ring_data->lro) {
7481 u32 tcp_len;
7482 u8 *tcp;
7483 int ret = 0;
7484
7485 ret = s2io_club_tcp_session(ring_data,
7486 skb->data, &tcp, &tcp_len, &lro,
7487 rxdp, sp);
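/*
 * s2io_club_tcp_session() return codes:
 * 3 - begin a new session, 1 - packet appended to an existing
 * session, 4 - appended and the session flushed, 2 - flush the
 * existing session and send this packet up separately,
 * 0/-1/5 - not aggregatable, send up normally.
 */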
7488 switch (ret) {
7489 case 3:
7490 lro->parent = skb;
7491 goto aggregate;
7492 case 1:
7493 {
7494 lro_append_pkt(sp, lro,
7495 skb, tcp_len);
7496 goto aggregate;
7497 }
7498 case 4:
7499 {
7500 lro_append_pkt(sp, lro,
7501 skb, tcp_len);
7502 queue_rx_frame(lro->parent,
7503 lro->vlan_tag);
7504 clear_lro_session(lro);
7505 sp->mac_control.stats_info->
7506 sw_stat.flush_max_pkts++;
7507 goto aggregate;
7508 }
7509 case 2:
7510 lro->parent->data_len =
7511 lro->frags_len;
7512 sp->mac_control.stats_info->
7513 sw_stat.sending_both++;
7514 queue_rx_frame(lro->parent,
7515 lro->vlan_tag);
7516 clear_lro_session(lro);
7517 goto send_up;
7518 case 0:
7519 case -1:
7520
7521
7522 case 5:
7523
7524
7525
7526 break;
7527 default:
7528 DBG_PRINT(ERR_DBG,
7529 "%s: Samadhana!!\n",
7530 __func__);
7531 BUG();
7532 }
7533 }
7534 } else {
7535
7536
7537
7538
7539 skb->ip_summed = CHECKSUM_NONE;
7540 }
7541 } else
7542 skb->ip_summed = CHECKSUM_NONE;
7543
7544 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7545send_up:
7546 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7547 dev->last_rx = jiffies;
7548aggregate:
7549 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7550 return SUCCESS;
7551}
7552
7553
7554
7555
7556
7557
7558
7559
7560
7561
7562
7563
7564
7565
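/**
 * s2io_link - handle a link state change.
 * @sp: private member of the device structure.
 * @link: the new link state (LINK_UP or LINK_DOWN).
 *
 * Description: Updates the carrier state, stops or wakes the Tx queues,
 * and maintains the link up/down time and count statistics.
 */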
7566static void s2io_link(struct s2io_nic * sp, int link)
7567{
7568 struct net_device *dev = (struct net_device *) sp->dev;
7569
7570 if (link != sp->last_link_state) {
7571 init_tti(sp, link);
7572 if (link == LINK_DOWN) {
7573 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7574 s2io_stop_all_tx_queue(sp);
7575 netif_carrier_off(dev);
7576 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7577 sp->mac_control.stats_info->sw_stat.link_up_time =
7578 jiffies - sp->start_time;
7579 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7580 } else {
7581 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7582 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7583 sp->mac_control.stats_info->sw_stat.link_down_time =
7584 jiffies - sp->start_time;
7585 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7586 netif_carrier_on(dev);
7587 s2io_wake_all_tx_queue(sp);
7588 }
7589 }
7590 sp->last_link_state = link;
7591 sp->start_time = jiffies;
7592}
7593
7594
7595
7596
7597
7598
7599
7600
7601
7602
7603
7604
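/*
 * s2io_init_pci - enable data parity error recovery in the PCI-X command
 * register and parity error reporting in the PCI command register,
 * reading each register back to flush the writes.
 */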
7605static void s2io_init_pci(struct s2io_nic * sp)
7606{
7607 u16 pci_cmd = 0, pcix_cmd = 0;
7608
7609
7610 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7611 &(pcix_cmd));
7612 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7613 (pcix_cmd | 1));
7614 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7615 &(pcix_cmd));
7616
7617
7618 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7619 pci_write_config_word(sp->pdev, PCI_COMMAND,
7620 (pci_cmd | PCI_COMMAND_PARITY));
7621 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7622}
7623
7624static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7625 u8 *dev_multiq)
7626{
7627 if ((tx_fifo_num > MAX_TX_FIFOS) ||
7628 (tx_fifo_num < 1)) {
7629 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7630 "(%d) not supported\n", tx_fifo_num);
7631
7632 if (tx_fifo_num < 1)
7633 tx_fifo_num = 1;
7634 else
7635 tx_fifo_num = MAX_TX_FIFOS;
7636
7637 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7638 DBG_PRINT(ERR_DBG, "tx fifos\n");
7639 }
7640
7641 if (multiq)
7642 *dev_multiq = multiq;
7643
7644 if (tx_steering_type && (1 == tx_fifo_num)) {
7645 if (tx_steering_type != TX_DEFAULT_STEERING)
7646 DBG_PRINT(ERR_DBG,
7647 "s2io: Tx steering is not supported with "
7648 "one fifo. Disabling Tx steering.\n");
7649 tx_steering_type = NO_STEERING;
7650 }
7651
7652 if ((tx_steering_type < NO_STEERING) ||
7653 (tx_steering_type > TX_DEFAULT_STEERING)) {
7654 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7655 "supported\n");
7656 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7657 tx_steering_type = NO_STEERING;
7658 }
7659
7660 if (rx_ring_num > MAX_RX_RINGS) {
7661 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7662 "supported\n");
7663 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7664 MAX_RX_RINGS);
7665 rx_ring_num = MAX_RX_RINGS;
7666 }
7667
7668 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7669 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7670 "Defaulting to INTA\n");
7671 *dev_intr_type = INTA;
7672 }
7673
7674 if ((*dev_intr_type == MSI_X) &&
7675 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7676 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7677 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7678 "Defaulting to INTA\n");
7679 *dev_intr_type = INTA;
7680 }
7681
7682 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7683 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7684 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7685 rx_ring_mode = 1;
7686 }
7687 return SUCCESS;
7688}
7689
7690
7691
7692
7693
7694
7695
7696
7697
7698
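/**
 * rts_ds_steer - steer frames with a given DS codepoint to a ring.
 * @nic: device private variable.
 * @ds_codepoint: the 6-bit differentiated services codepoint (0-63).
 * @ring: receive ring the matching frames should be steered to.
 *
 * Description: Programs one entry of the RTS_DS steering table and waits
 * for the command to complete.  Returns FAILURE for an invalid codepoint,
 * otherwise the result of wait_for_cmd_complete().
 */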
7699static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7700{
7701 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7702 register u64 val64 = 0;
7703
7704 if (ds_codepoint > 63)
7705 return FAILURE;
7706
7707 val64 = RTS_DS_MEM_DATA(ring);
7708 writeq(val64, &bar0->rts_ds_mem_data);
7709
7710 val64 = RTS_DS_MEM_CTRL_WE |
7711 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7712 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7713
7714 writeq(val64, &bar0->rts_ds_mem_ctrl);
7715
7716 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7717 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7718 S2IO_BIT_RESET);
7719}
7720
7721
7722
7723
7724
7725
7726
7727
7728
7729
7730
7731
7732
7733
7734
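/**
 * s2io_init_nic - PCI probe entry point.
 * @pdev: structure containing the PCI related information of the device.
 * @pre: the matching entry from the PCI device id table.
 *
 * Description: Initialises one adapter instance: enables the PCI device,
 * sets up the DMA masks, allocates and fills in the net_device, maps the
 * register and Tx FIFO BARs, reads the MAC address from the card and
 * registers the network device.
 *
 * Return value: 0 on success, a negative errno on failure.
 */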
7735static int __devinit
7736s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7737{
7738 struct s2io_nic *sp;
7739 struct net_device *dev;
7740 int i, j, ret;
7741 int dma_flag = FALSE;
7742 u32 mac_up, mac_down;
7743 u64 val64 = 0, tmp64 = 0;
7744 struct XENA_dev_config __iomem *bar0 = NULL;
7745 u16 subid;
7746 struct mac_info *mac_control;
7747 struct config_param *config;
7748 int mode;
7749 u8 dev_intr_type = intr_type;
7750 u8 dev_multiq = 0;
7751 DECLARE_MAC_BUF(mac);
7752
7753 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7754 if (ret)
7755 return ret;
7756
7757 if ((ret = pci_enable_device(pdev))) {
7758 DBG_PRINT(ERR_DBG,
7759 "s2io_init_nic: pci_enable_device failed\n");
7760 return ret;
7761 }
7762
7763 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7764 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7765 dma_flag = TRUE;
7766 if (pci_set_consistent_dma_mask
7767 (pdev, DMA_64BIT_MASK)) {
7768 DBG_PRINT(ERR_DBG,
7769 "Unable to obtain 64bit DMA for \
7770 consistent allocations\n");
7771 pci_disable_device(pdev);
7772 return -ENOMEM;
7773 }
7774 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7775 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7776 } else {
7777 pci_disable_device(pdev);
7778 return -ENOMEM;
7779 }
7780 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7781 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret);
7782 pci_disable_device(pdev);
7783 return -ENODEV;
7784 }
7785 if (dev_multiq)
7786 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7787 else
7788 dev = alloc_etherdev(sizeof(struct s2io_nic));
7789 if (dev == NULL) {
7790 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7791 pci_disable_device(pdev);
7792 pci_release_regions(pdev);
7793 return -ENODEV;
7794 }
7795
7796 pci_set_master(pdev);
7797 pci_set_drvdata(pdev, dev);
7798 SET_NETDEV_DEV(dev, &pdev->dev);
7799
7800
7801 sp = dev->priv;
7802 memset(sp, 0, sizeof(struct s2io_nic));
7803 sp->dev = dev;
7804 sp->pdev = pdev;
7805 sp->high_dma_flag = dma_flag;
7806 sp->device_enabled_once = FALSE;
7807 if (rx_ring_mode == 1)
7808 sp->rxd_mode = RXD_MODE_1;
7809 if (rx_ring_mode == 2)
7810 sp->rxd_mode = RXD_MODE_3B;
7811
7812 sp->config.intr_type = dev_intr_type;
7813
7814 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7815 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7816 sp->device_type = XFRAME_II_DEVICE;
7817 else
7818 sp->device_type = XFRAME_I_DEVICE;
7819
7820 sp->lro = lro_enable;
7821
7822
7823 s2io_init_pci(sp);
7824
7825
7826
7827
7828
7829
7830
7831
7832 mac_control = &sp->mac_control;
7833 config = &sp->config;
7834
7835 config->napi = napi;
7836 config->tx_steering_type = tx_steering_type;
7837
7838
7839 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7840 config->tx_fifo_num = MAX_TX_FIFOS;
7841 else
7842 config->tx_fifo_num = tx_fifo_num;
7843
7844
7845 if (config->tx_fifo_num < 5) {
7846 if (config->tx_fifo_num == 1)
7847 sp->total_tcp_fifos = 1;
7848 else
7849 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7850 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7851 sp->total_udp_fifos = 1;
7852 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7853 } else {
7854 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7855 FIFO_OTHER_MAX_NUM);
7856 sp->udp_fifo_idx = sp->total_tcp_fifos;
7857 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7858 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7859 }
7860
7861 config->multiq = dev_multiq;
7862 for (i = 0; i < config->tx_fifo_num; i++) {
7863 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7864 config->tx_cfg[i].fifo_priority = i;
7865 }
7866
7867
7868 for (i = 0; i < MAX_TX_FIFOS; i++)
7869 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7870
7871
7872 for (i = 0; i < config->tx_fifo_num; i++)
7873 sp->fifo_selector[i] = fifo_selector[i];
7874
7875
7876 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7877 for (i = 0; i < config->tx_fifo_num; i++) {
7878 config->tx_cfg[i].f_no_snoop =
7879 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7880 if (config->tx_cfg[i].fifo_len < 65) {
7881 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7882 break;
7883 }
7884 }
7885
7886 config->max_txds = MAX_SKB_FRAGS + 2;
7887
7888
7889 config->rx_ring_num = rx_ring_num;
7890 for (i = 0; i < config->rx_ring_num; i++) {
7891 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7892 (rxd_count[sp->rxd_mode] + 1);
7893 config->rx_cfg[i].ring_priority = i;
7894 mac_control->rings[i].rx_bufs_left = 0;
7895 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7896 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7897 mac_control->rings[i].pdev = sp->pdev;
7898 mac_control->rings[i].dev = sp->dev;
7899 }
7900
7901 for (i = 0; i < rx_ring_num; i++) {
7902 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7903 config->rx_cfg[i].f_no_snoop =
7904 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7905 }
7906
7907
7908 mac_control->rmac_pause_time = rmac_pause_time;
7909 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7910 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7911
7912
7913
7914 if (init_shared_mem(sp)) {
7915 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7916 dev->name);
7917 ret = -ENOMEM;
7918 goto mem_alloc_failed;
7919 }
7920
7921 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7922 pci_resource_len(pdev, 0));
7923 if (!sp->bar0) {
7924 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7925 dev->name);
7926 ret = -ENOMEM;
7927 goto bar0_remap_failed;
7928 }
7929
7930 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7931 pci_resource_len(pdev, 2));
7932 if (!sp->bar1) {
7933 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7934 dev->name);
7935 ret = -ENOMEM;
7936 goto bar1_remap_failed;
7937 }
7938
7939 dev->irq = pdev->irq;
7940 dev->base_addr = (unsigned long) sp->bar0;
7941
7942
7943 for (j = 0; j < MAX_TX_FIFOS; j++) {
7944 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7945 (sp->bar1 + (j * 0x00020000));
7946 }
7947
7948
7949 dev->open = &s2io_open;
7950 dev->stop = &s2io_close;
7951 dev->hard_start_xmit = &s2io_xmit;
7952 dev->get_stats = &s2io_get_stats;
7953 dev->set_multicast_list = &s2io_set_multicast;
7954 dev->do_ioctl = &s2io_ioctl;
7955 dev->set_mac_address = &s2io_set_mac_addr;
7956 dev->change_mtu = &s2io_change_mtu;
7957 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7958 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7959 dev->vlan_rx_register = s2io_vlan_rx_register;
7960 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7961
7962
7963
7964
7965
7966#ifdef CONFIG_NET_POLL_CONTROLLER
7967 dev->poll_controller = s2io_netpoll;
7968#endif
7969
7970 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7971 if (sp->high_dma_flag == TRUE)
7972 dev->features |= NETIF_F_HIGHDMA;
7973 dev->features |= NETIF_F_TSO;
7974 dev->features |= NETIF_F_TSO6;
7975 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7976 dev->features |= NETIF_F_UFO;
7977 dev->features |= NETIF_F_HW_CSUM;
7978 }
7979 dev->tx_timeout = &s2io_tx_watchdog;
7980 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7981 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7982 INIT_WORK(&sp->set_link_task, s2io_set_link);
7983
7984 pci_save_state(sp->pdev);
7985
7986
7987 if (s2io_set_swapper(sp)) {
7988 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7989 dev->name);
7990 ret = -EAGAIN;
7991 goto set_swap_failed;
7992 }
7993
7994
7995 if (sp->device_type & XFRAME_II_DEVICE) {
7996 mode = s2io_verify_pci_mode(sp);
7997 if (mode < 0) {
7998 DBG_PRINT(ERR_DBG, "%s: ", __func__);
7999 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8000 ret = -EBADSLT;
8001 goto set_swap_failed;
8002 }
8003 }
8004
8005 if (sp->config.intr_type == MSI_X) {
8006 sp->num_entries = config->rx_ring_num + 1;
8007 ret = s2io_enable_msi_x(sp);
8008
8009 if (!ret) {
8010 ret = s2io_test_msi(sp);
8011
8012 remove_msix_isr(sp);
8013 }
8014 if (ret) {
8015
8016 DBG_PRINT(ERR_DBG,
8017 "%s: MSI-X requested but failed to enable\n",
8018 dev->name);
8019 sp->config.intr_type = INTA;
8020 }
8021 }
8022
8023 if (config->intr_type == MSI_X) {
8024 for (i = 0; i < config->rx_ring_num ; i++)
8025 netif_napi_add(dev, &mac_control->rings[i].napi,
8026 s2io_poll_msix, 64);
8027 } else {
8028 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8029 }
8030
8031
8032 if (sp->device_type & XFRAME_I_DEVICE) {
8033
8034
8035
8036
8037 fix_mac_address(sp);
8038 s2io_reset(sp);
8039 }
8040
8041
8042
8043
8044
8045 bar0 = sp->bar0;
8046 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8047 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8048 writeq(val64, &bar0->rmac_addr_cmd_mem);
8049 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8050 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8051 tmp64 = readq(&bar0->rmac_addr_data0_mem);
8052 mac_down = (u32) tmp64;
8053 mac_up = (u32) (tmp64 >> 32);
8054
8055 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8056 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8057 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8058 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8059 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8060 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8061
8062
8063 dev->addr_len = ETH_ALEN;
8064 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8065 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8066
8067
8068 if (sp->device_type == XFRAME_I_DEVICE) {
8069 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8070 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8071 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8072 } else if (sp->device_type == XFRAME_II_DEVICE) {
8073 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8074 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8075 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8076 }
8077
8078
8079 do_s2io_store_unicast_mc(sp);
8080
8081
8082 if ((sp->device_type == XFRAME_II_DEVICE) &&
8083 (config->intr_type == MSI_X))
8084 sp->num_entries = config->rx_ring_num + 1;
8085
8086
8087 store_xmsi_data(sp);
8088
8089 s2io_reset(sp);
8090
8091
8092
8093
8094
8095 sp->state = 0;
8096
8097
8098 for (i = 0; i < sp->config.tx_fifo_num; i++)
8099 spin_lock_init(&mac_control->fifos[i].tx_lock);
8100
8101
8102
8103
8104
8105 subid = sp->pdev->subsystem_device;
8106 if ((subid & 0xFF) >= 0x07) {
8107 val64 = readq(&bar0->gpio_control);
8108 val64 |= 0x0000800000000000ULL;
8109 writeq(val64, &bar0->gpio_control);
8110 val64 = 0x0411040400000000ULL;
8111 writeq(val64, (void __iomem *) bar0 + 0x2700);
8112 val64 = readq(&bar0->gpio_control);
8113 }
8114
8115 sp->rx_csum = 1;
8116
8117 if (register_netdev(dev)) {
8118 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8119 ret = -ENODEV;
8120 goto register_failed;
8121 }
8122 s2io_vpd_read(sp);
8123 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8124 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8125 sp->product_name, pdev->revision);
8126 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8127 s2io_driver_version);
8128 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
8129 dev->name, print_mac(mac, dev->dev_addr));
8130 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8131 if (sp->device_type & XFRAME_II_DEVICE) {
8132 mode = s2io_print_pci_mode(sp);
8133 if (mode < 0) {
8134 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8135 ret = -EBADSLT;
8136 unregister_netdev(dev);
8137 goto set_swap_failed;
8138 }
8139 }
8140 switch(sp->rxd_mode) {
8141 case RXD_MODE_1:
8142 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8143 dev->name);
8144 break;
8145 case RXD_MODE_3B:
8146 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8147 dev->name);
8148 break;
8149 }
8150
8151 switch (sp->config.napi) {
8152 case 0:
8153 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8154 break;
8155 case 1:
8156 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8157 break;
8158 }
8159
8160 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8161 sp->config.tx_fifo_num);
8162
8163 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8164 sp->config.rx_ring_num);
8165
8166 switch(sp->config.intr_type) {
8167 case INTA:
8168 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8169 break;
8170 case MSI_X:
8171 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8172 break;
8173 }
8174 if (sp->config.multiq) {
8175 for (i = 0; i < sp->config.tx_fifo_num; i++)
8176 mac_control->fifos[i].multiq = config->multiq;
8177 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8178 dev->name);
8179 } else
8180 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8181 dev->name);
8182
8183 switch (sp->config.tx_steering_type) {
8184 case NO_STEERING:
8185 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8186 " transmit\n", dev->name);
8187 break;
8188 case TX_PRIORITY_STEERING:
8189 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8190 " transmit\n", dev->name);
8191 break;
8192 case TX_DEFAULT_STEERING:
8193 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8194 " transmit\n", dev->name);
8195 }
8196
8197 if (sp->lro)
8198 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8199 dev->name);
8200 if (ufo)
8201 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8202 " enabled\n", dev->name);
8203
8204 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8205
8206 if (vlan_tag_strip)
8207 sp->vlan_strip_flag = 1;
8208 else
8209 sp->vlan_strip_flag = 0;
8210
8211
8212
8213
8214
8215
8216 netif_carrier_off(dev);
8217
8218 return 0;
8219
8220 register_failed:
8221 set_swap_failed:
8222 iounmap(sp->bar1);
8223 bar1_remap_failed:
8224 iounmap(sp->bar0);
8225 bar0_remap_failed:
8226 mem_alloc_failed:
8227 free_shared_mem(sp);
8228 pci_disable_device(pdev);
8229 pci_release_regions(pdev);
8230 pci_set_drvdata(pdev, NULL);
8231 free_netdev(dev);
8232
8233 return ret;
8234}
8235
8236
8237
8238
8239
8240
8241
8242
8243
8244
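/**
 * s2io_rem_nic - PCI remove entry point.
 * @pdev: structure containing the PCI related information of the device.
 *
 * Description: Unregisters the network device and releases all the
 * resources allocated in s2io_init_nic().
 */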
8245static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8246{
8247 struct net_device *dev =
8248 (struct net_device *) pci_get_drvdata(pdev);
8249 struct s2io_nic *sp;
8250
8251 if (dev == NULL) {
8252 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8253 return;
8254 }
8255
8256 flush_scheduled_work();
8257
8258 sp = dev->priv;
8259 unregister_netdev(dev);
8260
8261 free_shared_mem(sp);
8262 iounmap(sp->bar0);
8263 iounmap(sp->bar1);
8264 pci_release_regions(pdev);
8265 pci_set_drvdata(pdev, NULL);
8266 free_netdev(dev);
8267 pci_disable_device(pdev);
8268}
8269
8270
8271
8272
8273
8274
8275
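/*
 * s2io_starter - module load entry point; registers the PCI driver.
 */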
8276static int __init s2io_starter(void)
8277{
8278 return pci_register_driver(&s2io_driver);
8279}
8280
8281
8282
8283
8284
8285
8286static __exit void s2io_closer(void)
8287{
8288 pci_unregister_driver(&s2io_driver);
8289 DBG_PRINT(INIT_DBG, "cleanup done\n");
8290}
8291
8292module_init(s2io_starter);
8293module_exit(s2io_closer);
8294
8295static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8296 struct tcphdr **tcp, struct RxD_t *rxdp,
8297 struct s2io_nic *sp)
8298{
8299 int ip_off;
8300 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8301
8302 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8303 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8304 __func__);
8305 return -1;
8306 }
8307
8308
8309 if ((l2_type == 0)
8310 || (l2_type == 4)) {
8311 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8312
8313
8314
8315
8316 if ((!sp->vlan_strip_flag) &&
8317 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8318 ip_off += HEADER_VLAN_SIZE;
8319 } else {
8320
8321 return -1;
8322 }
8323
8324 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8325 ip_len = (u8)((*ip)->ihl);
8326 ip_len <<= 2;
8327 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8328
8329 return 0;
8330}
8331
8332static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8333 struct tcphdr *tcp)
8334{
8335 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8336 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8337 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8338 return -1;
8339 return 0;
8340}
8341
8342static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8343{
8344 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8345}
8346
8347static void initiate_new_session(struct lro *lro, u8 *l2h,
8348 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8349{
8350 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8351 lro->l2h = l2h;
8352 lro->iph = ip;
8353 lro->tcph = tcp;
8354 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8355 lro->tcp_ack = tcp->ack_seq;
8356 lro->sg_num = 1;
8357 lro->total_len = ntohs(ip->tot_len);
8358 lro->frags_len = 0;
8359 lro->vlan_tag = vlan_tag;
8360
8361
8362
8363
8364 if (tcp->doff == 8) {
8365 __be32 *ptr;
8366 ptr = (__be32 *)(tcp+1);
8367 lro->saw_ts = 1;
8368 lro->cur_tsval = ntohl(*(ptr+1));
8369 lro->cur_tsecr = *(ptr+2);
8370 }
8371 lro->in_use = 1;
8372}
8373
8374static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8375{
8376 struct iphdr *ip = lro->iph;
8377 struct tcphdr *tcp = lro->tcph;
8378 __sum16 nchk;
8379 struct stat_block *statinfo = sp->mac_control.stats_info;
8380 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8381
8382
8383 ip->tot_len = htons(lro->total_len);
8384 ip->check = 0;
8385 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8386 ip->check = nchk;
8387
8388
8389 tcp->ack_seq = lro->tcp_ack;
8390 tcp->window = lro->window;
8391
8392
8393 if (lro->saw_ts) {
8394 __be32 *ptr = (__be32 *)(tcp + 1);
8395 *(ptr+2) = lro->cur_tsecr;
8396 }
8397
8398
8399
8400
8401 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8402 statinfo->sw_stat.num_aggregations++;
8403}
8404
8405static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8406 struct tcphdr *tcp, u32 l4_pyld)
8407{
8408 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8409 lro->total_len += l4_pyld;
8410 lro->frags_len += l4_pyld;
8411 lro->tcp_next_seq += l4_pyld;
8412 lro->sg_num++;
8413
8414
8415 lro->tcp_ack = tcp->ack_seq;
8416 lro->window = tcp->window;
8417
8418 if (lro->saw_ts) {
8419 __be32 *ptr;
8420
8421 ptr = (__be32 *)(tcp+1);
8422 lro->cur_tsval = ntohl(*(ptr+1));
8423 lro->cur_tsecr = *(ptr + 2);
8424 }
8425}
8426
8427static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8428 struct tcphdr *tcp, u32 tcp_pyld_len)
8429{
8430 u8 *ptr;
8431
8432 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8433
8434 if (!tcp_pyld_len) {
8435
8436 return -1;
8437 }
8438
8439 if (ip->ihl != 5)
8440 return -1;
8441
8442
8443 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8444 return -1;
8445
8446
8447 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
8448 tcp->ece || tcp->cwr || !tcp->ack) {
8449
8450
8451
8452
8453
8454 return -1;
8455 }
8456
8457
8458
8459
8460
8461 if (tcp->doff != 5 && tcp->doff != 8)
8462 return -1;
8463
8464 if (tcp->doff == 8) {
8465 ptr = (u8 *)(tcp + 1);
8466 while (*ptr == TCPOPT_NOP)
8467 ptr++;
8468 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8469 return -1;
8470
8471
8472 if (l_lro)
8473 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8474 return -1;
8475
8476
8477 if (*((__be32 *)(ptr+6)) == 0)
8478 return -1;
8479 }
8480
8481 return 0;
8482}
8483
8484static int
8485s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8486 u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
8487 struct s2io_nic *sp)
8488{
8489 struct iphdr *ip;
8490 struct tcphdr *tcph;
8491 int ret = 0, i;
8492 u16 vlan_tag = 0;
8493
8494 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8495 rxdp, sp))) {
8496 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8497 ip->saddr, ip->daddr);
8498 } else
8499 return ret;
8500
8501 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8502 tcph = (struct tcphdr *)*tcp;
8503 *tcp_len = get_l4_pyld_length(ip, tcph);
8504 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8505 struct lro *l_lro = &ring_data->lro0_n[i];
8506 if (l_lro->in_use) {
8507 if (check_for_socket_match(l_lro, ip, tcph))
8508 continue;
8509
8510 *lro = l_lro;
8511
8512 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8513 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8514 "0x%x, actual 0x%x\n", __func__,
8515 (*lro)->tcp_next_seq,
8516 ntohl(tcph->seq));
8517
8518 sp->mac_control.stats_info->
8519 sw_stat.outof_sequence_pkts++;
8520 ret = 2;
8521 break;
8522 }
8523
8524 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8525 ret = 1;
8526 else
8527 ret = 2;
8528 break;
8529 }
8530 }
8531
8532 if (ret == 0) {
8533
8534
8535
8536
8537
8538 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
8539 return 5;
8540 }
8541
8542 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8543 struct lro *l_lro = &ring_data->lro0_n[i];
8544 if (!(l_lro->in_use)) {
8545 *lro = l_lro;
8546 ret = 3;
8547 break;
8548 }
8549 }
8550 }
8551
8552 if (ret == 0) {
8553 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8554 __func__);
8555 *lro = NULL;
8556 return ret;
8557 }
8558
8559 switch (ret) {
8560 case 3:
8561 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8562 vlan_tag);
8563 break;
8564 case 2:
8565 update_L3L4_header(sp, *lro);
8566 break;
8567 case 1:
8568 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8569 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8570 update_L3L4_header(sp, *lro);
8571 ret = 4;
8572 }
8573 break;
8574 default:
8575 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8576 __func__);
8577 break;
8578 }
8579
8580 return ret;
8581}
8582
8583static void clear_lro_session(struct lro *lro)
8584{
8585 static u16 lro_struct_size = sizeof(struct lro);
8586
8587 memset(lro, 0, lro_struct_size);
8588}
8589
8590static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8591{
8592 struct net_device *dev = skb->dev;
8593 struct s2io_nic *sp = dev->priv;
8594
8595 skb->protocol = eth_type_trans(skb, dev);
8596 if (sp->vlgrp && vlan_tag
8597 && (sp->vlan_strip_flag)) {
8598
8599 if (sp->config.napi)
8600 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8601 else
8602 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8603 } else {
8604 if (sp->config.napi)
8605 netif_receive_skb(skb);
8606 else
8607 netif_rx(skb);
8608 }
8609}
8610
8611static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8612 struct sk_buff *skb,
8613 u32 tcp_len)
8614{
8615 struct sk_buff *first = lro->parent;
8616
8617 first->len += tcp_len;
8618 first->data_len = lro->frags_len;
8619 skb_pull(skb, (skb->len - tcp_len));
8620 if (skb_shinfo(first)->frag_list)
8621 lro->last_frag->next = skb;
8622 else
8623 skb_shinfo(first)->frag_list = skb;
8624 first->truesize += skb->truesize;
8625 lro->last_frag = skb;
8626 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8627 return;
8628}
8629
8630
8631
8632
8633
8634
8635
8636
8637
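/**
 * s2io_io_error_detected - called when a PCI error is detected.
 * @pdev: pointer to the PCI device that failed.
 * @state: the current PCI channel state.
 *
 * Description: Detaches the interface and, if it was running, brings the
 * card down without touching the hardware, then requests a slot reset.
 */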
8638static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8639 pci_channel_state_t state)
8640{
8641 struct net_device *netdev = pci_get_drvdata(pdev);
8642 struct s2io_nic *sp = netdev->priv;
8643
8644 netif_device_detach(netdev);
8645
8646 if (netif_running(netdev)) {
8647
8648 do_s2io_card_down(sp, 0);
8649 }
8650 pci_disable_device(pdev);
8651
8652 return PCI_ERS_RESULT_NEED_RESET;
8653}
8654
8655
8656
8657
8658
8659
8660
8661
8662
8663
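/**
 * s2io_io_slot_reset - called after the PCI slot has been reset.
 * @pdev: pointer to the PCI device that was reset.
 *
 * Description: Re-enables the device, restores bus mastering and resets
 * the adapter so it can be reinitialised when traffic resumes.
 */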
8664static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8665{
8666 struct net_device *netdev = pci_get_drvdata(pdev);
8667 struct s2io_nic *sp = netdev->priv;
8668
8669 if (pci_enable_device(pdev)) {
8670 printk(KERN_ERR "s2io: "
8671 "Cannot re-enable PCI device after reset.\n");
8672 return PCI_ERS_RESULT_DISCONNECT;
8673 }
8674
8675 pci_set_master(pdev);
8676 s2io_reset(sp);
8677
8678 return PCI_ERS_RESULT_RECOVERED;
8679}
8680
8681
8682
8683
8684
8685
8686
8687
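/**
 * s2io_io_resume - called when traffic can start flowing again.
 * @pdev: pointer to the recovered PCI device.
 *
 * Description: Brings the card back up, restores the MAC address and
 * reattaches the interface so transmission can resume.
 */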
8688static void s2io_io_resume(struct pci_dev *pdev)
8689{
8690 struct net_device *netdev = pci_get_drvdata(pdev);
8691 struct s2io_nic *sp = netdev->priv;
8692
8693 if (netif_running(netdev)) {
8694 if (s2io_card_up(sp)) {
8695 printk(KERN_ERR "s2io: "
8696 "Can't bring device back up after reset.\n");
8697 return;
8698 }
8699
8700 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8701 s2io_card_down(sp);
8702 printk(KERN_ERR "s2io: "
8703 "Can't resetore mac addr after reset.\n");
8704 return;
8705 }
8706 }
8707
8708 netif_device_attach(netdev);
8709 netif_tx_wake_all_queues(netdev);
8710}