Showing error 924

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock acquired in the function is not released on every return path, so the function can exit with the lock still held (the lock is leaked)
File location: drivers/net/qlge/qlge_main.c
Line in file: 3541
Project: Linux Kernel
Project version: 2.6.28
Tools: Stanse (1.2)
Marking: Undetermined (1)
Entered: 2012-03-02 21:35:17 UTC
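
Illustration (not taken from qlge_main.c; the names below are hypothetical): this checker flags a function that acquires a lock and then returns on some path without releasing it. A minimal user-space sketch of the bug, and of the usual single-exit fix (the same goto-then-unlock shape that functions such as ql_get_mac_addr_reg() in the source below use with ql_sem_unlock()):

#include <pthread.h>

/* Hypothetical lock and device state, for illustration only. */
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int dev_state;

/* Buggy: the early return on error leaves dev_lock held -- the leak Stanse reports. */
static int update_device_buggy(int value)
{
        pthread_mutex_lock(&dev_lock);
        if (value < 0)
                return -1;      /* exits with dev_lock still locked */
        dev_state = value;
        pthread_mutex_unlock(&dev_lock);
        return 0;
}

/* Fixed: every path leaves through the single unlock at the exit label. */
static int update_device_fixed(int value)
{
        int status = 0;

        pthread_mutex_lock(&dev_lock);
        if (value < 0) {
                status = -1;
                goto exit;
        }
        dev_state = value;
exit:
        pthread_mutex_unlock(&dev_lock);
        return status;
}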


Source:

   1/*
   2 * QLogic qlge NIC HBA Driver
   3 * Copyright (c)  2003-2008 QLogic Corporation
   4 * See LICENSE.qlge for copyright and licensing details.
   5 * Author:     Linux qlge network device driver by
   6 *                      Ron Mercer <ron.mercer@qlogic.com>
   7 */
   8#include <linux/kernel.h>
   9#include <linux/init.h>
  10#include <linux/types.h>
  11#include <linux/module.h>
  12#include <linux/list.h>
  13#include <linux/pci.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/pagemap.h>
  16#include <linux/sched.h>
  17#include <linux/slab.h>
  18#include <linux/dmapool.h>
  19#include <linux/mempool.h>
  20#include <linux/spinlock.h>
  21#include <linux/kthread.h>
  22#include <linux/interrupt.h>
  23#include <linux/errno.h>
  24#include <linux/ioport.h>
  25#include <linux/in.h>
  26#include <linux/ip.h>
  27#include <linux/ipv6.h>
  28#include <net/ipv6.h>
  29#include <linux/tcp.h>
  30#include <linux/udp.h>
  31#include <linux/if_arp.h>
  32#include <linux/if_ether.h>
  33#include <linux/netdevice.h>
  34#include <linux/etherdevice.h>
  35#include <linux/ethtool.h>
  36#include <linux/skbuff.h>
  37#include <linux/rtnetlink.h>
  38#include <linux/if_vlan.h>
  39#include <linux/delay.h>
  40#include <linux/mm.h>
  41#include <linux/vmalloc.h>
  42#include <net/ip6_checksum.h>
  43
  44#include "qlge.h"
  45
  46char qlge_driver_name[] = DRV_NAME;
  47const char qlge_driver_version[] = DRV_VERSION;
  48
  49MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
  50MODULE_DESCRIPTION(DRV_STRING " ");
  51MODULE_LICENSE("GPL");
  52MODULE_VERSION(DRV_VERSION);
  53
  54static const u32 default_msg =
  55    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
  56/* NETIF_MSG_TIMER |        */
  57    NETIF_MSG_IFDOWN |
  58    NETIF_MSG_IFUP |
  59    NETIF_MSG_RX_ERR |
  60    NETIF_MSG_TX_ERR |
  61    NETIF_MSG_TX_QUEUED |
  62    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
  63/* NETIF_MSG_PKTDATA | */
  64    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
  65
  66static int debug = 0x00007fff;        /* defaults above */
  67module_param(debug, int, 0);
  68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  69
  70#define MSIX_IRQ 0
  71#define MSI_IRQ 1
  72#define LEG_IRQ 2
  73static int irq_type = MSIX_IRQ;
  74module_param(irq_type, int, MSIX_IRQ);
  75MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
  76
  77static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
  78        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
  79        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
  80        /* required last entry */
  81        {0,}
  82};
  83
  84MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
  85
  86/* This hardware semaphore causes exclusive access to
  87 * resources shared between the NIC driver, MPI firmware,
  88 * FCOE firmware and the FC driver.
  89 */
  90static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
  91{
  92        u32 sem_bits = 0;
  93
  94        switch (sem_mask) {
  95        case SEM_XGMAC0_MASK:
  96                sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
  97                break;
  98        case SEM_XGMAC1_MASK:
  99                sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
 100                break;
 101        case SEM_ICB_MASK:
 102                sem_bits = SEM_SET << SEM_ICB_SHIFT;
 103                break;
 104        case SEM_MAC_ADDR_MASK:
 105                sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
 106                break;
 107        case SEM_FLASH_MASK:
 108                sem_bits = SEM_SET << SEM_FLASH_SHIFT;
 109                break;
 110        case SEM_PROBE_MASK:
 111                sem_bits = SEM_SET << SEM_PROBE_SHIFT;
 112                break;
 113        case SEM_RT_IDX_MASK:
 114                sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
 115                break;
 116        case SEM_PROC_REG_MASK:
 117                sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
 118                break;
 119        default:
 120                QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
 121                return -EINVAL;
 122        }
 123
 124        ql_write32(qdev, SEM, sem_bits | sem_mask);
 125        return !(ql_read32(qdev, SEM) & sem_bits);
 126}
 127
 128int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
 129{
 130        unsigned int seconds = 3;
 131        do {
 132                if (!ql_sem_trylock(qdev, sem_mask))
 133                        return 0;
 134                ssleep(1);
 135        } while (--seconds);
 136        return -ETIMEDOUT;
 137}
 138
 139void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
 140{
 141        ql_write32(qdev, SEM, sem_mask);
 142        ql_read32(qdev, SEM);        /* flush */
 143}
 144
 145/* This function waits for a specific bit to come ready
 146 * in a given register.  It is used mostly by the initialize
 147 * process, but is also used in kernel thread API such as
 148 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 149 */
 150int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
 151{
 152        u32 temp;
 153        int count = UDELAY_COUNT;
 154
 155        while (count) {
 156                temp = ql_read32(qdev, reg);
 157
 158                /* check for errors */
 159                if (temp & err_bit) {
 160                        QPRINTK(qdev, PROBE, ALERT,
 161                                "register 0x%.08x access error, value = 0x%.08x!.\n",
 162                                reg, temp);
 163                        return -EIO;
 164                } else if (temp & bit)
 165                        return 0;
 166                udelay(UDELAY_DELAY);
 167                count--;
 168        }
 169        QPRINTK(qdev, PROBE, ALERT,
 170                "Timed out waiting for reg %x to come ready.\n", reg);
 171        return -ETIMEDOUT;
 172}
 173
 174/* The CFG register is used to download TX and RX control blocks
 175 * to the chip. This function waits for an operation to complete.
 176 */
 177static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
 178{
 179        int count = UDELAY_COUNT;
 180        u32 temp;
 181
 182        while (count) {
 183                temp = ql_read32(qdev, CFG);
 184                if (temp & CFG_LE)
 185                        return -EIO;
 186                if (!(temp & bit))
 187                        return 0;
 188                udelay(UDELAY_DELAY);
 189                count--;
 190        }
 191        return -ETIMEDOUT;
 192}
 193
 194
 195/* Used to issue init control blocks to hw. Maps control block,
 196 * sets address, triggers download, waits for completion.
 197 */
 198int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 199                 u16 q_id)
 200{
 201        u64 map;
 202        int status = 0;
 203        int direction;
 204        u32 mask;
 205        u32 value;
 206
 207        direction =
 208            (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
 209            PCI_DMA_FROMDEVICE;
 210
 211        map = pci_map_single(qdev->pdev, ptr, size, direction);
 212        if (pci_dma_mapping_error(qdev->pdev, map)) {
 213                QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
 214                return -ENOMEM;
 215        }
 216
 217        status = ql_wait_cfg(qdev, bit);
 218        if (status) {
 219                QPRINTK(qdev, IFUP, ERR,
 220                        "Timed out waiting for CFG to come ready.\n");
 221                goto exit;
 222        }
 223
 224        status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
 225        if (status)
 226                goto exit;
 227        ql_write32(qdev, ICB_L, (u32) map);
 228        ql_write32(qdev, ICB_H, (u32) (map >> 32));
 229        ql_sem_unlock(qdev, SEM_ICB_MASK);        /* does flush too */
 230
 231        mask = CFG_Q_MASK | (bit << 16);
 232        value = bit | (q_id << CFG_Q_SHIFT);
 233        ql_write32(qdev, CFG, (mask | value));
 234
 235        /*
 236         * Wait for the bit to clear after signaling hw.
 237         */
 238        status = ql_wait_cfg(qdev, bit);
 239exit:
 240        pci_unmap_single(qdev->pdev, map, size, direction);
 241        return status;
 242}
 243
 244/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
 245int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 246                        u32 *value)
 247{
 248        u32 offset = 0;
 249        int status;
 250
 251        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 252        if (status)
 253                return status;
 254        switch (type) {
 255        case MAC_ADDR_TYPE_MULTI_MAC:
 256        case MAC_ADDR_TYPE_CAM_MAC:
 257                {
 258                        status =
 259                            ql_wait_reg_rdy(qdev,
 260                                MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
 261                        if (status)
 262                                goto exit;
 263                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 264                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 265                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 266                        status =
 267                            ql_wait_reg_rdy(qdev,
 268                                MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
 269                        if (status)
 270                                goto exit;
 271                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 272                        status =
 273                            ql_wait_reg_rdy(qdev,
 274                                MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
 275                        if (status)
 276                                goto exit;
 277                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 278                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 279                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 280                        status =
 281                            ql_wait_reg_rdy(qdev,
 282                                MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
 283                        if (status)
 284                                goto exit;
 285                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 286                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
 287                                status =
 288                                    ql_wait_reg_rdy(qdev,
 289                                        MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
 290                                if (status)
 291                                        goto exit;
 292                                ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 293                                           (index << MAC_ADDR_IDX_SHIFT) | /* index */
 294                                           MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 295                                status =
 296                                    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
 297                                                    MAC_ADDR_MR, MAC_ADDR_E);
 298                                if (status)
 299                                        goto exit;
 300                                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
 301                        }
 302                        break;
 303                }
 304        case MAC_ADDR_TYPE_VLAN:
 305        case MAC_ADDR_TYPE_MULTI_FLTR:
 306        default:
 307                QPRINTK(qdev, IFUP, CRIT,
 308                        "Address type %d not yet supported.\n", type);
 309                status = -EPERM;
 310        }
 311exit:
 312        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 313        return status;
 314}
 315
 316/* Set up a MAC, multicast or VLAN address for the
 317 * inbound frame matching.
 318 */
 319static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 320                               u16 index)
 321{
 322        u32 offset = 0;
 323        int status = 0;
 324
 325        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 326        if (status)
 327                return status;
 328        switch (type) {
 329        case MAC_ADDR_TYPE_MULTI_MAC:
 330        case MAC_ADDR_TYPE_CAM_MAC:
 331                {
 332                        u32 cam_output;
 333                        u32 upper = (addr[0] << 8) | addr[1];
 334                        u32 lower =
 335                            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
 336                            (addr[5]);
 337
 338                        QPRINTK(qdev, IFUP, INFO,
 339                                "Adding %s address %02x:%02x:%02x:%02x:%02x:%02x"
 340                                " at index %d in the CAM.\n",
 341                                ((type ==
 342                                  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
 343                                 "UNICAST"), addr[0], addr[1], addr[2], addr[3],
 344                                addr[4], addr[5], index);
 345
 346                        status =
 347                            ql_wait_reg_rdy(qdev,
 348                                MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
 349                        if (status)
 350                                goto exit;
 351                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 352                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 353                                   type);        /* type */
 354                        ql_write32(qdev, MAC_ADDR_DATA, lower);
 355                        status =
 356                            ql_wait_reg_rdy(qdev,
 357                                MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
 358                        if (status)
 359                                goto exit;
 360                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 361                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 362                                   type);        /* type */
 363                        ql_write32(qdev, MAC_ADDR_DATA, upper);
 364                        status =
 365                            ql_wait_reg_rdy(qdev,
 366                                MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
 367                        if (status)
 368                                goto exit;
 369                        ql_write32(qdev, MAC_ADDR_IDX, (offset) |        /* offset */
 370                                   (index << MAC_ADDR_IDX_SHIFT) |        /* index */
 371                                   type);        /* type */
 372                        /* This field should also include the queue id
 373                           and possibly the function id.  Right now we hardcode
 374                           the route field to NIC core.
 375                         */
 376                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
 377                                cam_output = (CAM_OUT_ROUTE_NIC |
 378                                              (qdev->
 379                                               func << CAM_OUT_FUNC_SHIFT) |
 380                                              (qdev->
 381                                               rss_ring_first_cq_id <<
 382                                               CAM_OUT_CQ_ID_SHIFT));
 383                                if (qdev->vlgrp)
 384                                        cam_output |= CAM_OUT_RV;
 385                                /* route to NIC core */
 386                                ql_write32(qdev, MAC_ADDR_DATA, cam_output);
 387                        }
 388                        break;
 389                }
 390        case MAC_ADDR_TYPE_VLAN:
 391                {
 392                        u32 enable_bit = *((u32 *) &addr[0]);
 393                        /* For VLAN, the addr actually holds a bit that
 394                         * either enables or disables the vlan id we are
 395                         * addressing. It's either MAC_ADDR_E on or off.
 396                         * That's bit-27 we're talking about.
 397                         */
 398                        QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
 399                                (enable_bit ? "Adding" : "Removing"),
 400                                index, (enable_bit ? "to" : "from"));
 401
 402                        status =
 403                            ql_wait_reg_rdy(qdev,
 404                                MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
 405                        if (status)
 406                                goto exit;
 407                        ql_write32(qdev, MAC_ADDR_IDX, offset |        /* offset */
 408                                   (index << MAC_ADDR_IDX_SHIFT) |        /* index */
 409                                   type |        /* type */
 410                                   enable_bit);        /* enable/disable */
 411                        break;
 412                }
 413        case MAC_ADDR_TYPE_MULTI_FLTR:
 414        default:
 415                QPRINTK(qdev, IFUP, CRIT,
 416                        "Address type %d not yet supported.\n", type);
 417                status = -EPERM;
 418        }
 419exit:
 420        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 421        return status;
 422}
 423
 424/* Get a specific frame routing value from the CAM.
 425 * Used for debug and reg dump.
 426 */
 427int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
 428{
 429        int status = 0;
 430
 431        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
 432        if (status)
 433                goto exit;
 434
 435        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
 436        if (status)
 437                goto exit;
 438
 439        ql_write32(qdev, RT_IDX,
 440                   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
 441        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
 442        if (status)
 443                goto exit;
 444        *value = ql_read32(qdev, RT_DATA);
 445exit:
 446        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
 447        return status;
 448}
 449
 450/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 451 * to route different frame types to various inbound queues.  We send broadcast/
 452 * multicast/error frames to the default queue for slow handling,
 453 * and CAM hit/RSS frames to the fast handling queues.
 454 */
 455static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
 456                              int enable)
 457{
 458        int status;
 459        u32 value = 0;
 460
 461        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
 462        if (status)
 463                return status;
 464
 465        QPRINTK(qdev, IFUP, DEBUG,
 466                "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
 467                (enable ? "Adding" : "Removing"),
 468                ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
 469                ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
 470                ((index ==
 471                  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
 472                ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
 473                ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
 474                ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
 475                ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
 476                ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
 477                ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
 478                ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
 479                ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
 480                ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
 481                ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
 482                ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
 483                ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
 484                ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
 485                (enable ? "to" : "from"));
 486
 487        switch (mask) {
 488        case RT_IDX_CAM_HIT:
 489                {
 490                        value = RT_IDX_DST_CAM_Q |        /* dest */
 491                            RT_IDX_TYPE_NICQ |        /* type */
 492                            (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
 493                        break;
 494                }
 495        case RT_IDX_VALID:        /* Promiscuous Mode frames. */
 496                {
 497                        value = RT_IDX_DST_DFLT_Q |        /* dest */
 498                            RT_IDX_TYPE_NICQ |        /* type */
 499                            (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
 500                        break;
 501                }
 502        case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
 503                {
 504                        value = RT_IDX_DST_DFLT_Q |        /* dest */
 505                            RT_IDX_TYPE_NICQ |        /* type */
 506                            (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
 507                        break;
 508                }
 509        case RT_IDX_BCAST:        /* Pass up Broadcast frames to default Q. */
 510                {
 511                        value = RT_IDX_DST_DFLT_Q |        /* dest */
 512                            RT_IDX_TYPE_NICQ |        /* type */
 513                            (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
 514                        break;
 515                }
 516        case RT_IDX_MCAST:        /* Pass up All Multicast frames. */
 517                {
 518                        value = RT_IDX_DST_CAM_Q |        /* dest */
 519                            RT_IDX_TYPE_NICQ |        /* type */
 520                            (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
 521                        break;
 522                }
 523        case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
 524                {
 525                        value = RT_IDX_DST_CAM_Q |        /* dest */
 526                            RT_IDX_TYPE_NICQ |        /* type */
 527                            (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 528                        break;
 529                }
 530        case RT_IDX_RSS_MATCH:        /* Pass up matched RSS frames. */
 531                {
 532                        value = RT_IDX_DST_RSS |        /* dest */
 533                            RT_IDX_TYPE_NICQ |        /* type */
 534                            (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 535                        break;
 536                }
 537        case 0:                /* Clear the E-bit on an entry. */
 538                {
 539                        value = RT_IDX_DST_DFLT_Q |        /* dest */
 540                            RT_IDX_TYPE_NICQ |        /* type */
 541                            (index << RT_IDX_IDX_SHIFT);/* index */
 542                        break;
 543                }
 544        default:
 545                QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
 546                        mask);
 547                status = -EPERM;
 548                goto exit;
 549        }
 550
 551        if (value) {
 552                status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 553                if (status)
 554                        goto exit;
 555                value |= (enable ? RT_IDX_E : 0);
 556                ql_write32(qdev, RT_IDX, value);
 557                ql_write32(qdev, RT_DATA, enable ? mask : 0);
 558        }
 559exit:
 560        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
 561        return status;
 562}
 563
 564static void ql_enable_interrupts(struct ql_adapter *qdev)
 565{
 566        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
 567}
 568
 569static void ql_disable_interrupts(struct ql_adapter *qdev)
 570{
 571        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
 572}
 573
 574/* If we're running with multiple MSI-X vectors then we enable on the fly.
 575 * Otherwise, we may have multiple outstanding workers and don't want to
 576 * enable until the last one finishes. In this case, the irq_cnt gets
  577 * incremented every time we queue a worker and decremented every time
 578 * a worker finishes.  Once it hits zero we enable the interrupt.
 579 */
 580u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 581{
 582        u32 var = 0;
 583        unsigned long hw_flags = 0;
 584        struct intr_context *ctx = qdev->intr_context + intr;
 585
 586        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
 587                /* Always enable if we're MSIX multi interrupts and
 588                 * it's not the default (zeroeth) interrupt.
 589                 */
 590                ql_write32(qdev, INTR_EN,
 591                           ctx->intr_en_mask);
 592                var = ql_read32(qdev, STS);
 593                return var;
 594        }
 595
 596        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 597        if (atomic_dec_and_test(&ctx->irq_cnt)) {
 598                ql_write32(qdev, INTR_EN,
 599                           ctx->intr_en_mask);
 600                var = ql_read32(qdev, STS);
 601        }
 602        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 603        return var;
 604}
 605
 606static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 607{
 608        u32 var = 0;
 609        unsigned long hw_flags;
 610        struct intr_context *ctx;
 611
 612        /* HW disables for us if we're MSIX multi interrupts and
 613         * it's not the default (zeroeth) interrupt.
 614         */
 615        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
 616                return 0;
 617
 618        ctx = qdev->intr_context + intr;
 619        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 620        if (!atomic_read(&ctx->irq_cnt)) {
 621                ql_write32(qdev, INTR_EN,
 622                ctx->intr_dis_mask);
 623                var = ql_read32(qdev, STS);
 624        }
 625        atomic_inc(&ctx->irq_cnt);
 626        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 627        return var;
 628}
 629
 630static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 631{
 632        int i;
 633        for (i = 0; i < qdev->intr_count; i++) {
  634                /* The enable call does an atomic_dec_and_test
 635                 * and enables only if the result is zero.
 636                 * So we precharge it here.
 637                 */
 638                if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
 639                        i == 0))
 640                        atomic_set(&qdev->intr_context[i].irq_cnt, 1);
 641                ql_enable_completion_interrupt(qdev, i);
 642        }
 643
 644}
 645
 646int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
 647{
 648        int status = 0;
 649        /* wait for reg to come ready */
 650        status = ql_wait_reg_rdy(qdev,
 651                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 652        if (status)
 653                goto exit;
 654        /* set up for reg read */
 655        ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
 656        /* wait for reg to come ready */
 657        status = ql_wait_reg_rdy(qdev,
 658                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 659        if (status)
 660                goto exit;
 661        /* get the data */
 662        *data = ql_read32(qdev, FLASH_DATA);
 663exit:
 664        return status;
 665}
 666
 667static int ql_get_flash_params(struct ql_adapter *qdev)
 668{
 669        int i;
 670        int status;
 671        u32 *p = (u32 *)&qdev->flash;
 672
 673        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 674                return -ETIMEDOUT;
 675
 676        for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
 677                status = ql_read_flash_word(qdev, i, p);
 678                if (status) {
 679                        QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
 680                        goto exit;
 681                }
 682
 683        }
 684exit:
 685        ql_sem_unlock(qdev, SEM_FLASH_MASK);
 686        return status;
 687}
 688
  689/* xgmac registers are located behind the xgmac_addr and xgmac_data
 690 * register pair.  Each read/write requires us to wait for the ready
 691 * bit before reading/writing the data.
 692 */
 693static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
 694{
 695        int status;
 696        /* wait for reg to come ready */
 697        status = ql_wait_reg_rdy(qdev,
 698                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 699        if (status)
 700                return status;
 701        /* write the data to the data reg */
 702        ql_write32(qdev, XGMAC_DATA, data);
 703        /* trigger the write */
 704        ql_write32(qdev, XGMAC_ADDR, reg);
 705        return status;
 706}
 707
  708/* xgmac registers are located behind the xgmac_addr and xgmac_data
 709 * register pair.  Each read/write requires us to wait for the ready
 710 * bit before reading/writing the data.
 711 */
 712int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
 713{
 714        int status = 0;
 715        /* wait for reg to come ready */
 716        status = ql_wait_reg_rdy(qdev,
 717                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 718        if (status)
 719                goto exit;
 720        /* set up for reg read */
 721        ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
 722        /* wait for reg to come ready */
 723        status = ql_wait_reg_rdy(qdev,
 724                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 725        if (status)
 726                goto exit;
 727        /* get the data */
 728        *data = ql_read32(qdev, XGMAC_DATA);
 729exit:
 730        return status;
 731}
 732
 733/* This is used for reading the 64-bit statistics regs. */
 734int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
 735{
 736        int status = 0;
 737        u32 hi = 0;
 738        u32 lo = 0;
 739
 740        status = ql_read_xgmac_reg(qdev, reg, &lo);
 741        if (status)
 742                goto exit;
 743
 744        status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
 745        if (status)
 746                goto exit;
 747
 748        *data = (u64) lo | ((u64) hi << 32);
 749
 750exit:
 751        return status;
 752}
 753
 754/* Take the MAC Core out of reset.
 755 * Enable statistics counting.
 756 * Take the transmitter/receiver out of reset.
 757 * This functionality may be done in the MPI firmware at a
 758 * later date.
 759 */
 760static int ql_port_initialize(struct ql_adapter *qdev)
 761{
 762        int status = 0;
 763        u32 data;
 764
 765        if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
 766                /* Another function has the semaphore, so
 767                 * wait for the port init bit to come ready.
 768                 */
 769                QPRINTK(qdev, LINK, INFO,
 770                        "Another function has the semaphore, so wait for the port init bit to come ready.\n");
 771                status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
 772                if (status) {
 773                        QPRINTK(qdev, LINK, CRIT,
 774                                "Port initialize timed out.\n");
 775                }
 776                return status;
 777        }
 778
 779        QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
 780        /* Set the core reset. */
 781        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
 782        if (status)
 783                goto end;
 784        data |= GLOBAL_CFG_RESET;
 785        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 786        if (status)
 787                goto end;
 788
 789        /* Clear the core reset and turn on jumbo for receiver. */
 790        data &= ~GLOBAL_CFG_RESET;        /* Clear core reset. */
 791        data |= GLOBAL_CFG_JUMBO;        /* Turn on jumbo. */
 792        data |= GLOBAL_CFG_TX_STAT_EN;
 793        data |= GLOBAL_CFG_RX_STAT_EN;
 794        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 795        if (status)
 796                goto end;
 797
  798        /* Enable transmitter, and clear its reset. */
 799        status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
 800        if (status)
 801                goto end;
 802        data &= ~TX_CFG_RESET;        /* Clear the TX MAC reset. */
 803        data |= TX_CFG_EN;        /* Enable the transmitter. */
 804        status = ql_write_xgmac_reg(qdev, TX_CFG, data);
 805        if (status)
 806                goto end;
 807
  808        /* Enable receiver and clear its reset. */
 809        status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
 810        if (status)
 811                goto end;
 812        data &= ~RX_CFG_RESET;        /* Clear the RX MAC reset. */
 813        data |= RX_CFG_EN;        /* Enable the receiver. */
 814        status = ql_write_xgmac_reg(qdev, RX_CFG, data);
 815        if (status)
 816                goto end;
 817
 818        /* Turn on jumbo. */
 819        status =
 820            ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
 821        if (status)
 822                goto end;
 823        status =
 824            ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
 825        if (status)
 826                goto end;
 827
 828        /* Signal to the world that the port is enabled.        */
 829        ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
 830end:
 831        ql_sem_unlock(qdev, qdev->xg_sem_mask);
 832        return status;
 833}
 834
 835/* Get the next large buffer. */
 836struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 837{
 838        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
 839        rx_ring->lbq_curr_idx++;
 840        if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
 841                rx_ring->lbq_curr_idx = 0;
 842        rx_ring->lbq_free_cnt++;
 843        return lbq_desc;
 844}
 845
 846/* Get the next small buffer. */
 847struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
 848{
 849        struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
 850        rx_ring->sbq_curr_idx++;
 851        if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
 852                rx_ring->sbq_curr_idx = 0;
 853        rx_ring->sbq_free_cnt++;
 854        return sbq_desc;
 855}
 856
 857/* Update an rx ring index. */
 858static void ql_update_cq(struct rx_ring *rx_ring)
 859{
 860        rx_ring->cnsmr_idx++;
 861        rx_ring->curr_entry++;
 862        if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
 863                rx_ring->cnsmr_idx = 0;
 864                rx_ring->curr_entry = rx_ring->cq_base;
 865        }
 866}
 867
 868static void ql_write_cq_idx(struct rx_ring *rx_ring)
 869{
 870        ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
 871}
 872
 873/* Process (refill) a large buffer queue. */
 874static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 875{
 876        int clean_idx = rx_ring->lbq_clean_idx;
 877        struct bq_desc *lbq_desc;
 878        struct bq_element *bq;
 879        u64 map;
 880        int i;
 881
 882        while (rx_ring->lbq_free_cnt > 16) {
 883                for (i = 0; i < 16; i++) {
 884                        QPRINTK(qdev, RX_STATUS, DEBUG,
 885                                "lbq: try cleaning clean_idx = %d.\n",
 886                                clean_idx);
 887                        lbq_desc = &rx_ring->lbq[clean_idx];
 888                        bq = lbq_desc->bq;
 889                        if (lbq_desc->p.lbq_page == NULL) {
 890                                QPRINTK(qdev, RX_STATUS, DEBUG,
 891                                        "lbq: getting new page for index %d.\n",
 892                                        lbq_desc->index);
 893                                lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
 894                                if (lbq_desc->p.lbq_page == NULL) {
 895                                        QPRINTK(qdev, RX_STATUS, ERR,
 896                                                "Couldn't get a page.\n");
 897                                        return;
 898                                }
 899                                map = pci_map_page(qdev->pdev,
 900                                                   lbq_desc->p.lbq_page,
 901                                                   0, PAGE_SIZE,
 902                                                   PCI_DMA_FROMDEVICE);
 903                                if (pci_dma_mapping_error(qdev->pdev, map)) {
 904                                        QPRINTK(qdev, RX_STATUS, ERR,
 905                                                "PCI mapping failed.\n");
 906                                        return;
 907                                }
 908                                pci_unmap_addr_set(lbq_desc, mapaddr, map);
 909                                pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
 910                                bq->addr_lo =        /*lbq_desc->addr_lo = */
 911                                    cpu_to_le32(map);
 912                                bq->addr_hi =        /*lbq_desc->addr_hi = */
 913                                    cpu_to_le32(map >> 32);
 914                        }
 915                        clean_idx++;
 916                        if (clean_idx == rx_ring->lbq_len)
 917                                clean_idx = 0;
 918                }
 919
 920                rx_ring->lbq_clean_idx = clean_idx;
 921                rx_ring->lbq_prod_idx += 16;
 922                if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
 923                        rx_ring->lbq_prod_idx = 0;
 924                QPRINTK(qdev, RX_STATUS, DEBUG,
 925                        "lbq: updating prod idx = %d.\n",
 926                        rx_ring->lbq_prod_idx);
 927                ql_write_db_reg(rx_ring->lbq_prod_idx,
 928                                rx_ring->lbq_prod_idx_db_reg);
 929                rx_ring->lbq_free_cnt -= 16;
 930        }
 931}
 932
 933/* Process (refill) a small buffer queue. */
 934static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 935{
 936        int clean_idx = rx_ring->sbq_clean_idx;
 937        struct bq_desc *sbq_desc;
 938        struct bq_element *bq;
 939        u64 map;
 940        int i;
 941
 942        while (rx_ring->sbq_free_cnt > 16) {
 943                for (i = 0; i < 16; i++) {
 944                        sbq_desc = &rx_ring->sbq[clean_idx];
 945                        QPRINTK(qdev, RX_STATUS, DEBUG,
 946                                "sbq: try cleaning clean_idx = %d.\n",
 947                                clean_idx);
 948                        bq = sbq_desc->bq;
 949                        if (sbq_desc->p.skb == NULL) {
 950                                QPRINTK(qdev, RX_STATUS, DEBUG,
 951                                        "sbq: getting new skb for index %d.\n",
 952                                        sbq_desc->index);
 953                                sbq_desc->p.skb =
 954                                    netdev_alloc_skb(qdev->ndev,
 955                                                     rx_ring->sbq_buf_size);
 956                                if (sbq_desc->p.skb == NULL) {
 957                                        QPRINTK(qdev, PROBE, ERR,
 958                                                "Couldn't get an skb.\n");
 959                                        rx_ring->sbq_clean_idx = clean_idx;
 960                                        return;
 961                                }
 962                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
 963                                map = pci_map_single(qdev->pdev,
 964                                                     sbq_desc->p.skb->data,
 965                                                     rx_ring->sbq_buf_size /
 966                                                     2, PCI_DMA_FROMDEVICE);
 967                                pci_unmap_addr_set(sbq_desc, mapaddr, map);
 968                                pci_unmap_len_set(sbq_desc, maplen,
 969                                                  rx_ring->sbq_buf_size / 2);
 970                                bq->addr_lo = cpu_to_le32(map);
 971                                bq->addr_hi = cpu_to_le32(map >> 32);
 972                        }
 973
 974                        clean_idx++;
 975                        if (clean_idx == rx_ring->sbq_len)
 976                                clean_idx = 0;
 977                }
 978                rx_ring->sbq_clean_idx = clean_idx;
 979                rx_ring->sbq_prod_idx += 16;
 980                if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
 981                        rx_ring->sbq_prod_idx = 0;
 982                QPRINTK(qdev, RX_STATUS, DEBUG,
 983                        "sbq: updating prod idx = %d.\n",
 984                        rx_ring->sbq_prod_idx);
 985                ql_write_db_reg(rx_ring->sbq_prod_idx,
 986                                rx_ring->sbq_prod_idx_db_reg);
 987
 988                rx_ring->sbq_free_cnt -= 16;
 989        }
 990}
 991
 992static void ql_update_buffer_queues(struct ql_adapter *qdev,
 993                                    struct rx_ring *rx_ring)
 994{
 995        ql_update_sbq(qdev, rx_ring);
 996        ql_update_lbq(qdev, rx_ring);
 997}
 998
 999/* Unmaps tx buffers.  Can be called from send() if a pci mapping
1000 * fails at some stage, or from the interrupt when a tx completes.
1001 */
1002static void ql_unmap_send(struct ql_adapter *qdev,
1003                          struct tx_ring_desc *tx_ring_desc, int mapped)
1004{
1005        int i;
1006        for (i = 0; i < mapped; i++) {
1007                if (i == 0 || (i == 7 && mapped > 7)) {
1008                        /*
1009                         * Unmap the skb->data area, or the
1010                         * external sglist (AKA the Outbound
1011                         * Address List (OAL)).
 1012                         * If it's the zeroeth element, then it's
 1013                         * the skb->data area.  If it's the 7th
 1014                         * element and there are more than 6 frags,
 1015                         * then it's an OAL.
1016                         */
1017                        if (i == 7) {
1018                                QPRINTK(qdev, TX_DONE, DEBUG,
1019                                        "unmapping OAL area.\n");
1020                        }
1021                        pci_unmap_single(qdev->pdev,
1022                                         pci_unmap_addr(&tx_ring_desc->map[i],
1023                                                        mapaddr),
1024                                         pci_unmap_len(&tx_ring_desc->map[i],
1025                                                       maplen),
1026                                         PCI_DMA_TODEVICE);
1027                } else {
1028                        QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1029                                i);
1030                        pci_unmap_page(qdev->pdev,
1031                                       pci_unmap_addr(&tx_ring_desc->map[i],
1032                                                      mapaddr),
1033                                       pci_unmap_len(&tx_ring_desc->map[i],
1034                                                     maplen), PCI_DMA_TODEVICE);
1035                }
1036        }
1037
1038}
1039
1040/* Map the buffers for this transmit.  This will return
1041 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1042 */
1043static int ql_map_send(struct ql_adapter *qdev,
1044                       struct ob_mac_iocb_req *mac_iocb_ptr,
1045                       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1046{
1047        int len = skb_headlen(skb);
1048        dma_addr_t map;
1049        int frag_idx, err, map_idx = 0;
1050        struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1051        int frag_cnt = skb_shinfo(skb)->nr_frags;
1052
1053        if (frag_cnt) {
1054                QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1055        }
1056        /*
1057         * Map the skb buffer first.
1058         */
1059        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1060
1061        err = pci_dma_mapping_error(qdev->pdev, map);
1062        if (err) {
1063                QPRINTK(qdev, TX_QUEUED, ERR,
1064                        "PCI mapping failed with error: %d\n", err);
1065
1066                return NETDEV_TX_BUSY;
1067        }
1068
1069        tbd->len = cpu_to_le32(len);
1070        tbd->addr = cpu_to_le64(map);
1071        pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1072        pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1073        map_idx++;
1074
1075        /*
1076         * This loop fills the remainder of the 8 address descriptors
1077         * in the IOCB.  If there are more than 7 fragments, then the
1078         * eighth address desc will point to an external list (OAL).
1079         * When this happens, the remainder of the frags will be stored
1080         * in this list.
1081         */
1082        for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1083                skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1084                tbd++;
1085                if (frag_idx == 6 && frag_cnt > 7) {
1086                        /* Let's tack on an sglist.
1087                         * Our control block will now
1088                         * look like this:
1089                         * iocb->seg[0] = skb->data
1090                         * iocb->seg[1] = frag[0]
1091                         * iocb->seg[2] = frag[1]
1092                         * iocb->seg[3] = frag[2]
1093                         * iocb->seg[4] = frag[3]
1094                         * iocb->seg[5] = frag[4]
1095                         * iocb->seg[6] = frag[5]
1096                         * iocb->seg[7] = ptr to OAL (external sglist)
1097                         * oal->seg[0] = frag[6]
1098                         * oal->seg[1] = frag[7]
1099                         * oal->seg[2] = frag[8]
1100                         * oal->seg[3] = frag[9]
1101                         * oal->seg[4] = frag[10]
1102                         *      etc...
1103                         */
1104                        /* Tack on the OAL in the eighth segment of IOCB. */
1105                        map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1106                                             sizeof(struct oal),
1107                                             PCI_DMA_TODEVICE);
1108                        err = pci_dma_mapping_error(qdev->pdev, map);
1109                        if (err) {
1110                                QPRINTK(qdev, TX_QUEUED, ERR,
1111                                        "PCI mapping outbound address list with error: %d\n",
1112                                        err);
1113                                goto map_error;
1114                        }
1115
1116                        tbd->addr = cpu_to_le64(map);
1117                        /*
1118                         * The length is the number of fragments
1119                         * that remain to be mapped times the length
1120                         * of our sglist (OAL).
1121                         */
1122                        tbd->len =
1123                            cpu_to_le32((sizeof(struct tx_buf_desc) *
1124                                         (frag_cnt - frag_idx)) | TX_DESC_C);
1125                        pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1126                                           map);
1127                        pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1128                                          sizeof(struct oal));
1129                        tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1130                        map_idx++;
1131                }
1132
1133                map =
1134                    pci_map_page(qdev->pdev, frag->page,
1135                                 frag->page_offset, frag->size,
1136                                 PCI_DMA_TODEVICE);
1137
1138                err = pci_dma_mapping_error(qdev->pdev, map);
1139                if (err) {
1140                        QPRINTK(qdev, TX_QUEUED, ERR,
1141                                "PCI mapping frags failed with error: %d.\n",
1142                                err);
1143                        goto map_error;
1144                }
1145
1146                tbd->addr = cpu_to_le64(map);
1147                tbd->len = cpu_to_le32(frag->size);
1148                pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1149                pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1150                                  frag->size);
1151
1152        }
1153        /* Save the number of segments we've mapped. */
1154        tx_ring_desc->map_cnt = map_idx;
1155        /* Terminate the last segment. */
1156        tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1157        return NETDEV_TX_OK;
1158
1159map_error:
1160        /*
1161         * If the first frag mapping failed, then i will be zero.
1162         * This causes the unmap of the skb->data area.  Otherwise
1163         * we pass in the number of frags that mapped successfully
 1164         * so they can be unmapped.
1165         */
1166        ql_unmap_send(qdev, tx_ring_desc, map_idx);
1167        return NETDEV_TX_BUSY;
1168}
1169
1170void ql_realign_skb(struct sk_buff *skb, int len)
1171{
1172        void *temp_addr = skb->data;
1173
1174        /* Undo the skb_reserve(skb,32) we did before
1175         * giving to hardware, and realign data on
1176         * a 2-byte boundary.
1177         */
1178        skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1179        skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1180        skb_copy_to_linear_data(skb, temp_addr,
1181                (unsigned int)len);
1182}
1183
1184/*
1185 * This function builds an skb for the given inbound
1186 * completion.  It will be rewritten for readability in the near
 1187 * future, but for now it works well.
1188 */
1189static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1190                                       struct rx_ring *rx_ring,
1191                                       struct ib_mac_iocb_rsp *ib_mac_rsp)
1192{
1193        struct bq_desc *lbq_desc;
1194        struct bq_desc *sbq_desc;
1195        struct sk_buff *skb = NULL;
1196        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
 1197        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1198
1199        /*
1200         * Handle the header buffer if present.
1201         */
1202        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1203            ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1204                QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1205                /*
1206                 * Headers fit nicely into a small buffer.
1207                 */
1208                sbq_desc = ql_get_curr_sbuf(rx_ring);
1209                pci_unmap_single(qdev->pdev,
1210                                pci_unmap_addr(sbq_desc, mapaddr),
1211                                pci_unmap_len(sbq_desc, maplen),
1212                                PCI_DMA_FROMDEVICE);
1213                skb = sbq_desc->p.skb;
1214                ql_realign_skb(skb, hdr_len);
1215                skb_put(skb, hdr_len);
1216                sbq_desc->p.skb = NULL;
1217        }
1218
1219        /*
1220         * Handle the data buffer(s).
1221         */
1222        if (unlikely(!length)) {        /* Is there data too? */
1223                QPRINTK(qdev, RX_STATUS, DEBUG,
1224                        "No Data buffer in this packet.\n");
1225                return skb;
1226        }
1227
1228        if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1229                if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1230                        QPRINTK(qdev, RX_STATUS, DEBUG,
1231                                "Headers in small, data of %d bytes in small, combine them.\n", length);
1232                        /*
1233                         * Data is less than small buffer size so it's
1234                         * stuffed in a small buffer.
1235                         * For this case we append the data
1236                         * from the "data" small buffer to the "header" small
1237                         * buffer.
1238                         */
1239                        sbq_desc = ql_get_curr_sbuf(rx_ring);
1240                        pci_dma_sync_single_for_cpu(qdev->pdev,
1241                                                    pci_unmap_addr
1242                                                    (sbq_desc, mapaddr),
1243                                                    pci_unmap_len
1244                                                    (sbq_desc, maplen),
1245                                                    PCI_DMA_FROMDEVICE);
1246                        memcpy(skb_put(skb, length),
1247                               sbq_desc->p.skb->data, length);
1248                        pci_dma_sync_single_for_device(qdev->pdev,
1249                                                       pci_unmap_addr
1250                                                       (sbq_desc,
1251                                                        mapaddr),
1252                                                       pci_unmap_len
1253                                                       (sbq_desc,
1254                                                        maplen),
1255                                                       PCI_DMA_FROMDEVICE);
1256                } else {
1257                        QPRINTK(qdev, RX_STATUS, DEBUG,
1258                                "%d bytes in a single small buffer.\n", length);
1259                        sbq_desc = ql_get_curr_sbuf(rx_ring);
1260                        skb = sbq_desc->p.skb;
1261                        ql_realign_skb(skb, length);
1262                        skb_put(skb, length);
1263                        pci_unmap_single(qdev->pdev,
1264                                         pci_unmap_addr(sbq_desc,
1265                                                        mapaddr),
1266                                         pci_unmap_len(sbq_desc,
1267                                                       maplen),
1268                                         PCI_DMA_FROMDEVICE);
1269                        sbq_desc->p.skb = NULL;
1270                }
1271        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1272                if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1273                        QPRINTK(qdev, RX_STATUS, DEBUG,
1274                                "Header in small, %d bytes in large. Chain large to small!\n", length);
1275                        /*
1276                         * The data is in a single large buffer.  We
1277                         * chain it to the header buffer's skb and let
1278                         * it rip.
1279                         */
1280                        lbq_desc = ql_get_curr_lbuf(rx_ring);
1281                        pci_unmap_page(qdev->pdev,
1282                                       pci_unmap_addr(lbq_desc,
1283                                                      mapaddr),
1284                                       pci_unmap_len(lbq_desc, maplen),
1285                                       PCI_DMA_FROMDEVICE);
1286                        QPRINTK(qdev, RX_STATUS, DEBUG,
1287                                "Chaining page to skb.\n");
1288                        skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1289                                           0, length);
1290                        skb->len += length;
1291                        skb->data_len += length;
1292                        skb->truesize += length;
1293                        lbq_desc->p.lbq_page = NULL;
1294                } else {
1295                        /*
1296                         * The headers and data are in a single large buffer. We
1297                         * copy it to a new skb and let it go. This can happen with
1298                         * jumbo mtu on a non-TCP/UDP frame.
1299                         */
1300                        lbq_desc = ql_get_curr_lbuf(rx_ring);
1301                        skb = netdev_alloc_skb(qdev->ndev, length);
1302                        if (skb == NULL) {
1303                                QPRINTK(qdev, PROBE, DEBUG,
1304                                        "No skb available, drop the packet.\n");
1305                                return NULL;
1306                        }
1307                        skb_reserve(skb, NET_IP_ALIGN);
1308                        QPRINTK(qdev, RX_STATUS, DEBUG,
1309                                "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1310                        skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1311                                           0, length);
1312                        skb->len += length;
1313                        skb->data_len += length;
1314                        skb->truesize += length;
1315                        length -= length;
1316                        lbq_desc->p.lbq_page = NULL;
1317                        __pskb_pull_tail(skb,
1318                                (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1319                                VLAN_ETH_HLEN : ETH_HLEN);
1320                }
1321        } else {
1322                /*
1323                 * The data is in a chain of large buffers
1324                 * pointed to by a small buffer.  We loop
1325                 * through and chain them to our small header
1326                 * buffer's skb.
1327                 * frags:  There are 18 max frags and our small
1328                 *         buffer will hold 32 of them. The thing is,
1329                 *         we'll use 3 max for our 9000 byte jumbo
1330                 *         frames.  If the MTU goes up we could
1331                 *         eventually be in trouble.
1332                 */
1333                int size, offset, i = 0;
1334                struct bq_element *bq, bq_array[8];
1335                sbq_desc = ql_get_curr_sbuf(rx_ring);
1336                pci_unmap_single(qdev->pdev,
1337                                 pci_unmap_addr(sbq_desc, mapaddr),
1338                                 pci_unmap_len(sbq_desc, maplen),
1339                                 PCI_DMA_FROMDEVICE);
1340                if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1341                        /*
1342                         * This is a non-TCP/UDP IP frame, so
1343                         * the headers aren't split into a small
1344                         * buffer.  We have to use the small buffer
1345                         * that contains our sg list as our skb to
1346                         * send upstairs. Copy the sg list here to
1347                         * a local buffer and use it to find the
1348                         * pages to chain.
1349                         */
1350                        QPRINTK(qdev, RX_STATUS, DEBUG,
1351                                "%d bytes of headers & data in chain of large.\n", length);
1352                        skb = sbq_desc->p.skb;
1353                        bq = &bq_array[0];
1354                        memcpy(bq, skb->data, sizeof(bq_array));
1355                        sbq_desc->p.skb = NULL;
1356                        skb_reserve(skb, NET_IP_ALIGN);
1357                } else {
1358                        QPRINTK(qdev, RX_STATUS, DEBUG,
1359                                "Headers in small, %d bytes of data in chain of large.\n", length);
1360                        bq = (struct bq_element *)sbq_desc->p.skb->data;
1361                }
1362                while (length > 0) {
1363                        lbq_desc = ql_get_curr_lbuf(rx_ring);
1364                        if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
1365                                QPRINTK(qdev, RX_STATUS, ERR,
1366                                        "Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
1367                                        lbq_desc->bq->addr_lo, bq->addr_lo);
1368                                return NULL;
1369                        }
1370                        pci_unmap_page(qdev->pdev,
1371                                       pci_unmap_addr(lbq_desc,
1372                                                      mapaddr),
1373                                       pci_unmap_len(lbq_desc,
1374                                                     maplen),
1375                                       PCI_DMA_FROMDEVICE);
1376                        size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1377                        offset = 0;
1378
1379                        QPRINTK(qdev, RX_STATUS, DEBUG,
1380                                "Adding page %d to skb for %d bytes.\n",
1381                                i, size);
1382                        skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1383                                           offset, size);
1384                        skb->len += size;
1385                        skb->data_len += size;
1386                        skb->truesize += size;
1387                        length -= size;
1388                        lbq_desc->p.lbq_page = NULL;
1389                        bq++;
1390                        i++;
1391                }
1392                __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1393                                VLAN_ETH_HLEN : ETH_HLEN);
1394        }
1395        return skb;
1396}
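    /*
     * Summary of the receive-buffer cases handled above, keyed on the
     * completion flags (DS: data in a small buffer, DL: data in a large
     * buffer, HS: headers split into a small buffer), as read from the
     * branches and their debug messages:
     *
     *   DS and HS:   data is memcpy'd from a second small buffer onto
     *                the header skb.
     *   DS, no HS:   the whole frame already sits in one small-buffer skb.
     *   DL and HS:   one large-buffer page is chained to the header skb
     *                as frag 0.
     *   DL, no HS:   a new skb is allocated, the page is chained in, and
     *                the Ethernet/VLAN header is pulled into the linear area.
     *   neither:     the small buffer holds a list of large-buffer addresses
     *                whose pages are chained in one at a time.
     */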
1397
1398/* Process an inbound completion from an rx ring. */
1399static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1400                                   struct rx_ring *rx_ring,
1401                                   struct ib_mac_iocb_rsp *ib_mac_rsp)
1402{
1403        struct net_device *ndev = qdev->ndev;
1404        struct sk_buff *skb = NULL;
1405
1406        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1407
1408        skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1409        if (unlikely(!skb)) {
1410                QPRINTK(qdev, RX_STATUS, DEBUG,
1411                        "No skb available, drop packet.\n");
1412                return;
1413        }
1414
1415        prefetch(skb->data);
1416        skb->dev = ndev;
1417        if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1418                QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1419                        (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1420                        IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1421                        (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1422                        IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1423                        (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1424                        IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1425        }
1426        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1427                QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1428        }
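            /*
             * Checksum decision below, as read from the completion flags:
             *   flags1 IE or TE set            -> hardware reported an error,
             *                                     CHECKSUM_NONE
             *   rx_csum on and flags2 T        -> TCP checksum verified,
             *                                     CHECKSUM_UNNECESSARY
             *   rx_csum on, flags2 U, NU clear -> UDP checksum verified,
             *                                     CHECKSUM_UNNECESSARY
             *   otherwise                      -> ip_summed keeps its default
             */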
1429        if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
1430                QPRINTK(qdev, RX_STATUS, ERR,
1431                        "Bad checksum for this %s packet.\n",
1432                        ((ib_mac_rsp->
1433                          flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
1434                skb->ip_summed = CHECKSUM_NONE;
1435        } else if (qdev->rx_csum &&
1436                   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
1437                    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1438                     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
1439                QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
1440                skb->ip_summed = CHECKSUM_UNNECESSARY;
1441        }
1442        qdev->stats.rx_packets++;
1443        qdev->stats.rx_bytes += skb->len;
1444        skb->protocol = eth_type_trans(skb, ndev);
1445        if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
1446                QPRINTK(qdev, RX_STATUS, DEBUG,
1447                        "Passing a VLAN packet upstream.\n");
1448                vlan_hwaccel_rx(skb, qdev->vlgrp,
1449                                le16_to_cpu(ib_mac_rsp->vlan_id));
1450        } else {
1451                QPRINTK(qdev, RX_STATUS, DEBUG,
1452                        "Passing a normal packet upstream.\n");
1453                netif_rx(skb);
1454        }
1455        ndev->last_rx = jiffies;
1456}
1457
1458/* Process an outbound completion from an rx ring. */
1459static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1460                                   struct ob_mac_iocb_rsp *mac_rsp)
1461{
1462        struct tx_ring *tx_ring;
1463        struct tx_ring_desc *tx_ring_desc;
1464
1465        QL_DUMP_OB_MAC_RSP(mac_rsp);
1466        tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1467        tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1468        ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1469        qdev->stats.tx_bytes += tx_ring_desc->skb->len;
1470        qdev->stats.tx_packets++;
1471        dev_kfree_skb(tx_ring_desc->skb);
1472        tx_ring_desc->skb = NULL;
1473
1474        if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1475                                        OB_MAC_IOCB_RSP_S |
1476                                        OB_MAC_IOCB_RSP_L |
1477                                        OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1478                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1479                        QPRINTK(qdev, TX_DONE, WARNING,
1480                                "Total descriptor length did not match transfer length.\n");
1481                }
1482                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1483                        QPRINTK(qdev, TX_DONE, WARNING,
1484                                "Frame too short to be legal, not sent.\n");
1485                }
1486                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1487                        QPRINTK(qdev, TX_DONE, WARNING,
1488                                "Frame too long, but sent anyway.\n");
1489                }
1490                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1491                        QPRINTK(qdev, TX_DONE, WARNING,
1492                                "PCI backplane error. Frame not sent.\n");
1493                }
1494        }
1495        atomic_inc(&tx_ring->tx_count);
1496}
1497
1498/* Fire up a handler to reset the MPI processor. */
1499void ql_queue_fw_error(struct ql_adapter *qdev)
1500{
1501        netif_stop_queue(qdev->ndev);
1502        netif_carrier_off(qdev->ndev);
1503        queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1504}
1505
1506void ql_queue_asic_error(struct ql_adapter *qdev)
1507{
1508        netif_stop_queue(qdev->ndev);
1509        netif_carrier_off(qdev->ndev);
1510        ql_disable_interrupts(qdev);
1511        queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1512}
1513
1514static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1515                                    struct ib_ae_iocb_rsp *ib_ae_rsp)
1516{
1517        switch (ib_ae_rsp->event) {
1518        case MGMT_ERR_EVENT:
1519                QPRINTK(qdev, RX_ERR, ERR,
1520                        "Management Processor Fatal Error.\n");
1521                ql_queue_fw_error(qdev);
1522                return;
1523
1524        case CAM_LOOKUP_ERR_EVENT:
1525                QPRINTK(qdev, LINK, ERR,
1526                        "Multiple CAM hits occurred during lookup.\n");
1527                QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1528                ql_queue_asic_error(qdev);
1529                return;
1530
1531        case SOFT_ECC_ERROR_EVENT:
1532                QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1533                ql_queue_asic_error(qdev);
1534                break;
1535
1536        case PCI_ERR_ANON_BUF_RD:
1537                QPRINTK(qdev, RX_ERR, ERR,
1538                        "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1539                        ib_ae_rsp->q_id);
1540                ql_queue_asic_error(qdev);
1541                break;
1542
1543        default:
1544                QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1545                        ib_ae_rsp->event);
1546                ql_queue_asic_error(qdev);
1547                break;
1548        }
1549}
1550
1551static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1552{
1553        struct ql_adapter *qdev = rx_ring->qdev;
1554        u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1555        struct ob_mac_iocb_rsp *net_rsp = NULL;
1556        int count = 0;
1557
1558        /* While there are entries in the completion queue. */
1559        while (prod != rx_ring->cnsmr_idx) {
1560
1561                QPRINTK(qdev, RX_STATUS, DEBUG,
1562                        "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1563                        prod, rx_ring->cnsmr_idx);
1564
1565                net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1566                rmb();
1567                switch (net_rsp->opcode) {
1568
1569                case OPCODE_OB_MAC_TSO_IOCB:
1570                case OPCODE_OB_MAC_IOCB:
1571                        ql_process_mac_tx_intr(qdev, net_rsp);
1572                        break;
1573                default:
1574                        QPRINTK(qdev, RX_STATUS, DEBUG,
1575                                "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1576                                net_rsp->opcode);
1577                }
1578                count++;
1579                ql_update_cq(rx_ring);
1580                prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1581        }
1582        ql_write_cq_idx(rx_ring);
1583        if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
1584                struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1585                if (atomic_read(&tx_ring->queue_stopped) &&
1586                    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1587                        /*
1588                         * The queue got stopped because the tx_ring was full.
1589                         * Wake it up, because it's now at least 25% empty.
1590                         */
1591                        netif_wake_queue(qdev->ndev);
1592        }
1593
1594        return count;
1595}
1596
1597static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1598{
1599        struct ql_adapter *qdev = rx_ring->qdev;
1600        u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1601        struct ql_net_rsp_iocb *net_rsp;
1602        int count = 0;
1603
1604        /* While there are entries in the completion queue. */
1605        while (prod != rx_ring->cnsmr_idx) {
1606
1607                QPRINTK(qdev, RX_STATUS, DEBUG,
1608                        "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1609                        prod, rx_ring->cnsmr_idx);
1610
1611                net_rsp = rx_ring->curr_entry;
1612                rmb();
1613                switch (net_rsp->opcode) {
1614                case OPCODE_IB_MAC_IOCB:
1615                        ql_process_mac_rx_intr(qdev, rx_ring,
1616                                               (struct ib_mac_iocb_rsp *)
1617                                               net_rsp);
1618                        break;
1619
1620                case OPCODE_IB_AE_IOCB:
1621                        ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1622                                                net_rsp);
1623                        break;
1624                default:
1625                        {
1626                                QPRINTK(qdev, RX_STATUS, DEBUG,
1627                                        "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1628                                        net_rsp->opcode);
1629                        }
1630                }
1631                count++;
1632                ql_update_cq(rx_ring);
1633                prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1634                if (count == budget)
1635                        break;
1636        }
1637        ql_update_buffer_queues(qdev, rx_ring);
1638        ql_write_cq_idx(rx_ring);
1639        return count;
1640}
1641
1642static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1643{
1644        struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1645        struct ql_adapter *qdev = rx_ring->qdev;
1646        int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1647
1648        QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1649                rx_ring->cq_id);
1650
1651        if (work_done < budget) {
1652                __netif_rx_complete(qdev->ndev, napi);
1653                ql_enable_completion_interrupt(qdev, rx_ring->irq);
1654        }
1655        return work_done;
1656}
1657
1658static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1659{
1660        struct ql_adapter *qdev = netdev_priv(ndev);
1661
1662        qdev->vlgrp = grp;
1663        if (grp) {
1664                QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1665                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1666                           NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1667        } else {
1668                QPRINTK(qdev, IFUP, DEBUG,
1669                        "Turning off VLAN in NIC_RCV_CFG.\n");
1670                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1671        }
1672}
1673
1674static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1675{
1676        struct ql_adapter *qdev = netdev_priv(ndev);
1677        u32 enable_bit = MAC_ADDR_E;
1678
1679        spin_lock(&qdev->hw_lock);
1680        if (ql_set_mac_addr_reg
1681            (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1682                QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1683        }
1684        spin_unlock(&qdev->hw_lock);
1685}
1686
1687static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1688{
1689        struct ql_adapter *qdev = netdev_priv(ndev);
1690        u32 enable_bit = 0;
1691
1692        spin_lock(&qdev->hw_lock);
1693        if (ql_set_mac_addr_reg
1694            (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1695                QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1696        }
1697        spin_unlock(&qdev->hw_lock);
1698
1699}
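    /*
     * Both VLAN helpers above take qdev->hw_lock and release it on every
     * path out of the function.  A minimal sketch of that shape, for
     * illustration only; ql_do_register_op() is a hypothetical stand-in
     * for the guarded register access, not a driver function:
     */
    static int ql_do_register_op(struct ql_adapter *qdev);        /* hypothetical */

    static int ql_hw_locked_op_sketch(struct ql_adapter *qdev)
    {
            int status;

            spin_lock(&qdev->hw_lock);
            status = ql_do_register_op(qdev);
            spin_unlock(&qdev->hw_lock);        /* released before every return */
            return status;
    }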
1700
1701/* Worker thread to process a given rx_ring that is dedicated
1702 * to outbound completions.
1703 */
1704static void ql_tx_clean(struct work_struct *work)
1705{
1706        struct rx_ring *rx_ring =
1707            container_of(work, struct rx_ring, rx_work.work);
1708        ql_clean_outbound_rx_ring(rx_ring);
1709        ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1710
1711}
1712
1713/* Worker thread to process a given rx_ring that is dedicated
1714 * to inbound completions.
1715 */
1716static void ql_rx_clean(struct work_struct *work)
1717{
1718        struct rx_ring *rx_ring =
1719            container_of(work, struct rx_ring, rx_work.work);
1720        ql_clean_inbound_rx_ring(rx_ring, 64);
1721        ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1722}
1723
1724/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1725static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1726{
1727        struct rx_ring *rx_ring = dev_id;
1728        queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1729                              &rx_ring->rx_work, 0);
1730        return IRQ_HANDLED;
1731}
1732
1733/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1734static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1735{
1736        struct rx_ring *rx_ring = dev_id;
1737        struct ql_adapter *qdev = rx_ring->qdev;
1738        netif_rx_schedule(qdev->ndev, &rx_ring->napi);
1739        return IRQ_HANDLED;
1740}
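    /*
     * Note the split between the two MSI-X handlers above: outbound-completion
     * vectors are deferred to the per-CPU workqueue (rx_work, which runs
     * ql_tx_clean() for TX_Q rings), while inbound-completion vectors schedule
     * NAPI polling via ql_napi_poll_msix().
     */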
1741
1742/* This handles a fatal error, MPI activity, and the default
1743 * rx_ring in an MSI-X multiple vector environment.
1744 * In an MSI/Legacy environment it also processes the rest of
1745 * the rx_rings.
1746 */
1747static irqreturn_t qlge_isr(int irq, void *dev_id)
1748{
1749        struct rx_ring *rx_ring = dev_id;
1750        struct ql_adapter *qdev = rx_ring->qdev;
1751        struct intr_context *intr_context = &qdev->intr_context[0];
1752        u32 var;
1753        int i;
1754        int work_done = 0;
1755
1756        spin_lock(&qdev->hw_lock);
1757        if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1758                QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1759                spin_unlock(&qdev->hw_lock);
1760                return IRQ_NONE;
1761        }
1762        spin_unlock(&qdev->hw_lock);
1763
1764        var = ql_disable_completion_interrupt(qdev, intr_context->intr);
1765
1766        /*
1767         * Check for fatal error.
1768         */
1769        if (var & STS_FE) {
1770                ql_queue_asic_error(qdev);
1771                QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1772                var = ql_read32(qdev, ERR_STS);
1773                QPRINTK(qdev, INTR, ERR,
1774                        "Resetting chip. Error Status Register = 0x%x\n", var);
1775                return IRQ_HANDLED;
1776        }
1777
1778        /*
1779         * Check MPI processor activity.
1780         */
1781        if (var & STS_PI) {
1782                /*
1783                 * We've got an async event or mailbox completion.
1784                 * Handle it and clear the source of the interrupt.
1785                 */
1786                QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
1787                ql_disable_completion_interrupt(qdev, intr_context->intr);
1788                queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
1789                                      &qdev->mpi_work, 0);
1790                work_done++;
1791        }
1792
1793        /*
1794         * Check the default queue and wake handler if active.
1795         */
1796        rx_ring = &qdev->rx_ring[0];
1797        if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
1798                QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
1799                ql_disable_completion_interrupt(qdev, intr_context->intr);
1800                queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
1801                                      &rx_ring->rx_work, 0);
1802                work_done++;
1803        }
1804
1805        if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
1806                /*
1807                 * Start the DPC for each active queue.
1808                 */
1809                for (i = 1; i < qdev->rx_ring_count; i++) {
1810                        rx_ring = &qdev->rx_ring[i];
1811                        if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1812                            rx_ring->cnsmr_idx) {
1813                                QPRINTK(qdev, INTR, INFO,
1814                                        "Waking handler for rx_ring[%d].\n", i);
1815                                ql_disable_completion_interrupt(qdev,
1816                                                                intr_context->
1817                                                                intr);
1818                                if (i < qdev->rss_ring_first_cq_id)
1819                                        queue_delayed_work_on(rx_ring->cpu,
1820                                                              qdev->q_workqueue,
1821                                                              &rx_ring->rx_work,
1822                                                              0);
1823                                else
1824                                        netif_rx_schedule(qdev->ndev,
1825                                                          &rx_ring->napi);
1826                                work_done++;
1827                        }
1828                }
1829        }
1830        ql_enable_completion_interrupt(qdev, intr_context->intr);
1831        return work_done ? IRQ_HANDLED : IRQ_NONE;
1832}
1833
1834static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1835{
1836
1837        if (skb_is_gso(skb)) {
1838                int err;
1839                if (skb_header_cloned(skb)) {
1840                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1841                        if (err)
1842                                return err;
1843                }
1844
1845                mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1846                mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
1847                mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1848                mac_iocb_ptr->total_hdrs_len =
1849                    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
1850                mac_iocb_ptr->net_trans_offset =
1851                    cpu_to_le16(skb_network_offset(skb) |
1852                                skb_transport_offset(skb)
1853                                << OB_MAC_TRANSPORT_HDR_SHIFT);
1854                mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1855                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
1856                if (likely(skb->protocol == htons(ETH_P_IP))) {
1857                        struct iphdr *iph = ip_hdr(skb);
1858                        iph->check = 0;
1859                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1860                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1861                                                                 iph->daddr, 0,
1862                                                                 IPPROTO_TCP,
1863                                                                 0);
1864                } else if (skb->protocol == htons(ETH_P_IPV6)) {
1865                        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
1866                        tcp_hdr(skb)->check =
1867                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1868                                             &ipv6_hdr(skb)->daddr,
1869                                             0, IPPROTO_TCP, 0);
1870                }
1871                return 1;
1872        }
1873        return 0;
1874}
1875
1876static void ql_hw_csum_setup(struct sk_buff *skb,
1877                             struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1878{
1879        int len;
1880        struct iphdr *iph = ip_hdr(skb);
1881        u16 *check;
1882        mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1883        mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1884        mac_iocb_ptr->net_trans_offset =
1885                cpu_to_le16(skb_network_offset(skb) |
1886                skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
1887
1888        mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1889        len = (ntohs(iph->tot_len) - (iph->ihl << 2));
1890        if (likely(iph->protocol == IPPROTO_TCP)) {
1891                check = &(tcp_hdr(skb)->check);
1892                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
1893                mac_iocb_ptr->total_hdrs_len =
1894                    cpu_to_le16(skb_transport_offset(skb) +
1895                                (tcp_hdr(skb)->doff << 2));
1896        } else {
1897                check = &(udp_hdr(skb)->check);
1898                mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
1899                mac_iocb_ptr->total_hdrs_len =
1900                    cpu_to_le16(skb_transport_offset(skb) +
1901                                sizeof(struct udphdr));
1902        }
1903        *check = ~csum_tcpudp_magic(iph->saddr,
1904                                    iph->daddr, len, iph->protocol, 0);
1905}
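    /*
     * ql_hw_csum_setup() above seeds the TCP/UDP checksum field with the
     * folded pseudo-header sum so the chip can finish the sum over the
     * transport payload.  A minimal sketch of the same seeding for an
     * already-built IPv4/TCP skb, for illustration only (not driver code):
     */
    static void ql_seed_tcp_csum_sketch(struct sk_buff *skb)
    {
            struct iphdr *iph = ip_hdr(skb);
            int len = ntohs(iph->tot_len) - (iph->ihl << 2); /* TCP hdr + payload */

            /* The hardware sums the transport data and folds this seed in. */
            tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                     len, IPPROTO_TCP, 0);
    }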
1906
1907static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1908{
1909        struct tx_ring_desc *tx_ring_desc;
1910        struct ob_mac_iocb_req *mac_iocb_ptr;
1911        struct ql_adapter *qdev = netdev_priv(ndev);
1912        int tso;
1913        struct tx_ring *tx_ring;
1914        u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
1915
1916        tx_ring = &qdev->tx_ring[tx_ring_idx];
1917
1918        if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1919                QPRINTK(qdev, TX_QUEUED, INFO,
1920                        "%s: shutting down tx queue %d due to lack of resources.\n",
1921                        __func__, tx_ring_idx);
1922                netif_stop_queue(ndev);
1923                atomic_inc(&tx_ring->queue_stopped);
1924                return NETDEV_TX_BUSY;
1925        }
1926        tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
1927        mac_iocb_ptr = tx_ring_desc->queue_entry;
1928        memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
1929        if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
1930                QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
1931                return NETDEV_TX_BUSY;
1932        }
1933
1934        mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
1935        mac_iocb_ptr->tid = tx_ring_desc->index;
1936        /* We use the upper 32-bits to store the tx queue for this IO.
1937         * When we get the completion we can use it to establish the context.
1938         */
1939        mac_iocb_ptr->txq_idx = tx_ring_idx;
1940        tx_ring_desc->skb = skb;
1941
1942        mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
1943
1944        if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
1945                QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
1946                        vlan_tx_tag_get(skb));
1947                mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
1948                mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
1949        }
1950        tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1951        if (tso < 0) {
1952                dev_kfree_skb_any(skb);
1953                return NETDEV_TX_OK;
1954        } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
1955                ql_hw_csum_setup(skb,
1956                                 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1957        }
1958        QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
1959        tx_ring->prod_idx++;
1960        if (tx_ring->prod_idx == tx_ring->wq_len)
1961                tx_ring->prod_idx = 0;
1962        wmb();
1963
1964        ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
1965        ndev->trans_start = jiffies;
1966        QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
1967                tx_ring->prod_idx, skb->len);
1968
1969        atomic_dec(&tx_ring->tx_count);
1970        return NETDEV_TX_OK;
1971}
1972
1973static void ql_free_shadow_space(struct ql_adapter *qdev)
1974{
1975        if (qdev->rx_ring_shadow_reg_area) {
1976                pci_free_consistent(qdev->pdev,
1977                                    PAGE_SIZE,
1978                                    qdev->rx_ring_shadow_reg_area,
1979                                    qdev->rx_ring_shadow_reg_dma);
1980                qdev->rx_ring_shadow_reg_area = NULL;
1981        }
1982        if (qdev->tx_ring_shadow_reg_area) {
1983                pci_free_consistent(qdev->pdev,
1984                                    PAGE_SIZE,
1985                                    qdev->tx_ring_shadow_reg_area,
1986                                    qdev->tx_ring_shadow_reg_dma);
1987                qdev->tx_ring_shadow_reg_area = NULL;
1988        }
1989}
1990
1991static int ql_alloc_shadow_space(struct ql_adapter *qdev)
1992{
1993        qdev->rx_ring_shadow_reg_area =
1994            pci_alloc_consistent(qdev->pdev,
1995                                 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
1996        if (qdev->rx_ring_shadow_reg_area == NULL) {
1997                QPRINTK(qdev, IFUP, ERR,
1998                        "Allocation of RX shadow space failed.\n");
1999                return -ENOMEM;
2000        }
2001        qdev->tx_ring_shadow_reg_area =
2002            pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2003                                 &qdev->tx_ring_shadow_reg_dma);
2004        if (qdev->tx_ring_shadow_reg_area == NULL) {
2005                QPRINTK(qdev, IFUP, ERR,
2006                        "Allocation of TX shadow space failed.\n");
2007                goto err_wqp_sh_area;
2008        }
2009        return 0;
2010
2011err_wqp_sh_area:
2012        pci_free_consistent(qdev->pdev,
2013                            PAGE_SIZE,
2014                            qdev->rx_ring_shadow_reg_area,
2015                            qdev->rx_ring_shadow_reg_dma);
2016        return -ENOMEM;
2017}
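    /*
     * The two PAGE_SIZE shadow areas allocated above are carved up later:
     * ql_start_rx_ring() uses sizeof(u64) * 4 per completion queue and
     * ql_start_tx_ring() uses sizeof(u64) per work queue.
     */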
2018
2019static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2020{
2021        struct tx_ring_desc *tx_ring_desc;
2022        int i;
2023        struct ob_mac_iocb_req *mac_iocb_ptr;
2024
2025        mac_iocb_ptr = tx_ring->wq_base;
2026        tx_ring_desc = tx_ring->q;
2027        for (i = 0; i < tx_ring->wq_len; i++) {
2028                tx_ring_desc->index = i;
2029                tx_ring_desc->skb = NULL;
2030                tx_ring_desc->queue_entry = mac_iocb_ptr;
2031                mac_iocb_ptr++;
2032                tx_ring_desc++;
2033        }
2034        atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2035        atomic_set(&tx_ring->queue_stopped, 0);
2036}
2037
2038static void ql_free_tx_resources(struct ql_adapter *qdev,
2039                                 struct tx_ring *tx_ring)
2040{
2041        if (tx_ring->wq_base) {
2042                pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2043                                    tx_ring->wq_base, tx_ring->wq_base_dma);
2044                tx_ring->wq_base = NULL;
2045        }
2046        kfree(tx_ring->q);
2047        tx_ring->q = NULL;
2048}
2049
2050static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2051                                 struct tx_ring *tx_ring)
2052{
2053        tx_ring->wq_base =
2054            pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2055                                 &tx_ring->wq_base_dma);
2056
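            /*
             * wq_size is assumed to be a power of two here, so a nonzero
             * (wq_base_dma & (wq_size - 1)) means the DMA base is not
             * naturally aligned to the queue size; that is treated as an
             * allocation failure below.
             */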
2057        if ((tx_ring->wq_base == NULL)
2058            || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
2059                QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2060                return -ENOMEM;
2061        }
2062        tx_ring->q =
2063            kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2064        if (tx_ring->q == NULL)
2065                goto err;
2066
2067        return 0;
2068err:
2069        pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2070                            tx_ring->wq_base, tx_ring->wq_base_dma);
2071        return -ENOMEM;
2072}
2073
2074void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2075{
2076        int i;
2077        struct bq_desc *lbq_desc;
2078
2079        for (i = 0; i < rx_ring->lbq_len; i++) {
2080                lbq_desc = &rx_ring->lbq[i];
2081                if (lbq_desc->p.lbq_page) {
2082                        pci_unmap_page(qdev->pdev,
2083                                       pci_unmap_addr(lbq_desc, mapaddr),
2084                                       pci_unmap_len(lbq_desc, maplen),
2085                                       PCI_DMA_FROMDEVICE);
2086
2087                        put_page(lbq_desc->p.lbq_page);
2088                        lbq_desc->p.lbq_page = NULL;
2089                }
2090                lbq_desc->bq->addr_lo = 0;
2091                lbq_desc->bq->addr_hi = 0;
2092        }
2093}
2094
2095/*
2096 * Allocate and map a page for each element of the lbq.
2097 */
2098static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2099                                struct rx_ring *rx_ring)
2100{
2101        int i;
2102        struct bq_desc *lbq_desc;
2103        u64 map;
2104        struct bq_element *bq = rx_ring->lbq_base;
2105
2106        for (i = 0; i < rx_ring->lbq_len; i++) {
2107                lbq_desc = &rx_ring->lbq[i];
2108                memset(lbq_desc, 0, sizeof(*lbq_desc));
2109                lbq_desc->bq = bq;
2110                lbq_desc->index = i;
2111                lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2112                if (unlikely(!lbq_desc->p.lbq_page)) {
2113                        QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2114                        goto mem_error;
2115                } else {
2116                        map = pci_map_page(qdev->pdev,
2117                                           lbq_desc->p.lbq_page,
2118                                           0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2119                        if (pci_dma_mapping_error(qdev->pdev, map)) {
2120                                QPRINTK(qdev, IFUP, ERR,
2121                                        "PCI mapping failed.\n");
2122                                goto mem_error;
2123                        }
2124                        pci_unmap_addr_set(lbq_desc, mapaddr, map);
2125                        pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2126                        bq->addr_lo = cpu_to_le32(map);
2127                        bq->addr_hi = cpu_to_le32(map >> 32);
2128                }
2129                bq++;
2130        }
2131        return 0;
2132mem_error:
2133        ql_free_lbq_buffers(qdev, rx_ring);
2134        return -ENOMEM;
2135}
2136
2137void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2138{
2139        int i;
2140        struct bq_desc *sbq_desc;
2141
2142        for (i = 0; i < rx_ring->sbq_len; i++) {
2143                sbq_desc = &rx_ring->sbq[i];
2144                if (sbq_desc == NULL) {
2145                        QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2146                        return;
2147                }
2148                if (sbq_desc->p.skb) {
2149                        pci_unmap_single(qdev->pdev,
2150                                         pci_unmap_addr(sbq_desc, mapaddr),
2151                                         pci_unmap_len(sbq_desc, maplen),
2152                                         PCI_DMA_FROMDEVICE);
2153                        dev_kfree_skb(sbq_desc->p.skb);
2154                        sbq_desc->p.skb = NULL;
2155                }
2156                if (sbq_desc->bq == NULL) {
2157                        QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
2158                                i);
2159                        return;
2160                }
2161                sbq_desc->bq->addr_lo = 0;
2162                sbq_desc->bq->addr_hi = 0;
2163        }
2164}
2165
2166/* Allocate and map an skb for each element of the sbq. */
2167static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
2168                                struct rx_ring *rx_ring)
2169{
2170        int i;
2171        struct bq_desc *sbq_desc;
2172        struct sk_buff *skb;
2173        u64 map;
2174        struct bq_element *bq = rx_ring->sbq_base;
2175
2176        for (i = 0; i < rx_ring->sbq_len; i++) {
2177                sbq_desc = &rx_ring->sbq[i];
2178                memset(sbq_desc, 0, sizeof(*sbq_desc));
2179                sbq_desc->index = i;
2180                sbq_desc->bq = bq;
2181                skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2182                if (unlikely(!skb)) {
2183                        /* Better luck next round */
2184                        QPRINTK(qdev, IFUP, ERR,
2185                                "small buff alloc failed for %d bytes at index %d.\n",
2186                                rx_ring->sbq_buf_size, i);
2187                        goto mem_err;
2188                }
2189                skb_reserve(skb, QLGE_SB_PAD);
2190                sbq_desc->p.skb = skb;
2191                /*
2192                 * Map only half the buffer, because the
2193                 * other half may get some data copied to it
2194                 * when the completion arrives.
2195                 */
2196                map = pci_map_single(qdev->pdev,
2197                                     skb->data,
2198                                     rx_ring->sbq_buf_size / 2,
2199                                     PCI_DMA_FROMDEVICE);
2200                if (pci_dma_mapping_error(qdev->pdev, map)) {
2201                        QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2202                        goto mem_err;
2203                }
2204                pci_unmap_addr_set(sbq_desc, mapaddr, map);
2205                pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
2206                bq->addr_lo =        /*sbq_desc->addr_lo = */
2207                    cpu_to_le32(map);
2208                bq->addr_hi =        /*sbq_desc->addr_hi = */
2209                    cpu_to_le32(map >> 32);
2210                bq++;
2211        }
2212        return 0;
2213mem_err:
2214        ql_free_sbq_buffers(qdev, rx_ring);
2215        return -ENOMEM;
2216}
2217
2218static void ql_free_rx_resources(struct ql_adapter *qdev,
2219                                 struct rx_ring *rx_ring)
2220{
2221        if (rx_ring->sbq_len)
2222                ql_free_sbq_buffers(qdev, rx_ring);
2223        if (rx_ring->lbq_len)
2224                ql_free_lbq_buffers(qdev, rx_ring);
2225
2226        /* Free the small buffer queue. */
2227        if (rx_ring->sbq_base) {
2228                pci_free_consistent(qdev->pdev,
2229                                    rx_ring->sbq_size,
2230                                    rx_ring->sbq_base, rx_ring->sbq_base_dma);
2231                rx_ring->sbq_base = NULL;
2232        }
2233
2234        /* Free the small buffer queue control blocks. */
2235        kfree(rx_ring->sbq);
2236        rx_ring->sbq = NULL;
2237
2238        /* Free the large buffer queue. */
2239        if (rx_ring->lbq_base) {
2240                pci_free_consistent(qdev->pdev,
2241                                    rx_ring->lbq_size,
2242                                    rx_ring->lbq_base, rx_ring->lbq_base_dma);
2243                rx_ring->lbq_base = NULL;
2244        }
2245
2246        /* Free the large buffer queue control blocks. */
2247        kfree(rx_ring->lbq);
2248        rx_ring->lbq = NULL;
2249
2250        /* Free the rx queue. */
2251        if (rx_ring->cq_base) {
2252                pci_free_consistent(qdev->pdev,
2253                                    rx_ring->cq_size,
2254                                    rx_ring->cq_base, rx_ring->cq_base_dma);
2255                rx_ring->cq_base = NULL;
2256        }
2257}
2258
2259/* Allocate queues and buffers for this completion queue based
2260 * on the values in the parameter structure. */
2261static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2262                                 struct rx_ring *rx_ring)
2263{
2264
2265        /*
2266         * Allocate the completion queue for this rx_ring.
2267         */
2268        rx_ring->cq_base =
2269            pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2270                                 &rx_ring->cq_base_dma);
2271
2272        if (rx_ring->cq_base == NULL) {
2273                QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2274                return -ENOMEM;
2275        }
2276
2277        if (rx_ring->sbq_len) {
2278                /*
2279                 * Allocate small buffer queue.
2280                 */
2281                rx_ring->sbq_base =
2282                    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2283                                         &rx_ring->sbq_base_dma);
2284
2285                if (rx_ring->sbq_base == NULL) {
2286                        QPRINTK(qdev, IFUP, ERR,
2287                                "Small buffer queue allocation failed.\n");
2288                        goto err_mem;
2289                }
2290
2291                /*
2292                 * Allocate small buffer queue control blocks.
2293                 */
2294                rx_ring->sbq =
2295                    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2296                            GFP_KERNEL);
2297                if (rx_ring->sbq == NULL) {
2298                        QPRINTK(qdev, IFUP, ERR,
2299                                "Small buffer queue control block allocation failed.\n");
2300                        goto err_mem;
2301                }
2302
2303                if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
2304                        QPRINTK(qdev, IFUP, ERR,
2305                                "Small buffer allocation failed.\n");
2306                        goto err_mem;
2307                }
2308        }
2309
2310        if (rx_ring->lbq_len) {
2311                /*
2312                 * Allocate large buffer queue.
2313                 */
2314                rx_ring->lbq_base =
2315                    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2316                                         &rx_ring->lbq_base_dma);
2317
2318                if (rx_ring->lbq_base == NULL) {
2319                        QPRINTK(qdev, IFUP, ERR,
2320                                "Large buffer queue allocation failed.\n");
2321                        goto err_mem;
2322                }
2323                /*
2324                 * Allocate large buffer queue control blocks.
2325                 */
2326                rx_ring->lbq =
2327                    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2328                            GFP_KERNEL);
2329                if (rx_ring->lbq == NULL) {
2330                        QPRINTK(qdev, IFUP, ERR,
2331                                "Large buffer queue control block allocation failed.\n");
2332                        goto err_mem;
2333                }
2334
2335                /*
2336                 * Allocate the buffers.
2337                 */
2338                if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
2339                        QPRINTK(qdev, IFUP, ERR,
2340                                "Large buffer allocation failed.\n");
2341                        goto err_mem;
2342                }
2343        }
2344
2345        return 0;
2346
2347err_mem:
2348        ql_free_rx_resources(qdev, rx_ring);
2349        return -ENOMEM;
2350}
2351
2352static void ql_tx_ring_clean(struct ql_adapter *qdev)
2353{
2354        struct tx_ring *tx_ring;
2355        struct tx_ring_desc *tx_ring_desc;
2356        int i, j;
2357
2358        /*
2359         * Loop through all queues and free
2360         * any resources.
2361         */
2362        for (j = 0; j < qdev->tx_ring_count; j++) {
2363                tx_ring = &qdev->tx_ring[j];
2364                for (i = 0; i < tx_ring->wq_len; i++) {
2365                        tx_ring_desc = &tx_ring->q[i];
2366                        if (tx_ring_desc && tx_ring_desc->skb) {
2367                                QPRINTK(qdev, IFDOWN, ERR,
2368                                        "Freeing lost SKB %p, from queue %d, index %d.\n",
2369                                        tx_ring_desc->skb, j,
2370                                        tx_ring_desc->index);
2371                                ql_unmap_send(qdev, tx_ring_desc,
2372                                              tx_ring_desc->map_cnt);
2373                                dev_kfree_skb(tx_ring_desc->skb);
2374                                tx_ring_desc->skb = NULL;
2375                        }
2376                }
2377        }
2378}
2379
2380static void ql_free_ring_cb(struct ql_adapter *qdev)
2381{
2382        kfree(qdev->ring_mem);
2383}
2384
2385static int ql_alloc_ring_cb(struct ql_adapter *qdev)
2386{
2387        /* Allocate space for tx/rx ring control blocks. */
2388        qdev->ring_mem_size =
2389            (qdev->tx_ring_count * sizeof(struct tx_ring)) +
2390            (qdev->rx_ring_count * sizeof(struct rx_ring));
2391        qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
2392        if (qdev->ring_mem == NULL) {
2393                return -ENOMEM;
2394        } else {
2395                qdev->rx_ring = qdev->ring_mem;
2396                qdev->tx_ring = qdev->ring_mem +
2397                    (qdev->rx_ring_count * sizeof(struct rx_ring));
2398        }
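            /*
             * Resulting layout of the single allocation:
             *   ring_mem: [ rx_ring[0] .. rx_ring[rx_ring_count - 1] |
             *               tx_ring[0] .. tx_ring[tx_ring_count - 1] ]
             * qdev->rx_ring points at the start, qdev->tx_ring immediately
             * after the rx_ring array.
             */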
2399        return 0;
2400}
2401
2402static void ql_free_mem_resources(struct ql_adapter *qdev)
2403{
2404        int i;
2405
2406        for (i = 0; i < qdev->tx_ring_count; i++)
2407                ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2408        for (i = 0; i < qdev->rx_ring_count; i++)
2409                ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2410        ql_free_shadow_space(qdev);
2411}
2412
2413static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2414{
2415        int i;
2416
2417        /* Allocate space for our shadow registers and such. */
2418        if (ql_alloc_shadow_space(qdev))
2419                return -ENOMEM;
2420
2421        for (i = 0; i < qdev->rx_ring_count; i++) {
2422                if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2423                        QPRINTK(qdev, IFUP, ERR,
2424                                "RX resource allocation failed.\n");
2425                        goto err_mem;
2426                }
2427        }
2428        /* Allocate tx queue resources */
2429        for (i = 0; i < qdev->tx_ring_count; i++) {
2430                if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2431                        QPRINTK(qdev, IFUP, ERR,
2432                                "TX resource allocation failed.\n");
2433                        goto err_mem;
2434                }
2435        }
2436        return 0;
2437
2438err_mem:
2439        ql_free_mem_resources(qdev);
2440        return -ENOMEM;
2441}
2442
2443/* Set up the rx ring control block and pass it to the chip.
2444 * The control block is defined as
2445 * "Completion Queue Initialization Control Block", or cqicb.
2446 */
2447static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2448{
2449        struct cqicb *cqicb = &rx_ring->cqicb;
2450        void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2451            (rx_ring->cq_id * sizeof(u64) * 4);
2452        u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2453            (rx_ring->cq_id * sizeof(u64) * 4);
2454        void __iomem *doorbell_area =
2455            qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2456        int err = 0;
2457        u16 bq_len;
2458
2459        /* Set up the shadow registers for this ring. */
2460        rx_ring->prod_idx_sh_reg = shadow_reg;
2461        rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2462        shadow_reg += sizeof(u64);
2463        shadow_reg_dma += sizeof(u64);
2464        rx_ring->lbq_base_indirect = shadow_reg;
2465        rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2466        shadow_reg += sizeof(u64);
2467        shadow_reg_dma += sizeof(u64);
2468        rx_ring->sbq_base_indirect = shadow_reg;
2469        rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
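            /*
             * Per-CQ shadow area carved up above (sizeof(u64) * 4 bytes per
             * cq_id; the fourth slot is not used in this function):
             *   [0] prod_idx_sh_reg   - the chip posts the producer index here
             *   [1] lbq_base_indirect - holds lbq_base_dma for the CQICB load
             *   [2] sbq_base_indirect - holds sbq_base_dma for the CQICB load
             */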
2470
2471        /* PCI doorbell mem area + 0x00 for consumer index register */
2472        rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area;
2473        rx_ring->cnsmr_idx = 0;
2474        rx_ring->curr_entry = rx_ring->cq_base;
2475
2476        /* PCI doorbell mem area + 0x04 for valid register */
2477        rx_ring->valid_db_reg = doorbell_area + 0x04;
2478
2479        /* PCI doorbell mem area + 0x18 for large buffer consumer */
2480        rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18);
2481
2482        /* PCI doorbell mem area + 0x1c */
2483        rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c);
2484
2485        memset((void *)cqicb, 0, sizeof(struct cqicb));
2486        cqicb->msix_vect = rx_ring->irq;
2487
2488        cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);
2489
2490        cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
2491        cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
2492
2493        cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
2494        cqicb->prod_idx_addr_hi =
2495            cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);
2496
2497        /*
2498         * Set up the control block load flags.
2499         */
2500        cqicb->flags = FLAGS_LC |        /* Load queue base address */
2501            FLAGS_LV |                /* Load MSI-X vector */
2502            FLAGS_LI;                /* Load irq delay values */
2503        if (rx_ring->lbq_len) {
2504                cqicb->flags |= FLAGS_LL;        /* Load lbq values */
2505                *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
2506                cqicb->lbq_addr_lo =
2507                    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
2508                cqicb->lbq_addr_hi =
2509                    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
2510                cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size);
2511                bq_len = (u16) rx_ring->lbq_len;
2512                cqicb->lbq_len = cpu_to_le16(bq_len);
2513                rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
2514                rx_ring->lbq_curr_idx = 0;
2515                rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
2516                rx_ring->lbq_free_cnt = 16;
2517        }
2518        if (rx_ring->sbq_len) {
2519                cqicb->flags |= FLAGS_LS;        /* Load sbq values */
2520                *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
2521                cqicb->sbq_addr_lo =
2522                    cpu_to_le32(rx_ring->sbq_base_indirect_dma);
2523                cqicb->sbq_addr_hi =
2524                    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
2525                cqicb->sbq_buf_size =
2526                    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
2527                bq_len = (u16) rx_ring->sbq_len;
2528                cqicb->sbq_len = cpu_to_le16(bq_len);
2529                rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
2530                rx_ring->sbq_curr_idx = 0;
2531                rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
2532                rx_ring->sbq_free_cnt = 16;
2533        }
2534        switch (rx_ring->type) {
2535        case TX_Q:
2536                /* If there's only one interrupt, then we use
2537                 * worker threads to process the outbound
2538                 * completion handling rx_rings. We do this so
2539                 * they can be run on multiple CPUs. There is
2540                 * room to play with this more where we would only
2541                 * run in a worker if there are more than x number
2542                 * of outbound completions on the queue and more
2543                 * than one queue active.  Some threshold that
2544                 * would indicate a benefit in spite of the cost
2545                 * of a context switch.
2546                 * If there's more than one interrupt, then the
2547                 * outbound completions are processed in the ISR.
2548                 */
2549                if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2550                        INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2551                else {
2552                        /* With all debug warnings on we see a WARN_ON message
2553                         * when we free the skb in the interrupt context.
2554                         */
2555                        INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2556                }
2557                cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2558                cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2559                break;
2560        case DEFAULT_Q:
2561                INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
2562                cqicb->irq_delay = 0;
2563                cqicb->pkt_delay = 0;
2564                break;
2565        case RX_Q:
2566                /* Inbound completion handling rx_rings run in
2567                 * separate NAPI contexts.
2568                 */
2569                netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2570                               64);
2571                cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2572                cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2573                break;
2574        default:
2575                QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2576                        rx_ring->type);
2577        }
2578        QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
2579        err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2580                           CFG_LCQ, rx_ring->cq_id);
2581        if (err) {
2582                QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2583                return err;
2584        }
2585        QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
2586        /*
2587         * Advance the producer index for the buffer queues.
2588         */
2589        wmb();
2590        if (rx_ring->lbq_len)
2591                ql_write_db_reg(rx_ring->lbq_prod_idx,
2592                                rx_ring->lbq_prod_idx_db_reg);
2593        if (rx_ring->sbq_len)
2594                ql_write_db_reg(rx_ring->sbq_prod_idx,
2595                                rx_ring->sbq_prod_idx_db_reg);
2596        return err;
2597}
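
A note on the arithmetic in ql_start_rx_ring() above: the small-buffer size loaded into the CQICB is not the raw sbq_buf_size but ((sbq_buf_size / 2) + 8) rounded down to a multiple of 8, and both buffer queues start with their producer index seeded 16 entries below the ring length, with 16 slots counted as free. A minimal standalone sketch of that arithmetic; the 1024-byte small buffer and 512-entry large-buffer queue below are illustrative assumptions, not values taken from qlge.h:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Illustrative values only; the real sizes come from qlge.h. */
        uint32_t sbq_buf_size = 1024;   /* stands in for SMALL_BUFFER_SIZE * 2 */
        uint32_t lbq_len = 512;         /* stands in for NUM_LARGE_BUFFERS */

        /* CQICB encoding used in ql_start_rx_ring(). */
        uint32_t cqicb_sbq_buf_size = ((sbq_buf_size / 2) + 8) & 0xfffffff8;

        /* Producer index seeding: 16 slots are held back as free. */
        uint32_t lbq_prod_idx = lbq_len - 16;

        printf("cqicb sbq_buf_size = %u\n", (unsigned)cqicb_sbq_buf_size);
        printf("initial lbq_prod_idx = %u, lbq_free_cnt = 16\n",
               (unsigned)lbq_prod_idx);
        return 0;
}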
2598
2599static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2600{
2601        struct wqicb *wqicb = (struct wqicb *)tx_ring;
2602        void __iomem *doorbell_area =
2603            qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2604        void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2605            (tx_ring->wq_id * sizeof(u64));
2606        u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2607            (tx_ring->wq_id * sizeof(u64));
2608        int err = 0;
2609
2610        /*
2611         * Assign doorbell registers for this tx_ring.
2612         */
2613        /* TX PCI doorbell mem area for tx producer index */
2614        tx_ring->prod_idx_db_reg = (u32 *) doorbell_area;
2615        tx_ring->prod_idx = 0;
2616        /* TX PCI doorbell mem area + 0x04 */
2617        tx_ring->valid_db_reg = doorbell_area + 0x04;
2618
2619        /*
2620         * Assign shadow registers for this tx_ring.
2621         */
2622        tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2623        tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2624
2625        wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2626        wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2627                                   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2628        wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2629        wqicb->rid = 0;
2630        wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
2631        wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
2632
2633        wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
2634        wqicb->cnsmr_idx_addr_hi =
2635            cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
2636
2637        ql_init_tx_ring(qdev, tx_ring);
2638
2639        err = ql_write_cfg(qdev, wqicb, sizeof(struct wqicb), CFG_LRQ,
2640                           (u16) tx_ring->wq_id);
2641        if (err) {
2642                QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2643                return err;
2644        }
2645        QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
2646        return err;
2647}
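
One detail worth calling out around ql_write_cfg() and the memset() calls in this file: the length argument needs to be the structure size (sizeof(struct cqicb) and sizeof(struct wqicb) above, sizeof(struct ricb) and friends below), never the size of a pointer, otherwise only 4 or 8 bytes get written or cleared. A minimal standalone sketch of the difference, using a stand-in structure whose field layout is purely illustrative:

#include <stdio.h>

struct wqicb_example {          /* stand-in; the real struct wqicb lives in qlge.h */
        unsigned short len;
        unsigned short flags;
        unsigned int addr_lo;
        unsigned int addr_hi;
};

int main(void)
{
        struct wqicb_example cb;
        struct wqicb_example *wqicb = &cb;

        printf("sizeof(wqicb)  = %zu bytes (just the pointer)\n", sizeof(wqicb));
        printf("sizeof(*wqicb) = %zu bytes (the whole control block)\n",
               sizeof(*wqicb));
        return 0;
}

On a 64-bit kernel the pointer form would download or clear only 8 bytes of the control block, which is why the explicit structure size matters here.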
2648
2649static void ql_disable_msix(struct ql_adapter *qdev)
2650{
2651        if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2652                pci_disable_msix(qdev->pdev);
2653                clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2654                kfree(qdev->msi_x_entry);
2655                qdev->msi_x_entry = NULL;
2656        } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2657                pci_disable_msi(qdev->pdev);
2658                clear_bit(QL_MSI_ENABLED, &qdev->flags);
2659        }
2660}
2661
2662static void ql_enable_msix(struct ql_adapter *qdev)
2663{
2664        int i;
2665
2666        qdev->intr_count = 1;
2667        /* Get the MSIX vectors. */
2668        if (irq_type == MSIX_IRQ) {
2669                /* Try to alloc space for the msix struct,
2670                 * if it fails then go to MSI/legacy.
2671                 */
2672                qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
2673                                            sizeof(struct msix_entry),
2674                                            GFP_KERNEL);
2675                if (!qdev->msi_x_entry) {
2676                        irq_type = MSI_IRQ;
2677                        goto msi;
2678                }
2679
2680                for (i = 0; i < qdev->rx_ring_count; i++)
2681                        qdev->msi_x_entry[i].entry = i;
2682
2683                if (!pci_enable_msix
2684                    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2685                        set_bit(QL_MSIX_ENABLED, &qdev->flags);
2686                        qdev->intr_count = qdev->rx_ring_count;
2687                        QPRINTK(qdev, IFUP, INFO,
2688                                "MSI-X Enabled, got %d vectors.\n",
2689                                qdev->intr_count);
2690                        return;
2691                } else {
2692                        kfree(qdev->msi_x_entry);
2693                        qdev->msi_x_entry = NULL;
2694                        QPRINTK(qdev, IFUP, WARNING,
2695                                "MSI-X Enable failed, trying MSI.\n");
2696                        irq_type = MSI_IRQ;
2697                }
2698        }
2699msi:
2700        if (irq_type == MSI_IRQ) {
2701                if (!pci_enable_msi(qdev->pdev)) {
2702                        set_bit(QL_MSI_ENABLED, &qdev->flags);
2703                        QPRINTK(qdev, IFUP, INFO,
2704                                "Running with MSI interrupts.\n");
2705                        return;
2706                }
2707        }
2708        irq_type = LEG_IRQ;
2709        QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2710}
2711
2712/*
2713 * Here we build the intr_context structures based on
2714 * our rx_ring count and intr vector count.
2715 * The intr_context structure is used to hook each vector
2716 * to possibly different handlers.
2717 */
2718static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2719{
2720        int i = 0;
2721        struct intr_context *intr_context = &qdev->intr_context[0];
2722
2723        ql_enable_msix(qdev);
2724
2725        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2726                /* Each rx_ring has its
2727                 * own intr_context since we have separate
2728                 * vectors for each queue.
2729                 * This is only true when MSI-X is enabled.
2730                 */
2731                for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2732                        qdev->rx_ring[i].irq = i;
2733                        intr_context->intr = i;
2734                        intr_context->qdev = qdev;
2735                        /*
2736                         * We set up each vector's enable/disable/read bits so
2737                         * there's no bit/mask calculations in the critical path.
2738                         */
2739                        intr_context->intr_en_mask =
2740                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2741                            INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2742                            | i;
2743                        intr_context->intr_dis_mask =
2744                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2745                            INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2746                            INTR_EN_IHD | i;
2747                        intr_context->intr_read_mask =
2748                            INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2749                            INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2750                            i;
2751
2752                        if (i == 0) {
2753                                /*
2754                                 * Default queue handles bcast/mcast plus
2755                                 * async events.  Needs buffers.
2756                                 */
2757                                intr_context->handler = qlge_isr;
2758                                sprintf(intr_context->name, "%s-default-queue",
2759                                        qdev->ndev->name);
2760                        } else if (i < qdev->rss_ring_first_cq_id) {
2761                                /*
2762                                 * Outbound queue is for outbound completions only.
2763                                 */
2764                                intr_context->handler = qlge_msix_tx_isr;
2765                                sprintf(intr_context->name, "%s-txq-%d",
2766                                        qdev->ndev->name, i);
2767                        } else {
2768                                /*
2769                                 * Inbound queues handle unicast frames only.
2770                                 */
2771                                intr_context->handler = qlge_msix_rx_isr;
2772                                sprintf(intr_context->name, "%s-rxq-%d",
2773                                        qdev->ndev->name, i);
2774                        }
2775                }
2776        } else {
2777                /*
2778                 * All rx_rings use the same intr_context since
2779                 * there is only one vector.
2780                 */
2781                intr_context->intr = 0;
2782                intr_context->qdev = qdev;
2783                /*
2784                 * We set up each vector's enable/disable/read bits so
2785                 * there's no bit/mask calculations in the critical path.
2786                 */
2787                intr_context->intr_en_mask =
2788                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2789                intr_context->intr_dis_mask =
2790                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2791                    INTR_EN_TYPE_DISABLE;
2792                intr_context->intr_read_mask =
2793                    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2794                /*
2795                 * Single interrupt means one handler for all rings.
2796                 */
2797                intr_context->handler = qlge_isr;
2798                sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2799                for (i = 0; i < qdev->rx_ring_count; i++)
2800                        qdev->rx_ring[i].irq = 0;
2801        }
2802}
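
To make the vector assignment in ql_resolve_queues_to_irqs() concrete: with MSI-X enabled, vector 0 always carries the default-queue handler, vectors below rss_ring_first_cq_id carry the outbound (TX completion) handler, and the remaining vectors carry the inbound RSS handler. A small standalone sketch of that mapping for an assumed layout of five vectors with rss_ring_first_cq_id = 3; both numbers are illustrative:

#include <stdio.h>

int main(void)
{
        /* Illustrative values: 5 vectors, inbound (RSS) queues start at cq_id 3. */
        int intr_count = 5;
        int rss_ring_first_cq_id = 3;
        int i;

        for (i = 0; i < intr_count; i++) {
                if (i == 0)
                        printf("vector %d -> qlge_isr         (default queue)\n", i);
                else if (i < rss_ring_first_cq_id)
                        printf("vector %d -> qlge_msix_tx_isr (outbound completions)\n", i);
                else
                        printf("vector %d -> qlge_msix_rx_isr (inbound RSS queue)\n", i);
        }
        return 0;
}

With an assumed interface name of eth0, the request_irq() names for this layout would come out as eth0-default-queue, eth0-txq-1, eth0-txq-2, eth0-rxq-3 and eth0-rxq-4.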
2803
2804static void ql_free_irq(struct ql_adapter *qdev)
2805{
2806        int i;
2807        struct intr_context *intr_context = &qdev->intr_context[0];
2808
2809        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2810                if (intr_context->hooked) {
2811                        if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2812                                free_irq(qdev->msi_x_entry[i].vector,
2813                                         &qdev->rx_ring[i]);
2814                                QPRINTK(qdev, IFDOWN, ERR,
2815                                        "freeing msix interrupt %d.\n", i);
2816                        } else {
2817                                free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2818                                QPRINTK(qdev, IFDOWN, ERR,
2819                                        "freeing msi interrupt %d.\n", i);
2820                        }
2821                }
2822        }
2823        ql_disable_msix(qdev);
2824}
2825
2826static int ql_request_irq(struct ql_adapter *qdev)
2827{
2828        int i;
2829        int status = 0;
2830        struct pci_dev *pdev = qdev->pdev;
2831        struct intr_context *intr_context = &qdev->intr_context[0];
2832
2833        ql_resolve_queues_to_irqs(qdev);
2834
2835        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2836                atomic_set(&intr_context->irq_cnt, 0);
2837                if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2838                        status = request_irq(qdev->msi_x_entry[i].vector,
2839                                             intr_context->handler,
2840                                             0,
2841                                             intr_context->name,
2842                                             &qdev->rx_ring[i]);
2843                        if (status) {
2844                                QPRINTK(qdev, IFUP, ERR,
2845                                        "Failed request for MSIX interrupt %d.\n",
2846                                        i);
2847                                goto err_irq;
2848                        } else {
2849                                QPRINTK(qdev, IFUP, INFO,
2850                                        "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2851                                        i,
2852                                        qdev->rx_ring[i].type ==
2853                                        DEFAULT_Q ? "DEFAULT_Q" : "",
2854                                        qdev->rx_ring[i].type ==
2855                                        TX_Q ? "TX_Q" : "",
2856                                        qdev->rx_ring[i].type ==
2857                                        RX_Q ? "RX_Q" : "", intr_context->name);
2858                        }
2859                } else {
2860                        QPRINTK(qdev, IFUP, DEBUG,
2861                                "trying msi or legacy interrupts.\n");
2862                        QPRINTK(qdev, IFUP, DEBUG,
2863                                "%s: irq = %d.\n", __func__, pdev->irq);
2864                        QPRINTK(qdev, IFUP, DEBUG,
2865                                "%s: context->name = %s.\n", __func__,
2866                               intr_context->name);
2867                        QPRINTK(qdev, IFUP, DEBUG,
2868                                "%s: dev_id = 0x%p.\n", __func__,
2869                               &qdev->rx_ring[0]);
2870                        status =
2871                            request_irq(pdev->irq, qlge_isr,
2872                                        test_bit(QL_MSI_ENABLED,
2873                                                 &qdev->
2874                                                 flags) ? 0 : IRQF_SHARED,
2875                                        intr_context->name, &qdev->rx_ring[0]);
2876                        if (status)
2877                                goto err_irq;
2878
2879                        QPRINTK(qdev, IFUP, ERR,
2880                                "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2881                                i,
2882                                qdev->rx_ring[0].type ==
2883                                DEFAULT_Q ? "DEFAULT_Q" : "",
2884                                qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
2885                                qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
2886                                intr_context->name);
2887                }
2888                intr_context->hooked = 1;
2889        }
2890        return status;
2891err_irq:
2892        QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
2893        ql_free_irq(qdev);
2894        return status;
2895}
2896
2897static int ql_start_rss(struct ql_adapter *qdev)
2898{
2899        struct ricb *ricb = &qdev->ricb;
2900        int status = 0;
2901        int i;
2902        u8 *hash_id = (u8 *) ricb->hash_cq_id;
2903
2904        memset((void *)ricb, 0, sizeof(struct ricb));
2905
2906        ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
2907        ricb->flags =
2908            (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
2909             RSS_RT6);
2910        ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
2911
2912        /*
2913         * Fill out the Indirection Table.
2914         */
2915        for (i = 0; i < 32; i++)
2916                hash_id[i] = i & 1;
2917
2918        /*
2919         * Random values for the IPv6 and IPv4 Hash Keys.
2920         */
2921        get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
2922        get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
2923
2924        QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
2925
2926        status = ql_write_cfg(qdev, ricb, sizeof(struct ricb), CFG_LR, 0);
2927        if (status) {
2928                QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
2929                return status;
2930        }
2931        QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
2932        return status;
2933}
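
The indirection table fill in ql_start_rss() above (hash_id[i] = i & 1) alternates the 32 entries between 0 and 1, which appears to steer all hashed traffic onto just the first two inbound completion queues relative to base_cq, regardless of rss_ring_count. A tiny standalone sketch that prints the resulting table:

#include <stdio.h>

int main(void)
{
        unsigned char hash_id[32];
        int i;

        /* Mirrors ql_start_rss(): entries alternate 0, 1, 0, 1, ... */
        for (i = 0; i < 32; i++)
                hash_id[i] = i & 1;

        for (i = 0; i < 32; i++)
                printf("%u%s", (unsigned)hash_id[i], (i % 16 == 15) ? "\n" : " ");
        return 0;
}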
2934
2935/* Initialize the frame-to-queue routing. */
2936static int ql_route_initialize(struct ql_adapter *qdev)
2937{
2938        int status = 0;
2939        int i;
2940
2941        /* Clear all the entries in the routing table. */
2942        for (i = 0; i < 16; i++) {
2943                status = ql_set_routing_reg(qdev, i, 0, 0);
2944                if (status) {
2945                        QPRINTK(qdev, IFUP, ERR,
2946                                "Failed to init routing register for CAM packets.\n");
2947                        return status;
2948                }
2949        }
2950
2951        status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
2952        if (status) {
2953                QPRINTK(qdev, IFUP, ERR,
2954                        "Failed to init routing register for error packets.\n");
2955                return status;
2956        }
2957        status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
2958        if (status) {
2959                QPRINTK(qdev, IFUP, ERR,
2960                        "Failed to init routing register for broadcast packets.\n");
2961                return status;
2962        }
2963        /* If we have more than one inbound queue, then turn on RSS in the
2964         * routing block.
2965         */
2966        if (qdev->rss_ring_count > 1) {
2967                status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
2968                                        RT_IDX_RSS_MATCH, 1);
2969                if (status) {
2970                        QPRINTK(qdev, IFUP, ERR,
2971                                "Failed to init routing register for MATCH RSS packets.\n");
2972                        return status;
2973                }
2974        }
2975
2976        status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
2977                                    RT_IDX_CAM_HIT, 1);
2978        if (status) {
2979                QPRINTK(qdev, IFUP, ERR,
2980                        "Failed to init routing register for CAM packets.\n");
2981                return status;
2982        }
2983        return status;
2984}
2985
2986static int ql_adapter_initialize(struct ql_adapter *qdev)
2987{
2988        u32 value, mask;
2989        int i;
2990        int status = 0;
2991
2992        /*
2993         * Set up the System register to halt on errors.
2994         */
2995        value = SYS_EFE | SYS_FAE;
2996        mask = value << 16;
2997        ql_write32(qdev, SYS, mask | value);
2998
2999        /* Set the default queue. */
3000        value = NIC_RCV_CFG_DFQ;
3001        mask = NIC_RCV_CFG_DFQ_MASK;
3002        ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3003
3004        /* Set the MPI interrupt to enabled. */
3005        ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3006
3007        /* Enable the function, set pagesize, enable error checking. */
3008        value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3009            FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3010
3011        /* Set/clear header splitting. */
3012        mask = FSC_VM_PAGESIZE_MASK |
3013            FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3014        ql_write32(qdev, FSC, mask | value);
3015
3016        ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3017                min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3018
3019        /* Start up the rx queues. */
3020        for (i = 0; i < qdev->rx_ring_count; i++) {
3021                status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3022                if (status) {
3023                        QPRINTK(qdev, IFUP, ERR,
3024                                "Failed to start rx ring[%d].\n", i);
3025                        return status;
3026                }
3027        }
3028
3029        /* If there is more than one inbound completion queue
3030         * then download a RICB to configure RSS.
3031         */
3032        if (qdev->rss_ring_count > 1) {
3033                status = ql_start_rss(qdev);
3034                if (status) {
3035                        QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3036                        return status;
3037                }
3038        }
3039
3040        /* Start up the tx queues. */
3041        for (i = 0; i < qdev->tx_ring_count; i++) {
3042                status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3043                if (status) {
3044                        QPRINTK(qdev, IFUP, ERR,
3045                                "Failed to start tx ring[%d].\n", i);
3046                        return status;
3047                }
3048        }
3049
3050        status = ql_port_initialize(qdev);
3051        if (status) {
3052                QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3053                return status;
3054        }
3055
3056        status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
3057                                     MAC_ADDR_TYPE_CAM_MAC, qdev->func);
3058        if (status) {
3059                QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3060                return status;
3061        }
3062
3063        status = ql_route_initialize(qdev);
3064        if (status) {
3065                QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3066                return status;
3067        }
3068
3069        /* Start NAPI for the RSS queues. */
3070        for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
3071                QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
3072                        i);
3073                napi_enable(&qdev->rx_ring[i].napi);
3074        }
3075
3076        return status;
3077}
3078
3079/* Issue soft reset to chip. */
3080static int ql_adapter_reset(struct ql_adapter *qdev)
3081{
3082        u32 value;
3083        int max_wait_time;
3084        int status = 0;
3085        int resetCnt = 0;
3086
3087#define MAX_RESET_CNT   1
3088issueReset:
3089        resetCnt++;
3090        QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
3091        ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3092        /* Wait for reset to complete. */
3093        max_wait_time = 3;
3094        QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
3095                max_wait_time);
3096        do {
3097                value = ql_read32(qdev, RST_FO);
3098                if ((value & RST_FO_FR) == 0)
3099                        break;
3100
3101                ssleep(1);
3102        } while ((--max_wait_time));
3103        if (value & RST_FO_FR) {
3104                QPRINTK(qdev, IFDOWN, ERR,
3105                        "Stuck in SoftReset:  FSC_SR:0x%08x\n", value);
3106                if (resetCnt < MAX_RESET_CNT)
3107                        goto issueReset;
3108        }
3109        if (max_wait_time == 0) {
3110                status = -ETIMEDOUT;
3111                QPRINTK(qdev, IFDOWN, ERR,
3112                        "ETIMEDOUT!!! errored out of resetting the chip!\n");
3113        }
3114
3115        return status;
3116}
3117
3118static void ql_display_dev_info(struct net_device *ndev)
3119{
3120        struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3121
3122        QPRINTK(qdev, PROBE, INFO,
3123                "Function #%d, NIC Roll %d, NIC Rev = %d, "
3124                "XG Roll = %d, XG Rev = %d.\n",
3125                qdev->func,
3126                qdev->chip_rev_id & 0x0000000f,
3127                qdev->chip_rev_id >> 4 & 0x0000000f,
3128                qdev->chip_rev_id >> 8 & 0x0000000f,
3129                qdev->chip_rev_id >> 12 & 0x0000000f);
3130        QPRINTK(qdev, PROBE, INFO,
3131                "MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
3132                ndev->dev_addr[0], ndev->dev_addr[1],
3133                ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
3134                ndev->dev_addr[5]);
3135}
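
The revision decode in ql_display_dev_info() treats chip_rev_id as four packed 4-bit fields. A worked example for a hypothetical chip_rev_id of 0x00004321 (the value is purely illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t chip_rev_id = 0x00004321;  /* hypothetical value */

        printf("NIC Roll %u, NIC Rev %u, XG Roll %u, XG Rev %u\n",
               chip_rev_id & 0x0000000f,
               (chip_rev_id >> 4) & 0x0000000f,
               (chip_rev_id >> 8) & 0x0000000f,
               (chip_rev_id >> 12) & 0x0000000f);
        return 0;
}

With that value the message would read NIC Roll 1, NIC Rev 2, XG Roll 3, XG Rev 4.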
3136
3137static int ql_adapter_down(struct ql_adapter *qdev)
3138{
3139        struct net_device *ndev = qdev->ndev;
3140        int i, status = 0;
3141        struct rx_ring *rx_ring;
3142
3143        netif_stop_queue(ndev);
3144        netif_carrier_off(ndev);
3145
3146        cancel_delayed_work_sync(&qdev->asic_reset_work);
3147        cancel_delayed_work_sync(&qdev->mpi_reset_work);
3148        cancel_delayed_work_sync(&qdev->mpi_work);
3149
3150        /* The default queue at index 0 is always processed in
3151         * a workqueue.
3152         */
3153        cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3154
3155        /* The rest of the rx_rings are processed in
3156         * a workqueue only if it's a single interrupt
3157         * environment (MSI/Legacy).
3158         */
3159        for (i = 1; i < qdev->rx_ring_count; i++) {
3160                rx_ring = &qdev->rx_ring[i];
3161                /* Only the RSS rings use NAPI on multi irq
3162                 * environment.  Outbound completion processing
3163                 * is done in interrupt context.
3164                 */
3165                if (i >= qdev->rss_ring_first_cq_id) {
3166                        napi_disable(&rx_ring->napi);
3167                } else {
3168                        cancel_delayed_work_sync(&rx_ring->rx_work);
3169                }
3170        }
3171
3172        clear_bit(QL_ADAPTER_UP, &qdev->flags);
3173
3174        ql_disable_interrupts(qdev);
3175
3176        ql_tx_ring_clean(qdev);
3177
3178        spin_lock(&qdev->hw_lock);
3179        status = ql_adapter_reset(qdev);
3180        if (status)
3181                QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3182                        qdev->func);
3183        spin_unlock(&qdev->hw_lock);
3184        return status;
3185}
3186
3187static int ql_adapter_up(struct ql_adapter *qdev)
3188{
3189        int err = 0;
3190
3191        spin_lock(&qdev->hw_lock);
3192        err = ql_adapter_initialize(qdev);
3193        if (err) {
3194                QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3195                spin_unlock(&qdev->hw_lock);
3196                goto err_init;
3197        }
3198        spin_unlock(&qdev->hw_lock);
3199        set_bit(QL_ADAPTER_UP, &qdev->flags);
3200        ql_enable_interrupts(qdev);
3201        ql_enable_all_completion_interrupts(qdev);
3202        if ((ql_read32(qdev, STS) & qdev->port_init)) {
3203                netif_carrier_on(qdev->ndev);
3204                netif_start_queue(qdev->ndev);
3205        }
3206
3207        return 0;
3208err_init:
3209        ql_adapter_reset(qdev);
3210        return err;
3211}
3212
3213static int ql_cycle_adapter(struct ql_adapter *qdev)
3214{
3215        int status;
3216
3217        status = ql_adapter_down(qdev);
3218        if (status)
3219                goto error;
3220
3221        status = ql_adapter_up(qdev);
3222        if (status)
3223                goto error;
3224
3225        return status;
3226error:
3227        QPRINTK(qdev, IFUP, ALERT,
3228                "Driver up/down cycle failed, closing device\n");
3229        rtnl_lock();
3230        dev_close(qdev->ndev);
3231        rtnl_unlock();
3232        return status;
3233}
3234
3235static void ql_release_adapter_resources(struct ql_adapter *qdev)
3236{
3237        ql_free_mem_resources(qdev);
3238        ql_free_irq(qdev);
3239}
3240
3241static int ql_get_adapter_resources(struct ql_adapter *qdev)
3242{
3243        int status = 0;
3244
3245        if (ql_alloc_mem_resources(qdev)) {
3246                QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3247                return -ENOMEM;
3248        }
3249        status = ql_request_irq(qdev);
3250        if (status)
3251                goto err_irq;
3252        return status;
3253err_irq:
3254        ql_free_mem_resources(qdev);
3255        return status;
3256}
3257
3258static int qlge_close(struct net_device *ndev)
3259{
3260        struct ql_adapter *qdev = netdev_priv(ndev);
3261
3262        /*
3263         * Wait for device to recover from a reset.
3264         * (Rarely happens, but possible.)
3265         */
3266        while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3267                msleep(1);
3268        ql_adapter_down(qdev);
3269        ql_release_adapter_resources(qdev);
3270        ql_free_ring_cb(qdev);
3271        return 0;
3272}
3273
3274static int ql_configure_rings(struct ql_adapter *qdev)
3275{
3276        int i;
3277        struct rx_ring *rx_ring;
3278        struct tx_ring *tx_ring;
3279        int cpu_cnt = num_online_cpus();
3280
3281        /*
3282         * For each processor present we allocate one
3283         * rx_ring for outbound completions, and one
3284         * rx_ring for inbound completions.  Plus there is
3285         * always the one default queue.  For the CPU
3286         * counts we end up with the following rx_rings:
3287         * rx_ring count =
3288         *  one default queue +
3289         *  (CPU count * outbound completion rx_ring) +
3290         *  (CPU count * inbound (RSS) completion rx_ring)
3291         * To keep it simple we limit the total number of
3292         * queues to < 32, so we truncate CPU to 8.
3293         * This limitation can be removed when requested.
3294         */
3295
3296        if (cpu_cnt > 8)
3297                cpu_cnt = 8;
3298
3299        /*
3300         * rx_ring[0] is always the default queue.
3301         */
3302        /* Allocate outbound completion ring for each CPU. */
3303        qdev->tx_ring_count = cpu_cnt;
3304        /* Allocate inbound completion (RSS) ring for each CPU. */
3305        qdev->rss_ring_count = cpu_cnt;
3306        /* cq_id for the first inbound ring handler. */
3307        qdev->rss_ring_first_cq_id = cpu_cnt + 1;
3308        /*
3309         * qdev->rx_ring_count:
3310         * Total number of rx_rings.  This includes the one
3311         * default queue, a number of outbound completion
3312         * handler rx_rings, and the number of inbound
3313         * completion handler rx_rings.
3314         */
3315        qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3316
3317        if (ql_alloc_ring_cb(qdev))
3318                return -ENOMEM;
3319
3320        for (i = 0; i < qdev->tx_ring_count; i++) {
3321                tx_ring = &qdev->tx_ring[i];
3322                memset((void *)tx_ring, 0, sizeof(struct tx_ring));
3323                tx_ring->qdev = qdev;
3324                tx_ring->wq_id = i;
3325                tx_ring->wq_len = qdev->tx_ring_size;
3326                tx_ring->wq_size =
3327                    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3328
3329                /*
3330                 * The completion queue ID for the tx rings start
3331                 * immediately after the default Q ID, which is zero.
3332                 */
3333                tx_ring->cq_id = i + 1;
3334        }
3335
3336        for (i = 0; i < qdev->rx_ring_count; i++) {
3337                rx_ring = &qdev->rx_ring[i];
3338                memset((void *)rx_ring, 0, sizeof(struct rx_ring));
3339                rx_ring->qdev = qdev;
3340                rx_ring->cq_id = i;
3341                rx_ring->cpu = i % cpu_cnt;        /* CPU to run handler on. */
3342                if (i == 0) {        /* Default queue at index 0. */
3343                        /*
3344                         * Default queue handles bcast/mcast plus
3345                         * async events.  Needs buffers.
3346                         */
3347                        rx_ring->cq_len = qdev->rx_ring_size;
3348                        rx_ring->cq_size =
3349                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3350                        rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3351                        rx_ring->lbq_size =
3352                            rx_ring->lbq_len * sizeof(struct bq_element);
3353                        rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3354                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3355                        rx_ring->sbq_size =
3356                            rx_ring->sbq_len * sizeof(struct bq_element);
3357                        rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3358                        rx_ring->type = DEFAULT_Q;
3359                } else if (i < qdev->rss_ring_first_cq_id) {
3360                        /*
3361                         * Outbound queue handles outbound completions only.
3362                         */
3363                        /* outbound cq is same size as tx_ring it services. */
3364                        rx_ring->cq_len = qdev->tx_ring_size;
3365                        rx_ring->cq_size =
3366                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3367                        rx_ring->lbq_len = 0;
3368                        rx_ring->lbq_size = 0;
3369                        rx_ring->lbq_buf_size = 0;
3370                        rx_ring->sbq_len = 0;
3371                        rx_ring->sbq_size = 0;
3372                        rx_ring->sbq_buf_size = 0;
3373                        rx_ring->type = TX_Q;
3374                } else {        /* Inbound completions (RSS) queues */
3375                        /*
3376                         * Inbound queues handle unicast frames only.
3377                         */
3378                        rx_ring->cq_len = qdev->rx_ring_size;
3379                        rx_ring->cq_size =
3380                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3381                        rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3382                        rx_ring->lbq_size =
3383                            rx_ring->lbq_len * sizeof(struct bq_element);
3384                        rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3385                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3386                        rx_ring->sbq_size =
3387                            rx_ring->sbq_len * sizeof(struct bq_element);
3388                        rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3389                        rx_ring->type = RX_Q;
3390                }
3391        }
3392        return 0;
3393}
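
The comment at the top of ql_configure_rings() describes the ring budget: one default queue plus one outbound completion ring and one inbound (RSS) ring per CPU, with the CPU count clamped to 8. A small standalone sketch of that arithmetic for hypothetical 4-CPU and 16-CPU machines:

#include <stdio.h>

static void show_ring_budget(int online_cpus)
{
        int cpu_cnt = online_cpus > 8 ? 8 : online_cpus;  /* clamp as in the driver */
        int tx_ring_count = cpu_cnt;                      /* outbound completion rings */
        int rss_ring_count = cpu_cnt;                     /* inbound (RSS) rings */
        int rss_ring_first_cq_id = cpu_cnt + 1;
        int rx_ring_count = tx_ring_count + rss_ring_count + 1;

        printf("%2d CPUs -> tx=%d rss=%d first_rss_cq=%d total rx_rings=%d\n",
               online_cpus, tx_ring_count, rss_ring_count,
               rss_ring_first_cq_id, rx_ring_count);
}

int main(void)
{
        show_ring_budget(4);   /* 4 + 4 + 1 = 9 rx_rings */
        show_ring_budget(16);  /* clamped to 8 CPUs: 17 rx_rings */
        return 0;
}

The clamp is what keeps the total under the stated limit of 32 queues: without it a 32-way machine would want 65 rx_rings.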
3394
3395static int qlge_open(struct net_device *ndev)
3396{
3397        int err = 0;
3398        struct ql_adapter *qdev = netdev_priv(ndev);
3399
3400        err = ql_configure_rings(qdev);
3401        if (err)
3402                return err;
3403
3404        err = ql_get_adapter_resources(qdev);
3405        if (err)
3406                goto error_up;
3407
3408        err = ql_adapter_up(qdev);
3409        if (err)
3410                goto error_up;
3411
3412        return err;
3413
3414error_up:
3415        ql_release_adapter_resources(qdev);
3416        ql_free_ring_cb(qdev);
3417        return err;
3418}
3419
3420static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3421{
3422        struct ql_adapter *qdev = netdev_priv(ndev);
3423
3424        if (ndev->mtu == 1500 && new_mtu == 9000) {
3425                QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3426        } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3427                QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3428        } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3429                   (ndev->mtu == 9000 && new_mtu == 9000)) {
3430                return 0;
3431        } else
3432                return -EINVAL;
3433        ndev->mtu = new_mtu;
3434        return 0;
3435}
3436
3437static struct net_device_stats *qlge_get_stats(struct net_device
3438                                               *ndev)
3439{
3440        struct ql_adapter *qdev = netdev_priv(ndev);
3441        return &qdev->stats;
3442}
3443
3444static void qlge_set_multicast_list(struct net_device *ndev)
3445{
3446        struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3447        struct dev_mc_list *mc_ptr;
3448        int i;
3449
3450        spin_lock(&qdev->hw_lock);
3451        /*
3452         * Set or clear promiscuous mode if a
3453         * transition is taking place.
3454         */
3455        if (ndev->flags & IFF_PROMISC) {
3456                if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3457                        if (ql_set_routing_reg
3458                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3459                                QPRINTK(qdev, HW, ERR,
3460                                        "Failed to set promiscuous mode.\n");
3461                        } else {
3462                                set_bit(QL_PROMISCUOUS, &qdev->flags);
3463                        }
3464                }
3465        } else {
3466                if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3467                        if (ql_set_routing_reg
3468                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3469                                QPRINTK(qdev, HW, ERR,
3470                                        "Failed to clear promiscuous mode.\n");
3471                        } else {
3472                                clear_bit(QL_PROMISCUOUS, &qdev->flags);
3473                        }
3474                }
3475        }
3476
3477        /*
3478         * Set or clear all multicast mode if a
3479         * transition is taking place.
3480         */
3481        if ((ndev->flags & IFF_ALLMULTI) ||
3482            (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3483                if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3484                        if (ql_set_routing_reg
3485                            (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3486                                QPRINTK(qdev, HW, ERR,
3487                                        "Failed to set all-multi mode.\n");
3488                        } else {
3489                                set_bit(QL_ALLMULTI, &qdev->flags);
3490                        }
3491                }
3492        } else {
3493                if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3494                        if (ql_set_routing_reg
3495                            (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3496                                QPRINTK(qdev, HW, ERR,
3497                                        "Failed to clear all-multi mode.\n");
3498                        } else {
3499                                clear_bit(QL_ALLMULTI, &qdev->flags);
3500                        }
3501                }
3502        }
3503
3504        if (ndev->mc_count) {
3505                for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3506                     i++, mc_ptr = mc_ptr->next)
3507                        if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3508                                                MAC_ADDR_TYPE_MULTI_MAC, i)) {
3509                                QPRINTK(qdev, HW, ERR,
3510                                        "Failed to load multicast address.\n");
3511                                goto exit;
3512                        }
3513                if (ql_set_routing_reg
3514                    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3515                        QPRINTK(qdev, HW, ERR,
3516                                "Failed to set multicast match mode.\n");
3517                } else {
3518                        set_bit(QL_ALLMULTI, &qdev->flags);
3519                }
3520        }
3521exit:
3522        spin_unlock(&qdev->hw_lock);
3523}
3524
3525static int qlge_set_mac_address(struct net_device *ndev, void *p)
3526{
3527        struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3528        struct sockaddr *addr = p;
3529
3530        if (netif_running(ndev))
3531                return -EBUSY;
3532
3533        if (!is_valid_ether_addr(addr->sa_data))
3534                return -EADDRNOTAVAIL;
3535        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3536
3537        spin_lock(&qdev->hw_lock);
3538        if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3539                        MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
3540                QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3541                return -1;
3542        }
3543        spin_unlock(&qdev->hw_lock);
3544
3545        return 0;
3546}
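
Note the locking in qlge_set_mac_address() above: hw_lock is taken at line 3537, but the failure branch returns at line 3541 without dropping it, so a failed ql_set_mac_addr_reg() leaves the adapter spinlock held. qlge_set_multicast_list() just before it avoids this by routing every error through an exit label that unlocks. A minimal corrected sketch of the locked region, releasing the lock on both paths; returning -EIO instead of -1 is an additional assumption, not something the driver does:

        spin_lock(&qdev->hw_lock);
        if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
                        MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
                QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
                spin_unlock(&qdev->hw_lock);    /* do not leak hw_lock on failure */
                return -EIO;
        }
        spin_unlock(&qdev->hw_lock);

        return 0;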
3547
3548static void qlge_tx_timeout(struct net_device *ndev)
3549{
3550        struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3551        queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
3552}
3553
3554static void ql_asic_reset_work(struct work_struct *work)
3555{
3556        struct ql_adapter *qdev =
3557            container_of(work, struct ql_adapter, asic_reset_work.work);
3558        ql_cycle_adapter(qdev);
3559}
3560
3561static void ql_get_board_info(struct ql_adapter *qdev)
3562{
3563        qdev->func =
3564            (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3565        if (qdev->func) {
3566                qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3567                qdev->port_link_up = STS_PL1;
3568                qdev->port_init = STS_PI1;
3569                qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3570                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3571        } else {
3572                qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3573                qdev->port_link_up = STS_PL0;
3574                qdev->port_init = STS_PI0;
3575                qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3576                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3577        }
3578        qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3579}
3580
3581static void ql_release_all(struct pci_dev *pdev)
3582{
3583        struct net_device *ndev = pci_get_drvdata(pdev);
3584        struct ql_adapter *qdev = netdev_priv(ndev);
3585
3586        if (qdev->workqueue) {
3587                destroy_workqueue(qdev->workqueue);
3588                qdev->workqueue = NULL;
3589        }
3590        if (qdev->q_workqueue) {
3591                destroy_workqueue(qdev->q_workqueue);
3592                qdev->q_workqueue = NULL;
3593        }
3594        if (qdev->reg_base)
3595                iounmap((void *)qdev->reg_base);
3596        if (qdev->doorbell_area)
3597                iounmap(qdev->doorbell_area);
3598        pci_release_regions(pdev);
3599        pci_set_drvdata(pdev, NULL);
3600}
3601
3602static int __devinit ql_init_device(struct pci_dev *pdev,
3603                                    struct net_device *ndev, int cards_found)
3604{
3605        struct ql_adapter *qdev = netdev_priv(ndev);
3606        int pos, err = 0;
3607        u16 val16;
3608
3609        memset((void *)qdev, 0, sizeof(*qdev));
3610        err = pci_enable_device(pdev);
3611        if (err) {
3612                dev_err(&pdev->dev, "PCI device enable failed.\n");
3613                return err;
3614        }
3615
3616        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3617        if (pos <= 0) {
3618                dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3619                        "aborting.\n");
3620                goto err_out;
3621        } else {
3622                pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3623                val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3624                val16 |= (PCI_EXP_DEVCTL_CERE |
3625                          PCI_EXP_DEVCTL_NFERE |
3626                          PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3627                pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3628        }
3629
3630        err = pci_request_regions(pdev, DRV_NAME);
3631        if (err) {
3632                dev_err(&pdev->dev, "PCI region request failed.\n");
3633                goto err_out;
3634        }
3635
3636        pci_set_master(pdev);
3637        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3638                set_bit(QL_DMA64, &qdev->flags);
3639                err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3640        } else {
3641                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3642                if (!err)
3643                       err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3644        }
3645
3646        if (err) {
3647                dev_err(&pdev->dev, "No usable DMA configuration.\n");
3648                goto err_out;
3649        }
3650
3651        pci_set_drvdata(pdev, ndev);
3652        qdev->reg_base =
3653            ioremap_nocache(pci_resource_start(pdev, 1),
3654                            pci_resource_len(pdev, 1));
3655        if (!qdev->reg_base) {
3656                dev_err(&pdev->dev, "Register mapping failed.\n");
3657                err = -ENOMEM;
3658                goto err_out;
3659        }
3660
3661        qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3662        qdev->doorbell_area =
3663            ioremap_nocache(pci_resource_start(pdev, 3),
3664                            pci_resource_len(pdev, 3));
3665        if (!qdev->doorbell_area) {
3666                dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3667                err = -ENOMEM;
3668                goto err_out;
3669        }
3670
3671        ql_get_board_info(qdev);
3672        qdev->ndev = ndev;
3673        qdev->pdev = pdev;
3674        qdev->msg_enable = netif_msg_init(debug, default_msg);
3675        spin_lock_init(&qdev->hw_lock);
3676        spin_lock_init(&qdev->stats_lock);
3677
3678        /* make sure the EEPROM is good */
3679        err = ql_get_flash_params(qdev);
3680        if (err) {
3681                dev_err(&pdev->dev, "Invalid FLASH.\n");
3682                goto err_out;
3683        }
3684
3685        if (!is_valid_ether_addr(qdev->flash.mac_addr))
3686                goto err_out;
3687
3688        memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
3689        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3690
3691        /* Set up the default ring sizes. */
3692        qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3693        qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3694
3695        /* Set up the coalescing parameters. */
3696        qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3697        qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3698        qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3699        qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3700
3701        /*
3702         * Set up the operating parameters.
3703         */
3704        qdev->rx_csum = 1;
3705
3706        qdev->q_workqueue = create_workqueue(ndev->name);
3707        qdev->workqueue = create_singlethread_workqueue(ndev->name);
3708        INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3709        INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3710        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3711
3712        if (!cards_found) {
3713                dev_info(&pdev->dev, "%s\n", DRV_STRING);
3714                dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3715                         DRV_NAME, DRV_VERSION);
3716        }
3717        return 0;
3718err_out:
3719        ql_release_all(pdev);
3720        pci_disable_device(pdev);
3721        return err;
3722}
3723
3724static int __devinit qlge_probe(struct pci_dev *pdev,
3725                                const struct pci_device_id *pci_entry)
3726{
3727        struct net_device *ndev = NULL;
3728        struct ql_adapter *qdev = NULL;
3729        static int cards_found = 0;
3730        int err = 0;
3731
3732        ndev = alloc_etherdev(sizeof(struct ql_adapter));
3733        if (!ndev)
3734                return -ENOMEM;
3735
3736        err = ql_init_device(pdev, ndev, cards_found);
3737        if (err < 0) {
3738                free_netdev(ndev);
3739                return err;
3740        }
3741
3742        qdev = netdev_priv(ndev);
3743        SET_NETDEV_DEV(ndev, &pdev->dev);
3744        ndev->features = (0
3745                          | NETIF_F_IP_CSUM
3746                          | NETIF_F_SG
3747                          | NETIF_F_TSO
3748                          | NETIF_F_TSO6
3749                          | NETIF_F_TSO_ECN
3750                          | NETIF_F_HW_VLAN_TX
3751                          | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
3752
3753        if (test_bit(QL_DMA64, &qdev->flags))
3754                ndev->features |= NETIF_F_HIGHDMA;
3755
3756        /*
3757         * Set up net_device structure.
3758         */
3759        ndev->tx_queue_len = qdev->tx_ring_size;
3760        ndev->irq = pdev->irq;
3761        ndev->open = qlge_open;
3762        ndev->stop = qlge_close;
3763        ndev->hard_start_xmit = qlge_send;
3764        SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
3765        ndev->change_mtu = qlge_change_mtu;
3766        ndev->get_stats = qlge_get_stats;
3767        ndev->set_multicast_list = qlge_set_multicast_list;
3768        ndev->set_mac_address = qlge_set_mac_address;
3769        ndev->tx_timeout = qlge_tx_timeout;
3770        ndev->watchdog_timeo = 10 * HZ;
3771        ndev->vlan_rx_register = ql_vlan_rx_register;
3772        ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid;
3773        ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid;
3774        err = register_netdev(ndev);
3775        if (err) {
3776                dev_err(&pdev->dev, "net device registration failed.\n");
3777                ql_release_all(pdev);
3778                pci_disable_device(pdev);
3779                return err;
3780        }
3781        netif_carrier_off(ndev);
3782        netif_stop_queue(ndev);
3783        ql_display_dev_info(ndev);
3784        cards_found++;
3785        return 0;
3786}
3787
3788static void __devexit qlge_remove(struct pci_dev *pdev)
3789{
3790        struct net_device *ndev = pci_get_drvdata(pdev);
3791        unregister_netdev(ndev);
3792        ql_release_all(pdev);
3793        pci_disable_device(pdev);
3794        free_netdev(ndev);
3795}
3796
3797/*
3798 * This callback is called by the PCI subsystem whenever
3799 * a PCI bus error is detected.
3800 */
3801static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
3802                                               enum pci_channel_state state)
3803{
3804        struct net_device *ndev = pci_get_drvdata(pdev);
3805        struct ql_adapter *qdev = netdev_priv(ndev);
3806
3807        if (netif_running(ndev))
3808                ql_adapter_down(qdev);
3809
3810        pci_disable_device(pdev);
3811
3812        /* Request a slot reset. */
3813        return PCI_ERS_RESULT_NEED_RESET;
3814}
3815
3816/*
3817 * This callback is called after the PCI bus has been reset.
3818 * Basically, this tries to restart the card from scratch.
3819 * This is a shortened version of the device probe/discovery code,
3820 * it resembles the first-half of the () routine.
3821 */
3822static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
3823{
3824        struct net_device *ndev = pci_get_drvdata(pdev);
3825        struct ql_adapter *qdev = netdev_priv(ndev);
3826
3827        if (pci_enable_device(pdev)) {
3828                QPRINTK(qdev, IFUP, ERR,
3829                        "Cannot re-enable PCI device after reset.\n");
3830                return PCI_ERS_RESULT_DISCONNECT;
3831        }
3832
3833        pci_set_master(pdev);
3834
3835        netif_carrier_off(ndev);
3836        netif_stop_queue(ndev);
3837        ql_adapter_reset(qdev);
3838
3839        /* Make sure the EEPROM is good */
3840        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3841
3842        if (!is_valid_ether_addr(ndev->perm_addr)) {
3843                QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
3844                return PCI_ERS_RESULT_DISCONNECT;
3845        }
3846
3847        return PCI_ERS_RESULT_RECOVERED;
3848}
3849
3850static void qlge_io_resume(struct pci_dev *pdev)
3851{
3852        struct net_device *ndev = pci_get_drvdata(pdev);
3853        struct ql_adapter *qdev = netdev_priv(ndev);
3854
3855        pci_set_master(pdev);
3856
3857        if (netif_running(ndev)) {
3858                if (ql_adapter_up(qdev)) {
3859                        QPRINTK(qdev, IFUP, ERR,
3860                                "Device initialization failed after reset.\n");
3861                        return;
3862                }
3863        }
3864
3865        netif_device_attach(ndev);
3866}
3867
3868static struct pci_error_handlers qlge_err_handler = {
3869        .error_detected = qlge_io_error_detected,
3870        .slot_reset = qlge_io_slot_reset,
3871        .resume = qlge_io_resume,
3872};
3873
3874static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
3875{
3876        struct net_device *ndev = pci_get_drvdata(pdev);
3877        struct ql_adapter *qdev = netdev_priv(ndev);
3878        int err;
3879
3880        netif_device_detach(ndev);
3881
3882        if (netif_running(ndev)) {
3883                err = ql_adapter_down(qdev);
3884                if (err)
3885                        return err;
3886        }
3887
3888        err = pci_save_state(pdev);
3889        if (err)
3890                return err;
3891
3892        pci_disable_device(pdev);
3893
3894        pci_set_power_state(pdev, pci_choose_state(pdev, state));
3895
3896        return 0;
3897}
3898
3899#ifdef CONFIG_PM
3900static int qlge_resume(struct pci_dev *pdev)
3901{
3902        struct net_device *ndev = pci_get_drvdata(pdev);
3903        struct ql_adapter *qdev = netdev_priv(ndev);
3904        int err;
3905
3906        pci_set_power_state(pdev, PCI_D0);
3907        pci_restore_state(pdev);
3908        err = pci_enable_device(pdev);
3909        if (err) {
3910                QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
3911                return err;
3912        }
3913        pci_set_master(pdev);
3914
3915        pci_enable_wake(pdev, PCI_D3hot, 0);
3916        pci_enable_wake(pdev, PCI_D3cold, 0);
3917
3918        if (netif_running(ndev)) {
3919                err = ql_adapter_up(qdev);
3920                if (err)
3921                        return err;
3922        }
3923
3924        netif_device_attach(ndev);
3925
3926        return 0;
3927}
3928#endif /* CONFIG_PM */
3929
3930static void qlge_shutdown(struct pci_dev *pdev)
3931{
3932        qlge_suspend(pdev, PMSG_SUSPEND);
3933}
3934
3935static struct pci_driver qlge_driver = {
3936        .name = DRV_NAME,
3937        .id_table = qlge_pci_tbl,
3938        .probe = qlge_probe,
3939        .remove = __devexit_p(qlge_remove),
3940#ifdef CONFIG_PM
3941        .suspend = qlge_suspend,
3942        .resume = qlge_resume,
3943#endif
3944        .shutdown = qlge_shutdown,
3945        .err_handler = &qlge_err_handler
3946};
3947
3948static int __init qlge_init_module(void)
3949{
3950        return pci_register_driver(&qlge_driver);
3951}
3952
3953static void __exit qlge_exit(void)
3954{
3955        pci_unregister_driver(&qlge_driver);
3956}
3957
3958module_init(qlge_init_module);
3959module_exit(qlge_exit);