Showing error 1795

User: Jiri Slaby
Error type: Invalid Pointer Dereference
Error type description: An invalid pointer is being dereferenced
File location: drivers/infiniband/hw/cxgb3/iwch_cm.c
Line in file: 742
Project: Linux Kernel
Project version: 2.6.28
Tools: Smatch (1.59)
Entered: 2013-09-11 08:47:26 UTC
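
The dereference flagged at line 742 sits in connect_reply_upcall(): the function
tests ep->com.cm_id before delivering the event (line 736), but the following
"if (status < 0)" block calls ep->com.cm_id->rem_ref() without re-checking the
pointer, so a NULL cm_id combined with a negative status dereferences NULL.
The sketch below reproduces that shape and adds one possible guard; the names
(fake_cm_id, connect_reply_unsafe, connect_reply_guarded) are invented for
illustration and this is neither the driver's code nor the upstream fix.

        #include <stdio.h>

        struct fake_cm_id {
                /* stand-in for the iw_cm_id release callback */
                void (*rem_ref)(struct fake_cm_id *id);
        };

        static void drop_ref(struct fake_cm_id *id)
        {
                printf("dropped reference on cm_id %p\n", (void *)id);
        }

        /* Shape Smatch reports: the first test implies cm_id may be NULL,
         * yet the second branch dereferences it unconditionally. */
        static void connect_reply_unsafe(struct fake_cm_id *cm_id, int status)
        {
                if (cm_id)
                        printf("reply delivered, status %d\n", status);
                if (status < 0)
                        cm_id->rem_ref(cm_id);  /* NULL deref when cm_id == NULL */
        }

        /* One way to satisfy the checker: re-test the pointer before the call. */
        static void connect_reply_guarded(struct fake_cm_id *cm_id, int status)
        {
                if (cm_id)
                        printf("reply delivered, status %d\n", status);
                if (status < 0 && cm_id)
                        cm_id->rem_ref(cm_id);
        }

        int main(void)
        {
                struct fake_cm_id id = { .rem_ref = drop_ref };

                connect_reply_unsafe(&id, -104);   /* -104 ~ -ECONNRESET; safe here, cm_id is non-NULL */
                connect_reply_guarded(NULL, -104); /* guard avoids the crash the report describes */
                return 0;
        }

Built with "gcc -Wall", this runs cleanly; dropping the added "&& cm_id" guard
and passing a NULL cm_id reproduces the kind of crash the checker is warning about.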


Source:

   1/*
   2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32#include <linux/module.h>
  33#include <linux/list.h>
  34#include <linux/workqueue.h>
  35#include <linux/skbuff.h>
  36#include <linux/timer.h>
  37#include <linux/notifier.h>
  38#include <linux/inetdevice.h>
  39
  40#include <net/neighbour.h>
  41#include <net/netevent.h>
  42#include <net/route.h>
  43
  44#include "tcb.h"
  45#include "cxgb3_offload.h"
  46#include "iwch.h"
  47#include "iwch_provider.h"
  48#include "iwch_cm.h"
  49
  50static char *states[] = {
  51        "idle",
  52        "listen",
  53        "connecting",
  54        "mpa_wait_req",
  55        "mpa_req_sent",
  56        "mpa_req_rcvd",
  57        "mpa_rep_sent",
  58        "fpdu_mode",
  59        "aborting",
  60        "closing",
  61        "moribund",
  62        "dead",
  63        NULL,
  64};
  65
  66int peer2peer = 0;
  67module_param(peer2peer, int, 0644);
  68MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
  69
  70static int ep_timeout_secs = 60;
  71module_param(ep_timeout_secs, int, 0644);
  72MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
  73                                   "in seconds (default=60)");
  74
  75static int mpa_rev = 1;
  76module_param(mpa_rev, int, 0644);
  77MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
  78                 "1 is spec compliant. (default=1)");
  79
  80static int markers_enabled = 0;
  81module_param(markers_enabled, int, 0644);
  82MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
  83
  84static int crc_enabled = 1;
  85module_param(crc_enabled, int, 0644);
  86MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
  87
  88static int rcv_win = 256 * 1024;
  89module_param(rcv_win, int, 0644);
  90MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256)");
  91
  92static int snd_win = 32 * 1024;
  93module_param(snd_win, int, 0644);
  94MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
  95
  96static unsigned int nocong = 0;
  97module_param(nocong, uint, 0644);
  98MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
  99
 100static unsigned int cong_flavor = 1;
 101module_param(cong_flavor, uint, 0644);
 102MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
 103
 104static void process_work(struct work_struct *work);
 105static struct workqueue_struct *workq;
 106static DECLARE_WORK(skb_work, process_work);
 107
 108static struct sk_buff_head rxq;
 109static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
 110
 111static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 112static void ep_timeout(unsigned long arg);
 113static void connect_reply_upcall(struct iwch_ep *ep, int status);
 114
 115static void start_ep_timer(struct iwch_ep *ep)
 116{
 117        PDBG("%s ep %p\n", __func__, ep);
 118        if (timer_pending(&ep->timer)) {
 119                PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
 120                del_timer_sync(&ep->timer);
 121        } else
 122                get_ep(&ep->com);
 123        ep->timer.expires = jiffies + ep_timeout_secs * HZ;
 124        ep->timer.data = (unsigned long)ep;
 125        ep->timer.function = ep_timeout;
 126        add_timer(&ep->timer);
 127}
 128
 129static void stop_ep_timer(struct iwch_ep *ep)
 130{
 131        PDBG("%s ep %p\n", __func__, ep);
 132        if (!timer_pending(&ep->timer)) {
  133                printk(KERN_ERR "%s timer stopped when it's not running!  ep %p state %u\n",
 134                        __func__, ep, ep->com.state);
 135                WARN_ON(1);
 136                return;
 137        }
 138        del_timer_sync(&ep->timer);
 139        put_ep(&ep->com);
 140}
 141
 142static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
 143{
 144        struct cpl_tid_release *req;
 145
 146        skb = get_skb(skb, sizeof *req, GFP_KERNEL);
 147        if (!skb)
 148                return;
 149        req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
 150        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 151        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
 152        skb->priority = CPL_PRIORITY_SETUP;
 153        cxgb3_ofld_send(tdev, skb);
 154        return;
 155}
 156
 157int iwch_quiesce_tid(struct iwch_ep *ep)
 158{
 159        struct cpl_set_tcb_field *req;
 160        struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 161
 162        if (!skb)
 163                return -ENOMEM;
 164        req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
 165        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 166        req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 167        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
 168        req->reply = 0;
 169        req->cpu_idx = 0;
 170        req->word = htons(W_TCB_RX_QUIESCE);
 171        req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
 172        req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
 173
 174        skb->priority = CPL_PRIORITY_DATA;
 175        cxgb3_ofld_send(ep->com.tdev, skb);
 176        return 0;
 177}
 178
 179int iwch_resume_tid(struct iwch_ep *ep)
 180{
 181        struct cpl_set_tcb_field *req;
 182        struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 183
 184        if (!skb)
 185                return -ENOMEM;
 186        req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
 187        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 188        req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 189        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
 190        req->reply = 0;
 191        req->cpu_idx = 0;
 192        req->word = htons(W_TCB_RX_QUIESCE);
 193        req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
 194        req->val = 0;
 195
 196        skb->priority = CPL_PRIORITY_DATA;
 197        cxgb3_ofld_send(ep->com.tdev, skb);
 198        return 0;
 199}
 200
 201static void set_emss(struct iwch_ep *ep, u16 opt)
 202{
 203        PDBG("%s ep %p opt %u\n", __func__, ep, opt);
 204        ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
 205        if (G_TCPOPT_TSTAMP(opt))
 206                ep->emss -= 12;
 207        if (ep->emss < 128)
 208                ep->emss = 128;
 209        PDBG("emss=%d\n", ep->emss);
 210}
 211
 212static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
 213{
 214        unsigned long flags;
 215        enum iwch_ep_state state;
 216
 217        spin_lock_irqsave(&epc->lock, flags);
 218        state = epc->state;
 219        spin_unlock_irqrestore(&epc->lock, flags);
 220        return state;
 221}
 222
 223static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
 224{
 225        epc->state = new;
 226}
 227
 228static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
 229{
 230        unsigned long flags;
 231
 232        spin_lock_irqsave(&epc->lock, flags);
 233        PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
 234        __state_set(epc, new);
 235        spin_unlock_irqrestore(&epc->lock, flags);
 236        return;
 237}
 238
 239static void *alloc_ep(int size, gfp_t gfp)
 240{
 241        struct iwch_ep_common *epc;
 242
 243        epc = kzalloc(size, gfp);
 244        if (epc) {
 245                kref_init(&epc->kref);
 246                spin_lock_init(&epc->lock);
 247                init_waitqueue_head(&epc->waitq);
 248        }
 249        PDBG("%s alloc ep %p\n", __func__, epc);
 250        return epc;
 251}
 252
 253void __free_ep(struct kref *kref)
 254{
 255        struct iwch_ep_common *epc;
 256        epc = container_of(kref, struct iwch_ep_common, kref);
 257        PDBG("%s ep %p state %s\n", __func__, epc, states[state_read(epc)]);
 258        kfree(epc);
 259}
 260
 261static void release_ep_resources(struct iwch_ep *ep)
 262{
 263        PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 264        cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
 265        dst_release(ep->dst);
 266        l2t_release(L2DATA(ep->com.tdev), ep->l2t);
 267        put_ep(&ep->com);
 268}
 269
 270static void process_work(struct work_struct *work)
 271{
 272        struct sk_buff *skb = NULL;
 273        void *ep;
 274        struct t3cdev *tdev;
 275        int ret;
 276
 277        while ((skb = skb_dequeue(&rxq))) {
 278                ep = *((void **) (skb->cb));
 279                tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
 280                ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
 281                if (ret & CPL_RET_BUF_DONE)
 282                        kfree_skb(skb);
 283
 284                /*
 285                 * ep was referenced in sched(), and is freed here.
 286                 */
 287                put_ep((struct iwch_ep_common *)ep);
 288        }
 289}
 290
 291static int status2errno(int status)
 292{
 293        switch (status) {
 294        case CPL_ERR_NONE:
 295                return 0;
 296        case CPL_ERR_CONN_RESET:
 297                return -ECONNRESET;
 298        case CPL_ERR_ARP_MISS:
 299                return -EHOSTUNREACH;
 300        case CPL_ERR_CONN_TIMEDOUT:
 301                return -ETIMEDOUT;
 302        case CPL_ERR_TCAM_FULL:
 303                return -ENOMEM;
 304        case CPL_ERR_CONN_EXIST:
 305                return -EADDRINUSE;
 306        default:
 307                return -EIO;
 308        }
 309}
 310
 311/*
 312 * Try and reuse skbs already allocated...
 313 */
 314static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
 315{
 316        if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
 317                skb_trim(skb, 0);
 318                skb_get(skb);
 319        } else {
 320                skb = alloc_skb(len, gfp);
 321        }
 322        return skb;
 323}
 324
 325static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
 326                                 __be32 peer_ip, __be16 local_port,
 327                                 __be16 peer_port, u8 tos)
 328{
 329        struct rtable *rt;
 330        struct flowi fl = {
 331                .oif = 0,
 332                .nl_u = {
 333                         .ip4_u = {
 334                                   .daddr = peer_ip,
 335                                   .saddr = local_ip,
 336                                   .tos = tos}
 337                         },
 338                .proto = IPPROTO_TCP,
 339                .uli_u = {
 340                          .ports = {
 341                                    .sport = local_port,
 342                                    .dport = peer_port}
 343                          }
 344        };
 345
 346        if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
 347                return NULL;
 348        return rt;
 349}
 350
 351static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
 352{
 353        int i = 0;
 354
 355        while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
 356                ++i;
 357        return i;
 358}
 359
 360static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
 361{
 362        PDBG("%s t3cdev %p\n", __func__, dev);
 363        kfree_skb(skb);
 364}
 365
 366/*
 367 * Handle an ARP failure for an active open.
 368 */
 369static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 370{
  371        printk(KERN_ERR MOD "ARP failure during connect\n");
 372        kfree_skb(skb);
 373}
 374
 375/*
 376 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 377 * and send it along.
 378 */
 379static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 380{
 381        struct cpl_abort_req *req = cplhdr(skb);
 382
 383        PDBG("%s t3cdev %p\n", __func__, dev);
 384        req->cmd = CPL_ABORT_NO_RST;
 385        cxgb3_ofld_send(dev, skb);
 386}
 387
 388static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
 389{
 390        struct cpl_close_con_req *req;
 391        struct sk_buff *skb;
 392
 393        PDBG("%s ep %p\n", __func__, ep);
 394        skb = get_skb(NULL, sizeof(*req), gfp);
 395        if (!skb) {
 396                printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
 397                return -ENOMEM;
 398        }
 399        skb->priority = CPL_PRIORITY_DATA;
 400        set_arp_failure_handler(skb, arp_failure_discard);
 401        req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
 402        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
 403        req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 404        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
 405        l2t_send(ep->com.tdev, skb, ep->l2t);
 406        return 0;
 407}
 408
 409static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
 410{
 411        struct cpl_abort_req *req;
 412
 413        PDBG("%s ep %p\n", __func__, ep);
 414        skb = get_skb(skb, sizeof(*req), gfp);
 415        if (!skb) {
 416                printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
 417                       __func__);
 418                return -ENOMEM;
 419        }
 420        skb->priority = CPL_PRIORITY_DATA;
 421        set_arp_failure_handler(skb, abort_arp_failure);
 422        req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
 423        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
 424        req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 425        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
 426        req->cmd = CPL_ABORT_SEND_RST;
 427        l2t_send(ep->com.tdev, skb, ep->l2t);
 428        return 0;
 429}
 430
 431static int send_connect(struct iwch_ep *ep)
 432{
 433        struct cpl_act_open_req *req;
 434        struct sk_buff *skb;
 435        u32 opt0h, opt0l, opt2;
 436        unsigned int mtu_idx;
 437        int wscale;
 438
 439        PDBG("%s ep %p\n", __func__, ep);
 440
 441        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 442        if (!skb) {
 443                printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
 444                       __func__);
 445                return -ENOMEM;
 446        }
 447        mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
 448        wscale = compute_wscale(rcv_win);
 449        opt0h = V_NAGLE(0) |
 450            V_NO_CONG(nocong) |
 451            V_KEEP_ALIVE(1) |
 452            F_TCAM_BYPASS |
 453            V_WND_SCALE(wscale) |
 454            V_MSS_IDX(mtu_idx) |
 455            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
 456        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
 457        opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
 458        skb->priority = CPL_PRIORITY_SETUP;
 459        set_arp_failure_handler(skb, act_open_req_arp_failure);
 460
 461        req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
 462        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 463        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
 464        req->local_port = ep->com.local_addr.sin_port;
 465        req->peer_port = ep->com.remote_addr.sin_port;
 466        req->local_ip = ep->com.local_addr.sin_addr.s_addr;
 467        req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
 468        req->opt0h = htonl(opt0h);
 469        req->opt0l = htonl(opt0l);
 470        req->params = 0;
 471        req->opt2 = htonl(opt2);
 472        l2t_send(ep->com.tdev, skb, ep->l2t);
 473        return 0;
 474}
 475
 476static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
 477{
 478        int mpalen;
 479        struct tx_data_wr *req;
 480        struct mpa_message *mpa;
 481        int len;
 482
 483        PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);
 484
 485        BUG_ON(skb_cloned(skb));
 486
 487        mpalen = sizeof(*mpa) + ep->plen;
 488        if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
 489                kfree_skb(skb);
 490                skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
 491                if (!skb) {
 492                        connect_reply_upcall(ep, -ENOMEM);
 493                        return;
 494                }
 495        }
 496        skb_trim(skb, 0);
 497        skb_reserve(skb, sizeof(*req));
 498        skb_put(skb, mpalen);
 499        skb->priority = CPL_PRIORITY_DATA;
 500        mpa = (struct mpa_message *) skb->data;
 501        memset(mpa, 0, sizeof(*mpa));
 502        memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
 503        mpa->flags = (crc_enabled ? MPA_CRC : 0) |
 504                     (markers_enabled ? MPA_MARKERS : 0);
 505        mpa->private_data_size = htons(ep->plen);
 506        mpa->revision = mpa_rev;
 507
 508        if (ep->plen)
 509                memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
 510
 511        /*
 512         * Reference the mpa skb.  This ensures the data area
 513         * will remain in memory until the hw acks the tx.
 514         * Function tx_ack() will deref it.
 515         */
 516        skb_get(skb);
 517        set_arp_failure_handler(skb, arp_failure_discard);
 518        skb_reset_transport_header(skb);
 519        len = skb->len;
 520        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
 521        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
 522        req->wr_lo = htonl(V_WR_TID(ep->hwtid));
 523        req->len = htonl(len);
 524        req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
 525                           V_TX_SNDBUF(snd_win>>15));
 526        req->flags = htonl(F_TX_INIT);
 527        req->sndseq = htonl(ep->snd_seq);
 528        BUG_ON(ep->mpa_skb);
 529        ep->mpa_skb = skb;
 530        l2t_send(ep->com.tdev, skb, ep->l2t);
 531        start_ep_timer(ep);
 532        state_set(&ep->com, MPA_REQ_SENT);
 533        return;
 534}
 535
 536static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
 537{
 538        int mpalen;
 539        struct tx_data_wr *req;
 540        struct mpa_message *mpa;
 541        struct sk_buff *skb;
 542
 543        PDBG("%s ep %p plen %d\n", __func__, ep, plen);
 544
 545        mpalen = sizeof(*mpa) + plen;
 546
 547        skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
 548        if (!skb) {
 549                printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 550                return -ENOMEM;
 551        }
 552        skb_reserve(skb, sizeof(*req));
 553        mpa = (struct mpa_message *) skb_put(skb, mpalen);
 554        memset(mpa, 0, sizeof(*mpa));
 555        memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
 556        mpa->flags = MPA_REJECT;
 557        mpa->revision = mpa_rev;
 558        mpa->private_data_size = htons(plen);
 559        if (plen)
 560                memcpy(mpa->private_data, pdata, plen);
 561
 562        /*
 563         * Reference the mpa skb again.  This ensures the data area
 564         * will remain in memory until the hw acks the tx.
 565         * Function tx_ack() will deref it.
 566         */
 567        skb_get(skb);
 568        skb->priority = CPL_PRIORITY_DATA;
 569        set_arp_failure_handler(skb, arp_failure_discard);
 570        skb_reset_transport_header(skb);
 571        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
 572        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
 573        req->wr_lo = htonl(V_WR_TID(ep->hwtid));
 574        req->len = htonl(mpalen);
 575        req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
 576                           V_TX_SNDBUF(snd_win>>15));
 577        req->flags = htonl(F_TX_INIT);
 578        req->sndseq = htonl(ep->snd_seq);
 579        BUG_ON(ep->mpa_skb);
 580        ep->mpa_skb = skb;
 581        l2t_send(ep->com.tdev, skb, ep->l2t);
 582        return 0;
 583}
 584
 585static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
 586{
 587        int mpalen;
 588        struct tx_data_wr *req;
 589        struct mpa_message *mpa;
 590        int len;
 591        struct sk_buff *skb;
 592
 593        PDBG("%s ep %p plen %d\n", __func__, ep, plen);
 594
 595        mpalen = sizeof(*mpa) + plen;
 596
 597        skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
 598        if (!skb) {
 599                printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 600                return -ENOMEM;
 601        }
 602        skb->priority = CPL_PRIORITY_DATA;
 603        skb_reserve(skb, sizeof(*req));
 604        mpa = (struct mpa_message *) skb_put(skb, mpalen);
 605        memset(mpa, 0, sizeof(*mpa));
 606        memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
 607        mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
 608                     (markers_enabled ? MPA_MARKERS : 0);
 609        mpa->revision = mpa_rev;
 610        mpa->private_data_size = htons(plen);
 611        if (plen)
 612                memcpy(mpa->private_data, pdata, plen);
 613
 614        /*
 615         * Reference the mpa skb.  This ensures the data area
 616         * will remain in memory until the hw acks the tx.
 617         * Function tx_ack() will deref it.
 618         */
 619        skb_get(skb);
 620        set_arp_failure_handler(skb, arp_failure_discard);
 621        skb_reset_transport_header(skb);
 622        len = skb->len;
 623        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
 624        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
 625        req->wr_lo = htonl(V_WR_TID(ep->hwtid));
 626        req->len = htonl(len);
 627        req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
 628                           V_TX_SNDBUF(snd_win>>15));
 629        req->flags = htonl(F_TX_INIT);
 630        req->sndseq = htonl(ep->snd_seq);
 631        ep->mpa_skb = skb;
 632        state_set(&ep->com, MPA_REP_SENT);
 633        l2t_send(ep->com.tdev, skb, ep->l2t);
 634        return 0;
 635}
 636
 637static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 638{
 639        struct iwch_ep *ep = ctx;
 640        struct cpl_act_establish *req = cplhdr(skb);
 641        unsigned int tid = GET_TID(req);
 642
 643        PDBG("%s ep %p tid %d\n", __func__, ep, tid);
 644
 645        dst_confirm(ep->dst);
 646
 647        /* setup the hwtid for this connection */
 648        ep->hwtid = tid;
 649        cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
 650
 651        ep->snd_seq = ntohl(req->snd_isn);
 652        ep->rcv_seq = ntohl(req->rcv_isn);
 653
 654        set_emss(ep, ntohs(req->tcp_opt));
 655
 656        /* dealloc the atid */
 657        cxgb3_free_atid(ep->com.tdev, ep->atid);
 658
 659        /* start MPA negotiation */
 660        send_mpa_req(ep, skb);
 661
 662        return 0;
 663}
 664
 665static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
 666{
 667        PDBG("%s ep %p\n", __FILE__, ep);
 668        state_set(&ep->com, ABORTING);
 669        send_abort(ep, skb, gfp);
 670}
 671
 672static void close_complete_upcall(struct iwch_ep *ep)
 673{
 674        struct iw_cm_event event;
 675
 676        PDBG("%s ep %p\n", __func__, ep);
 677        memset(&event, 0, sizeof(event));
 678        event.event = IW_CM_EVENT_CLOSE;
 679        if (ep->com.cm_id) {
 680                PDBG("close complete delivered ep %p cm_id %p tid %d\n",
 681                     ep, ep->com.cm_id, ep->hwtid);
 682                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 683                ep->com.cm_id->rem_ref(ep->com.cm_id);
 684                ep->com.cm_id = NULL;
 685                ep->com.qp = NULL;
 686        }
 687}
 688
 689static void peer_close_upcall(struct iwch_ep *ep)
 690{
 691        struct iw_cm_event event;
 692
 693        PDBG("%s ep %p\n", __func__, ep);
 694        memset(&event, 0, sizeof(event));
 695        event.event = IW_CM_EVENT_DISCONNECT;
 696        if (ep->com.cm_id) {
 697                PDBG("peer close delivered ep %p cm_id %p tid %d\n",
 698                     ep, ep->com.cm_id, ep->hwtid);
 699                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 700        }
 701}
 702
 703static void peer_abort_upcall(struct iwch_ep *ep)
 704{
 705        struct iw_cm_event event;
 706
 707        PDBG("%s ep %p\n", __func__, ep);
 708        memset(&event, 0, sizeof(event));
 709        event.event = IW_CM_EVENT_CLOSE;
 710        event.status = -ECONNRESET;
 711        if (ep->com.cm_id) {
 712                PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
 713                     ep->com.cm_id, ep->hwtid);
 714                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 715                ep->com.cm_id->rem_ref(ep->com.cm_id);
 716                ep->com.cm_id = NULL;
 717                ep->com.qp = NULL;
 718        }
 719}
 720
 721static void connect_reply_upcall(struct iwch_ep *ep, int status)
 722{
 723        struct iw_cm_event event;
 724
 725        PDBG("%s ep %p status %d\n", __func__, ep, status);
 726        memset(&event, 0, sizeof(event));
 727        event.event = IW_CM_EVENT_CONNECT_REPLY;
 728        event.status = status;
 729        event.local_addr = ep->com.local_addr;
 730        event.remote_addr = ep->com.remote_addr;
 731
 732        if ((status == 0) || (status == -ECONNREFUSED)) {
 733                event.private_data_len = ep->plen;
 734                event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 735        }
 736        if (ep->com.cm_id) {
 737                PDBG("%s ep %p tid %d status %d\n", __func__, ep,
 738                     ep->hwtid, status);
 739                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 740        }
 741        if (status < 0) {
 742                ep->com.cm_id->rem_ref(ep->com.cm_id);
 743                ep->com.cm_id = NULL;
 744                ep->com.qp = NULL;
 745        }
 746}
 747
 748static void connect_request_upcall(struct iwch_ep *ep)
 749{
 750        struct iw_cm_event event;
 751
 752        PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 753        memset(&event, 0, sizeof(event));
 754        event.event = IW_CM_EVENT_CONNECT_REQUEST;
 755        event.local_addr = ep->com.local_addr;
 756        event.remote_addr = ep->com.remote_addr;
 757        event.private_data_len = ep->plen;
 758        event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 759        event.provider_data = ep;
 760        if (state_read(&ep->parent_ep->com) != DEAD)
 761                ep->parent_ep->com.cm_id->event_handler(
 762                                                ep->parent_ep->com.cm_id,
 763                                                &event);
 764        put_ep(&ep->parent_ep->com);
 765        ep->parent_ep = NULL;
 766}
 767
 768static void established_upcall(struct iwch_ep *ep)
 769{
 770        struct iw_cm_event event;
 771
 772        PDBG("%s ep %p\n", __func__, ep);
 773        memset(&event, 0, sizeof(event));
 774        event.event = IW_CM_EVENT_ESTABLISHED;
 775        if (ep->com.cm_id) {
 776                PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 777                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 778        }
 779}
 780
 781static int update_rx_credits(struct iwch_ep *ep, u32 credits)
 782{
 783        struct cpl_rx_data_ack *req;
 784        struct sk_buff *skb;
 785
 786        PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 787        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 788        if (!skb) {
 789                printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
 790                return 0;
 791        }
 792
 793        req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
 794        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 795        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
 796        req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
 797        skb->priority = CPL_PRIORITY_ACK;
 798        cxgb3_ofld_send(ep->com.tdev, skb);
 799        return credits;
 800}
 801
 802static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
 803{
 804        struct mpa_message *mpa;
 805        u16 plen;
 806        struct iwch_qp_attributes attrs;
 807        enum iwch_qp_attr_mask mask;
 808        int err;
 809
 810        PDBG("%s ep %p\n", __func__, ep);
 811
 812        /*
 813         * Stop mpa timer.  If it expired, then the state has
 814         * changed and we bail since ep_timeout already aborted
 815         * the connection.
 816         */
 817        stop_ep_timer(ep);
 818        if (state_read(&ep->com) != MPA_REQ_SENT)
 819                return;
 820
 821        /*
 822         * If we get more than the supported amount of private data
 823         * then we must fail this connection.
 824         */
 825        if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
 826                err = -EINVAL;
 827                goto err;
 828        }
 829
 830        /*
 831         * copy the new data into our accumulation buffer.
 832         */
 833        skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
 834                                  skb->len);
 835        ep->mpa_pkt_len += skb->len;
 836
 837        /*
 838         * if we don't even have the mpa message, then bail.
 839         */
 840        if (ep->mpa_pkt_len < sizeof(*mpa))
 841                return;
 842        mpa = (struct mpa_message *) ep->mpa_pkt;
 843
 844        /* Validate MPA header. */
 845        if (mpa->revision != mpa_rev) {
 846                err = -EPROTO;
 847                goto err;
 848        }
 849        if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
 850                err = -EPROTO;
 851                goto err;
 852        }
 853
 854        plen = ntohs(mpa->private_data_size);
 855
 856        /*
 857         * Fail if there's too much private data.
 858         */
 859        if (plen > MPA_MAX_PRIVATE_DATA) {
 860                err = -EPROTO;
 861                goto err;
 862        }
 863
 864        /*
 865         * If plen does not account for pkt size
 866         */
 867        if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
 868                err = -EPROTO;
 869                goto err;
 870        }
 871
 872        ep->plen = (u8) plen;
 873
 874        /*
 875         * If we don't have all the pdata yet, then bail.
 876         * We'll continue process when more data arrives.
 877         */
 878        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
 879                return;
 880
 881        if (mpa->flags & MPA_REJECT) {
 882                err = -ECONNREFUSED;
 883                goto err;
 884        }
 885
 886        /*
 887         * If we get here we have accumulated the entire mpa
 888         * start reply message including private data. And
 889         * the MPA header is valid.
 890         */
 891        state_set(&ep->com, FPDU_MODE);
 892        ep->mpa_attr.initiator = 1;
 893        ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
 894        ep->mpa_attr.recv_marker_enabled = markers_enabled;
 895        ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 896        ep->mpa_attr.version = mpa_rev;
 897        PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
 898             "xmit_marker_enabled=%d, version=%d\n", __func__,
 899             ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 900             ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
 901
 902        attrs.mpa_attr = ep->mpa_attr;
 903        attrs.max_ird = ep->ird;
 904        attrs.max_ord = ep->ord;
 905        attrs.llp_stream_handle = ep;
 906        attrs.next_state = IWCH_QP_STATE_RTS;
 907
 908        mask = IWCH_QP_ATTR_NEXT_STATE |
 909            IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
 910            IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;
 911
 912        /* bind QP and TID with INIT_WR */
 913        err = iwch_modify_qp(ep->com.qp->rhp,
 914                             ep->com.qp, mask, &attrs, 1);
 915        if (err)
 916                goto err;
 917
 918        if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
 919                iwch_post_zb_read(ep->com.qp);
 920        }
 921
 922        goto out;
 923err:
 924        abort_connection(ep, skb, GFP_KERNEL);
 925out:
 926        connect_reply_upcall(ep, err);
 927        return;
 928}
 929
 930static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 931{
 932        struct mpa_message *mpa;
 933        u16 plen;
 934
 935        PDBG("%s ep %p\n", __func__, ep);
 936
 937        /*
 938         * Stop mpa timer.  If it expired, then the state has
 939         * changed and we bail since ep_timeout already aborted
 940         * the connection.
 941         */
 942        stop_ep_timer(ep);
 943        if (state_read(&ep->com) != MPA_REQ_WAIT)
 944                return;
 945
 946        /*
 947         * If we get more than the supported amount of private data
 948         * then we must fail this connection.
 949         */
 950        if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
 951                abort_connection(ep, skb, GFP_KERNEL);
 952                return;
 953        }
 954
 955        PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
 956
 957        /*
 958         * Copy the new data into our accumulation buffer.
 959         */
 960        skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
 961                                  skb->len);
 962        ep->mpa_pkt_len += skb->len;
 963
 964        /*
 965         * If we don't even have the mpa message, then bail.
 966         * We'll continue process when more data arrives.
 967         */
 968        if (ep->mpa_pkt_len < sizeof(*mpa))
 969                return;
 970        PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
 971        mpa = (struct mpa_message *) ep->mpa_pkt;
 972
 973        /*
 974         * Validate MPA Header.
 975         */
 976        if (mpa->revision != mpa_rev) {
 977                abort_connection(ep, skb, GFP_KERNEL);
 978                return;
 979        }
 980
 981        if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
 982                abort_connection(ep, skb, GFP_KERNEL);
 983                return;
 984        }
 985
 986        plen = ntohs(mpa->private_data_size);
 987
 988        /*
 989         * Fail if there's too much private data.
 990         */
 991        if (plen > MPA_MAX_PRIVATE_DATA) {
 992                abort_connection(ep, skb, GFP_KERNEL);
 993                return;
 994        }
 995
 996        /*
 997         * If plen does not account for pkt size
 998         */
 999        if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1000                abort_connection(ep, skb, GFP_KERNEL);
1001                return;
1002        }
1003        ep->plen = (u8) plen;
1004
1005        /*
1006         * If we don't have all the pdata yet, then bail.
1007         */
1008        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1009                return;
1010
1011        /*
1012         * If we get here we have accumulated the entire mpa
1013         * start reply message including private data.
1014         */
1015        ep->mpa_attr.initiator = 0;
1016        ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1017        ep->mpa_attr.recv_marker_enabled = markers_enabled;
1018        ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1019        ep->mpa_attr.version = mpa_rev;
1020        PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1021             "xmit_marker_enabled=%d, version=%d\n", __func__,
1022             ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1023             ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
1024
1025        state_set(&ep->com, MPA_REQ_RCVD);
1026
1027        /* drive upcall */
1028        connect_request_upcall(ep);
1029        return;
1030}
1031
1032static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1033{
1034        struct iwch_ep *ep = ctx;
1035        struct cpl_rx_data *hdr = cplhdr(skb);
1036        unsigned int dlen = ntohs(hdr->len);
1037
1038        PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);
1039
1040        skb_pull(skb, sizeof(*hdr));
1041        skb_trim(skb, dlen);
1042
1043        ep->rcv_seq += dlen;
1044        BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
1045
1046        switch (state_read(&ep->com)) {
1047        case MPA_REQ_SENT:
1048                process_mpa_reply(ep, skb);
1049                break;
1050        case MPA_REQ_WAIT:
1051                process_mpa_request(ep, skb);
1052                break;
1053        case MPA_REP_SENT:
1054                break;
1055        default:
1056                printk(KERN_ERR MOD "%s Unexpected streaming data."
1057                       " ep %p state %d tid %d\n",
1058                       __func__, ep, state_read(&ep->com), ep->hwtid);
1059
1060                /*
1061                 * The ep will timeout and inform the ULP of the failure.
1062                 * See ep_timeout().
1063                 */
1064                break;
1065        }
1066
1067        /* update RX credits */
1068        update_rx_credits(ep, dlen);
1069
1070        return CPL_RET_BUF_DONE;
1071}
1072
1073/*
1074 * Upcall from the adapter indicating data has been transmitted.
1075 * For us its just the single MPA request or reply.  We can now free
1076 * the skb holding the mpa message.
1077 */
1078static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1079{
1080        struct iwch_ep *ep = ctx;
1081        struct cpl_wr_ack *hdr = cplhdr(skb);
1082        unsigned int credits = ntohs(hdr->credits);
1083
1084        PDBG("%s ep %p credits %u\n", __func__, ep, credits);
1085
1086        if (credits == 0) {
1087                PDBG(KERN_ERR "%s 0 credit ack  ep %p state %u\n",
1088                        __func__, ep, state_read(&ep->com));
1089                return CPL_RET_BUF_DONE;
1090        }
1091
1092        BUG_ON(credits != 1);
1093        dst_confirm(ep->dst);
1094        if (!ep->mpa_skb) {
1095                PDBG("%s rdma_init wr_ack ep %p state %u\n",
1096                        __func__, ep, state_read(&ep->com));
1097                if (ep->mpa_attr.initiator) {
1098                        PDBG("%s initiator ep %p state %u\n",
1099                                __func__, ep, state_read(&ep->com));
1100                        if (peer2peer)
1101                                iwch_post_zb_read(ep->com.qp);
1102                } else {
1103                        PDBG("%s responder ep %p state %u\n",
1104                                __func__, ep, state_read(&ep->com));
1105                        ep->com.rpl_done = 1;
1106                        wake_up(&ep->com.waitq);
1107                }
1108        } else {
1109                PDBG("%s lsm ack ep %p state %u freeing skb\n",
1110                        __func__, ep, state_read(&ep->com));
1111                kfree_skb(ep->mpa_skb);
1112                ep->mpa_skb = NULL;
1113        }
1114        return CPL_RET_BUF_DONE;
1115}
1116
1117static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1118{
1119        struct iwch_ep *ep = ctx;
1120        unsigned long flags;
1121        int release = 0;
1122
1123        PDBG("%s ep %p\n", __func__, ep);
1124        BUG_ON(!ep);
1125
1126        /*
1127         * We get 2 abort replies from the HW.  The first one must
1128         * be ignored except for scribbling that we need one more.
1129         */
1130        if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
1131                ep->flags |= ABORT_REQ_IN_PROGRESS;
1132                return CPL_RET_BUF_DONE;
1133        }
1134
1135        spin_lock_irqsave(&ep->com.lock, flags);
1136        switch (ep->com.state) {
1137        case ABORTING:
1138                close_complete_upcall(ep);
1139                __state_set(&ep->com, DEAD);
1140                release = 1;
1141                break;
1142        default:
1143                printk(KERN_ERR "%s ep %p state %d\n",
1144                     __func__, ep, ep->com.state);
1145                break;
1146        }
1147        spin_unlock_irqrestore(&ep->com.lock, flags);
1148
1149        if (release)
1150                release_ep_resources(ep);
1151        return CPL_RET_BUF_DONE;
1152}
1153
1154/*
1155 * Return whether a failed active open has allocated a TID
1156 */
1157static inline int act_open_has_tid(int status)
1158{
1159        return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1160               status != CPL_ERR_ARP_MISS;
1161}
1162
1163static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1164{
1165        struct iwch_ep *ep = ctx;
1166        struct cpl_act_open_rpl *rpl = cplhdr(skb);
1167
1168        PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
1169             status2errno(rpl->status));
1170        connect_reply_upcall(ep, status2errno(rpl->status));
1171        state_set(&ep->com, DEAD);
1172        if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
1173                release_tid(ep->com.tdev, GET_TID(rpl), NULL);
1174        cxgb3_free_atid(ep->com.tdev, ep->atid);
1175        dst_release(ep->dst);
1176        l2t_release(L2DATA(ep->com.tdev), ep->l2t);
1177        put_ep(&ep->com);
1178        return CPL_RET_BUF_DONE;
1179}
1180
1181static int listen_start(struct iwch_listen_ep *ep)
1182{
1183        struct sk_buff *skb;
1184        struct cpl_pass_open_req *req;
1185
1186        PDBG("%s ep %p\n", __func__, ep);
1187        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1188        if (!skb) {
1189                printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
1190                return -ENOMEM;
1191        }
1192
1193        req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
1194        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1195        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
1196        req->local_port = ep->com.local_addr.sin_port;
1197        req->local_ip = ep->com.local_addr.sin_addr.s_addr;
1198        req->peer_port = 0;
1199        req->peer_ip = 0;
1200        req->peer_netmask = 0;
1201        req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
1202        req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
1203        req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
1204
1205        skb->priority = 1;
1206        cxgb3_ofld_send(ep->com.tdev, skb);
1207        return 0;
1208}
1209
1210static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1211{
1212        struct iwch_listen_ep *ep = ctx;
1213        struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1214
1215        PDBG("%s ep %p status %d error %d\n", __func__, ep,
1216             rpl->status, status2errno(rpl->status));
1217        ep->com.rpl_err = status2errno(rpl->status);
1218        ep->com.rpl_done = 1;
1219        wake_up(&ep->com.waitq);
1220
1221        return CPL_RET_BUF_DONE;
1222}
1223
1224static int listen_stop(struct iwch_listen_ep *ep)
1225{
1226        struct sk_buff *skb;
1227        struct cpl_close_listserv_req *req;
1228
1229        PDBG("%s ep %p\n", __func__, ep);
1230        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1231        if (!skb) {
1232                printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
1233                return -ENOMEM;
1234        }
1235        req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
1236        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1237        req->cpu_idx = 0;
1238        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
1239        skb->priority = 1;
1240        cxgb3_ofld_send(ep->com.tdev, skb);
1241        return 0;
1242}
1243
1244static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
1245                             void *ctx)
1246{
1247        struct iwch_listen_ep *ep = ctx;
1248        struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
1249
1250        PDBG("%s ep %p\n", __func__, ep);
1251        ep->com.rpl_err = status2errno(rpl->status);
1252        ep->com.rpl_done = 1;
1253        wake_up(&ep->com.waitq);
1254        return CPL_RET_BUF_DONE;
1255}
1256
1257static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
1258{
1259        struct cpl_pass_accept_rpl *rpl;
1260        unsigned int mtu_idx;
1261        u32 opt0h, opt0l, opt2;
1262        int wscale;
1263
1264        PDBG("%s ep %p\n", __func__, ep);
1265        BUG_ON(skb_cloned(skb));
1266        skb_trim(skb, sizeof(*rpl));
1267        skb_get(skb);
1268        mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
1269        wscale = compute_wscale(rcv_win);
1270        opt0h = V_NAGLE(0) |
1271            V_NO_CONG(nocong) |
1272            V_KEEP_ALIVE(1) |
1273            F_TCAM_BYPASS |
1274            V_WND_SCALE(wscale) |
1275            V_MSS_IDX(mtu_idx) |
1276            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
1277        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
1278        opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
1279
1280        rpl = cplhdr(skb);
1281        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1282        OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
1283        rpl->peer_ip = peer_ip;
1284        rpl->opt0h = htonl(opt0h);
1285        rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
1286        rpl->opt2 = htonl(opt2);
1287        rpl->rsvd = rpl->opt2;        /* workaround for HW bug */
1288        skb->priority = CPL_PRIORITY_SETUP;
1289        l2t_send(ep->com.tdev, skb, ep->l2t);
1290
1291        return;
1292}
1293
1294static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
1295                      struct sk_buff *skb)
1296{
1297        PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
1298             peer_ip);
1299        BUG_ON(skb_cloned(skb));
1300        skb_trim(skb, sizeof(struct cpl_tid_release));
1301        skb_get(skb);
1302
1303        if (tdev->type != T3A)
1304                release_tid(tdev, hwtid, skb);
1305        else {
1306                struct cpl_pass_accept_rpl *rpl;
1307
1308                rpl = cplhdr(skb);
1309                skb->priority = CPL_PRIORITY_SETUP;
1310                rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1311                OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1312                                                      hwtid));
1313                rpl->peer_ip = peer_ip;
1314                rpl->opt0h = htonl(F_TCAM_BYPASS);
1315                rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
1316                rpl->opt2 = 0;
1317                rpl->rsvd = rpl->opt2;
1318                cxgb3_ofld_send(tdev, skb);
1319        }
1320}
1321
1322static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1323{
1324        struct iwch_ep *child_ep, *parent_ep = ctx;
1325        struct cpl_pass_accept_req *req = cplhdr(skb);
1326        unsigned int hwtid = GET_TID(req);
1327        struct dst_entry *dst;
1328        struct l2t_entry *l2t;
1329        struct rtable *rt;
1330        struct iff_mac tim;
1331
1332        PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
1333
1334        if (state_read(&parent_ep->com) != LISTEN) {
1335                printk(KERN_ERR "%s - listening ep not in LISTEN\n",
1336                       __func__);
1337                goto reject;
1338        }
1339
1340        /*
1341         * Find the netdev for this connection request.
1342         */
1343        tim.mac_addr = req->dst_mac;
1344        tim.vlan_tag = ntohs(req->vlan_tag);
1345        if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
1346                printk(KERN_ERR
1347                        "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
1348                        __func__,
1349                        req->dst_mac[0],
1350                        req->dst_mac[1],
1351                        req->dst_mac[2],
1352                        req->dst_mac[3],
1353                        req->dst_mac[4],
1354                        req->dst_mac[5]);
1355                goto reject;
1356        }
1357
1358        /* Find output route */
1359        rt = find_route(tdev,
1360                        req->local_ip,
1361                        req->peer_ip,
1362                        req->local_port,
1363                        req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
1364        if (!rt) {
1365                printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
1366                       __func__);
1367                goto reject;
1368        }
1369        dst = &rt->u.dst;
1370        l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
1371        if (!l2t) {
1372                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1373                       __func__);
1374                dst_release(dst);
1375                goto reject;
1376        }
1377        child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1378        if (!child_ep) {
1379                printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1380                       __func__);
1381                l2t_release(L2DATA(tdev), l2t);
1382                dst_release(dst);
1383                goto reject;
1384        }
1385        state_set(&child_ep->com, CONNECTING);
1386        child_ep->com.tdev = tdev;
1387        child_ep->com.cm_id = NULL;
1388        child_ep->com.local_addr.sin_family = PF_INET;
1389        child_ep->com.local_addr.sin_port = req->local_port;
1390        child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
1391        child_ep->com.remote_addr.sin_family = PF_INET;
1392        child_ep->com.remote_addr.sin_port = req->peer_port;
1393        child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
1394        get_ep(&parent_ep->com);
1395        child_ep->parent_ep = parent_ep;
1396        child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
1397        child_ep->l2t = l2t;
1398        child_ep->dst = dst;
1399        child_ep->hwtid = hwtid;
1400        init_timer(&child_ep->timer);
1401        cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
1402        accept_cr(child_ep, req->peer_ip, skb);
1403        goto out;
1404reject:
1405        reject_cr(tdev, hwtid, req->peer_ip, skb);
1406out:
1407        return CPL_RET_BUF_DONE;
1408}
1409
1410static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1411{
1412        struct iwch_ep *ep = ctx;
1413        struct cpl_pass_establish *req = cplhdr(skb);
1414
1415        PDBG("%s ep %p\n", __func__, ep);
1416        ep->snd_seq = ntohl(req->snd_isn);
1417        ep->rcv_seq = ntohl(req->rcv_isn);
1418
1419        set_emss(ep, ntohs(req->tcp_opt));
1420
1421        dst_confirm(ep->dst);
1422        state_set(&ep->com, MPA_REQ_WAIT);
1423        start_ep_timer(ep);
1424
1425        return CPL_RET_BUF_DONE;
1426}
1427
1428static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1429{
1430        struct iwch_ep *ep = ctx;
1431        struct iwch_qp_attributes attrs;
1432        unsigned long flags;
1433        int disconnect = 1;
1434        int release = 0;
1435
1436        PDBG("%s ep %p\n", __func__, ep);
1437        dst_confirm(ep->dst);
1438
1439        spin_lock_irqsave(&ep->com.lock, flags);
1440        switch (ep->com.state) {
1441        case MPA_REQ_WAIT:
1442                __state_set(&ep->com, CLOSING);
1443                break;
1444        case MPA_REQ_SENT:
1445                __state_set(&ep->com, CLOSING);
1446                connect_reply_upcall(ep, -ECONNRESET);
1447                break;
1448        case MPA_REQ_RCVD:
1449
1450                /*
1451                 * We're gonna mark this puppy DEAD, but keep
1452                 * the reference on it until the ULP accepts or
1453                 * rejects the CR.
1454                 */
1455                __state_set(&ep->com, CLOSING);
1456                get_ep(&ep->com);
1457                break;
1458        case MPA_REP_SENT:
1459                __state_set(&ep->com, CLOSING);
1460                ep->com.rpl_done = 1;
1461                ep->com.rpl_err = -ECONNRESET;
1462                PDBG("waking up ep %p\n", ep);
1463                wake_up(&ep->com.waitq);
1464                break;
1465        case FPDU_MODE:
1466                start_ep_timer(ep);
1467                __state_set(&ep->com, CLOSING);
1468                attrs.next_state = IWCH_QP_STATE_CLOSING;
1469                iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
1470                               IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
1471                peer_close_upcall(ep);
1472                break;
1473        case ABORTING:
1474                disconnect = 0;
1475                break;
1476        case CLOSING:
1477                __state_set(&ep->com, MORIBUND);
1478                disconnect = 0;
1479                break;
1480        case MORIBUND:
1481                stop_ep_timer(ep);
1482                if (ep->com.cm_id && ep->com.qp) {
1483                        attrs.next_state = IWCH_QP_STATE_IDLE;
1484                        iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
1485                                       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
1486                }
1487                close_complete_upcall(ep);
1488                __state_set(&ep->com, DEAD);
1489                release = 1;
1490                disconnect = 0;
1491                break;
1492        case DEAD:
1493                disconnect = 0;
1494                break;
1495        default:
1496                BUG_ON(1);
1497        }
1498        spin_unlock_irqrestore(&ep->com.lock, flags);
1499        if (disconnect)
1500                iwch_ep_disconnect(ep, 0, GFP_KERNEL);
1501        if (release)
1502                release_ep_resources(ep);
1503        return CPL_RET_BUF_DONE;
1504}
1505
1506/*
1507 * Returns whether an ABORT_REQ_RSS message is a negative advice.
1508 */
1509static int is_neg_adv_abort(unsigned int status)
1510{
1511        return status == CPL_ERR_RTX_NEG_ADVICE ||
1512               status == CPL_ERR_PERSIST_NEG_ADVICE;
1513}
1514
1515static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1516{
1517        struct cpl_abort_req_rss *req = cplhdr(skb);
1518        struct iwch_ep *ep = ctx;
1519        struct cpl_abort_rpl *rpl;
1520        struct sk_buff *rpl_skb;
1521        struct iwch_qp_attributes attrs;
1522        int ret;
1523        int release = 0;
1524        unsigned long flags;
1525
1526        if (is_neg_adv_abort(req->status)) {
1527                PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
1528                     ep->hwtid);
1529                t3_l2t_send_event(ep->com.tdev, ep->l2t);
1530                return CPL_RET_BUF_DONE;
1531        }
1532
1533        /*
1534         * We get 2 peer aborts from the HW.  The first one must
1535         * be ignored except for scribbling that we need one more.
1536         */
1537        if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
1538                ep->flags |= PEER_ABORT_IN_PROGRESS;
1539                return CPL_RET_BUF_DONE;
1540        }
1541
1542        spin_lock_irqsave(&ep->com.lock, flags);
1543        PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
1544        switch (ep->com.state) {
1545        case CONNECTING:
1546                break;
1547        case MPA_REQ_WAIT:
1548                stop_ep_timer(ep);
1549                break;
1550        case MPA_REQ_SENT:
1551                stop_ep_timer(ep);
1552                connect_reply_upcall(ep, -ECONNRESET);
1553                break;
1554        case MPA_REP_SENT:
1555                ep->com.rpl_done = 1;
1556                ep->com.rpl_err = -ECONNRESET;
1557                PDBG("waking up ep %p\n", ep);
1558                wake_up(&ep->com.waitq);
1559                break;
1560        case MPA_REQ_RCVD:
1561
1562                /*
1563                 * We're gonna mark this puppy DEAD, but keep
1564                 * the reference on it until the ULP accepts or
1565                 * rejects the CR.
1566                 */
1567                get_ep(&ep->com);
1568                break;
1569        case MORIBUND:
1570        case CLOSING:
1571                stop_ep_timer(ep);
1572                /*FALLTHROUGH*/
1573        case FPDU_MODE:
1574                if (ep->com.cm_id && ep->com.qp) {
1575                        attrs.next_state = IWCH_QP_STATE_ERROR;
1576                        ret = iwch_modify_qp(ep->com.qp->rhp,
1577                                     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
1578                                     &attrs, 1);
1579                        if (ret)
1580                                printk(KERN_ERR MOD
1581                                       "%s - qp <- error failed!\n",
1582                                       __func__);
1583                }
1584                peer_abort_upcall(ep);
1585                break;
1586        case ABORTING:
1587                break;
1588        case DEAD:
1589                PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1590                spin_unlock_irqrestore(&ep->com.lock, flags);
1591                return CPL_RET_BUF_DONE;
1592        default:
1593                BUG_ON(1);
1594                break;
1595        }
1596        dst_confirm(ep->dst);
1597        if (ep->com.state != ABORTING) {
1598                __state_set(&ep->com, DEAD);
1599                release = 1;
1600        }
1601        spin_unlock_irqrestore(&ep->com.lock, flags);
1602
1603        rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1604        if (!rpl_skb) {
1605                printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
1606                       __func__);
1607                release = 1;
1608                goto out;
1609        }
1610        rpl_skb->priority = CPL_PRIORITY_DATA;
1611        rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
1612        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
1613        rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
1614        OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1615        rpl->cmd = CPL_ABORT_NO_RST;
1616        cxgb3_ofld_send(ep->com.tdev, rpl_skb);
1617out:
1618        if (release)
1619                release_ep_resources(ep);
1620        return CPL_RET_BUF_DONE;
1621}
1622
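    /*
     * CPL_CLOSE_CON_RPL: the adapter has acknowledged our half-close.
     * In CLOSING the peer's close has not yet arrived, so move to
     * MORIBUND; in MORIBUND this was the last expected event, so idle
     * the QP, issue the close-complete upcall and release the endpoint.
     */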
1623static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1624{
1625        struct iwch_ep *ep = ctx;
1626        struct iwch_qp_attributes attrs;
1627        unsigned long flags;
1628        int release = 0;
1629
1630        PDBG("%s ep %p\n", __func__, ep);
1631        BUG_ON(!ep);
1632
1633        /* The cm_id may be null if we failed to connect */
1634        spin_lock_irqsave(&ep->com.lock, flags);
1635        switch (ep->com.state) {
1636        case CLOSING:
1637                __state_set(&ep->com, MORIBUND);
1638                break;
1639        case MORIBUND:
1640                stop_ep_timer(ep);
1641                if ((ep->com.cm_id) && (ep->com.qp)) {
1642                        attrs.next_state = IWCH_QP_STATE_IDLE;
1643                        iwch_modify_qp(ep->com.qp->rhp,
1644                                             ep->com.qp,
1645                                             IWCH_QP_ATTR_NEXT_STATE,
1646                                             &attrs, 1);
1647                }
1648                close_complete_upcall(ep);
1649                __state_set(&ep->com, DEAD);
1650                release = 1;
1651                break;
1652        case ABORTING:
1653        case DEAD:
1654                break;
1655        default:
1656                BUG_ON(1);
1657                break;
1658        }
1659        spin_unlock_irqrestore(&ep->com.lock, flags);
1660        if (release)
1661                release_ep_resources(ep);
1662        return CPL_RET_BUF_DONE;
1663}
1664
1665/*
1666 * T3A does 3 things when a TERM is received:
1667 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
1668 * 2) generate an async event on the QP with the TERMINATE opcode
1669 * 3) post a TERMINATE opcode cqe into the associated CQ.
1670 *
1671 * For (1), we save the message in the qp for later consumption by the consumer.
1672 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
1673 * For (3), we toss the CQE in cxio_poll_cq().
1674 *
1675 * terminate() handles case (1)...
1676 */
1677static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1678{
1679        struct iwch_ep *ep = ctx;
1680
1681        PDBG("%s ep %p\n", __func__, ep);
1682        skb_pull(skb, sizeof(struct cpl_rdma_terminate));
1683        PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
1684        skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
1685                                  skb->len);
1686        ep->com.qp->attr.terminate_msg_len = skb->len;
1687        ep->com.qp->attr.is_terminate_local = 0;
1688        return CPL_RET_BUF_DONE;
1689}
1690
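    /*
     * CPL_RDMA_EC_STATUS: close status reported by the adapter.  A
     * non-zero status means the graceful close failed, so stop the
     * endpoint timer, move the QP to ERROR and abort the connection.
     */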
1691static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1692{
1693        struct cpl_rdma_ec_status *rep = cplhdr(skb);
1694        struct iwch_ep *ep = ctx;
1695
1696        PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
1697             rep->status);
1698        if (rep->status) {
1699                struct iwch_qp_attributes attrs;
1700
1701                printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
1702                       __func__, ep->hwtid);
1703                stop_ep_timer(ep);
1704                attrs.next_state = IWCH_QP_STATE_ERROR;
1705                iwch_modify_qp(ep->com.qp->rhp,
1706                               ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
1707                               &attrs, 1);
1708                abort_connection(ep, NULL, GFP_KERNEL);
1709        }
1710        return CPL_RET_BUF_DONE;
1711}
1712
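    /*
     * Timer callback armed by start_ep_timer(): the MPA exchange or the
     * close did not complete within ep_timeout_secs.  Runs in timer
     * (atomic) context, hence GFP_ATOMIC for the abort, and drops the
     * reference taken when the timer was started.
     */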
1713static void ep_timeout(unsigned long arg)
1714{
1715        struct iwch_ep *ep = (struct iwch_ep *)arg;
1716        struct iwch_qp_attributes attrs;
1717        unsigned long flags;
1718        int abort = 1;
1719
1720        spin_lock_irqsave(&ep->com.lock, flags);
1721        PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
1722             ep->com.state);
1723        switch (ep->com.state) {
1724        case MPA_REQ_SENT:
1725                __state_set(&ep->com, ABORTING);
1726                connect_reply_upcall(ep, -ETIMEDOUT);
1727                break;
1728        case MPA_REQ_WAIT:
1729                __state_set(&ep->com, ABORTING);
1730                break;
1731        case CLOSING:
1732        case MORIBUND:
1733                if (ep->com.cm_id && ep->com.qp) {
1734                        attrs.next_state = IWCH_QP_STATE_ERROR;
1735                        iwch_modify_qp(ep->com.qp->rhp,
1736                                     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
1737                                     &attrs, 1);
1738                }
1739                __state_set(&ep->com, ABORTING);
1740                break;
1741        default:
1742                printk(KERN_ERR MOD "%s unexpected state ep %p state %u\n",
1743                        __func__, ep, ep->com.state);
1744                WARN_ON(1);
1745                abort = 0;
1746        }
1747        spin_unlock_irqrestore(&ep->com.lock, flags);
1748        if (abort)
1749                abort_connection(ep, NULL, GFP_ATOMIC);
1750        put_ep(&ep->com);
1751}
1752
1753int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1754{
1755        int err;
1756        struct iwch_ep *ep = to_ep(cm_id);
1757        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1758
1759        if (state_read(&ep->com) == DEAD) {
1760                put_ep(&ep->com);
1761                return -ECONNRESET;
1762        }
1763        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1764        if (mpa_rev == 0)
1765                abort_connection(ep, NULL, GFP_KERNEL);
1766        else {
1767                err = send_mpa_reject(ep, pdata, pdata_len);
1768                err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
1769        }
1770        return 0;
1771}
1772
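    /*
     * iw_cm accept: the ULP accepts a received MPA connection request.
     * Bind the QP to the endpoint, move it to RTS (waiting for the WR
     * ack if receive queue entries are already posted), send the MPA
     * reply and transition to FPDU_MODE.
     */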
1773int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1774{
1775        int err;
1776        struct iwch_qp_attributes attrs;
1777        enum iwch_qp_attr_mask mask;
1778        struct iwch_ep *ep = to_ep(cm_id);
1779        struct iwch_dev *h = to_iwch_dev(cm_id->device);
1780        struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
1781
1782        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1783        if (state_read(&ep->com) == DEAD)
1784                return -ECONNRESET;
1785
1786        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1787        BUG_ON(!qp);
1788
1789        if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
1790            (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
1791                abort_connection(ep, NULL, GFP_KERNEL);
1792                return -EINVAL;
1793        }
1794
1795        cm_id->add_ref(cm_id);
1796        ep->com.cm_id = cm_id;
1797        ep->com.qp = qp;
1798
1799        ep->com.rpl_done = 0;
1800        ep->com.rpl_err = 0;
1801        ep->ird = conn_param->ird;
1802        ep->ord = conn_param->ord;
1803        PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1804
1805        get_ep(&ep->com);
1806
1807        /* bind QP to EP and move to RTS */
1808        attrs.mpa_attr = ep->mpa_attr;
1809        attrs.max_ird = ep->ird;
1810        attrs.max_ord = ep->ord;
1811        attrs.llp_stream_handle = ep;
1812        attrs.next_state = IWCH_QP_STATE_RTS;
1813
1814        /* bind QP and TID with INIT_WR */
1815        mask = IWCH_QP_ATTR_NEXT_STATE |
1816                             IWCH_QP_ATTR_LLP_STREAM_HANDLE |
1817                             IWCH_QP_ATTR_MPA_ATTR |
1818                             IWCH_QP_ATTR_MAX_IRD |
1819                             IWCH_QP_ATTR_MAX_ORD;
1820
1821        err = iwch_modify_qp(ep->com.qp->rhp,
1822                             ep->com.qp, mask, &attrs, 1);
1823        if (err)
1824                goto err;
1825
1826        /* if needed, wait for wr_ack */
1827        if (iwch_rqes_posted(qp)) {
1828                wait_event(ep->com.waitq, ep->com.rpl_done);
1829                err = ep->com.rpl_err;
1830                if (err)
1831                        goto err;
1832        }
1833
1834        err = send_mpa_reply(ep, conn_param->private_data,
1835                             conn_param->private_data_len);
1836        if (err)
1837                goto err;
1838
1839
1840        state_set(&ep->com, FPDU_MODE);
1841        established_upcall(ep);
1842        put_ep(&ep->com);
1843        return 0;
1844err:
1845        ep->com.cm_id = NULL;
1846        ep->com.qp = NULL;
1847        cm_id->rem_ref(cm_id);
1848        put_ep(&ep->com);
1849        return err;
1850}
1851
1852static int is_loopback_dst(struct iw_cm_id *cm_id)
1853{
1854        struct net_device *dev;
1855
1856        dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
1857        if (!dev)
1858                return 0;
1859        dev_put(dev);
1860        return 1;
1861}
1862
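    /*
     * iw_cm connect: active open.  Allocate an endpoint and an active
     * TID, resolve a route and an L2T entry for the destination, then
     * send the connect request to the adapter.  Loopback destinations
     * are not supported.
     */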
1863int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1864{
1865        int err = 0;
1866        struct iwch_dev *h = to_iwch_dev(cm_id->device);
1867        struct iwch_ep *ep;
1868        struct rtable *rt;
1869
1870        if (is_loopback_dst(cm_id)) {
1871                err = -ENOSYS;
1872                goto out;
1873        }
1874
1875        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1876        if (!ep) {
1877                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1878                err = -ENOMEM;
1879                goto out;
1880        }
1881        init_timer(&ep->timer);
1882        ep->plen = conn_param->private_data_len;
1883        if (ep->plen)
1884                memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
1885                       conn_param->private_data, ep->plen);
1886        ep->ird = conn_param->ird;
1887        ep->ord = conn_param->ord;
1888        ep->com.tdev = h->rdev.t3cdev_p;
1889
1890        cm_id->add_ref(cm_id);
1891        ep->com.cm_id = cm_id;
1892        ep->com.qp = get_qhp(h, conn_param->qpn);
1893        BUG_ON(!ep->com.qp);
1894        PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
1895             ep->com.qp, cm_id);
1896
1897        /*
1898         * Allocate an active TID to initiate a TCP connection.
1899         */
1900        ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
1901        if (ep->atid == -1) {
1902                printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
1903                err = -ENOMEM;
1904                goto fail2;
1905        }
1906
1907        /* find a route */
1908        rt = find_route(h->rdev.t3cdev_p,
1909                        cm_id->local_addr.sin_addr.s_addr,
1910                        cm_id->remote_addr.sin_addr.s_addr,
1911                        cm_id->local_addr.sin_port,
1912                        cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
1913        if (!rt) {
1914                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
1915                err = -EHOSTUNREACH;
1916                goto fail3;
1917        }
1918        ep->dst = &rt->u.dst;
1919
1920        /* get a l2t entry */
1921        ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
1922                             ep->dst->neighbour->dev);
1923        if (!ep->l2t) {
1924                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1925                err = -ENOMEM;
1926                goto fail4;
1927        }
1928
1929        state_set(&ep->com, CONNECTING);
1930        ep->tos = IPTOS_LOWDELAY;
1931        ep->com.local_addr = cm_id->local_addr;
1932        ep->com.remote_addr = cm_id->remote_addr;
1933
1934        /* send connect request to rnic */
1935        err = send_connect(ep);
1936        if (!err)
1937                goto out;
1938
1939        l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
1940fail4:
1941        dst_release(ep->dst);
1942fail3:
1943        cxgb3_free_atid(ep->com.tdev, ep->atid);
1944fail2:
1945        cm_id->rem_ref(cm_id);
1946        put_ep(&ep->com);
1947out:
1948        return err;
1949}
1950
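    /*
     * iw_cm listen: allocate a listening endpoint and a server TID,
     * start the hardware listener and wait for the CPL_PASS_OPEN_RPL
     * status before reporting success to the ULP.
     */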
1951int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
1952{
1953        int err = 0;
1954        struct iwch_dev *h = to_iwch_dev(cm_id->device);
1955        struct iwch_listen_ep *ep;
1956
1957
1958        might_sleep();
1959
1960        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1961        if (!ep) {
1962                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1963                err = -ENOMEM;
1964                goto fail1;
1965        }
1966        PDBG("%s ep %p\n", __func__, ep);
1967        ep->com.tdev = h->rdev.t3cdev_p;
1968        cm_id->add_ref(cm_id);
1969        ep->com.cm_id = cm_id;
1970        ep->backlog = backlog;
1971        ep->com.local_addr = cm_id->local_addr;
1972
1973        /*
1974         * Allocate a server TID.
1975         */
1976        ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
1977        if (ep->stid == -1) {
1978                printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
1979                err = -ENOMEM;
1980                goto fail2;
1981        }
1982
1983        state_set(&ep->com, LISTEN);
1984        err = listen_start(ep);
1985        if (err)
1986                goto fail3;
1987
1988        /* wait for pass_open_rpl */
1989        wait_event(ep->com.waitq, ep->com.rpl_done);
1990        err = ep->com.rpl_err;
1991        if (!err) {
1992                cm_id->provider_data = ep;
1993                goto out;
1994        }
1995fail3:
1996        cxgb3_free_stid(ep->com.tdev, ep->stid);
1997fail2:
1998        cm_id->rem_ref(cm_id);
1999        put_ep(&ep->com);
2000fail1:
2001out:
2002        return err;
2003}
2004
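    /*
     * Tear down a listening endpoint: stop the hardware listener, wait
     * for the close-listserver reply, free the server TID and drop the
     * cm_id and endpoint references.
     */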
2005int iwch_destroy_listen(struct iw_cm_id *cm_id)
2006{
2007        int err;
2008        struct iwch_listen_ep *ep = to_listen_ep(cm_id);
2009
2010        PDBG("%s ep %p\n", __func__, ep);
2011
2012        might_sleep();
2013        state_set(&ep->com, DEAD);
2014        ep->com.rpl_done = 0;
2015        ep->com.rpl_err = 0;
2016        err = listen_stop(ep);
2017        wait_event(ep->com.waitq, ep->com.rpl_done);
2018        cxgb3_free_stid(ep->com.tdev, ep->stid);
2019        err = ep->com.rpl_err;
2020        cm_id->rem_ref(cm_id);
2021        put_ep(&ep->com);
2022        return err;
2023}
2024
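    /*
     * Initiate a local close of the connection.  With abrupt set, the
     * endpoint goes to ABORTING and an abort request is sent to the
     * adapter; otherwise a graceful half-close is sent and the endpoint
     * timer is armed while the rest of the shutdown completes.
     */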
2025int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
2026{
2027        int ret = 0;
2028        unsigned long flags;
2029        int close = 0;
2030
2031        spin_lock_irqsave(&ep->com.lock, flags);
2032
2033        PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2034             states[ep->com.state], abrupt);
2035
2036        switch (ep->com.state) {
2037        case MPA_REQ_WAIT:
2038        case MPA_REQ_SENT:
2039        case MPA_REQ_RCVD:
2040        case MPA_REP_SENT:
2041        case FPDU_MODE:
2042                close = 1;
2043                if (abrupt)
2044                        ep->com.state = ABORTING;
2045                else {
2046                        ep->com.state = CLOSING;
2047                        start_ep_timer(ep);
2048                }
2049                break;
2050        case CLOSING:
2051                close = 1;
2052                if (abrupt) {
2053                        stop_ep_timer(ep);
2054                        ep->com.state = ABORTING;
2055                } else
2056                        ep->com.state = MORIBUND;
2057                break;
2058        case MORIBUND:
2059        case ABORTING:
2060        case DEAD:
2061                PDBG("%s ignoring disconnect ep %p state %u\n",
2062                     __func__, ep, ep->com.state);
2063                break;
2064        default:
2065                BUG();
2066                break;
2067        }
2068
2069        spin_unlock_irqrestore(&ep->com.lock, flags);
2070        if (close) {
2071                if (abrupt)
2072                        ret = send_abort(ep, NULL, gfp);
2073                else
2074                        ret = send_halfclose(ep, gfp);
2075        }
2076        return ret;
2077}
2078
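    /*
     * Called on routing/neighbour updates: if this endpoint was using
     * the old dst entry, switch it to the new dst and L2T entry and
     * adjust the reference counts.  Returns non-zero if the endpoint
     * was redirected.
     */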
2079int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2080                     struct l2t_entry *l2t)
2081{
2082        struct iwch_ep *ep = ctx;
2083
2084        if (ep->dst != old)
2085                return 0;
2086
2087        PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2088             l2t);
2089        dst_hold(new);
2090        l2t_release(L2DATA(ep->com.tdev), ep->l2t);
2091        ep->l2t = l2t;
2092        dst_release(old);
2093        ep->dst = new;
2094        return 1;
2095}
2096
2097/*
2098 * All the CM events are handled on a work queue to have a safe context.
2099 */
2100static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2101{
2102        struct iwch_ep_common *epc = ctx;
2103
2104        get_ep(epc);
2105
2106        /*
2107         * Save ctx and tdev in the skb->cb area.
2108         */
2109        *((void **) skb->cb) = ctx;
2110        *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;
2111
2112        /*
2113         * Queue the skb and schedule the worker thread.
2114         */
2115        skb_queue_tail(&rxq, skb);
2116        queue_work(workq, &skb_work);
2117        return 0;
2118}
2119
2120static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2121{
2122        struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
2123
2124        if (rpl->status != CPL_ERR_NONE) {
2125                printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
2126                       "for tid %u\n", rpl->status, GET_TID(rpl));
2127        }
2128        return CPL_RET_BUF_DONE;
2129}
2130
2131int __init iwch_cm_init(void)
2132{
2133        skb_queue_head_init(&rxq);
2134
2135        workq = create_singlethread_workqueue("iw_cxgb3");
2136        if (!workq)
2137                return -ENOMEM;
2138
2139        /*
2140         * All upcalls from the T3 Core go to sched() to
2141         * schedule the processing on a work queue.
2142         */
2143        t3c_handlers[CPL_ACT_ESTABLISH] = sched;
2144        t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
2145        t3c_handlers[CPL_RX_DATA] = sched;
2146        t3c_handlers[CPL_TX_DMA_ACK] = sched;
2147        t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
2148        t3c_handlers[CPL_ABORT_RPL] = sched;
2149        t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
2150        t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
2151        t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
2152        t3c_handlers[CPL_PASS_ESTABLISH] = sched;
2153        t3c_handlers[CPL_PEER_CLOSE] = sched;
2154        t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
2155        t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
2156        t3c_handlers[CPL_RDMA_TERMINATE] = sched;
2157        t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
2158        t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
2159
2160        /*
2161         * These are the real handlers that are called from a
2162         * work queue.
2163         */
2164        work_handlers[CPL_ACT_ESTABLISH] = act_establish;
2165        work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
2166        work_handlers[CPL_RX_DATA] = rx_data;
2167        work_handlers[CPL_TX_DMA_ACK] = tx_ack;
2168        work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
2169        work_handlers[CPL_ABORT_RPL] = abort_rpl;
2170        work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
2171        work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
2172        work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
2173        work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
2174        work_handlers[CPL_PEER_CLOSE] = peer_close;
2175        work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
2176        work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
2177        work_handlers[CPL_RDMA_TERMINATE] = terminate;
2178        work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
2179        return 0;
2180}
2181
2182void __exit iwch_cm_term(void)
2183{
2184        flush_workqueue(workq);
2185        destroy_workqueue(workq);
2186}