Showing error 1775

User: Jiri Slaby
Error type: Invalid Pointer Dereference
Error type description: An invalid pointer is dereferenced
File location: net/sunrpc/clnt.c
Line in file: 530 — corresponds to listing line 530, `if (task->tk_status != 0)` in rpc_run_task(), i.e. a dereference of the `task` pointer returned by rpc_new_task(). The NULL return is already handled at listing line 523, so the tool presumably suspects rpc_new_task() can return a non-NULL invalid pointer (e.g. an ERR_PTR) — verify against rpc_new_task()'s definition in net/sunrpc/sched.c before treating this as a real bug.
Project: Linux Kernel
Project version: 2.6.28
Tools: Smatch (1.59)
Entered: 2013-09-10 20:24:52 UTC


Source:

   1/*
   2 *  linux/net/sunrpc/clnt.c
   3 *
   4 *  This file contains the high-level RPC interface.
   5 *  It is modeled as a finite state machine to support both synchronous
   6 *  and asynchronous requests.
   7 *
   8 *  -        RPC header generation and argument serialization.
   9 *  -        Credential refresh.
  10 *  -        TCP connect handling.
  11 *  -        Retry of operation when it is suspected the operation failed because
  12 *        of uid squashing on the server, or when the credentials were stale
  13 *        and need to be refreshed, or when a packet was damaged in transit.
  14 *        This may be have to be moved to the VFS layer.
  15 *
  16 *  NB: BSD uses a more intelligent approach to guessing when a request
  17 *  or reply has been lost by keeping the RTO estimate for each procedure.
  18 *  We currently make do with a constant timeout value.
  19 *
  20 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
  21 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
  22 */
  23
  24#include <asm/system.h>
  25
  26#include <linux/module.h>
  27#include <linux/types.h>
  28#include <linux/kallsyms.h>
  29#include <linux/mm.h>
  30#include <linux/slab.h>
  31#include <linux/smp_lock.h>
  32#include <linux/utsname.h>
  33#include <linux/workqueue.h>
  34#include <linux/in6.h>
  35
  36#include <linux/sunrpc/clnt.h>
  37#include <linux/sunrpc/rpc_pipe_fs.h>
  38#include <linux/sunrpc/metrics.h>
  39
  40
  41#ifdef RPC_DEBUG
  42# define RPCDBG_FACILITY        RPCDBG_CALL
  43#endif
  44
  45#define dprint_status(t)                                        \
  46        dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,                \
  47                        __func__, t->tk_status)
  48
  49/*
  50 * All RPC clients are linked into this list
  51 */
  52static LIST_HEAD(all_clients);
  53static DEFINE_SPINLOCK(rpc_client_lock);
  54
  55static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
  56
  57
  58static void        call_start(struct rpc_task *task);
  59static void        call_reserve(struct rpc_task *task);
  60static void        call_reserveresult(struct rpc_task *task);
  61static void        call_allocate(struct rpc_task *task);
  62static void        call_decode(struct rpc_task *task);
  63static void        call_bind(struct rpc_task *task);
  64static void        call_bind_status(struct rpc_task *task);
  65static void        call_transmit(struct rpc_task *task);
  66static void        call_status(struct rpc_task *task);
  67static void        call_transmit_status(struct rpc_task *task);
  68static void        call_refresh(struct rpc_task *task);
  69static void        call_refreshresult(struct rpc_task *task);
  70static void        call_timeout(struct rpc_task *task);
  71static void        call_connect(struct rpc_task *task);
  72static void        call_connect_status(struct rpc_task *task);
  73
  74static __be32        *rpc_encode_header(struct rpc_task *task);
  75static __be32        *rpc_verify_header(struct rpc_task *task);
  76static int        rpc_ping(struct rpc_clnt *clnt, int flags);
  77
  78static void rpc_register_client(struct rpc_clnt *clnt)
  79{
  80        spin_lock(&rpc_client_lock);
  81        list_add(&clnt->cl_clients, &all_clients);
  82        spin_unlock(&rpc_client_lock);
  83}
  84
  85static void rpc_unregister_client(struct rpc_clnt *clnt)
  86{
  87        spin_lock(&rpc_client_lock);
  88        list_del(&clnt->cl_clients);
  89        spin_unlock(&rpc_client_lock);
  90}
  91
  92static int
  93rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
  94{
  95        static uint32_t clntid;
  96        int error;
  97
  98        clnt->cl_vfsmnt = ERR_PTR(-ENOENT);
  99        clnt->cl_dentry = ERR_PTR(-ENOENT);
 100        if (dir_name == NULL)
 101                return 0;
 102
 103        clnt->cl_vfsmnt = rpc_get_mount();
 104        if (IS_ERR(clnt->cl_vfsmnt))
 105                return PTR_ERR(clnt->cl_vfsmnt);
 106
 107        for (;;) {
 108                snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
 109                                "%s/clnt%x", dir_name,
 110                                (unsigned int)clntid++);
 111                clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
 112                clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
 113                if (!IS_ERR(clnt->cl_dentry))
 114                        return 0;
 115                error = PTR_ERR(clnt->cl_dentry);
 116                if (error != -EEXIST) {
 117                        printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
 118                                        clnt->cl_pathname, error);
 119                        rpc_put_mount();
 120                        return error;
 121                }
 122        }
 123}
 124
 125static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
 126{
 127        struct rpc_program        *program = args->program;
 128        struct rpc_version        *version;
 129        struct rpc_clnt                *clnt = NULL;
 130        struct rpc_auth                *auth;
 131        int err;
 132        size_t len;
 133
 134        /* sanity check the name before trying to print it */
 135        err = -EINVAL;
 136        len = strlen(args->servername);
 137        if (len > RPC_MAXNETNAMELEN)
 138                goto out_no_rpciod;
 139        len++;
 140
 141        dprintk("RPC:       creating %s client for %s (xprt %p)\n",
 142                        program->name, args->servername, xprt);
 143
 144        err = rpciod_up();
 145        if (err)
 146                goto out_no_rpciod;
 147        err = -EINVAL;
 148        if (!xprt)
 149                goto out_no_xprt;
 150
 151        if (args->version >= program->nrvers)
 152                goto out_err;
 153        version = program->version[args->version];
 154        if (version == NULL)
 155                goto out_err;
 156
 157        err = -ENOMEM;
 158        clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
 159        if (!clnt)
 160                goto out_err;
 161        clnt->cl_parent = clnt;
 162
 163        clnt->cl_server = clnt->cl_inline_name;
 164        if (len > sizeof(clnt->cl_inline_name)) {
 165                char *buf = kmalloc(len, GFP_KERNEL);
 166                if (buf != NULL)
 167                        clnt->cl_server = buf;
 168                else
 169                        len = sizeof(clnt->cl_inline_name);
 170        }
 171        strlcpy(clnt->cl_server, args->servername, len);
 172
 173        clnt->cl_xprt     = xprt;
 174        clnt->cl_procinfo = version->procs;
 175        clnt->cl_maxproc  = version->nrprocs;
 176        clnt->cl_protname = program->name;
 177        clnt->cl_prog     = args->prognumber ? : program->number;
 178        clnt->cl_vers     = version->number;
 179        clnt->cl_stats    = program->stats;
 180        clnt->cl_metrics  = rpc_alloc_iostats(clnt);
 181        err = -ENOMEM;
 182        if (clnt->cl_metrics == NULL)
 183                goto out_no_stats;
 184        clnt->cl_program  = program;
 185        INIT_LIST_HEAD(&clnt->cl_tasks);
 186        spin_lock_init(&clnt->cl_lock);
 187
 188        if (!xprt_bound(clnt->cl_xprt))
 189                clnt->cl_autobind = 1;
 190
 191        clnt->cl_timeout = xprt->timeout;
 192        if (args->timeout != NULL) {
 193                memcpy(&clnt->cl_timeout_default, args->timeout,
 194                                sizeof(clnt->cl_timeout_default));
 195                clnt->cl_timeout = &clnt->cl_timeout_default;
 196        }
 197
 198        clnt->cl_rtt = &clnt->cl_rtt_default;
 199        rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
 200
 201        kref_init(&clnt->cl_kref);
 202
 203        err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
 204        if (err < 0)
 205                goto out_no_path;
 206
 207        auth = rpcauth_create(args->authflavor, clnt);
 208        if (IS_ERR(auth)) {
 209                printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
 210                                args->authflavor);
 211                err = PTR_ERR(auth);
 212                goto out_no_auth;
 213        }
 214
 215        /* save the nodename */
 216        clnt->cl_nodelen = strlen(init_utsname()->nodename);
 217        if (clnt->cl_nodelen > UNX_MAXNODENAME)
 218                clnt->cl_nodelen = UNX_MAXNODENAME;
 219        memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
 220        rpc_register_client(clnt);
 221        return clnt;
 222
 223out_no_auth:
 224        if (!IS_ERR(clnt->cl_dentry)) {
 225                rpc_rmdir(clnt->cl_dentry);
 226                rpc_put_mount();
 227        }
 228out_no_path:
 229        rpc_free_iostats(clnt->cl_metrics);
 230out_no_stats:
 231        if (clnt->cl_server != clnt->cl_inline_name)
 232                kfree(clnt->cl_server);
 233        kfree(clnt);
 234out_err:
 235        xprt_put(xprt);
 236out_no_xprt:
 237        rpciod_down();
 238out_no_rpciod:
 239        return ERR_PTR(err);
 240}
 241
 242/*
 243 * rpc_create - create an RPC client and transport with one call
 244 * @args: rpc_clnt create argument structure
 245 *
 246 * Creates and initializes an RPC transport and an RPC client.
 247 *
 248 * It can ping the server in order to determine if it is up, and to see if
 249 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 250 * this behavior so asynchronous tasks can also use rpc_create.
 251 */
 252struct rpc_clnt *rpc_create(struct rpc_create_args *args)
 253{
 254        struct rpc_xprt *xprt;
 255        struct rpc_clnt *clnt;
 256        struct xprt_create xprtargs = {
 257                .ident = args->protocol,
 258                .srcaddr = args->saddress,
 259                .dstaddr = args->address,
 260                .addrlen = args->addrsize,
 261        };
 262        char servername[48];
 263
 264        /*
 265         * If the caller chooses not to specify a hostname, whip
 266         * up a string representation of the passed-in address.
 267         */
 268        if (args->servername == NULL) {
 269                servername[0] = '\0';
 270                switch (args->address->sa_family) {
 271                case AF_INET: {
 272                        struct sockaddr_in *sin =
 273                                        (struct sockaddr_in *)args->address;
 274                        snprintf(servername, sizeof(servername), NIPQUAD_FMT,
 275                                 NIPQUAD(sin->sin_addr.s_addr));
 276                        break;
 277                }
 278                case AF_INET6: {
 279                        struct sockaddr_in6 *sin =
 280                                        (struct sockaddr_in6 *)args->address;
 281                        snprintf(servername, sizeof(servername), NIP6_FMT,
 282                                 NIP6(sin->sin6_addr));
 283                        break;
 284                }
 285                default:
 286                        /* caller wants default server name, but
 287                         * address family isn't recognized. */
 288                        return ERR_PTR(-EINVAL);
 289                }
 290                args->servername = servername;
 291        }
 292
 293        xprt = xprt_create_transport(&xprtargs);
 294        if (IS_ERR(xprt))
 295                return (struct rpc_clnt *)xprt;
 296
 297        /*
 298         * By default, kernel RPC client connects from a reserved port.
 299         * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
 300         * but it is always enabled for rpciod, which handles the connect
 301         * operation.
 302         */
 303        xprt->resvport = 1;
 304        if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
 305                xprt->resvport = 0;
 306
 307        clnt = rpc_new_client(args, xprt);
 308        if (IS_ERR(clnt))
 309                return clnt;
 310
 311        if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
 312                int err = rpc_ping(clnt, RPC_TASK_SOFT);
 313                if (err != 0) {
 314                        rpc_shutdown_client(clnt);
 315                        return ERR_PTR(err);
 316                }
 317        }
 318
 319        clnt->cl_softrtry = 1;
 320        if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
 321                clnt->cl_softrtry = 0;
 322
 323        if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
 324                clnt->cl_autobind = 1;
 325        if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
 326                clnt->cl_discrtry = 1;
 327        if (!(args->flags & RPC_CLNT_CREATE_QUIET))
 328                clnt->cl_chatty = 1;
 329
 330        return clnt;
 331}
 332EXPORT_SYMBOL_GPL(rpc_create);
 333
 334/*
 335 * This function clones the RPC client structure. It allows us to share the
 336 * same transport while varying parameters such as the authentication
 337 * flavour.
 338 */
 339struct rpc_clnt *
 340rpc_clone_client(struct rpc_clnt *clnt)
 341{
 342        struct rpc_clnt *new;
 343        int err = -ENOMEM;
 344
 345        new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
 346        if (!new)
 347                goto out_no_clnt;
 348        new->cl_parent = clnt;
 349        /* Turn off autobind on clones */
 350        new->cl_autobind = 0;
 351        INIT_LIST_HEAD(&new->cl_tasks);
 352        spin_lock_init(&new->cl_lock);
 353        rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
 354        new->cl_metrics = rpc_alloc_iostats(clnt);
 355        if (new->cl_metrics == NULL)
 356                goto out_no_stats;
 357        kref_init(&new->cl_kref);
 358        err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
 359        if (err != 0)
 360                goto out_no_path;
 361        if (new->cl_auth)
 362                atomic_inc(&new->cl_auth->au_count);
 363        xprt_get(clnt->cl_xprt);
 364        kref_get(&clnt->cl_kref);
 365        rpc_register_client(new);
 366        rpciod_up();
 367        return new;
 368out_no_path:
 369        rpc_free_iostats(new->cl_metrics);
 370out_no_stats:
 371        kfree(new);
 372out_no_clnt:
 373        dprintk("RPC:       %s: returned error %d\n", __func__, err);
 374        return ERR_PTR(err);
 375}
 376EXPORT_SYMBOL_GPL(rpc_clone_client);
 377
 378/*
 379 * Properly shut down an RPC client, terminating all outstanding
 380 * requests.
 381 */
 382void rpc_shutdown_client(struct rpc_clnt *clnt)
 383{
 384        dprintk("RPC:       shutting down %s client for %s\n",
 385                        clnt->cl_protname, clnt->cl_server);
 386
 387        while (!list_empty(&clnt->cl_tasks)) {
 388                rpc_killall_tasks(clnt);
 389                wait_event_timeout(destroy_wait,
 390                        list_empty(&clnt->cl_tasks), 1*HZ);
 391        }
 392
 393        rpc_release_client(clnt);
 394}
 395EXPORT_SYMBOL_GPL(rpc_shutdown_client);
 396
 397/*
 398 * Free an RPC client
 399 */
 400static void
 401rpc_free_client(struct kref *kref)
 402{
 403        struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
 404
 405        dprintk("RPC:       destroying %s client for %s\n",
 406                        clnt->cl_protname, clnt->cl_server);
 407        if (!IS_ERR(clnt->cl_dentry)) {
 408                rpc_rmdir(clnt->cl_dentry);
 409                rpc_put_mount();
 410        }
 411        if (clnt->cl_parent != clnt) {
 412                rpc_release_client(clnt->cl_parent);
 413                goto out_free;
 414        }
 415        if (clnt->cl_server != clnt->cl_inline_name)
 416                kfree(clnt->cl_server);
 417out_free:
 418        rpc_unregister_client(clnt);
 419        rpc_free_iostats(clnt->cl_metrics);
 420        clnt->cl_metrics = NULL;
 421        xprt_put(clnt->cl_xprt);
 422        rpciod_down();
 423        kfree(clnt);
 424}
 425
 426/*
 427 * Free an RPC client
 428 */
 429static void
 430rpc_free_auth(struct kref *kref)
 431{
 432        struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
 433
 434        if (clnt->cl_auth == NULL) {
 435                rpc_free_client(kref);
 436                return;
 437        }
 438
 439        /*
 440         * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
 441         *       release remaining GSS contexts. This mechanism ensures
 442         *       that it can do so safely.
 443         */
 444        kref_init(kref);
 445        rpcauth_release(clnt->cl_auth);
 446        clnt->cl_auth = NULL;
 447        kref_put(kref, rpc_free_client);
 448}
 449
 450/*
 451 * Release reference to the RPC client
 452 */
 453void
 454rpc_release_client(struct rpc_clnt *clnt)
 455{
 456        dprintk("RPC:       rpc_release_client(%p)\n", clnt);
 457
 458        if (list_empty(&clnt->cl_tasks))
 459                wake_up(&destroy_wait);
 460        kref_put(&clnt->cl_kref, rpc_free_auth);
 461}
 462
 463/**
 464 * rpc_bind_new_program - bind a new RPC program to an existing client
 465 * @old: old rpc_client
 466 * @program: rpc program to set
 467 * @vers: rpc program version
 468 *
 469 * Clones the rpc client and sets up a new RPC program. This is mainly
 470 * of use for enabling different RPC programs to share the same transport.
 471 * The Sun NFSv2/v3 ACL protocol can do this.
 472 */
 473struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
 474                                      struct rpc_program *program,
 475                                      u32 vers)
 476{
 477        struct rpc_clnt *clnt;
 478        struct rpc_version *version;
 479        int err;
 480
 481        BUG_ON(vers >= program->nrvers || !program->version[vers]);
 482        version = program->version[vers];
 483        clnt = rpc_clone_client(old);
 484        if (IS_ERR(clnt))
 485                goto out;
 486        clnt->cl_procinfo = version->procs;
 487        clnt->cl_maxproc  = version->nrprocs;
 488        clnt->cl_protname = program->name;
 489        clnt->cl_prog     = program->number;
 490        clnt->cl_vers     = version->number;
 491        clnt->cl_stats    = program->stats;
 492        err = rpc_ping(clnt, RPC_TASK_SOFT);
 493        if (err != 0) {
 494                rpc_shutdown_client(clnt);
 495                clnt = ERR_PTR(err);
 496        }
 497out:
 498        return clnt;
 499}
 500EXPORT_SYMBOL_GPL(rpc_bind_new_program);
 501
 502/*
 503 * Default callback for async RPC calls
 504 */
 505static void
 506rpc_default_callback(struct rpc_task *task, void *data)
 507{
 508}
 509
 510static const struct rpc_call_ops rpc_default_ops = {
 511        .rpc_call_done = rpc_default_callback,
 512};
 513
 514/**
 515 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 516 * @task_setup_data: pointer to task initialisation data
 517 */
 518struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
 519{
 520        struct rpc_task *task, *ret;
 521
 522        task = rpc_new_task(task_setup_data);
 523        if (task == NULL) {
 524                rpc_release_calldata(task_setup_data->callback_ops,
 525                                task_setup_data->callback_data);
 526                ret = ERR_PTR(-ENOMEM);
 527                goto out;
 528        }
 529
 530        if (task->tk_status != 0) {
 531                ret = ERR_PTR(task->tk_status);
 532                rpc_put_task(task);
 533                goto out;
 534        }
 535        atomic_inc(&task->tk_count);
 536        rpc_execute(task);
 537        ret = task;
 538out:
 539        return ret;
 540}
 541EXPORT_SYMBOL_GPL(rpc_run_task);
 542
 543/**
 544 * rpc_call_sync - Perform a synchronous RPC call
 545 * @clnt: pointer to RPC client
 546 * @msg: RPC call parameters
 547 * @flags: RPC call flags
 548 */
 549int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
 550{
 551        struct rpc_task        *task;
 552        struct rpc_task_setup task_setup_data = {
 553                .rpc_client = clnt,
 554                .rpc_message = msg,
 555                .callback_ops = &rpc_default_ops,
 556                .flags = flags,
 557        };
 558        int status;
 559
 560        BUG_ON(flags & RPC_TASK_ASYNC);
 561
 562        task = rpc_run_task(&task_setup_data);
 563        if (IS_ERR(task))
 564                return PTR_ERR(task);
 565        status = task->tk_status;
 566        rpc_put_task(task);
 567        return status;
 568}
 569EXPORT_SYMBOL_GPL(rpc_call_sync);
 570
 571/**
 572 * rpc_call_async - Perform an asynchronous RPC call
 573 * @clnt: pointer to RPC client
 574 * @msg: RPC call parameters
 575 * @flags: RPC call flags
 576 * @tk_ops: RPC call ops
 577 * @data: user call data
 578 */
 579int
 580rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
 581               const struct rpc_call_ops *tk_ops, void *data)
 582{
 583        struct rpc_task        *task;
 584        struct rpc_task_setup task_setup_data = {
 585                .rpc_client = clnt,
 586                .rpc_message = msg,
 587                .callback_ops = tk_ops,
 588                .callback_data = data,
 589                .flags = flags|RPC_TASK_ASYNC,
 590        };
 591
 592        task = rpc_run_task(&task_setup_data);
 593        if (IS_ERR(task))
 594                return PTR_ERR(task);
 595        rpc_put_task(task);
 596        return 0;
 597}
 598EXPORT_SYMBOL_GPL(rpc_call_async);
 599
 600void
 601rpc_call_start(struct rpc_task *task)
 602{
 603        task->tk_action = call_start;
 604}
 605EXPORT_SYMBOL_GPL(rpc_call_start);
 606
 607/**
 608 * rpc_peeraddr - extract remote peer address from clnt's xprt
 609 * @clnt: RPC client structure
 610 * @buf: target buffer
 611 * @bufsize: length of target buffer
 612 *
 613 * Returns the number of bytes that are actually in the stored address.
 614 */
 615size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
 616{
 617        size_t bytes;
 618        struct rpc_xprt *xprt = clnt->cl_xprt;
 619
 620        bytes = sizeof(xprt->addr);
 621        if (bytes > bufsize)
 622                bytes = bufsize;
 623        memcpy(buf, &clnt->cl_xprt->addr, bytes);
 624        return xprt->addrlen;
 625}
 626EXPORT_SYMBOL_GPL(rpc_peeraddr);
 627
 628/**
 629 * rpc_peeraddr2str - return remote peer address in printable format
 630 * @clnt: RPC client structure
 631 * @format: address format
 632 *
 633 */
 634const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
 635                             enum rpc_display_format_t format)
 636{
 637        struct rpc_xprt *xprt = clnt->cl_xprt;
 638
 639        if (xprt->address_strings[format] != NULL)
 640                return xprt->address_strings[format];
 641        else
 642                return "unprintable";
 643}
 644EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
 645
 646void
 647rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
 648{
 649        struct rpc_xprt *xprt = clnt->cl_xprt;
 650        if (xprt->ops->set_buffer_size)
 651                xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
 652}
 653EXPORT_SYMBOL_GPL(rpc_setbufsize);
 654
 655/*
 656 * Return size of largest payload RPC client can support, in bytes
 657 *
 658 * For stream transports, this is one RPC record fragment (see RFC
 659 * 1831), as we don't support multi-record requests yet.  For datagram
 660 * transports, this is the size of an IP packet minus the IP, UDP, and
 661 * RPC header sizes.
 662 */
 663size_t rpc_max_payload(struct rpc_clnt *clnt)
 664{
 665        return clnt->cl_xprt->max_payload;
 666}
 667EXPORT_SYMBOL_GPL(rpc_max_payload);
 668
 669/**
 670 * rpc_force_rebind - force transport to check that remote port is unchanged
 671 * @clnt: client to rebind
 672 *
 673 */
 674void rpc_force_rebind(struct rpc_clnt *clnt)
 675{
 676        if (clnt->cl_autobind)
 677                xprt_clear_bound(clnt->cl_xprt);
 678}
 679EXPORT_SYMBOL_GPL(rpc_force_rebind);
 680
 681/*
 682 * Restart an (async) RPC call. Usually called from within the
 683 * exit handler.
 684 */
 685void
 686rpc_restart_call(struct rpc_task *task)
 687{
 688        if (RPC_ASSASSINATED(task))
 689                return;
 690
 691        task->tk_action = call_start;
 692}
 693EXPORT_SYMBOL_GPL(rpc_restart_call);
 694
 695#ifdef RPC_DEBUG
 696static const char *rpc_proc_name(const struct rpc_task *task)
 697{
 698        const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
 699
 700        if (proc) {
 701                if (proc->p_name)
 702                        return proc->p_name;
 703                else
 704                        return "NULL";
 705        } else
 706                return "no proc";
 707}
 708#endif
 709
 710/*
 711 * 0.  Initial state
 712 *
 713 *     Other FSM states can be visited zero or more times, but
 714 *     this state is visited exactly once for each RPC.
 715 */
 716static void
 717call_start(struct rpc_task *task)
 718{
 719        struct rpc_clnt        *clnt = task->tk_client;
 720
 721        dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
 722                        clnt->cl_protname, clnt->cl_vers,
 723                        rpc_proc_name(task),
 724                        (RPC_IS_ASYNC(task) ? "async" : "sync"));
 725
 726        /* Increment call count */
 727        task->tk_msg.rpc_proc->p_count++;
 728        clnt->cl_stats->rpccnt++;
 729        task->tk_action = call_reserve;
 730}
 731
 732/*
 733 * 1.        Reserve an RPC call slot
 734 */
 735static void
 736call_reserve(struct rpc_task *task)
 737{
 738        dprint_status(task);
 739
 740        if (!rpcauth_uptodatecred(task)) {
 741                task->tk_action = call_refresh;
 742                return;
 743        }
 744
 745        task->tk_status  = 0;
 746        task->tk_action  = call_reserveresult;
 747        xprt_reserve(task);
 748}
 749
 750/*
 751 * 1b.        Grok the result of xprt_reserve()
 752 */
 753static void
 754call_reserveresult(struct rpc_task *task)
 755{
 756        int status = task->tk_status;
 757
 758        dprint_status(task);
 759
 760        /*
 761         * After a call to xprt_reserve(), we must have either
 762         * a request slot or else an error status.
 763         */
 764        task->tk_status = 0;
 765        if (status >= 0) {
 766                if (task->tk_rqstp) {
 767                        task->tk_action = call_allocate;
 768                        return;
 769                }
 770
 771                printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
 772                                __func__, status);
 773                rpc_exit(task, -EIO);
 774                return;
 775        }
 776
 777        /*
 778         * Even though there was an error, we may have acquired
 779         * a request slot somehow.  Make sure not to leak it.
 780         */
 781        if (task->tk_rqstp) {
 782                printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
 783                                __func__, status);
 784                xprt_release(task);
 785        }
 786
 787        switch (status) {
 788        case -EAGAIN:        /* woken up; retry */
 789                task->tk_action = call_reserve;
 790                return;
 791        case -EIO:        /* probably a shutdown */
 792                break;
 793        default:
 794                printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
 795                                __func__, status);
 796                break;
 797        }
 798        rpc_exit(task, status);
 799}
 800
 801/*
 802 * 2.        Allocate the buffer. For details, see sched.c:rpc_malloc.
 803 *        (Note: buffer memory is freed in xprt_release).
 804 */
 805static void
 806call_allocate(struct rpc_task *task)
 807{
 808        unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack;
 809        struct rpc_rqst *req = task->tk_rqstp;
 810        struct rpc_xprt *xprt = task->tk_xprt;
 811        struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
 812
 813        dprint_status(task);
 814
 815        task->tk_status = 0;
 816        task->tk_action = call_bind;
 817
 818        if (req->rq_buffer)
 819                return;
 820
 821        if (proc->p_proc != 0) {
 822                BUG_ON(proc->p_arglen == 0);
 823                if (proc->p_decode != NULL)
 824                        BUG_ON(proc->p_replen == 0);
 825        }
 826
 827        /*
 828         * Calculate the size (in quads) of the RPC call
 829         * and reply headers, and convert both values
 830         * to byte sizes.
 831         */
 832        req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
 833        req->rq_callsize <<= 2;
 834        req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
 835        req->rq_rcvsize <<= 2;
 836
 837        req->rq_buffer = xprt->ops->buf_alloc(task,
 838                                        req->rq_callsize + req->rq_rcvsize);
 839        if (req->rq_buffer != NULL)
 840                return;
 841
 842        dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
 843
 844        if (RPC_IS_ASYNC(task) || !signalled()) {
 845                task->tk_action = call_allocate;
 846                rpc_delay(task, HZ>>4);
 847                return;
 848        }
 849
 850        rpc_exit(task, -ERESTARTSYS);
 851}
 852
 853static inline int
 854rpc_task_need_encode(struct rpc_task *task)
 855{
 856        return task->tk_rqstp->rq_snd_buf.len == 0;
 857}
 858
 859static inline void
 860rpc_task_force_reencode(struct rpc_task *task)
 861{
 862        task->tk_rqstp->rq_snd_buf.len = 0;
 863}
 864
 865static inline void
 866rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
 867{
 868        buf->head[0].iov_base = start;
 869        buf->head[0].iov_len = len;
 870        buf->tail[0].iov_len = 0;
 871        buf->page_len = 0;
 872        buf->flags = 0;
 873        buf->len = 0;
 874        buf->buflen = len;
 875}
 876
 877/*
 878 * 3.        Encode arguments of an RPC call
 879 */
 880static void
 881rpc_xdr_encode(struct rpc_task *task)
 882{
 883        struct rpc_rqst        *req = task->tk_rqstp;
 884        kxdrproc_t        encode;
 885        __be32                *p;
 886
 887        dprint_status(task);
 888
 889        rpc_xdr_buf_init(&req->rq_snd_buf,
 890                         req->rq_buffer,
 891                         req->rq_callsize);
 892        rpc_xdr_buf_init(&req->rq_rcv_buf,
 893                         (char *)req->rq_buffer + req->rq_callsize,
 894                         req->rq_rcvsize);
 895
 896        p = rpc_encode_header(task);
 897        if (p == NULL) {
 898                printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
 899                rpc_exit(task, -EIO);
 900                return;
 901        }
 902
 903        encode = task->tk_msg.rpc_proc->p_encode;
 904        if (encode == NULL)
 905                return;
 906
 907        task->tk_status = rpcauth_wrap_req(task, encode, req, p,
 908                        task->tk_msg.rpc_argp);
 909}
 910
 911/*
 912 * 4.        Get the server port number if not yet set
 913 */
 914static void
 915call_bind(struct rpc_task *task)
 916{
 917        struct rpc_xprt *xprt = task->tk_xprt;
 918
 919        dprint_status(task);
 920
 921        task->tk_action = call_connect;
 922        if (!xprt_bound(xprt)) {
 923                task->tk_action = call_bind_status;
 924                task->tk_timeout = xprt->bind_timeout;
 925                xprt->ops->rpcbind(task);
 926        }
 927}
 928
 929/*
 930 * 4a.        Sort out bind result
 931 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EIO;

	/* Bind succeeded: proceed to connect. */
	if (task->tk_status >= 0) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ETIMEDOUT:
		dprintk("RPC: %5u rpcbind request timed out\n",
				task->tk_pid);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		dprintk("RPC: %5u remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		/* Try again; the rpcbind client will fall back to an
		 * older protocol version. */
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
				task->tk_pid);
		task->tk_status = 0;
		task->tk_action = call_bind;
		return;
	default:
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
	}

	/* Unrecoverable bind failure: terminate the task. */
	rpc_exit(task, status);
	return;

retry_timeout:
	task->tk_action = call_timeout;
}
 985
 986/*
 987 * 4b.        Connect to the RPC server
 988 */
 989static void
 990call_connect(struct rpc_task *task)
 991{
 992        struct rpc_xprt *xprt = task->tk_xprt;
 993
 994        dprintk("RPC: %5u call_connect xprt %p %s connected\n",
 995                        task->tk_pid, xprt,
 996                        (xprt_connected(xprt) ? "is" : "is not"));
 997
 998        task->tk_action = call_transmit;
 999        if (!xprt_connected(xprt)) {
1000                task->tk_action = call_connect_status;
1001                if (task->tk_status < 0)
1002                        return;
1003                xprt_connect(task);
1004        }
1005}
1006
1007/*
1008 * 4c.        Sort out connect result
1009 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	if (status >= 0) {
		/* Connected: account the (re)connection and transmit. */
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	rpc_force_rebind(clnt);

	switch (status) {
	case -ENOTCONN:
	case -EAGAIN:
		task->tk_action = call_bind;
		if (!RPC_IS_SOFT(task))
			return;
		/* if soft mounted, test if we've timed out */
		/* fall through */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		return;
	}
	/* Any other connect error is fatal for this call. */
	rpc_exit(task, -EIO);
}
1041
1042/*
1043 * 5.        Transmit the RPC request, and wait for reply
1044 */
static void
call_transmit(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	/* Take the transport write lock; may return non-zero and queue us. */
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	task->tk_action = call_transmit_status;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		/* A request that was partially sent must never be re-encoded. */
		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
		rpc_xdr_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0) {
			/* Was the error nonfatal? */
			if (task->tk_status == -EAGAIN)
				rpc_delay(task, HZ >> 4);
			else
				rpc_exit(task, task->tk_status);
			return;
		}
	}
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	/*
	 * On success, ensure that we call xprt_end_transmit() before sleeping
	 * in order to allow access to the socket to other RPC requests.
	 */
	call_transmit_status(task);
	/* Procedures with a decoder expect a reply; wait for it. */
	if (task->tk_msg.rpc_proc->p_decode != NULL)
		return;
	/* No reply expected: finish the task immediately. */
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
}
1084
1085/*
1086 * 5a.        Handle cleanup after a transmission
1087 */
1088static void
1089call_transmit_status(struct rpc_task *task)
1090{
1091        task->tk_action = call_status;
1092        /*
1093         * Special case: if we've been waiting on the socket's write_space()
1094         * callback, then don't call xprt_end_transmit().
1095         */
1096        if (task->tk_status == -EAGAIN)
1097                return;
1098        xprt_end_transmit(task);
1099        rpc_task_force_reencode(task);
1100}
1101
1102/*
1103 * 6.        Sort out the RPC call status
1104 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	/* A reply already arrived and nothing is left to send: use it. */
	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprint_status(task);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
		/* fall through */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(task->tk_xprt,
					req->rq_connect_cookie);
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		/* Server may have moved ports: rebind and retry. */
		rpc_force_rebind(clnt);
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}
1158
1159/*
1160 * 6a.        Handle RPC timeout
1161 *         We do not release the request slot, so we keep using the
1162 *        same XID for all retransmits.
1163 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	/* Minor timeout: the per-request timeout could still be extended. */
	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	/* Soft mounts give up on a major timeout. */
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	/* Hard mount: complain once per major-timeout episode, then retry. */
	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}
1203
1204/*
1205 * 7.        Decode the RPC reply
1206 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	__be32		*p;

	dprintk("RPC: %5u call_decode (status %d)\n",
			task->tk_pid, task->tk_status);

	/* Announce recovery from a previously reported major timeout. */
	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s OK\n",
				clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_received.
	 */
	smp_rmb();
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* A reply shorter than 3 XDR words cannot be a valid RPC reply. */
	if (req->rq_rcv_buf.len < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
				clnt->cl_protname, task->tk_status);
		task->tk_action = call_timeout;
		goto out_retry;
	}

	p = rpc_verify_header(task);
	if (IS_ERR(p)) {
		/* -EAGAIN means "retry"; other errors already exited the task. */
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	/* Unwrap and decode the procedure results via the auth flavour. */
	if (decode) {
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	}
	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
			task->tk_status);
	return;
out_retry:
	task->tk_status = 0;
	/* Note: rpc_verify_header() may have freed the RPC slot */
	if (task->tk_rqstp == req) {
		req->rq_received = req->rq_rcv_buf.len = 0;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(task->tk_xprt,
					req->rq_connect_cookie);
	}
}
1274
1275/*
1276 * 8.        Refresh the credentials if rejected by the server
1277 */
1278static void
1279call_refresh(struct rpc_task *task)
1280{
1281        dprint_status(task);
1282
1283        task->tk_action = call_refreshresult;
1284        task->tk_status = 0;
1285        task->tk_client->cl_stats->rpcauthrefresh++;
1286        rpcauth_refreshcred(task);
1287}
1288
1289/*
1290 * 8a.        Process the results of a credential refresh
1291 */
1292static void
1293call_refreshresult(struct rpc_task *task)
1294{
1295        int status = task->tk_status;
1296
1297        dprint_status(task);
1298
1299        task->tk_status = 0;
1300        task->tk_action = call_reserve;
1301        if (status >= 0 && rpcauth_uptodatecred(task))
1302                return;
1303        if (status == -EACCES) {
1304                rpc_exit(task, -EACCES);
1305                return;
1306        }
1307        task->tk_action = call_refresh;
1308        if (status != -ETIMEDOUT)
1309                rpc_delay(task, 3*HZ);
1310        return;
1311}
1312
/*
 * Marshal the common RPC call header into the head of the send buffer
 * and return a pointer just past the credential.  The caller
 * (rpc_xdr_encode()) treats a NULL return as a marshalling failure.
 */
static __be32 *
rpc_encode_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	__be32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	/* Shrink the head iovec to the bytes actually marshalled. */
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
1333
/*
 * Parse and validate the RPC reply header in the receive buffer.
 * Returns a pointer to the start of the procedure results on success,
 * ERR_PTR(-EAGAIN) when the call should be retried, or another
 * ERR_PTR value after terminating the task with rpc_exit().
 */
static __be32 *
rpc_verify_header(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;	/* reply length, in words */
	__be32	*p = iov->iov_base;
	u32 n;
	int error = -EACCES;

	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
		/* RFC-1014 says that the representation of XDR data must be a
		 * multiple of four bytes
		 * - if it isn't pointer subtraction in the NFS client may give
		 *   undefined results
		 */
		dprintk("RPC: %5u %s: XDR representation not a multiple of"
		       " 4 bytes: 0x%x\n", task->tk_pid, __func__,
		       task->tk_rqstp->rq_rcv_buf.len);
		goto out_eio;
	}
	/* Need at least XID, message direction and reply status. */
	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		dprintk("RPC: %5u %s: not an RPC reply: %x\n",
				task->tk_pid, __func__, n);
		goto out_garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		/* Rejected reply: a reject-status word follows. */
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
			case RPC_AUTH_ERROR:
				break;
			case RPC_MISMATCH:
				dprintk("RPC: %5u %s: RPC call version "
						"mismatch!\n",
						task->tk_pid, __func__);
				error = -EPROTONOSUPPORT;
				goto out_err;
			default:
				dprintk("RPC: %5u %s: RPC call rejected, "
						"unknown error: %x\n",
						task->tk_pid, __func__, n);
				goto out_eio;
		}
		/* RPC_AUTH_ERROR: the auth_stat word follows. */
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			/* Stale credentials: invalidate, refresh and retry. */
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry stale creds\n",
					task->tk_pid, __func__);
			rpcauth_invalcred(task);
			/* Ensure we obtain a new XID! */
			xprt_release(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %5u %s: retry garbled creds\n",
					task->tk_pid, __func__);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "RPC: server %s requires stronger "
			       "authentication.\n", task->tk_client->cl_server);
			break;
		default:
			dprintk("RPC: %5u %s: unknown auth error: %x\n",
					task->tk_pid, __func__, n);
			error = -EIO;
		}
		dprintk("RPC: %5u %s: call rejected %d\n",
				task->tk_pid, __func__, n);
		goto out_err;
	}
	/* Accepted reply: verify the server's auth verifier. */
	if (!(p = rpcauth_checkverf(task, p))) {
		dprintk("RPC: %5u %s: auth check failed\n",
				task->tk_pid, __func__);
		goto out_garbage;		/* bad verifier, retry */
	}
	/*
	 * NOTE(review): len here is the number of words consumed so far
	 * (from the verifier-adjusted p), and the accept-status word below
	 * is read without re-checking against the total reply length —
	 * confirm rpcauth_checkverf() keeps p within the received data.
	 */
	len = p - (__be32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
				task->tk_pid, __func__,
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: %5u %s: program %u, version %u unsupported by "
				"server %s\n", task->tk_pid, __func__,
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
				"version %u on server %s\n",
				task->tk_pid, __func__,
				rpc_proc_name(task),
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %5u %s: server saw garbage\n",
				task->tk_pid, __func__);
		break;			/* retry */
	default:
		dprintk("RPC: %5u %s: server accept status: %x\n",
				task->tk_pid, __func__, n);
		/* Also retry */
	}

out_garbage:
	/* Garbled reply: retry the call (with a fresh bind) if allowed. */
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC: %5u %s: retrying\n",
				task->tk_pid, __func__);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
			__func__, error);
	return ERR_PTR(error);
out_overflow:
	dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
			__func__);
	goto out_garbage;
}
1489
/* NULLPROC encoder: the NULL procedure carries no arguments. */
static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}
1494
/* NULLPROC decoder: the NULL procedure returns no results. */
static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}
1499
/* Procedure descriptor for the NULL procedure (used by rpc_ping() and
 * rpc_call_null()); all unspecified fields are zero-initialized. */
static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};
1504
/*
 * Perform a synchronous NULLPROC call with AUTH_NULL credentials to
 * check that the server's RPC service is reachable and responding.
 * Returns the rpc_call_sync() status (0 on success).
 */
static int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	/* NOTE(review): the lookup_cred() result is used unchecked —
	 * confirm it cannot fail for the AUTH_NULL flavour. */
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}
1516
/*
 * Start a NULLPROC call with the caller-supplied credential and
 * task flags; returns the result of rpc_run_task() (the new task,
 * or an ERR_PTR on failure to start it).
 */
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	return rpc_run_task(&task_setup_data);
}
EXPORT_SYMBOL_GPL(rpc_call_null);
1532
1533#ifdef RPC_DEBUG
1534static void rpc_show_header(void)
1535{
1536        printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
1537                "-timeout ---ops--\n");
1538}
1539
/* Print a one-line summary of @task (owned by @clnt) for debugging. */
static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
{
	const char *rpc_waitq = "none";
	char *p, action[KSYM_SYMBOL_LEN];

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	/* map tk_action pointer to a function name; then trim off
	 * the "+0x0 [sunrpc]" */
	sprint_symbol(action, (unsigned long)task->tk_action);
	p = strchr(action, '+');
	if (p)
		*p = '\0';

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
		clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
		action, rpc_waitq);
}
1562
1563void rpc_show_tasks(void)
1564{
1565        struct rpc_clnt *clnt;
1566        struct rpc_task *task;
1567        int header = 0;
1568
1569        spin_lock(&rpc_client_lock);
1570        list_for_each_entry(clnt, &all_clients, cl_clients) {
1571                spin_lock(&clnt->cl_lock);
1572                list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
1573                        if (!header) {
1574                                rpc_show_header();
1575                                header++;
1576                        }
1577                        rpc_show_task(clnt, task);
1578                }
1579                spin_unlock(&clnt->cl_lock);
1580        }
1581        spin_unlock(&rpc_client_lock);
1582}
1583#endif