Showing error 1528

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock acquired in the function is not released on every return path, so the function can exit with the lock still held (the lock is leaked)
File location: net/netfilter/x_tables.c
Line in file: 891
Project: Linux Kernel
Project version: 2.6.28
Tools: Stanse (1.2)
Entered: 2012-05-21 20:30:05 UTC
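
For reference, a minimal sketch of the pattern this error class reports (hypothetical code, not part of the listing below): a function takes a lock and one return path skips the matching unlock, so control leaves the function with the lock still held.

        /* Hypothetical illustration of "leaving function in locked state". */
        static DEFINE_MUTEX(example_mutex);

        static int example_locked_return(int arg)
        {
                mutex_lock(&example_mutex);
                if (arg < 0)
                        return -EINVAL;   /* example_mutex is still held here */
                mutex_unlock(&example_mutex);
                return 0;
        }

In the source below, the flagged line 891 is the return in xt_target_seq_start(), which returns with xt[af].mutex held; the matching mutex_unlock() sits in xt_target_seq_stop() (line 907), which matches the usual seq_file start/stop pairing, so the lock is intended to outlive the function.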

Source:

   1/*
   2 * x_tables core - Backend for {ip,ip6,arp}_tables
   3 *
   4 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
   5 *
   6 * Based on existing ip_tables code which is
   7 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
   8 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 *
  14 */
  15
  16#include <linux/kernel.h>
  17#include <linux/socket.h>
  18#include <linux/net.h>
  19#include <linux/proc_fs.h>
  20#include <linux/seq_file.h>
  21#include <linux/string.h>
  22#include <linux/vmalloc.h>
  23#include <linux/mutex.h>
  24#include <linux/mm.h>
  25#include <net/net_namespace.h>
  26
  27#include <linux/netfilter/x_tables.h>
  28#include <linux/netfilter_arp.h>
  29
  30
  31MODULE_LICENSE("GPL");
  32MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
  33MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
  34
  35#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
  36
  37struct compat_delta {
  38        struct compat_delta *next;
  39        unsigned int offset;
  40        short delta;
  41};
  42
  43struct xt_af {
  44        struct mutex mutex;
  45        struct list_head match;
  46        struct list_head target;
  47#ifdef CONFIG_COMPAT
  48        struct mutex compat_mutex;
  49        struct compat_delta *compat_offsets;
  50#endif
  51};
  52
  53static struct xt_af *xt;
  54
  55#ifdef DEBUG_IP_FIREWALL_USER
  56#define duprintf(format, args...) printk(format , ## args)
  57#else
  58#define duprintf(format, args...)
  59#endif
  60
  61static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
  62        [NFPROTO_UNSPEC] = "x",
  63        [NFPROTO_IPV4]   = "ip",
  64        [NFPROTO_ARP]    = "arp",
  65        [NFPROTO_BRIDGE] = "eb",
  66        [NFPROTO_IPV6]   = "ip6",
  67};
  68
  69/* Registration hooks for targets. */
  70int
  71xt_register_target(struct xt_target *target)
  72{
  73        u_int8_t af = target->family;
  74        int ret;
  75
  76        ret = mutex_lock_interruptible(&xt[af].mutex);
  77        if (ret != 0)
  78                return ret;
  79        list_add(&target->list, &xt[af].target);
  80        mutex_unlock(&xt[af].mutex);
  81        return ret;
  82}
  83EXPORT_SYMBOL(xt_register_target);
  84
  85void
  86xt_unregister_target(struct xt_target *target)
  87{
  88        u_int8_t af = target->family;
  89
  90        mutex_lock(&xt[af].mutex);
  91        list_del(&target->list);
  92        mutex_unlock(&xt[af].mutex);
  93}
  94EXPORT_SYMBOL(xt_unregister_target);
  95
  96int
  97xt_register_targets(struct xt_target *target, unsigned int n)
  98{
  99        unsigned int i;
 100        int err = 0;
 101
 102        for (i = 0; i < n; i++) {
 103                err = xt_register_target(&target[i]);
 104                if (err)
 105                        goto err;
 106        }
 107        return err;
 108
 109err:
 110        if (i > 0)
 111                xt_unregister_targets(target, i);
 112        return err;
 113}
 114EXPORT_SYMBOL(xt_register_targets);
 115
 116void
 117xt_unregister_targets(struct xt_target *target, unsigned int n)
 118{
 119        unsigned int i;
 120
 121        for (i = 0; i < n; i++)
 122                xt_unregister_target(&target[i]);
 123}
 124EXPORT_SYMBOL(xt_unregister_targets);
 125
 126int
 127xt_register_match(struct xt_match *match)
 128{
 129        u_int8_t af = match->family;
 130        int ret;
 131
 132        ret = mutex_lock_interruptible(&xt[af].mutex);
 133        if (ret != 0)
 134                return ret;
 135
 136        list_add(&match->list, &xt[af].match);
 137        mutex_unlock(&xt[af].mutex);
 138
 139        return ret;
 140}
 141EXPORT_SYMBOL(xt_register_match);
 142
 143void
 144xt_unregister_match(struct xt_match *match)
 145{
 146        u_int8_t af = match->family;
 147
 148        mutex_lock(&xt[af].mutex);
 149        list_del(&match->list);
 150        mutex_unlock(&xt[af].mutex);
 151}
 152EXPORT_SYMBOL(xt_unregister_match);
 153
 154int
 155xt_register_matches(struct xt_match *match, unsigned int n)
 156{
 157        unsigned int i;
 158        int err = 0;
 159
 160        for (i = 0; i < n; i++) {
 161                err = xt_register_match(&match[i]);
 162                if (err)
 163                        goto err;
 164        }
 165        return err;
 166
 167err:
 168        if (i > 0)
 169                xt_unregister_matches(match, i);
 170        return err;
 171}
 172EXPORT_SYMBOL(xt_register_matches);
 173
 174void
 175xt_unregister_matches(struct xt_match *match, unsigned int n)
 176{
 177        unsigned int i;
 178
 179        for (i = 0; i < n; i++)
 180                xt_unregister_match(&match[i]);
 181}
 182EXPORT_SYMBOL(xt_unregister_matches);
 183
 184
 185/*
 186 * These are weird, but module loading must not be done with mutex
 187 * held (since they will register), and we have to have a single
 188 * function to use try_then_request_module().
 189 */
 190
 191/* Find match, grabs ref.  Returns ERR_PTR() on error. */
 192struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
 193{
 194        struct xt_match *m;
 195        int err = 0;
 196
 197        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
 198                return ERR_PTR(-EINTR);
 199
 200        list_for_each_entry(m, &xt[af].match, list) {
 201                if (strcmp(m->name, name) == 0) {
 202                        if (m->revision == revision) {
 203                                if (try_module_get(m->me)) {
 204                                        mutex_unlock(&xt[af].mutex);
 205                                        return m;
 206                                }
 207                        } else
 208                                err = -EPROTOTYPE; /* Found something. */
 209                }
 210        }
 211        mutex_unlock(&xt[af].mutex);
 212
 213        if (af != NFPROTO_UNSPEC)
 214                /* Try searching again in the family-independent list */
 215                return xt_find_match(NFPROTO_UNSPEC, name, revision);
 216
 217        return ERR_PTR(err);
 218}
 219EXPORT_SYMBOL(xt_find_match);
 220
 221/* Find target, grabs ref.  Returns ERR_PTR() on error. */
 222struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
 223{
 224        struct xt_target *t;
 225        int err = 0;
 226
 227        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
 228                return ERR_PTR(-EINTR);
 229
 230        list_for_each_entry(t, &xt[af].target, list) {
 231                if (strcmp(t->name, name) == 0) {
 232                        if (t->revision == revision) {
 233                                if (try_module_get(t->me)) {
 234                                        mutex_unlock(&xt[af].mutex);
 235                                        return t;
 236                                }
 237                        } else
 238                                err = -EPROTOTYPE; /* Found something. */
 239                }
 240        }
 241        mutex_unlock(&xt[af].mutex);
 242
 243        if (af != NFPROTO_UNSPEC)
 244                /* Try searching again in the family-independent list */
 245                return xt_find_target(NFPROTO_UNSPEC, name, revision);
 246
 247        return ERR_PTR(err);
 248}
 249EXPORT_SYMBOL(xt_find_target);
 250
 251struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 252{
 253        struct xt_target *target;
 254
 255        target = try_then_request_module(xt_find_target(af, name, revision),
 256                                         "%st_%s", xt_prefix[af], name);
 257        if (IS_ERR(target) || !target)
 258                return NULL;
 259        return target;
 260}
 261EXPORT_SYMBOL_GPL(xt_request_find_target);
 262
 263static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
 264{
 265        const struct xt_match *m;
 266        int have_rev = 0;
 267
 268        list_for_each_entry(m, &xt[af].match, list) {
 269                if (strcmp(m->name, name) == 0) {
 270                        if (m->revision > *bestp)
 271                                *bestp = m->revision;
 272                        if (m->revision == revision)
 273                                have_rev = 1;
 274                }
 275        }
 276        return have_rev;
 277}
 278
 279static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
 280{
 281        const struct xt_target *t;
 282        int have_rev = 0;
 283
 284        list_for_each_entry(t, &xt[af].target, list) {
 285                if (strcmp(t->name, name) == 0) {
 286                        if (t->revision > *bestp)
 287                                *bestp = t->revision;
 288                        if (t->revision == revision)
 289                                have_rev = 1;
 290                }
 291        }
 292        return have_rev;
 293}
 294
 295/* Returns true or false (if no such extension at all) */
 296int xt_find_revision(u8 af, const char *name, u8 revision, int target,
 297                     int *err)
 298{
 299        int have_rev, best = -1;
 300
 301        if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
 302                *err = -EINTR;
 303                return 1;
 304        }
 305        if (target == 1)
 306                have_rev = target_revfn(af, name, revision, &best);
 307        else
 308                have_rev = match_revfn(af, name, revision, &best);
 309        mutex_unlock(&xt[af].mutex);
 310
 311        /* Nothing at all?  Return 0 to try loading module. */
 312        if (best == -1) {
 313                *err = -ENOENT;
 314                return 0;
 315        }
 316
 317        *err = best;
 318        if (!have_rev)
 319                *err = -EPROTONOSUPPORT;
 320        return 1;
 321}
 322EXPORT_SYMBOL_GPL(xt_find_revision);
 323
 324int xt_check_match(struct xt_mtchk_param *par,
 325                   unsigned int size, u_int8_t proto, bool inv_proto)
 326{
 327        if (XT_ALIGN(par->match->matchsize) != size &&
 328            par->match->matchsize != -1) {
 329                /*
 330                 * ebt_among is exempt from centralized matchsize checking
 331                 * because it uses a dynamic-size data set.
 332                 */
 333                printk("%s_tables: %s match: invalid size %Zu != %u\n",
 334                       xt_prefix[par->family], par->match->name,
 335                       XT_ALIGN(par->match->matchsize), size);
 336                return -EINVAL;
 337        }
 338        if (par->match->table != NULL &&
 339            strcmp(par->match->table, par->table) != 0) {
 340                printk("%s_tables: %s match: only valid in %s table, not %s\n",
 341                       xt_prefix[par->family], par->match->name,
 342                       par->match->table, par->table);
 343                return -EINVAL;
 344        }
 345        if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
 346                printk("%s_tables: %s match: bad hook_mask %#x/%#x\n",
 347                       xt_prefix[par->family], par->match->name,
 348                       par->hook_mask, par->match->hooks);
 349                return -EINVAL;
 350        }
 351        if (par->match->proto && (par->match->proto != proto || inv_proto)) {
 352                printk("%s_tables: %s match: only valid for protocol %u\n",
 353                       xt_prefix[par->family], par->match->name,
 354                       par->match->proto);
 355                return -EINVAL;
 356        }
 357        if (par->match->checkentry != NULL && !par->match->checkentry(par))
 358                return -EINVAL;
 359        return 0;
 360}
 361EXPORT_SYMBOL_GPL(xt_check_match);
 362
 363#ifdef CONFIG_COMPAT
 364int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
 365{
 366        struct compat_delta *tmp;
 367
 368        tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
 369        if (!tmp)
 370                return -ENOMEM;
 371
 372        tmp->offset = offset;
 373        tmp->delta = delta;
 374
 375        if (xt[af].compat_offsets) {
 376                tmp->next = xt[af].compat_offsets->next;
 377                xt[af].compat_offsets->next = tmp;
 378        } else {
 379                xt[af].compat_offsets = tmp;
 380                tmp->next = NULL;
 381        }
 382        return 0;
 383}
 384EXPORT_SYMBOL_GPL(xt_compat_add_offset);
 385
 386void xt_compat_flush_offsets(u_int8_t af)
 387{
 388        struct compat_delta *tmp, *next;
 389
 390        if (xt[af].compat_offsets) {
 391                for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
 392                        next = tmp->next;
 393                        kfree(tmp);
 394                }
 395                xt[af].compat_offsets = NULL;
 396        }
 397}
 398EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
 399
 400short xt_compat_calc_jump(u_int8_t af, unsigned int offset)
 401{
 402        struct compat_delta *tmp;
 403        short delta;
 404
 405        for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
 406                if (tmp->offset < offset)
 407                        delta += tmp->delta;
 408        return delta;
 409}
 410EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
 411
 412int xt_compat_match_offset(const struct xt_match *match)
 413{
 414        u_int16_t csize = match->compatsize ? : match->matchsize;
 415        return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
 416}
 417EXPORT_SYMBOL_GPL(xt_compat_match_offset);
 418
 419int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 420                              unsigned int *size)
 421{
 422        const struct xt_match *match = m->u.kernel.match;
 423        struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
 424        int pad, off = xt_compat_match_offset(match);
 425        u_int16_t msize = cm->u.user.match_size;
 426
 427        m = *dstptr;
 428        memcpy(m, cm, sizeof(*cm));
 429        if (match->compat_from_user)
 430                match->compat_from_user(m->data, cm->data);
 431        else
 432                memcpy(m->data, cm->data, msize - sizeof(*cm));
 433        pad = XT_ALIGN(match->matchsize) - match->matchsize;
 434        if (pad > 0)
 435                memset(m->data + match->matchsize, 0, pad);
 436
 437        msize += off;
 438        m->u.user.match_size = msize;
 439
 440        *size += off;
 441        *dstptr += msize;
 442        return 0;
 443}
 444EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
 445
 446int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr,
 447                            unsigned int *size)
 448{
 449        const struct xt_match *match = m->u.kernel.match;
 450        struct compat_xt_entry_match __user *cm = *dstptr;
 451        int off = xt_compat_match_offset(match);
 452        u_int16_t msize = m->u.user.match_size - off;
 453
 454        if (copy_to_user(cm, m, sizeof(*cm)) ||
 455            put_user(msize, &cm->u.user.match_size) ||
 456            copy_to_user(cm->u.user.name, m->u.kernel.match->name,
 457                         strlen(m->u.kernel.match->name) + 1))
 458                return -EFAULT;
 459
 460        if (match->compat_to_user) {
 461                if (match->compat_to_user((void __user *)cm->data, m->data))
 462                        return -EFAULT;
 463        } else {
 464                if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
 465                        return -EFAULT;
 466        }
 467
 468        *size -= off;
 469        *dstptr += msize;
 470        return 0;
 471}
 472EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
 473#endif /* CONFIG_COMPAT */
 474
 475int xt_check_target(struct xt_tgchk_param *par,
 476                    unsigned int size, u_int8_t proto, bool inv_proto)
 477{
 478        if (XT_ALIGN(par->target->targetsize) != size) {
 479                printk("%s_tables: %s target: invalid size %Zu != %u\n",
 480                       xt_prefix[par->family], par->target->name,
 481                       XT_ALIGN(par->target->targetsize), size);
 482                return -EINVAL;
 483        }
 484        if (par->target->table != NULL &&
 485            strcmp(par->target->table, par->table) != 0) {
 486                printk("%s_tables: %s target: only valid in %s table, not %s\n",
 487                       xt_prefix[par->family], par->target->name,
 488                       par->target->table, par->table);
 489                return -EINVAL;
 490        }
 491        if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
 492                printk("%s_tables: %s target: bad hook_mask %#x/%#x\n",
 493                       xt_prefix[par->family], par->target->name,
 494                       par->hook_mask, par->target->hooks);
 495                return -EINVAL;
 496        }
 497        if (par->target->proto && (par->target->proto != proto || inv_proto)) {
 498                printk("%s_tables: %s target: only valid for protocol %u\n",
 499                       xt_prefix[par->family], par->target->name,
 500                       par->target->proto);
 501                return -EINVAL;
 502        }
 503        if (par->target->checkentry != NULL && !par->target->checkentry(par))
 504                return -EINVAL;
 505        return 0;
 506}
 507EXPORT_SYMBOL_GPL(xt_check_target);
 508
 509#ifdef CONFIG_COMPAT
 510int xt_compat_target_offset(const struct xt_target *target)
 511{
 512        u_int16_t csize = target->compatsize ? : target->targetsize;
 513        return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
 514}
 515EXPORT_SYMBOL_GPL(xt_compat_target_offset);
 516
 517void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
 518                                unsigned int *size)
 519{
 520        const struct xt_target *target = t->u.kernel.target;
 521        struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
 522        int pad, off = xt_compat_target_offset(target);
 523        u_int16_t tsize = ct->u.user.target_size;
 524
 525        t = *dstptr;
 526        memcpy(t, ct, sizeof(*ct));
 527        if (target->compat_from_user)
 528                target->compat_from_user(t->data, ct->data);
 529        else
 530                memcpy(t->data, ct->data, tsize - sizeof(*ct));
 531        pad = XT_ALIGN(target->targetsize) - target->targetsize;
 532        if (pad > 0)
 533                memset(t->data + target->targetsize, 0, pad);
 534
 535        tsize += off;
 536        t->u.user.target_size = tsize;
 537
 538        *size += off;
 539        *dstptr += tsize;
 540}
 541EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
 542
 543int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr,
 544                             unsigned int *size)
 545{
 546        const struct xt_target *target = t->u.kernel.target;
 547        struct compat_xt_entry_target __user *ct = *dstptr;
 548        int off = xt_compat_target_offset(target);
 549        u_int16_t tsize = t->u.user.target_size - off;
 550
 551        if (copy_to_user(ct, t, sizeof(*ct)) ||
 552            put_user(tsize, &ct->u.user.target_size) ||
 553            copy_to_user(ct->u.user.name, t->u.kernel.target->name,
 554                         strlen(t->u.kernel.target->name) + 1))
 555                return -EFAULT;
 556
 557        if (target->compat_to_user) {
 558                if (target->compat_to_user((void __user *)ct->data, t->data))
 559                        return -EFAULT;
 560        } else {
 561                if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
 562                        return -EFAULT;
 563        }
 564
 565        *size -= off;
 566        *dstptr += tsize;
 567        return 0;
 568}
 569EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
 570#endif
 571
 572struct xt_table_info *xt_alloc_table_info(unsigned int size)
 573{
 574        struct xt_table_info *newinfo;
 575        int cpu;
 576
 577        /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
 578        if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
 579                return NULL;
 580
 581        newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
 582        if (!newinfo)
 583                return NULL;
 584
 585        newinfo->size = size;
 586
 587        for_each_possible_cpu(cpu) {
 588                if (size <= PAGE_SIZE)
 589                        newinfo->entries[cpu] = kmalloc_node(size,
 590                                                        GFP_KERNEL,
 591                                                        cpu_to_node(cpu));
 592                else
 593                        newinfo->entries[cpu] = vmalloc_node(size,
 594                                                        cpu_to_node(cpu));
 595
 596                if (newinfo->entries[cpu] == NULL) {
 597                        xt_free_table_info(newinfo);
 598                        return NULL;
 599                }
 600        }
 601
 602        return newinfo;
 603}
 604EXPORT_SYMBOL(xt_alloc_table_info);
 605
 606void xt_free_table_info(struct xt_table_info *info)
 607{
 608        int cpu;
 609
 610        for_each_possible_cpu(cpu) {
 611                if (info->size <= PAGE_SIZE)
 612                        kfree(info->entries[cpu]);
 613                else
 614                        vfree(info->entries[cpu]);
 615        }
 616        kfree(info);
 617}
 618EXPORT_SYMBOL(xt_free_table_info);
 619
 620/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
 621struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 622                                    const char *name)
 623{
 624        struct xt_table *t;
 625
 626        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
 627                return ERR_PTR(-EINTR);
 628
 629        list_for_each_entry(t, &net->xt.tables[af], list)
 630                if (strcmp(t->name, name) == 0 && try_module_get(t->me))
 631                        return t;
 632        mutex_unlock(&xt[af].mutex);
 633        return NULL;
 634}
 635EXPORT_SYMBOL_GPL(xt_find_table_lock);
 636
 637void xt_table_unlock(struct xt_table *table)
 638{
 639        mutex_unlock(&xt[table->af].mutex);
 640}
 641EXPORT_SYMBOL_GPL(xt_table_unlock);
 642
 643#ifdef CONFIG_COMPAT
 644void xt_compat_lock(u_int8_t af)
 645{
 646        mutex_lock(&xt[af].compat_mutex);
 647}
 648EXPORT_SYMBOL_GPL(xt_compat_lock);
 649
 650void xt_compat_unlock(u_int8_t af)
 651{
 652        mutex_unlock(&xt[af].compat_mutex);
 653}
 654EXPORT_SYMBOL_GPL(xt_compat_unlock);
 655#endif
 656
 657struct xt_table_info *
 658xt_replace_table(struct xt_table *table,
 659              unsigned int num_counters,
 660              struct xt_table_info *newinfo,
 661              int *error)
 662{
 663        struct xt_table_info *oldinfo, *private;
 664
 665        /* Do the substitution. */
 666        write_lock_bh(&table->lock);
 667        private = table->private;
 668        /* Check inside lock: is the old number correct? */
 669        if (num_counters != private->number) {
 670                duprintf("num_counters != table->private->number (%u/%u)\n",
 671                         num_counters, private->number);
 672                write_unlock_bh(&table->lock);
 673                *error = -EAGAIN;
 674                return NULL;
 675        }
 676        oldinfo = private;
 677        table->private = newinfo;
 678        newinfo->initial_entries = oldinfo->initial_entries;
 679        write_unlock_bh(&table->lock);
 680
 681        return oldinfo;
 682}
 683EXPORT_SYMBOL_GPL(xt_replace_table);
 684
 685struct xt_table *xt_register_table(struct net *net, struct xt_table *table,
 686                                   struct xt_table_info *bootstrap,
 687                                   struct xt_table_info *newinfo)
 688{
 689        int ret;
 690        struct xt_table_info *private;
 691        struct xt_table *t;
 692
 693        /* Don't add one object to multiple lists. */
 694        table = kmemdup(table, sizeof(struct xt_table), GFP_KERNEL);
 695        if (!table) {
 696                ret = -ENOMEM;
 697                goto out;
 698        }
 699
 700        ret = mutex_lock_interruptible(&xt[table->af].mutex);
 701        if (ret != 0)
 702                goto out_free;
 703
 704        /* Don't autoload: we'd eat our tail... */
 705        list_for_each_entry(t, &net->xt.tables[table->af], list) {
 706                if (strcmp(t->name, table->name) == 0) {
 707                        ret = -EEXIST;
 708                        goto unlock;
 709                }
 710        }
 711
 712        /* Simplifies replace_table code. */
 713        table->private = bootstrap;
 714        rwlock_init(&table->lock);
 715        if (!xt_replace_table(table, 0, newinfo, &ret))
 716                goto unlock;
 717
 718        private = table->private;
 719        duprintf("table->private->number = %u\n", private->number);
 720
 721        /* save number of initial entries */
 722        private->initial_entries = private->number;
 723
 724        list_add(&table->list, &net->xt.tables[table->af]);
 725        mutex_unlock(&xt[table->af].mutex);
 726        return table;
 727
 728 unlock:
 729        mutex_unlock(&xt[table->af].mutex);
 730out_free:
 731        kfree(table);
 732out:
 733        return ERR_PTR(ret);
 734}
 735EXPORT_SYMBOL_GPL(xt_register_table);
 736
 737void *xt_unregister_table(struct xt_table *table)
 738{
 739        struct xt_table_info *private;
 740
 741        mutex_lock(&xt[table->af].mutex);
 742        private = table->private;
 743        list_del(&table->list);
 744        mutex_unlock(&xt[table->af].mutex);
 745        kfree(table);
 746
 747        return private;
 748}
 749EXPORT_SYMBOL_GPL(xt_unregister_table);
 750
 751#ifdef CONFIG_PROC_FS
 752struct xt_names_priv {
 753        struct seq_net_private p;
 754        u_int8_t af;
 755};
 756static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
 757{
 758        struct xt_names_priv *priv = seq->private;
 759        struct net *net = seq_file_net(seq);
 760        u_int8_t af = priv->af;
 761
 762        mutex_lock(&xt[af].mutex);
 763        return seq_list_start(&net->xt.tables[af], *pos);
 764}
 765
 766static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 767{
 768        struct xt_names_priv *priv = seq->private;
 769        struct net *net = seq_file_net(seq);
 770        u_int8_t af = priv->af;
 771
 772        return seq_list_next(v, &net->xt.tables[af], pos);
 773}
 774
 775static void xt_table_seq_stop(struct seq_file *seq, void *v)
 776{
 777        struct xt_names_priv *priv = seq->private;
 778        u_int8_t af = priv->af;
 779
 780        mutex_unlock(&xt[af].mutex);
 781}
 782
 783static int xt_table_seq_show(struct seq_file *seq, void *v)
 784{
 785        struct xt_table *table = list_entry(v, struct xt_table, list);
 786
 787        if (strlen(table->name))
 788                return seq_printf(seq, "%s\n", table->name);
 789        else
 790                return 0;
 791}
 792
 793static const struct seq_operations xt_table_seq_ops = {
 794        .start        = xt_table_seq_start,
 795        .next        = xt_table_seq_next,
 796        .stop        = xt_table_seq_stop,
 797        .show        = xt_table_seq_show,
 798};
 799
 800static int xt_table_open(struct inode *inode, struct file *file)
 801{
 802        int ret;
 803        struct xt_names_priv *priv;
 804
 805        ret = seq_open_net(inode, file, &xt_table_seq_ops,
 806                           sizeof(struct xt_names_priv));
 807        if (!ret) {
 808                priv = ((struct seq_file *)file->private_data)->private;
 809                priv->af = (unsigned long)PDE(inode)->data;
 810        }
 811        return ret;
 812}
 813
 814static const struct file_operations xt_table_ops = {
 815        .owner         = THIS_MODULE,
 816        .open         = xt_table_open,
 817        .read         = seq_read,
 818        .llseek         = seq_lseek,
 819        .release = seq_release_net,
 820};
 821
 822static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
 823{
 824        struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
 825        u_int16_t af = (unsigned long)pde->data;
 826
 827        mutex_lock(&xt[af].mutex);
 828        return seq_list_start(&xt[af].match, *pos);
 829}
 830
 831static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 832{
 833        struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
 834        u_int16_t af = (unsigned long)pde->data;
 835
 836        return seq_list_next(v, &xt[af].match, pos);
 837}
 838
 839static void xt_match_seq_stop(struct seq_file *seq, void *v)
 840{
 841        struct proc_dir_entry *pde = seq->private;
 842        u_int16_t af = (unsigned long)pde->data;
 843
 844        mutex_unlock(&xt[af].mutex);
 845}
 846
 847static int xt_match_seq_show(struct seq_file *seq, void *v)
 848{
 849        struct xt_match *match = list_entry(v, struct xt_match, list);
 850
 851        if (strlen(match->name))
 852                return seq_printf(seq, "%s\n", match->name);
 853        else
 854                return 0;
 855}
 856
 857static const struct seq_operations xt_match_seq_ops = {
 858        .start        = xt_match_seq_start,
 859        .next        = xt_match_seq_next,
 860        .stop        = xt_match_seq_stop,
 861        .show        = xt_match_seq_show,
 862};
 863
 864static int xt_match_open(struct inode *inode, struct file *file)
 865{
 866        int ret;
 867
 868        ret = seq_open(file, &xt_match_seq_ops);
 869        if (!ret) {
 870                struct seq_file *seq = file->private_data;
 871
 872                seq->private = PDE(inode);
 873        }
 874        return ret;
 875}
 876
 877static const struct file_operations xt_match_ops = {
 878        .owner         = THIS_MODULE,
 879        .open         = xt_match_open,
 880        .read         = seq_read,
 881        .llseek         = seq_lseek,
 882        .release = seq_release,
 883};
 884
 885static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
 886{
 887        struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
 888        u_int16_t af = (unsigned long)pde->data;
 889
 890        mutex_lock(&xt[af].mutex);
 891        return seq_list_start(&xt[af].target, *pos);
 892}
 893
 894static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 895{
 896        struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
 897        u_int16_t af = (unsigned long)pde->data;
 898
 899        return seq_list_next(v, &xt[af].target, pos);
 900}
 901
 902static void xt_target_seq_stop(struct seq_file *seq, void *v)
 903{
 904        struct proc_dir_entry *pde = seq->private;
 905        u_int16_t af = (unsigned long)pde->data;
 906
 907        mutex_unlock(&xt[af].mutex);
 908}
 909
 910static int xt_target_seq_show(struct seq_file *seq, void *v)
 911{
 912        struct xt_target *target = list_entry(v, struct xt_target, list);
 913
 914        if (strlen(target->name))
 915                return seq_printf(seq, "%s\n", target->name);
 916        else
 917                return 0;
 918}
 919
 920static const struct seq_operations xt_target_seq_ops = {
 921        .start        = xt_target_seq_start,
 922        .next        = xt_target_seq_next,
 923        .stop        = xt_target_seq_stop,
 924        .show        = xt_target_seq_show,
 925};
 926
 927static int xt_target_open(struct inode *inode, struct file *file)
 928{
 929        int ret;
 930
 931        ret = seq_open(file, &xt_target_seq_ops);
 932        if (!ret) {
 933                struct seq_file *seq = file->private_data;
 934
 935                seq->private = PDE(inode);
 936        }
 937        return ret;
 938}
 939
 940static const struct file_operations xt_target_ops = {
 941        .owner         = THIS_MODULE,
 942        .open         = xt_target_open,
 943        .read         = seq_read,
 944        .llseek         = seq_lseek,
 945        .release = seq_release,
 946};
 947
 948#define FORMAT_TABLES        "_tables_names"
 949#define        FORMAT_MATCHES        "_tables_matches"
 950#define FORMAT_TARGETS         "_tables_targets"
 951
 952#endif /* CONFIG_PROC_FS */
 953
 954int xt_proto_init(struct net *net, u_int8_t af)
 955{
 956#ifdef CONFIG_PROC_FS
 957        char buf[XT_FUNCTION_MAXNAMELEN];
 958        struct proc_dir_entry *proc;
 959#endif
 960
 961        if (af >= ARRAY_SIZE(xt_prefix))
 962                return -EINVAL;
 963
 964
 965#ifdef CONFIG_PROC_FS
 966        strlcpy(buf, xt_prefix[af], sizeof(buf));
 967        strlcat(buf, FORMAT_TABLES, sizeof(buf));
 968        proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
 969                                (void *)(unsigned long)af);
 970        if (!proc)
 971                goto out;
 972
 973        strlcpy(buf, xt_prefix[af], sizeof(buf));
 974        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
 975        proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
 976                                (void *)(unsigned long)af);
 977        if (!proc)
 978                goto out_remove_tables;
 979
 980        strlcpy(buf, xt_prefix[af], sizeof(buf));
 981        strlcat(buf, FORMAT_TARGETS, sizeof(buf));
 982        proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
 983                                (void *)(unsigned long)af);
 984        if (!proc)
 985                goto out_remove_matches;
 986#endif
 987
 988        return 0;
 989
 990#ifdef CONFIG_PROC_FS
 991out_remove_matches:
 992        strlcpy(buf, xt_prefix[af], sizeof(buf));
 993        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
 994        proc_net_remove(net, buf);
 995
 996out_remove_tables:
 997        strlcpy(buf, xt_prefix[af], sizeof(buf));
 998        strlcat(buf, FORMAT_TABLES, sizeof(buf));
 999        proc_net_remove(net, buf);
1000out:
1001        return -1;
1002#endif
1003}
1004EXPORT_SYMBOL_GPL(xt_proto_init);
1005
1006void xt_proto_fini(struct net *net, u_int8_t af)
1007{
1008#ifdef CONFIG_PROC_FS
1009        char buf[XT_FUNCTION_MAXNAMELEN];
1010
1011        strlcpy(buf, xt_prefix[af], sizeof(buf));
1012        strlcat(buf, FORMAT_TABLES, sizeof(buf));
1013        proc_net_remove(net, buf);
1014
1015        strlcpy(buf, xt_prefix[af], sizeof(buf));
1016        strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1017        proc_net_remove(net, buf);
1018
1019        strlcpy(buf, xt_prefix[af], sizeof(buf));
1020        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1021        proc_net_remove(net, buf);
1022#endif /*CONFIG_PROC_FS*/
1023}
1024EXPORT_SYMBOL_GPL(xt_proto_fini);
1025
1026static int __net_init xt_net_init(struct net *net)
1027{
1028        int i;
1029
1030        for (i = 0; i < NFPROTO_NUMPROTO; i++)
1031                INIT_LIST_HEAD(&net->xt.tables[i]);
1032        return 0;
1033}
1034
1035static struct pernet_operations xt_net_ops = {
1036        .init = xt_net_init,
1037};
1038
1039static int __init xt_init(void)
1040{
1041        int i, rv;
1042
1043        xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
1044        if (!xt)
1045                return -ENOMEM;
1046
1047        for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1048                mutex_init(&xt[i].mutex);
1049#ifdef CONFIG_COMPAT
1050                mutex_init(&xt[i].compat_mutex);
1051                xt[i].compat_offsets = NULL;
1052#endif
1053                INIT_LIST_HEAD(&xt[i].target);
1054                INIT_LIST_HEAD(&xt[i].match);
1055        }
1056        rv = register_pernet_subsys(&xt_net_ops);
1057        if (rv < 0)
1058                kfree(xt);
1059        return rv;
1060}
1061
1062static void __exit xt_fini(void)
1063{
1064        unregister_pernet_subsys(&xt_net_ops);
1065        kfree(xt);
1066}
1067
1068module_init(xt_init);
1069module_exit(xt_fini);
1070