Showing error 1485

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock is acquired but not released on every path out of a function, so the function can return with the lock still held (see the sketch below the report header).
File location: net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
Line in file: 73
Project: Linux Kernel
Project version: 2.6.28
Tools: Stanse (1.2)
Entered: 2012-05-21 20:30:05 UTC
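
Note on the reported location: line 73 is the return in ct_seq_start(), which takes rcu_read_lock() on line 72 and then returns with it held. The matching rcu_read_unlock() is in ct_seq_stop() (line 85), and the __acquires(RCU)/__releases(RCU) annotations suggest the lock is deliberately held across the whole seq_file iteration and released in ->stop; within ct_seq_start() alone, however, every exit path does leave the RCU read lock held, which is what the reported error type describes.

As a minimal sketch of the general pattern this error class covers (hypothetical names, not code from the file below), assuming an early-return path that skips the unlock:

    #include <linux/errno.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    /* Sketch only: the early return leaves demo_lock held, which is the
     * "leaving function in locked state" pattern the checker reports. */
    static int demo_lookup(int key)
    {
            int ret;

            spin_lock(&demo_lock);
            if (key < 0)
                    return -EINVAL;         /* lock leaked on this path */
            ret = key * 2;
            spin_unlock(&demo_lock);
            return ret;
    }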


Source:

  1/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
  2 *
  3 * (C) 1999-2001 Paul `Rusty' Russell
  4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License version 2 as
  8 * published by the Free Software Foundation.
  9 */
 10#include <linux/types.h>
 11#include <linux/proc_fs.h>
 12#include <linux/seq_file.h>
 13#include <linux/percpu.h>
 14#include <net/net_namespace.h>
 15
 16#include <linux/netfilter.h>
 17#include <net/netfilter/nf_conntrack_core.h>
 18#include <net/netfilter/nf_conntrack_l3proto.h>
 19#include <net/netfilter/nf_conntrack_l4proto.h>
 20#include <net/netfilter/nf_conntrack_expect.h>
 21#include <net/netfilter/nf_conntrack_acct.h>
 22
 23struct ct_iter_state {
 24        struct seq_net_private p;
 25        unsigned int bucket;
 26};
 27
 28static struct hlist_node *ct_get_first(struct seq_file *seq)
 29{
 30        struct net *net = seq_file_net(seq);
 31        struct ct_iter_state *st = seq->private;
 32        struct hlist_node *n;
 33
 34        for (st->bucket = 0;
 35             st->bucket < nf_conntrack_htable_size;
 36             st->bucket++) {
 37                n = rcu_dereference(net->ct.hash[st->bucket].first);
 38                if (n)
 39                        return n;
 40        }
 41        return NULL;
 42}
 43
 44static struct hlist_node *ct_get_next(struct seq_file *seq,
 45                                      struct hlist_node *head)
 46{
 47        struct net *net = seq_file_net(seq);
 48        struct ct_iter_state *st = seq->private;
 49
 50        head = rcu_dereference(head->next);
 51        while (head == NULL) {
 52                if (++st->bucket >= nf_conntrack_htable_size)
 53                        return NULL;
 54                head = rcu_dereference(net->ct.hash[st->bucket].first);
 55        }
 56        return head;
 57}
 58
 59static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
 60{
 61        struct hlist_node *head = ct_get_first(seq);
 62
 63        if (head)
 64                while (pos && (head = ct_get_next(seq, head)))
 65                        pos--;
 66        return pos ? NULL : head;
 67}
 68
 69static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
 70        __acquires(RCU)
 71{
 72        rcu_read_lock();
 73        return ct_get_idx(seq, *pos);
 74}
 75
 76static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
 77{
 78        (*pos)++;
 79        return ct_get_next(s, v);
 80}
 81
 82static void ct_seq_stop(struct seq_file *s, void *v)
 83        __releases(RCU)
 84{
 85        rcu_read_unlock();
 86}
 87
 88static int ct_seq_show(struct seq_file *s, void *v)
 89{
 90        const struct nf_conntrack_tuple_hash *hash = v;
 91        const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
 92        const struct nf_conntrack_l3proto *l3proto;
 93        const struct nf_conntrack_l4proto *l4proto;
 94
 95        NF_CT_ASSERT(ct);
 96
 97        /* we only want to print DIR_ORIGINAL */
 98        if (NF_CT_DIRECTION(hash))
 99                return 0;
100        if (nf_ct_l3num(ct) != AF_INET)
101                return 0;
102
103        l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
104        NF_CT_ASSERT(l3proto);
105        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
106        NF_CT_ASSERT(l4proto);
107
108        if (seq_printf(s, "%-8s %u %ld ",
109                      l4proto->name, nf_ct_protonum(ct),
110                      timer_pending(&ct->timeout)
111                      ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
112                return -ENOSPC;
113
114        if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
115                return -ENOSPC;
116
117        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
118                        l3proto, l4proto))
119                return -ENOSPC;
120
121        if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
122                return -ENOSPC;
123
124        if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
125                if (seq_printf(s, "[UNREPLIED] "))
126                        return -ENOSPC;
127
128        if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
129                        l3proto, l4proto))
130                return -ENOSPC;
131
132        if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
133                return -ENOSPC;
134
135        if (test_bit(IPS_ASSURED_BIT, &ct->status))
136                if (seq_printf(s, "[ASSURED] "))
137                        return -ENOSPC;
138
139#ifdef CONFIG_NF_CONNTRACK_MARK
140        if (seq_printf(s, "mark=%u ", ct->mark))
141                return -ENOSPC;
142#endif
143
144#ifdef CONFIG_NF_CONNTRACK_SECMARK
145        if (seq_printf(s, "secmark=%u ", ct->secmark))
146                return -ENOSPC;
147#endif
148
149        if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
150                return -ENOSPC;
151
152        return 0;
153}
154
155static const struct seq_operations ct_seq_ops = {
156        .start = ct_seq_start,
157        .next  = ct_seq_next,
158        .stop  = ct_seq_stop,
159        .show  = ct_seq_show
160};
161
162static int ct_open(struct inode *inode, struct file *file)
163{
164        return seq_open_net(inode, file, &ct_seq_ops,
165                            sizeof(struct ct_iter_state));
166}
167
168static const struct file_operations ct_file_ops = {
169        .owner   = THIS_MODULE,
170        .open    = ct_open,
171        .read    = seq_read,
172        .llseek  = seq_lseek,
173        .release = seq_release_net,
174};
175
176/* expects */
177struct ct_expect_iter_state {
178        struct seq_net_private p;
179        unsigned int bucket;
180};
181
182static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
183{
184        struct net *net = seq_file_net(seq);
185        struct ct_expect_iter_state *st = seq->private;
186        struct hlist_node *n;
187
188        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
189                n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
190                if (n)
191                        return n;
192        }
193        return NULL;
194}
195
196static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
197                                             struct hlist_node *head)
198{
199        struct net *net = seq_file_net(seq);
200        struct ct_expect_iter_state *st = seq->private;
201
202        head = rcu_dereference(head->next);
203        while (head == NULL) {
204                if (++st->bucket >= nf_ct_expect_hsize)
205                        return NULL;
206                head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
207        }
208        return head;
209}
210
211static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
212{
213        struct hlist_node *head = ct_expect_get_first(seq);
214
215        if (head)
216                while (pos && (head = ct_expect_get_next(seq, head)))
217                        pos--;
218        return pos ? NULL : head;
219}
220
221static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
222        __acquires(RCU)
223{
224        rcu_read_lock();
225        return ct_expect_get_idx(seq, *pos);
226}
227
228static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
229{
230        (*pos)++;
231        return ct_expect_get_next(seq, v);
232}
233
234static void exp_seq_stop(struct seq_file *seq, void *v)
235        __releases(RCU)
236{
237        rcu_read_unlock();
238}
239
240static int exp_seq_show(struct seq_file *s, void *v)
241{
242        struct nf_conntrack_expect *exp;
243        const struct hlist_node *n = v;
244
245        exp = hlist_entry(n, struct nf_conntrack_expect, hnode);
246
247        if (exp->tuple.src.l3num != AF_INET)
248                return 0;
249
250        if (exp->timeout.function)
251                seq_printf(s, "%ld ", timer_pending(&exp->timeout)
252                           ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
253        else
254                seq_printf(s, "- ");
255
256        seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);
257
258        print_tuple(s, &exp->tuple,
259                    __nf_ct_l3proto_find(exp->tuple.src.l3num),
260                    __nf_ct_l4proto_find(exp->tuple.src.l3num,
261                                         exp->tuple.dst.protonum));
262        return seq_putc(s, '\n');
263}
264
265static const struct seq_operations exp_seq_ops = {
266        .start = exp_seq_start,
267        .next = exp_seq_next,
268        .stop = exp_seq_stop,
269        .show = exp_seq_show
270};
271
272static int exp_open(struct inode *inode, struct file *file)
273{
274        return seq_open_net(inode, file, &exp_seq_ops,
275                            sizeof(struct ct_expect_iter_state));
276}
277
278static const struct file_operations ip_exp_file_ops = {
279        .owner   = THIS_MODULE,
280        .open    = exp_open,
281        .read    = seq_read,
282        .llseek  = seq_lseek,
283        .release = seq_release_net,
284};
285
286static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
287{
288        struct net *net = seq_file_net(seq);
289        int cpu;
290
291        if (*pos == 0)
292                return SEQ_START_TOKEN;
293
294        for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
295                if (!cpu_possible(cpu))
296                        continue;
297                *pos = cpu+1;
298                return per_cpu_ptr(net->ct.stat, cpu);
299        }
300
301        return NULL;
302}
303
304static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
305{
306        struct net *net = seq_file_net(seq);
307        int cpu;
308
309        for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
310                if (!cpu_possible(cpu))
311                        continue;
312                *pos = cpu+1;
313                return per_cpu_ptr(net->ct.stat, cpu);
314        }
315
316        return NULL;
317}
318
319static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
320{
321}
322
323static int ct_cpu_seq_show(struct seq_file *seq, void *v)
324{
325        struct net *net = seq_file_net(seq);
326        unsigned int nr_conntracks = atomic_read(&net->ct.count);
327        const struct ip_conntrack_stat *st = v;
328
329        if (v == SEQ_START_TOKEN) {
330                seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete\n");
331                return 0;
332        }
333
334        seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
335                        "%08x %08x %08x %08x %08x  %08x %08x %08x \n",
336                   nr_conntracks,
337                   st->searched,
338                   st->found,
339                   st->new,
340                   st->invalid,
341                   st->ignore,
342                   st->delete,
343                   st->delete_list,
344                   st->insert,
345                   st->insert_failed,
346                   st->drop,
347                   st->early_drop,
348                   st->error,
349
350                   st->expect_new,
351                   st->expect_create,
352                   st->expect_delete
353                );
354        return 0;
355}
356
357static const struct seq_operations ct_cpu_seq_ops = {
358        .start  = ct_cpu_seq_start,
359        .next   = ct_cpu_seq_next,
360        .stop   = ct_cpu_seq_stop,
361        .show   = ct_cpu_seq_show,
362};
363
364static int ct_cpu_seq_open(struct inode *inode, struct file *file)
365{
366        return seq_open_net(inode, file, &ct_cpu_seq_ops,
367                            sizeof(struct seq_net_private));
368}
369
370static const struct file_operations ct_cpu_seq_fops = {
371        .owner   = THIS_MODULE,
372        .open    = ct_cpu_seq_open,
373        .read    = seq_read,
374        .llseek  = seq_lseek,
375        .release = seq_release_net,
376};
377
378static int __net_init ip_conntrack_net_init(struct net *net)
379{
380        struct proc_dir_entry *proc, *proc_exp, *proc_stat;
381
382        proc = proc_net_fops_create(net, "ip_conntrack", 0440, &ct_file_ops);
383        if (!proc)
384                goto err1;
385
386        proc_exp = proc_net_fops_create(net, "ip_conntrack_expect", 0440,
387                                        &ip_exp_file_ops);
388        if (!proc_exp)
389                goto err2;
390
391        proc_stat = proc_create("ip_conntrack", S_IRUGO,
392                                net->proc_net_stat, &ct_cpu_seq_fops);
393        if (!proc_stat)
394                goto err3;
395        return 0;
396
397err3:
398        proc_net_remove(net, "ip_conntrack_expect");
399err2:
400        proc_net_remove(net, "ip_conntrack");
401err1:
402        return -ENOMEM;
403}
404
405static void __net_exit ip_conntrack_net_exit(struct net *net)
406{
407        remove_proc_entry("ip_conntrack", net->proc_net_stat);
408        proc_net_remove(net, "ip_conntrack_expect");
409        proc_net_remove(net, "ip_conntrack");
410}
411
412static struct pernet_operations ip_conntrack_net_ops = {
413        .init = ip_conntrack_net_init,
414        .exit = ip_conntrack_net_exit,
415};
416
417int __init nf_conntrack_ipv4_compat_init(void)
418{
419        return register_pernet_subsys(&ip_conntrack_net_ops);
420}
421
422void __exit nf_conntrack_ipv4_compat_fini(void)
423{
424        unregister_pernet_subsys(&ip_conntrack_net_ops);
425}