1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
20#include <linux/security.h>
21#include <linux/syscalls.h>
22#include <linux/ptrace.h>
23#include <linux/signal.h>
24#include <linux/signalfd.h>
25#include <linux/tracehook.h>
26#include <linux/capability.h>
27#include <linux/freezer.h>
28#include <linux/pid_namespace.h>
29#include <linux/nsproxy.h>
30#include <trace/sched.h>
31
32#include <asm/param.h>
33#include <asm/uaccess.h>
34#include <asm/unistd.h>
35#include <asm/siginfo.h>
36#include "audit.h"
37
38
39
40
41
/* Slab cache for struct sigqueue entries (one allocated per queued signal). */
static struct kmem_cache *sigqueue_cachep;
43
44static void __user *sig_handler(struct task_struct *t, int sig)
45{
46 return t->sighand->action[sig - 1].sa.sa_handler;
47}
48
49static int sig_handler_ignored(void __user *handler, int sig)
50{
51
52 return handler == SIG_IGN ||
53 (handler == SIG_DFL && sig_kernel_ignore(sig));
54}
55
/*
 * Would task @t ignore @sig if it were delivered right now?
 *
 * A blocked signal is never treated as ignored: the handler may change
 * before the signal is unblocked, so it must still be posted.
 */
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	handler = sig_handler(t, sig);
	if (!sig_handler_ignored(handler, sig))
		return 0;

	/* Even an "ignored" signal may be of interest to a tracer. */
	return !tracehook_consider_ignored_signal(t, sig, handler);
}
77
78
79
80
81
/*
 * Is any signal in *@signal pending that is not masked by *@blocked?
 * The switch on _NSIG_WORDS is a hand-unrolled loop for the common
 * small word counts; the default case handles any other size.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready !=	0;
}
107
/* Any signal pending in (p)->signal that is not masked by *(b)? */
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
109
/*
 * Recompute whether @t has deliverable work: a group stop in progress,
 * or an unblocked signal pending on the private or shared queue.
 * Sets TIF_SIGPENDING and returns 1 if so; returns 0 otherwise but
 * deliberately does NOT clear the flag here — clearing is left to
 * recalc_sigpending(), which runs on the current task only.
 */
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}

	return 0;
}
125
126
127
128
129
/*
 * After recalculating, rewake the task if it now has pending work, so a
 * sleeping thread notices the newly pending signal/stop.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
135
/*
 * Recompute TIF_SIGPENDING for the current task.  The tracing hook may
 * force the flag on; otherwise it is cleared only when nothing is
 * pending and the task is not being frozen (the freezer relies on the
 * flag to kick tasks out of the kernel).
 */
void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
144
145
146
/*
 * Return the lowest-numbered signal in *@pending that is not masked by
 * *@mask, or 0 if none.  Lower-numbered signals take priority.  The
 * switch is an unrolled scan for the common _NSIG_WORDS values.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
179
/*
 * Allocate a sigqueue entry charged against @t's user, respecting
 * RLIMIT_SIGPENDING unless @override_rlimit.  Returns NULL on failure
 * (the charge is then rolled back).  On success the entry holds a uid
 * reference, released later by __sigqueue_free().
 */
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * NOTE(review): the barrier() presumably keeps the compiler from
	 * re-reading t->user across the accounting below — confirm against
	 * the original upstream comment.
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		/* Over the limit or slab allocation failed: undo the charge. */
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}
206
/*
 * Release a sigqueue entry: uncharge the owning user, drop the uid
 * reference and free the slab object.  Preallocated (timer) entries are
 * owned by their creator and must not be freed here.
 */
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
215
/*
 * Discard every signal on @queue: clear the pending set and free all
 * queued siginfo entries.  Caller must hold the relevant siglock.
 */
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
227
228
229
230
/*
 * Flush all pending signals for task @t, both the private and the
 * shared (process-wide) queues, and clear its TIF_SIGPENDING flag.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
241
/*
 * Remove only itimer-generated (SI_TIMER) entries from @pending.
 * A signal number stays in the pending set if at least one non-timer
 * entry for it remains; "retain" records those, and the final sigorsets
 * rebuilds the set from what survived.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
264
/*
 * Flush itimer signals for the current task from both the private and
 * shared pending queues, under siglock.
 */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
275
/*
 * Set every handler of @t to SIG_IGN and discard anything already
 * pending.  Note this sets all _NSIG entries, including those for
 * SIGKILL/SIGSTOP slots.
 */
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
285
286
287
288
289
/*
 * Reset signal handlers for @t, typically across exec.  Handlers are
 * reset to SIG_DFL; SIG_IGN survives unless @force_default.  Flags and
 * the per-handler mask are always cleared.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
303
/*
 * True if @sig has no userspace handler in @tsk (and no tracer that
 * wants it).  Global init is always considered unhandled.
 */
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig, handler);
}
313
314
315
316
317
318
319
320
321
322
323void
324block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
325{
326 unsigned long flags;
327
328 spin_lock_irqsave(¤t->sighand->siglock, flags);
329 current->notifier_mask = mask;
330 current->notifier_data = priv;
331 current->notifier = notifier;
332 spin_unlock_irqrestore(¤t->sighand->siglock, flags);
333}
334
335
336
337void
338unblock_all_signals(void)
339{
340 unsigned long flags;
341
342 spin_lock_irqsave(¤t->sighand->siglock, flags);
343 current->notifier = NULL;
344 current->notifier_data = NULL;
345 recalc_sigpending();
346 spin_unlock_irqrestore(¤t->sighand->siglock, flags);
347}
348
/*
 * Collect the siginfo for @sig from @list into *@info.  If more than
 * one queued entry carries @sig, the signal stays a member of the
 * pending set (still_pending); otherwise the bit is cleared.  With no
 * queued entry at all (e.g. overflow of the legacy queue), a minimal
 * siginfo is synthesized.
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/* Find the first entry for @sig; a second hit means "still pending". */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* No queued entry: synthesize an empty siginfo. */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
384
/*
 * Dequeue the next deliverable signal from @pending not masked by
 * *@mask.  If a block_all_signals() notifier is installed and vetoes
 * the signal, delivery is suppressed (TIF_SIGPENDING cleared) and 0 is
 * returned.  Returns the signal number, or 0 if none.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				/* Notifier returning 0 vetoes delivery. */
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
405
406
407
408
409
410
411
/*
 * Dequeue a signal for @tsk and fill *@info.  Private signals are
 * preferred over shared (process-wide) ones.  Must be called with
 * tsk->sighand->siglock held; note it temporarily DROPS that lock
 * around do_schedule_next_timer().
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * When the shared queue yields SIGALRM, re-arm the real
		 * itimer here if it is periodic and not already queued, so
		 * the interval keeps firing.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Record that a stop signal was dequeued, so a concurrent
		 * SIGCONT cannot be silently lost (see prepare_signal()
		 * clearing SIGNAL_STOP_DEQUEUED).
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * POSIX timer signal: schedule the next expiry.  The siglock
		 * must be dropped because do_schedule_next_timer() takes
		 * other locks.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
480
481
482
483
484
485
486
487
488
489
490
491
/*
 * Tell task @t it has a signal to handle: set TIF_SIGPENDING and wake
 * it from interruptible sleep.  With @resume, also wake it from
 * TASK_WAKEKILL (stopped/traced) state — used for SIGKILL.  If the task
 * is running on another CPU, kick it so it notices the flag.
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
511
512
513
514
515
516
517
518
519
520
/*
 * Remove all signals in *@mask from queue @s, including real-time ones
 * (contrast rm_from_queue(), which skips RT signals).  Returns 1 if
 * anything was removed, 0 if nothing matched.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
539
540
541
542
543
544
/*
 * Remove signals in legacy bitmask @mask from queue @s.  Only non-RT
 * signals (< SIGRTMIN) are removed from the list.  Returns 1 if the
 * pending set was touched.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
562
563
564
565
/*
 * Permission check before sending @sig to @t.  Kernel-originated
 * signals always pass.  For user-originated signals the sender needs a
 * uid/euid match with the target's uid/suid, or CAP_KILL — with a
 * special case letting SIGCONT flow within the same session.  Finishes
 * with the LSM hook.
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit veto first. */
	if (error)
		return error;

	/* XOR trick: all four comparisons nonzero means no uid match. */
	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
	    (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * SIGCONT within the same session is allowed even
			 * without a uid match.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fallthrough */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
601
602
603
604
605
606
607
608
609
610
611
/*
 * Handle magic interactions before queueing @sig for process @p, with
 * siglock held:
 *  - a stop signal discards any pending SIGCONT, and vice versa;
 *  - SIGCONT wakes stopped threads and records a CLD_* notification for
 *    the parent via signal->flags.
 * Returns nonzero if the signal should actually be queued (i.e. it is
 * not ignored by @p).
 */
static int prepare_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/* The process is dying; nothing special to do. */
	} else if (sig_kernel_stop(sig)) {
		/* A stop signal cancels any pending SIGCONT everywhere. */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;

		/* SIGCONT cancels all pending stop signals ... */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * ... and wakes each stopped thread.  If the thread
			 * has a handler for SIGCONT, also flag it pending so
			 * an interruptible sleeper runs the handler.
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Decide what CLD_* event the parent should later see:
		 * continued if the group was fully stopped, stopped if a
		 * group stop was still in progress.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * Record the pending notification and abort any
			 * in-progress group stop.
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * SIGCONT raced with a dequeued stop signal: make
			 * sure that stop does not take effect.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig);
}
698
699
700
701
702
703
704
705
706
/*
 * Is thread @p a good target for delivering @sig?  Not if it blocks the
 * signal or is exiting; SIGKILL always qualifies; stopped/traced tasks
 * are skipped; otherwise prefer a currently-running thread or one with
 * no signal already pending.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
719
/*
 * After @sig has been queued for @p (@group selects the shared queue),
 * pick a thread to deliver it to and wake that thread.  For a fatal
 * unhandled signal, short-circuit: mark the whole group exiting and
 * SIGKILL every thread, unless a coredump must be taken (then normal
 * delivery dumps from the right context).
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/* Prefer @p itself if it would take the signal now. */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * Private signal to an unwilling thread, or no other
		 * thread to try: leave it queued for later.
		 */
		return;
	else {
		/*
		 * Round-robin from the last chosen thread until one wants
		 * the signal; give up after a full cycle.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/* No thread wants it; leave it queued. */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Fatal, unhandled, unblocked signal (and no tracer interest):
	 * start group exit now instead of delivering normally.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Non-coredump fatal signal: kill the whole group
			 * immediately by pushing SIGKILL at every thread.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/* Normal case: wake the chosen thread to take the signal. */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
795
796static inline int legacy_queue(struct sigpending *signals, int sig)
797{
798 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
799}
800
/*
 * Core signal send: queue @sig with @info for task @t (@group selects
 * the shared queue) and complete delivery.  Caller holds siglock.
 * Returns 0 on success (including a dropped duplicate legacy signal or
 * an ignored signal), -EAGAIN if a queued RT signal could not be
 * allocated.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	struct sigpending *pending;
	struct sigqueue *q;

	trace_sched_signal_send(sig, t);

	assert_spin_locked(&t->sighand->siglock);
	if (!prepare_signal(sig, t))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;

	/* Non-RT signals do not queue duplicates. */
	if (legacy_queue(pending, sig))
		return 0;

	/* SEND_SIG_FORCED skips the siginfo allocation entirely. */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Try to queue a real siginfo.  The rlimit may only be overridden
	 * for kernel-originated non-RT signals.
	 */
	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			/* Looks like a user-originated kill(). */
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_pid_vnr(current);
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			/* Kernel-internal signal. */
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
			/*
			 * Queue overflow for an RT signal must be reported;
			 * losing one would break their semantics.
			 */
			return -EAGAIN;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}
875
/* Set via the "print-fatal-signals=" boot option (see __setup below). */
int print_fatal_signals;
877
/*
 * Debug helper: log a fatal signal, and on bare-metal i386 also dump
 * the 16 code bytes at the faulting instruction pointer plus registers.
 */
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}
898
/* Parse the "print-fatal-signals=" kernel command-line option. */
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
907
/* Send @sig to the whole thread group of @p.  Caller holds siglock. */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}
913
/* Send @sig to the specific thread @t only.  Caller holds siglock. */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
919
920
921
922
923
924
925
926
927
928
929
930
/*
 * Force @sig on thread @t even if it is ignored or blocked: the handler
 * is reset to SIG_DFL if needed and the signal is unblocked, so the
 * task cannot avoid it.  SIGNAL_UNKILLABLE is also stripped when the
 * default action will apply.  Used mainly for faults where ignoring
 * would instantly re-fault.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
956
/* Force @sig on thread @t with no siginfo payload. */
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
962
963
964
965
/*
 * SIGKILL every other thread in @p's group (e.g. for exec or group
 * exit).  Threads already in exit_state are skipped.  Any in-progress
 * group stop is cancelled.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/* Already dead threads need no further prodding. */
		if (t->exit_state)
			continue;

		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
984
/* Is SIGKILL pending on @tsk's private queue? */
int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);
990
/*
 * Safely take @tsk->sighand->siglock.  The sighand pointer can change
 * (or go NULL) while @tsk exits, so loop: re-check the pointer after
 * acquiring the lock and retry if it moved.  Returns the locked sighand
 * or NULL if the task has no sighand any more.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		/* Raced with exit/exec: drop and retry. */
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
1010
/*
 * Permission-checked group signal send.  sig==0 is the classic "probe"
 * case: only the permission check runs.  Returns -ESRCH if the target
 * has no sighand (is exiting).
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
1028
1029
1030
1031
1032
1033
/*
 * Send @sig to every process in process group @pgrp.  Succeeds if at
 * least one send succeeded; otherwise returns the last error (-ESRCH if
 * the group was empty).
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
1048
/*
 * Send @sig to the process identified by @pid.  An -ESRCH from the send
 * can mean the thread exited between lookup and locking while the pid
 * was reused/kept alive, so retry the lookup in that case.
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/* Raced with exit: look the pid up again. */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
1072
/* Numeric-pid convenience wrapper: resolve @pid in the caller's
 * namespace under RCU and send via kill_pid_info(). */
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
1082
1083
/*
 * Send @sig to @pid on behalf of a given @uid/@euid (and LSM @secid),
 * rather than the current task's credentials.  Used when delivering on
 * behalf of another identity (e.g. queued async I/O).  Same uid-match
 * rule as check_kill_permission(), minus the capability escape.
 */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1119
1120
1121
1122
1123
1124
1125
1126
/*
 * Implement kill(2) pid semantics:
 *   pid > 0   — that process;
 *   pid == 0  — the caller's process group;
 *   pid < -1  — process group -pid;
 *   pid == -1 — every process except init and the caller's own group.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			/* Skip init (vpid 1) and our own thread group. */
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1161
1162
1163
1164
1165
1166
1167
1168
/*
 * Kernel-internal send of @sig with @info to thread @p, without the
 * userspace permission checks.  Takes the siglock itself.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/* Reject out-of-range signal numbers up front. */
	if (!valid_signal(sig))
		return -EINVAL;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	return ret;
}
1187
/* Map a priv flag to the special "no siginfo" sentinels. */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

/* Send @sig to @p; @priv selects kernel- vs user-originated siginfo. */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
1196
/* Force @sig on @p as a kernel-originated signal (cannot be ignored). */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
1202
1203
1204
1205
1206
1207
1208
/*
 * Deliver SIGSEGV to @p, first resetting its SIGSEGV handler to SIG_DFL
 * when the failed signal *was* SIGSEGV — otherwise the faulty handler
 * would recurse forever.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
1221
/* Send @sig to process group @pid under tasklist_lock. */
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
1233
/* Send @sig to the process identified by struct pid @pid. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
/*
 * Preallocate a sigqueue entry for later use (POSIX timers).  The
 * SIGQUEUE_PREALLOC flag keeps __sigqueue_free() from freeing it when
 * it is flushed from a pending queue; sigqueue_free() owns its release.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
1258
1259void sigqueue_free(struct sigqueue *q)
1260{
1261 unsigned long flags;
1262 spinlock_t *lock = ¤t->sighand->siglock;
1263
1264 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1265
1266
1267
1268
1269
1270 spin_lock_irqsave(lock, flags);
1271 q->flags &= ~SIGQUEUE_PREALLOC;
1272
1273
1274
1275
1276 if (!list_empty(&q->list))
1277 q = NULL;
1278 spin_unlock_irqrestore(lock, flags);
1279
1280 if (q)
1281 __sigqueue_free(q);
1282}
1283
/*
 * Queue a preallocated (timer) sigqueue entry @q on @t (@group selects
 * the shared queue).  Returns 0 if queued, 1 if the signal was ignored
 * or already queued (overrun is counted), -1 if the target is exiting.
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1;
	if (!prepare_signal(sig, t))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * Entry is still queued from a previous expiry: only timer
		 * entries can legally reach this state; count an overrun.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1323
1324
1325
1326
/* Wake @parent if it is sleeping in wait() for a child event. */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
1332
1333
1334
1335
1336
1337
1338
1339
/*
 * Tell the parent that @tsk has died, sending @sig (normally SIGCHLD)
 * with a filled-in CLD_* siginfo and waking any wait()er.  If the
 * parent ignores SIGCHLD or set SA_NOCLDWAIT, the child auto-reaps:
 * exit_signal is set to -1 and that -1 is returned so the caller skips
 * the zombie state.  Otherwise returns @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	struct task_cputime cputime;
	int ret = sig;

	BUG_ON(sig == -1);

	/* The task must be dead, not merely stopped/traced. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	/* Only the last thread of a group (or a ptraced thread) notifies. */
	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * si_pid is translated into the parent's pid namespace — the
	 * parent is what will read this siginfo.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	thread_group_cputime(tsk, &cputime);
	info.si_utime = cputime_to_jiffies(cputime.utime);
	info.si_stime = cputime_to_jiffies(cputime.stime);

	/* Decode exit_code into a CLD_* reason and status. */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * Parent does not want child-exit notification: auto-reap.
		 * With SA_NOCLDWAIT (handler installed) the signal is still
		 * sent; with SIG_IGN it is suppressed entirely.
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}
1421
/*
 * Notify the parent that @tsk stopped, continued or trapped (@why is a
 * CLD_* code).  For a ptraced task the tracer (tsk->parent) is told;
 * otherwise the group leader's real parent.  SIGCHLD is suppressed if
 * the parent ignores it or set SA_NOCLDSTOP, but the wait() wakeup
 * always happens.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/* si_pid in the parent's pid namespace. */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/* Even if SIGCHLD is not sent, wake up any wait()ing parent. */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1476
/*
 * May the current task actually stop for its tracer?  Not if it is not
 * traced, and not if the tracer shares our mm while a coredump is in
 * progress — stopping then would deadlock the dump, which waits for all
 * mm users.
 */
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;

	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
1496
1497
1498
1499
1500
1501static int sigkill_pending(struct task_struct *tsk)
1502{
1503 return sigismember(&tsk->pending.signal, SIGKILL) ||
1504 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1505}
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1519{
1520 if (arch_ptrace_stop_needed(exit_code, info)) {
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532 spin_unlock_irq(¤t->sighand->siglock);
1533 arch_ptrace_stop(exit_code, info);
1534 spin_lock_irq(¤t->sighand->siglock);
1535 if (sigkill_pending(current))
1536 return;
1537 }
1538
1539
1540
1541
1542
1543 if (current->signal->group_stop_count > 0)
1544 --current->signal->group_stop_count;
1545
1546 current->last_siginfo = info;
1547 current->exit_code = exit_code;
1548
1549
1550 __set_current_state(TASK_TRACED);
1551 spin_unlock_irq(¤t->sighand->siglock);
1552 read_lock(&tasklist_lock);
1553 if (may_ptrace_stop()) {
1554 do_notify_parent_cldstop(current, CLD_TRAPPED);
1555 read_unlock(&tasklist_lock);
1556 schedule();
1557 } else {
1558
1559
1560
1561
1562 __set_current_state(TASK_RUNNING);
1563 if (clear_code)
1564 current->exit_code = 0;
1565 read_unlock(&tasklist_lock);
1566 }
1567
1568
1569
1570
1571
1572
1573 try_to_freeze();
1574
1575
1576
1577
1578
1579
1580 spin_lock_irq(¤t->sighand->siglock);
1581 current->last_siginfo = NULL;
1582
1583
1584
1585
1586
1587
1588 recalc_sigpending_tsk(current);
1589}
1590
1591void ptrace_notify(int exit_code)
1592{
1593 siginfo_t info;
1594
1595 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1596
1597 memset(&info, 0, sizeof info);
1598 info.si_signo = SIGTRAP;
1599 info.si_code = exit_code;
1600 info.si_pid = task_pid_vnr(current);
1601 info.si_uid = current->uid;
1602
1603
1604 spin_lock_irq(¤t->sighand->siglock);
1605 ptrace_stop(exit_code, 1, &info);
1606 spin_unlock_irq(¤t->sighand->siglock);
1607}
1608
/*
 * Final step of a job-control stop: notify the parent (the thread that
 * completes the group stop — @stop_count == 0 — reports for the group),
 * then sleep until continued, re-sleeping if only woken by the freezer.
 * Called with current already in TASK_STOPPED and no locks held.
 */
static void
finish_stop(int stop_count)
{
	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());

	current->exit_code = 0;
}
1631
1632
1633
1634
1635
1636
1637
1638static int do_signal_stop(int signr)
1639{
1640 struct signal_struct *sig = current->signal;
1641 int stop_count;
1642
1643 if (sig->group_stop_count > 0) {
1644
1645
1646
1647
1648 stop_count = --sig->group_stop_count;
1649 } else {
1650 struct task_struct *t;
1651
1652 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1653 unlikely(signal_group_exit(sig)))
1654 return 0;
1655
1656
1657
1658
1659 sig->group_exit_code = signr;
1660
1661 stop_count = 0;
1662 for (t = next_thread(current); t != current; t = next_thread(t))
1663
1664
1665
1666
1667
1668 if (!(t->flags & PF_EXITING) &&
1669 !task_is_stopped_or_traced(t)) {
1670 stop_count++;
1671 signal_wake_up(t, 0);
1672 }
1673 sig->group_stop_count = stop_count;
1674 }
1675
1676 if (stop_count == 0)
1677 sig->flags = SIGNAL_STOP_STOPPED;
1678 current->exit_code = sig->group_exit_code;
1679 __set_current_state(TASK_STOPPED);
1680
1681 spin_unlock_irq(¤t->sighand->siglock);
1682 finish_stop(stop_count);
1683 return 1;
1684}
1685
1686static int ptrace_signal(int signr, siginfo_t *info,
1687 struct pt_regs *regs, void *cookie)
1688{
1689 if (!(current->ptrace & PT_PTRACED))
1690 return signr;
1691
1692 ptrace_signal_deliver(regs, cookie);
1693
1694
1695 ptrace_stop(signr, 0, info);
1696
1697
1698 signr = current->exit_code;
1699 if (signr == 0)
1700 return signr;
1701
1702 current->exit_code = 0;
1703
1704
1705
1706
1707
1708 if (signr != info->si_signo) {
1709 info->si_signo = signr;
1710 info->si_errno = 0;
1711 info->si_code = SI_USER;
1712 info->si_pid = task_pid_vnr(current->parent);
1713 info->si_uid = current->parent->uid;
1714 }
1715
1716
1717 if (sigismember(¤t->blocked, signr)) {
1718 specific_send_sig_info(signr, info, current);
1719 signr = 0;
1720 }
1721
1722 return signr;
1723}
1724
1725int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1726 struct pt_regs *regs, void *cookie)
1727{
1728 struct sighand_struct *sighand = current->sighand;
1729 struct signal_struct *signal = current->signal;
1730 int signr;
1731
1732relock:
1733
1734
1735
1736
1737
1738
1739 try_to_freeze();
1740
1741 spin_lock_irq(&sighand->siglock);
1742
1743
1744
1745
1746
1747 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1748 int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1749 ? CLD_CONTINUED : CLD_STOPPED;
1750 signal->flags &= ~SIGNAL_CLD_MASK;
1751 spin_unlock_irq(&sighand->siglock);
1752
1753 if (unlikely(!tracehook_notify_jctl(1, why)))
1754 goto relock;
1755
1756 read_lock(&tasklist_lock);
1757 do_notify_parent_cldstop(current->group_leader, why);
1758 read_unlock(&tasklist_lock);
1759 goto relock;
1760 }
1761
1762 for (;;) {
1763 struct k_sigaction *ka;
1764
1765 if (unlikely(signal->group_stop_count > 0) &&
1766 do_signal_stop(0))
1767 goto relock;
1768
1769
1770
1771
1772
1773
1774 signr = tracehook_get_signal(current, regs, info, return_ka);
1775 if (unlikely(signr < 0))
1776 goto relock;
1777 if (unlikely(signr != 0))
1778 ka = return_ka;
1779 else {
1780 signr = dequeue_signal(current, ¤t->blocked,
1781 info);
1782
1783 if (!signr)
1784 break;
1785
1786 if (signr != SIGKILL) {
1787 signr = ptrace_signal(signr, info,
1788 regs, cookie);
1789 if (!signr)
1790 continue;
1791 }
1792
1793 ka = &sighand->action[signr-1];
1794 }
1795
1796 if (ka->sa.sa_handler == SIG_IGN)
1797 continue;
1798 if (ka->sa.sa_handler != SIG_DFL) {
1799
1800 *return_ka = *ka;
1801
1802 if (ka->sa.sa_flags & SA_ONESHOT)
1803 ka->sa.sa_handler = SIG_DFL;
1804
1805 break;
1806 }
1807
1808
1809
1810
1811 if (sig_kernel_ignore(signr))
1812 continue;
1813
1814
1815
1816
1817 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1818 !signal_group_exit(signal))
1819 continue;
1820
1821 if (sig_kernel_stop(signr)) {
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832 if (signr != SIGSTOP) {
1833 spin_unlock_irq(&sighand->siglock);
1834
1835
1836
1837 if (is_current_pgrp_orphaned())
1838 goto relock;
1839
1840 spin_lock_irq(&sighand->siglock);
1841 }
1842
1843 if (likely(do_signal_stop(info->si_signo))) {
1844
1845 goto relock;
1846 }
1847
1848
1849
1850
1851
1852 continue;
1853 }
1854
1855 spin_unlock_irq(&sighand->siglock);
1856
1857
1858
1859
1860 current->flags |= PF_SIGNALED;
1861
1862 if (sig_kernel_coredump(signr)) {
1863 if (print_fatal_signals)
1864 print_fatal_signal(regs, info->si_signo);
1865
1866
1867
1868
1869
1870
1871
1872
1873 do_coredump(info->si_signo, info->si_signo, regs);
1874 }
1875
1876
1877
1878
1879 do_group_exit(info->si_signo);
1880
1881 }
1882 spin_unlock_irq(&sighand->siglock);
1883 return signr;
1884}
1885
/*
 * exit_signals - detach @tsk from group-wide signal handling on exit.
 *
 * Sets PF_EXITING (under siglock unless the group is empty or already
 * exiting), re-targets pending shared signals to other live threads,
 * and completes a group stop if this task was the last one needed,
 * notifying the parent in that case.  Called from the exit path while
 * the task still owns its signal state.
 */
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	/* Fast path: no siblings to hand work to, or group already dying. */
	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From this point the task is no longer a target for group-wide
	 * signals; PF_EXITING is set under siglock so senders observe it
	 * consistently with the pending-signal state checked below.
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/*
	 * We still have pending (possibly shared) signals; wake any
	 * sibling that has none and is not itself exiting so the shared
	 * work is picked up by a surviving thread.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	/* If we were the last thread needed for a group stop, finish it. */
	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/* Report the completed group stop to the parent, outside siglock. */
	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}
1927
/* Signal-API entry points exported for use by modules and drivers. */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
1937
1938
1939
1940
1941
1942
1943asmlinkage long sys_restart_syscall(void)
1944{
1945 struct restart_block *restart = ¤t_thread_info()->restart_block;
1946 return restart->fn(restart);
1947}
1948
/*
 * Restart callback for syscalls that must not be restarted:
 * unconditionally fail the resumed call with -EINTR.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1969{
1970 int error;
1971
1972 spin_lock_irq(¤t->sighand->siglock);
1973 if (oldset)
1974 *oldset = current->blocked;
1975
1976 error = 0;
1977 switch (how) {
1978 case SIG_BLOCK:
1979 sigorsets(¤t->blocked, ¤t->blocked, set);
1980 break;
1981 case SIG_UNBLOCK:
1982 signandsets(¤t->blocked, ¤t->blocked, set);
1983 break;
1984 case SIG_SETMASK:
1985 current->blocked = *set;
1986 break;
1987 default:
1988 error = -EINVAL;
1989 }
1990 recalc_sigpending();
1991 spin_unlock_irq(¤t->sighand->siglock);
1992
1993 return error;
1994}
1995
1996asmlinkage long
1997sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1998{
1999 int error = -EINVAL;
2000 sigset_t old_set, new_set;
2001
2002
2003 if (sigsetsize != sizeof(sigset_t))
2004 goto out;
2005
2006 if (set) {
2007 error = -EFAULT;
2008 if (copy_from_user(&new_set, set, sizeof(*set)))
2009 goto out;
2010 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2011
2012 error = sigprocmask(how, &new_set, &old_set);
2013 if (error)
2014 goto out;
2015 if (oset)
2016 goto set_old;
2017 } else if (oset) {
2018 spin_lock_irq(¤t->sighand->siglock);
2019 old_set = current->blocked;
2020 spin_unlock_irq(¤t->sighand->siglock);
2021
2022 set_old:
2023 error = -EFAULT;
2024 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2025 goto out;
2026 }
2027 error = 0;
2028out:
2029 return error;
2030}
2031
2032long do_sigpending(void __user *set, unsigned long sigsetsize)
2033{
2034 long error = -EINVAL;
2035 sigset_t pending;
2036
2037 if (sigsetsize > sizeof(sigset_t))
2038 goto out;
2039
2040 spin_lock_irq(¤t->sighand->siglock);
2041 sigorsets(&pending, ¤t->pending.signal,
2042 ¤t->signal->shared_pending.signal);
2043 spin_unlock_irq(¤t->sighand->siglock);
2044
2045
2046 sigandsets(&pending, ¤t->blocked, &pending);
2047
2048 error = -EFAULT;
2049 if (!copy_to_user(set, &pending, sigsetsize))
2050 error = 0;
2051
2052out:
2053 return error;
2054}
2055
/*
 * sys_rt_sigpending - return the set of pending blocked signals.
 * @sigsetsize allows for future sigset_t growth; see do_sigpending().
 */
asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
2061
2062#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2063
/*
 * copy_siginfo_to_user - copy a kernel siginfo_t to user space.
 *
 * Copies field by field, writing only the union members valid for the
 * record's __SI_* class so unused union space (kernel stack data) is
 * never leaked to user space.  Records with si_code < 0 originated in
 * user space and are copied verbatim.  Returns 0 or -EFAULT.
 */
int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	/* User-originated record: the whole structure is caller data. */
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure this code is
	 * fixed accordingly.  It should never copy any pad contained in
	 * the structure to avoid security leaks, but must copy the
	 * generic fields plus the class-specific union members.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT:
	case __SI_MESGQ:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* unknown class: copy the common kill-style fields */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}
2125
2126#endif
2127
2128asmlinkage long
2129sys_rt_sigtimedwait(const sigset_t __user *uthese,
2130 siginfo_t __user *uinfo,
2131 const struct timespec __user *uts,
2132 size_t sigsetsize)
2133{
2134 int ret, sig;
2135 sigset_t these;
2136 struct timespec ts;
2137 siginfo_t info;
2138 long timeout = 0;
2139
2140
2141 if (sigsetsize != sizeof(sigset_t))
2142 return -EINVAL;
2143
2144 if (copy_from_user(&these, uthese, sizeof(these)))
2145 return -EFAULT;
2146
2147
2148
2149
2150
2151 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2152 signotset(&these);
2153
2154 if (uts) {
2155 if (copy_from_user(&ts, uts, sizeof(ts)))
2156 return -EFAULT;
2157 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2158 || ts.tv_sec < 0)
2159 return -EINVAL;
2160 }
2161
2162 spin_lock_irq(¤t->sighand->siglock);
2163 sig = dequeue_signal(current, &these, &info);
2164 if (!sig) {
2165 timeout = MAX_SCHEDULE_TIMEOUT;
2166 if (uts)
2167 timeout = (timespec_to_jiffies(&ts)
2168 + (ts.tv_sec || ts.tv_nsec));
2169
2170 if (timeout) {
2171
2172
2173
2174 current->real_blocked = current->blocked;
2175 sigandsets(¤t->blocked, ¤t->blocked, &these);
2176 recalc_sigpending();
2177 spin_unlock_irq(¤t->sighand->siglock);
2178
2179 timeout = schedule_timeout_interruptible(timeout);
2180
2181 spin_lock_irq(¤t->sighand->siglock);
2182 sig = dequeue_signal(current, &these, &info);
2183 current->blocked = current->real_blocked;
2184 siginitset(¤t->real_blocked, 0);
2185 recalc_sigpending();
2186 }
2187 }
2188 spin_unlock_irq(¤t->sighand->siglock);
2189
2190 if (sig) {
2191 ret = sig;
2192 if (uinfo) {
2193 if (copy_siginfo_to_user(uinfo, &info))
2194 ret = -EFAULT;
2195 }
2196 } else {
2197 ret = -EAGAIN;
2198 if (timeout)
2199 ret = -EINTR;
2200 }
2201
2202 return ret;
2203}
2204
2205asmlinkage long
2206sys_kill(pid_t pid, int sig)
2207{
2208 struct siginfo info;
2209
2210 info.si_signo = sig;
2211 info.si_errno = 0;
2212 info.si_code = SI_USER;
2213 info.si_pid = task_tgid_vnr(current);
2214 info.si_uid = current->uid;
2215
2216 return kill_something_info(sig, &info, pid);
2217}
2218
2219static int do_tkill(pid_t tgid, pid_t pid, int sig)
2220{
2221 int error;
2222 struct siginfo info;
2223 struct task_struct *p;
2224 unsigned long flags;
2225
2226 error = -ESRCH;
2227 info.si_signo = sig;
2228 info.si_errno = 0;
2229 info.si_code = SI_TKILL;
2230 info.si_pid = task_tgid_vnr(current);
2231 info.si_uid = current->uid;
2232
2233 rcu_read_lock();
2234 p = find_task_by_vpid(pid);
2235 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2236 error = check_kill_permission(sig, &info, p);
2237
2238
2239
2240
2241
2242
2243
2244
2245 if (!error && sig && lock_task_sighand(p, &flags)) {
2246 error = specific_send_sig_info(sig, &info, p);
2247 unlock_task_sighand(p, &flags);
2248 }
2249 }
2250 rcu_read_unlock();
2251
2252 return error;
2253}
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2266{
2267
2268 if (pid <= 0 || tgid <= 0)
2269 return -EINVAL;
2270
2271 return do_tkill(tgid, pid, sig);
2272}
2273
2274
2275
2276
2277asmlinkage long
2278sys_tkill(pid_t pid, int sig)
2279{
2280
2281 if (pid <= 0)
2282 return -EINVAL;
2283
2284 return do_tkill(0, pid, sig);
2285}
2286
2287asmlinkage long
2288sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
2289{
2290 siginfo_t info;
2291
2292 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2293 return -EFAULT;
2294
2295
2296
2297 if (info.si_code >= 0)
2298 return -EPERM;
2299 info.si_signo = sig;
2300
2301
2302 return kill_proc_info(sig, &info, pid);
2303}
2304
2305int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2306{
2307 struct task_struct *t = current;
2308 struct k_sigaction *k;
2309 sigset_t mask;
2310
2311 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2312 return -EINVAL;
2313
2314 k = &t->sighand->action[sig-1];
2315
2316 spin_lock_irq(¤t->sighand->siglock);
2317 if (oact)
2318 *oact = *k;
2319
2320 if (act) {
2321 sigdelsetmask(&act->sa.sa_mask,
2322 sigmask(SIGKILL) | sigmask(SIGSTOP));
2323 *k = *act;
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2336 sigemptyset(&mask);
2337 sigaddset(&mask, sig);
2338 rm_from_queue_full(&mask, &t->signal->shared_pending);
2339 do {
2340 rm_from_queue_full(&mask, &t->pending);
2341 t = next_thread(t);
2342 } while (t != current);
2343 }
2344 }
2345
2346 spin_unlock_irq(¤t->sighand->siglock);
2347 return 0;
2348}
2349
/*
 * do_sigaltstack - implement sigaltstack().
 * @uss:  new alternate-stack settings, or NULL to only query.
 * @uoss: receives the previous settings, or NULL.
 * @sp:   caller's current stack pointer, used to detect execution on
 *        the alternate stack (changes are then refused with -EPERM).
 *
 * Returns 0 or a negative errno.
 */
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	/* Snapshot the old settings now; copied out only at the end. */
	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		/* Can't change the stack we are currently running on. */
		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * ss_flags == 0 is accepted as a synonym for SS_ONSTACK:
		 * old applications were written relying on that (it was
		 * once the only value that worked), so the permissive
		 * check below is deliberate, not a bug.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
2413
2414#ifdef __ARCH_WANT_SYS_SIGPENDING
2415
/*
 * sys_sigpending - legacy sigpending(): only the first word of the
 * pending set is reported (old_sigset_t); see do_sigpending().
 */
asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}
2421
2422#endif
2423
2424#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2425
2426
2427
2428asmlinkage long
2429sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2430{
2431 int error;
2432 old_sigset_t old_set, new_set;
2433
2434 if (set) {
2435 error = -EFAULT;
2436 if (copy_from_user(&new_set, set, sizeof(*set)))
2437 goto out;
2438 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2439
2440 spin_lock_irq(¤t->sighand->siglock);
2441 old_set = current->blocked.sig[0];
2442
2443 error = 0;
2444 switch (how) {
2445 default:
2446 error = -EINVAL;
2447 break;
2448 case SIG_BLOCK:
2449 sigaddsetmask(¤t->blocked, new_set);
2450 break;
2451 case SIG_UNBLOCK:
2452 sigdelsetmask(¤t->blocked, new_set);
2453 break;
2454 case SIG_SETMASK:
2455 current->blocked.sig[0] = new_set;
2456 break;
2457 }
2458
2459 recalc_sigpending();
2460 spin_unlock_irq(¤t->sighand->siglock);
2461 if (error)
2462 goto out;
2463 if (oset)
2464 goto set_old;
2465 } else if (oset) {
2466 old_set = current->blocked.sig[0];
2467 set_old:
2468 error = -EFAULT;
2469 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2470 goto out;
2471 }
2472 error = 0;
2473out:
2474 return error;
2475}
2476#endif
2477
2478#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2479asmlinkage long
2480sys_rt_sigaction(int sig,
2481 const struct sigaction __user *act,
2482 struct sigaction __user *oact,
2483 size_t sigsetsize)
2484{
2485 struct k_sigaction new_sa, old_sa;
2486 int ret = -EINVAL;
2487
2488
2489 if (sigsetsize != sizeof(sigset_t))
2490 goto out;
2491
2492 if (act) {
2493 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2494 return -EFAULT;
2495 }
2496
2497 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2498
2499 if (!ret && oact) {
2500 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2501 return -EFAULT;
2502 }
2503out:
2504 return ret;
2505}
2506#endif
2507
2508#ifdef __ARCH_WANT_SYS_SGETMASK
2509
2510
2511
2512
/*
 * sys_sgetmask - legacy: return the first word of the blocked mask.
 * For backwards compatibility; superseded by sigprocmask().
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe: reads only the caller's own mask word */
	return current->blocked.sig[0];
}
2519
2520asmlinkage long
2521sys_ssetmask(int newmask)
2522{
2523 int old;
2524
2525 spin_lock_irq(¤t->sighand->siglock);
2526 old = current->blocked.sig[0];
2527
2528 siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)|
2529 sigmask(SIGSTOP)));
2530 recalc_sigpending();
2531 spin_unlock_irq(¤t->sighand->siglock);
2532
2533 return old;
2534}
2535#endif
2536
2537#ifdef __ARCH_WANT_SYS_SIGNAL
2538
2539
2540
2541asmlinkage unsigned long
2542sys_signal(int sig, __sighandler_t handler)
2543{
2544 struct k_sigaction new_sa, old_sa;
2545 int ret;
2546
2547 new_sa.sa.sa_handler = handler;
2548 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2549 sigemptyset(&new_sa.sa.sa_mask);
2550
2551 ret = do_sigaction(sig, &new_sa, &old_sa);
2552
2553 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2554}
2555#endif
2556
2557#ifdef __ARCH_WANT_SYS_PAUSE
2558
/*
 * sys_pause - sleep until any signal is delivered.
 * Always returns -ERESTARTNOHAND so the call is never restarted after
 * a handler runs; user space then sees pause() fail with EINTR.
 */
asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
2566
2567#endif
2568
2569#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2570asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2571{
2572 sigset_t newset;
2573
2574
2575 if (sigsetsize != sizeof(sigset_t))
2576 return -EINVAL;
2577
2578 if (copy_from_user(&newset, unewset, sizeof(newset)))
2579 return -EFAULT;
2580 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2581
2582 spin_lock_irq(¤t->sighand->siglock);
2583 current->saved_sigmask = current->blocked;
2584 current->blocked = newset;
2585 recalc_sigpending();
2586 spin_unlock_irq(¤t->sighand->siglock);
2587
2588 current->state = TASK_INTERRUPTIBLE;
2589 schedule();
2590 set_restore_sigmask();
2591 return -ERESTARTNOHAND;
2592}
2593#endif
2594
/*
 * Weak default for arch_vma_name(): architectures that name special
 * mappings override this; generically no VMA has a special name.
 */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
2599
/*
 * Boot-time setup: create the slab cache backing struct sigqueue
 * allocations.  SLAB_PANIC: boot cannot proceed without it.
 */
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}