/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a chocolate flavour." :)
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
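
/*
 * Priority Inheritance state:
 */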
struct futex_pi_state {
        /*
         * list of 'owned' pi_state instances - these have to be
         * cleaned up in do_exit() if the task exits prematurely:
         */
        struct list_head list;

        /*
         * The PI object:
         */
        struct rt_mutex pi_mutex;

        struct task_struct *owner;
        atomic_t refcount;

        union futex_key key;
};
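
/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiters, then make the second condition true.
 */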
struct futex_q {
        struct plist_node list;
        wait_queue_head_t waiters;

        /* Which hash list lock to use: */
        spinlock_t *lock_ptr;

        /* Key which the futex is hashed on: */
        union futex_key key;

        /* Optional priority-inheritance state, and our task: */
        struct futex_pi_state *pi_state;
        struct task_struct *task;

        /* Bitset for the optional wakeup match: */
        u32 bitset;
};

/*
 * Split the global futex_lock into every hash list lock.
 */
struct futex_hash_bucket {
        spinlock_t lock;
        struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * Take mm->mmap_sem when the futex is shared:
 */
static inline void futex_lock_mm(struct rw_semaphore *fshared)
{
        if (fshared)
                down_read(fshared);
}

/*
 * Release mm->mmap_sem when the futex is shared:
 */
static inline void futex_unlock_mm(struct rw_semaphore *fshared)
{
        if (fshared)
                up_read(fshared);
}

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
        u32 hash = jhash2((u32*)&key->both.word,
                          (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
                          key->both.offset);
        return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
        return (key1->both.word == key2->both.word
                && key1->both.ptr == key2->both.ptr
                && key1->both.offset == key2->both.offset);
}
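
/**
 * get_futex_key - Get parameters which are the keys for a futex.
 * @uaddr: virtual address of the futex
 * @fshared: NULL for a PROCESS_PRIVATE futex,
 *	&current->mm->mmap_sem for a PROCESS_SHARED futex
 * @key: address where result is stored.
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * fshared is NULL for PROCESS_PRIVATE futexes.
 * For other futexes, it points to &current->mm->mmap_sem and the
 * caller must have taken the reader lock, but NOT any spinlocks.
 */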
static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
                         union futex_key *key)
{
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct page *page;
        int err;

        /*
         * The futex address must be "naturally" aligned.
         */
        key->both.offset = address % PAGE_SIZE;
        if (unlikely((address % sizeof(u32)) != 0))
                return -EINVAL;
        address -= key->both.offset;

        /*
         * PROCESS_PRIVATE futexes are fast.
         * As the mm cannot disappear under us and the 'key' only needs
         * virtual addresses, we dont even have to find the underlying vma.
         * Note : We do have to check 'uaddr' is a valid user address,
         *        but access_ok() should be faster than find_vma().
         */
        if (!fshared) {
                if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
                        return -EFAULT;
                key->private.mm = mm;
                key->private.address = address;
                return 0;
        }
        /*
         * The futex is hashed differently depending on whether
         * it's in a shared or private mapping.  So check vma first.
         */
        vma = find_extend_vma(mm, address);
        if (unlikely(!vma))
                return -EFAULT;

        /*
         * Permissions.
         */
        if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
                return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;

        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.  Therefore we use
         * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
         * mappings of _writable_ handles.
         */
        if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
                key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
                key->private.mm = mm;
                key->private.address = address;
                return 0;
        }

        /*
         * Linear file mappings are also simple.
         */
        key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
        key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
        if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
                key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
                                     + vma->vm_pgoff);
                return 0;
        }

        /*
         * We could walk the page table to read the non-linear
         * pte, and get the page index without fetching the page
         * from swap.  But that's a lot of code to duplicate here
         * for a rare case, so we simply fetch the page.
         */
        err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
        if (err >= 0) {
                key->shared.pgoff =
                        page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
                put_page(page);
                return 0;
        }
        return err;
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
        if (key->both.ptr == NULL)
                return;
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                atomic_inc(&key->shared.inode->i_count);
                break;
        case FUT_OFF_MMSHARED:
                atomic_inc(&key->private.mm->mm_count);
                break;
        }
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr)
                return;
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                iput(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                mmdrop(key->private.mm);
                break;
        }
}

/*
 * Atomically cmpxchg the user space futex value with page faults
 * disabled. Returns -EFAULT (as a u32) when the access faults.
 */
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
        u32 curval;

        pagefault_disable();
        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
        pagefault_enable();

        return curval;
}

/*
 * Read the user space futex value with page faults disabled.
 */
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
        int ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}

/*
 * Fault handling.
 * if fshared is non NULL, current->mm->mmap_sem is already held
 */
static int futex_handle_fault(unsigned long address,
                              struct rw_semaphore *fshared, int attempt)
{
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        int ret = -EFAULT;

        if (attempt > 2)
                return ret;

        if (!fshared)
                down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (vma && address >= vma->vm_start &&
            (vma->vm_flags & VM_WRITE)) {
                int fault;
                fault = handle_mm_fault(mm, vma, address, 1);
                if (unlikely((fault & VM_FAULT_ERROR))) {
                        /*
                         * The fault could not be resolved:
                         * leave ret == -EFAULT.
                         */
                } else {
                        ret = 0;
                        if (fault & VM_FAULT_MAJOR)
                                current->maj_flt++;
                        else
                                current->min_flt++;
                }
        }
        if (!fshared)
                up_read(&mm->mmap_sem);
        return ret;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
        struct futex_pi_state *pi_state;

        if (likely(current->pi_state_cache))
                return 0;

        pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

        if (!pi_state)
                return -ENOMEM;

        INIT_LIST_HEAD(&pi_state->list);
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);

        current->pi_state_cache = pi_state;

        return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
        struct futex_pi_state *pi_state = current->pi_state_cache;

        WARN_ON(!pi_state);
        current->pi_state_cache = NULL;

        return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
        if (!atomic_dec_and_test(&pi_state->refcount))
                return;

        /*
         * If pi_state->owner is NULL, the owner is most probably dying
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);

                rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
        }

        if (current->pi_state_cache)
                kfree(pi_state);
        else {
                /*
                 * pi_state->list is already empty.
                 * clear pi_state->owner.
                 * refcount is at 0 - put it back to 1.
                 */
                pi_state->owner = NULL;
                atomic_set(&pi_state->refcount, 1);
                current->pi_state_cache = pi_state;
        }
}

/*
 * Look up the task based on what TID userspace gave us.
 * We dont trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
        struct task_struct *p;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p || ((current->euid != p->euid) && (current->euid != p->uid)))
                p = ERR_PTR(-ESRCH);
        else
                get_task_struct(p);

        rcu_read_unlock();

        return p;
}
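
/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (robust-futex cleanup is separate and might save the day for userspace.)
 */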
void exit_pi_state_list(struct task_struct *curr)
{
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
        union futex_key key;

        if (!futex_cmpxchg_enabled)
                return;

        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
         * versus waiters unqueueing themselves:
         */
        spin_lock_irq(&curr->pi_lock);
        while (!list_empty(head)) {

                next = head->next;
                pi_state = list_entry(next, struct futex_pi_state, list);
                key = pi_state->key;
                hb = hash_futex(&key);
                spin_unlock_irq(&curr->pi_lock);

                spin_lock(&hb->lock);

                spin_lock_irq(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
                 */
                if (head->next != next) {
                        spin_unlock(&hb->lock);
                        continue;
                }

                WARN_ON(pi_state->owner != curr);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;
                spin_unlock_irq(&curr->pi_lock);

                rt_mutex_unlock(&pi_state->pi_mutex);

                spin_unlock(&hb->lock);

                spin_lock_irq(&curr->pi_lock);
        }
        spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                union futex_key *key, struct futex_pi_state **ps)
{
        struct futex_pi_state *pi_state = NULL;
        struct futex_q *this, *next;
        struct plist_head *head;
        struct task_struct *p;
        pid_t pid = uval & FUTEX_TID_MASK;

        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex(&this->key, key)) {
                        /*
                         * Another waiter already exists - bump up
                         * the refcount and return its pi_state:
                         */
                        pi_state = this->pi_state;
                        /*
                         * Userspace might have messed up non PI and PI futexes
                         */
                        if (unlikely(!pi_state))
                                return -EINVAL;

                        WARN_ON(!atomic_read(&pi_state->refcount));
                        WARN_ON(pid && pi_state->owner &&
                                pi_state->owner->pid != pid);

                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;

                        return 0;
                }
        }

        /*
         * We are the first waiter - try to look up the real owner and attach
         * the new pi_state to it, but bail out when TID = 0
         */
        if (!pid)
                return -ESRCH;
        p = futex_find_get_task(pid);
        if (IS_ERR(p))
                return PTR_ERR(p);

        /*
         * We need to look at the task state flags to figure out,
         * whether the task is exiting. To protect against the do_exit
         * change of the task flags, we do this protected by
         * p->pi_lock:
         */
        spin_lock_irq(&p->pi_lock);
        if (unlikely(p->flags & PF_EXITING)) {
                /*
                 * The task is on the way out. When PF_EXITPIDONE is
                 * set, we know that the task has finished the
                 * cleanup:
                 */
                int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

                spin_unlock_irq(&p->pi_lock);
                put_task_struct(p);
                return ret;
        }

        pi_state = alloc_pi_state();

        /*
         * Initialize the pi_mutex in locked state and make 'p'
         * the owner of it:
         */
        rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

        /* Store the key for possible exit cleanups: */
        pi_state->key = *key;

        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
        pi_state->owner = p;
        spin_unlock_irq(&p->pi_lock);

        put_task_struct(p);

        *ps = pi_state;

        return 0;
}
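
/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */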
static void wake_futex(struct futex_q *q)
{
        plist_del(&q->list, &q->list.plist);
        /*
         * The lock in wake_up_all() is a crucial barrier for us.
         */
        wake_up_all(&q->waiters);
        /*
         * The waiting task can free the futex_q as soon as this is written,
         * without taking any locks.  This must come last.
         *
         * A memory barrier is required here to prevent the following store
         * to lock_ptr from getting ahead of the wakeup. Clearing the lock
         * at the end of wake_up_all() does not prevent this store from
         * moving.
         */
        smp_wmb();
        q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
        struct task_struct *new_owner;
        struct futex_pi_state *pi_state = this->pi_state;
        u32 curval, newval;

        if (!pi_state)
                return -EINVAL;

        spin_lock(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

        /*
         * This happens when we have stolen the lock and the original
         * pending owner did not enqueue itself back on the rt_mutex.
         * Thats not a tragedy. We know that way, that a lock waiter
         * is on the fly. We make the futex_q waiter the pending owner.
         */
        if (!new_owner)
                new_owner = this->task;

        /*
         * We pass it to the next owner. (The WAITERS bit is always
         * kept enabled while there is PI state around. We must also
         * preserve the owner died bit.)
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
                int ret = 0;

                newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

                if (curval == -EFAULT)
                        ret = -EFAULT;
                else if (curval != uval)
                        ret = -EINVAL;
                if (ret) {
                        spin_unlock(&pi_state->pi_mutex.wait_lock);
                        return ret;
                }
        }

        spin_lock_irq(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        spin_unlock_irq(&pi_state->owner->pi_lock);

        spin_lock_irq(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
        spin_unlock_irq(&new_owner->pi_lock);

        spin_unlock(&pi_state->pi_mutex.wait_lock);
        rt_mutex_unlock(&pi_state->pi_mutex);

        return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
        u32 oldval;

        /*
         * There is no waiter, so we unlock the futex. The owner died
         * bit has not to be preserved here. We are the owner:
         */
        oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

        if (oldval == -EFAULT)
                return oldval;
        if (oldval != uval)
                return -EAGAIN;

        return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        if (hb1 <= hb2) {
                spin_lock(&hb1->lock);
                if (hb1 < hb2)
                        spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
        } else { /* hb1 > hb2 */
                spin_lock(&hb2->lock);
                spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
        }
}
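
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */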
static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
                      int nr_wake, u32 bitset)
{
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        struct plist_head *head;
        union futex_key key;
        int ret;

        if (!bitset)
                return -EINVAL;

        futex_lock_mm(fshared);

        ret = get_futex_key(uaddr, fshared, &key);
        if (unlikely(ret != 0))
                goto out;

        hb = hash_futex(&key);
        spin_lock(&hb->lock);
        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex (&this->key, &key)) {
                        if (this->pi_state) {
                                ret = -EINVAL;
                                break;
                        }

                        /* Check if one of the bits is set in both bitsets */
                        if (!(this->bitset & bitset))
                                continue;

                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        spin_unlock(&hb->lock);
out:
        futex_unlock_mm(fshared);
        return ret;
}
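
/*
 * Apply the operation encoded in 'op' to the value at uaddr2, wake
 * up to nr_wake waiters on uaddr1, and - if the operation result
 * satisfies the encoded condition - up to nr_wake2 waiters on uaddr2:
 */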
static int
futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
              u32 __user *uaddr2,
              int nr_wake, int nr_wake2, int op)
{
        union futex_key key1, key2;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head;
        struct futex_q *this, *next;
        int ret, op_ret, attempt = 0;

retryfull:
        futex_lock_mm(fshared);

        ret = get_futex_key(uaddr1, fshared, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, fshared, &key2);
        if (unlikely(ret != 0))
                goto out;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

retry:
        double_lock_hb(hb1, hb2);

        op_ret = futex_atomic_op_inuser(op, uaddr2);
        if (unlikely(op_ret < 0)) {
                u32 dummy;

                spin_unlock(&hb1->lock);
                if (hb1 != hb2)
                        spin_unlock(&hb2->lock);

#ifndef CONFIG_MMU
                /*
                 * we don't get EFAULT from MMU faults if we don't have an MMU,
                 * but we might get them from range checking
                 */
                ret = op_ret;
                goto out;
#endif

                if (unlikely(op_ret != -EFAULT)) {
                        ret = op_ret;
                        goto out;
                }

                /*
                 * futex_atomic_op_inuser needs to both read and write
                 * *(int __user *)uaddr2, but we can't modify it
                 * non-atomically.  Therefore, if we continue to fault
                 * after the get_user() below, we need to handle the
                 * fault ourselves, while still holding the mmap_sem.
                 */
                if (attempt++) {
                        ret = futex_handle_fault((unsigned long)uaddr2,
                                                 fshared, attempt);
                        if (ret)
                                goto out;
                        goto retry;
                }

                /*
                 * If we would have faulted, release mmap_sem,
                 * fault it in and start all over again.
                 */
                futex_unlock_mm(fshared);

                ret = get_user(dummy, uaddr2);
                if (ret)
                        return ret;

                goto retryfull;
        }

        head = &hb1->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (match_futex (&this->key, &key1)) {
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        if (op_ret > 0) {
                head = &hb2->chain;

                op_ret = 0;
                plist_for_each_entry_safe(this, next, head, list) {
                        if (match_futex (&this->key, &key2)) {
                                wake_futex(this);
                                if (++op_ret >= nr_wake2)
                                        break;
                        }
                }
                ret += op_ret;
        }

        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);
out:
        futex_unlock_mm(fshared);

        return ret;
}
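
/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */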
static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
                         u32 __user *uaddr2,
                         int nr_wake, int nr_requeue, u32 *cmpval)
{
        union futex_key key1, key2;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
        int ret, drop_count = 0;

 retry:
        futex_lock_mm(fshared);

        ret = get_futex_key(uaddr1, fshared, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, fshared, &key2);
        if (unlikely(ret != 0))
                goto out;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

        double_lock_hb(hb1, hb2);

        if (likely(cmpval != NULL)) {
                u32 curval;

                ret = get_futex_value_locked(&curval, uaddr1);

                if (unlikely(ret)) {
                        spin_unlock(&hb1->lock);
                        if (hb1 != hb2)
                                spin_unlock(&hb2->lock);

                        /*
                         * If we would have faulted, release mmap_sem, fault
                         * it in and start all over again.
                         */
                        futex_unlock_mm(fshared);

                        ret = get_user(curval, uaddr1);

                        if (!ret)
                                goto retry;

                        return ret;
                }
                if (curval != *cmpval) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
        }

        head1 = &hb1->chain;
        plist_for_each_entry_safe(this, next, head1, list) {
                if (!match_futex (&this->key, &key1))
                        continue;
                if (++ret <= nr_wake) {
                        wake_futex(this);
                } else {
                        /*
                         * If key1 and key2 hash to the same bucket, no need to
                         * requeue.
                         */
                        if (likely(head1 != &hb2->chain)) {
                                plist_del(&this->list, &hb1->chain);
                                plist_add(&this->list, &hb2->chain);
                                this->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
                                this->list.plist.lock = &hb2->lock;
#endif
                        }
                        this->key = key2;
                        get_futex_key_refs(&key2);
                        drop_count++;

                        if (ret - nr_wake >= nr_requeue)
                                break;
                }
        }

out_unlock:
        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);

        /* drop_futex_key_refs() must be called outside the spinlocks. */
        while (--drop_count >= 0)
                drop_futex_key_refs(&key1);

out:
        futex_unlock_mm(fshared);
        return ret;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
        struct futex_hash_bucket *hb;

        init_waitqueue_head(&q->waiters);

        get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
        q->lock_ptr = &hb->lock;

        spin_lock(&hb->lock);
        return hb;
}

static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
        int prio;

        /*
         * The priority used to register this element is
         * - either the real thread-priority for the real-time threads
         * (i.e. threads with a priority lower than MAX_RT_PRIO)
         * - or MAX_RT_PRIO for non-RT threads.
         * Thus, all RT-threads are woken first in priority order, and
         * the others are woken last, in FIFO order.
         */
        prio = min(current->normal_prio, MAX_RT_PRIO);

        plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
        q->list.plist.lock = &hb->lock;
#endif
        plist_add(&q->list, &hb->chain);
        q->task = current;
        spin_unlock(&hb->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
        spin_unlock(&hb->lock);
        drop_futex_key_refs(&q->key);
}
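
/*
 * unqueue_me - Remove the futex_q from its futex_hash_bucket.
 *
 * Return 1 if we were still queued (i.e. 0 means we were woken).
 */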
static int unqueue_me(struct futex_q *q)
{
        spinlock_t *lock_ptr;
        int ret = 0;

        /* In the common case we don't take the spinlock, which is nice. */
 retry:
        lock_ptr = q->lock_ptr;
        barrier();
        if (lock_ptr != NULL) {
                spin_lock(lock_ptr);
                /*
                 * q->lock_ptr can change between reading it and
                 * spin_lock(), causing us to take the wrong lock.  This
                 * corrects the race condition.
                 *
                 * Reasoning goes like this: if we have the wrong lock,
                 * q->lock_ptr must have changed (maybe several times)
                 * between reading it and the spin_lock().  It can
                 * change again after the spin_lock() but only if it was
                 * already changed before the spin_lock().  It cannot,
                 * however, change back to the original value.  Therefore
                 * we can detect whether we acquired the correct lock.
                 */
                if (unlikely(lock_ptr != q->lock_ptr)) {
                        spin_unlock(lock_ptr);
                        goto retry;
                }
                WARN_ON(plist_node_empty(&q->list));
                plist_del(&q->list, &q->list.plist);

                BUG_ON(q->pi_state);

                spin_unlock(lock_ptr);
                ret = 1;
        }

        drop_futex_key_refs(&q->key);
        return ret;
}

/*
 * PI futexes can not be requeued and must remove themself from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
        WARN_ON(plist_node_empty(&q->list));
        plist_del(&q->list, &q->list.plist);

        BUG_ON(!q->pi_state);
        free_pi_state(q->pi_state);
        q->pi_state = NULL;

        spin_unlock(q->lock_ptr);

        drop_futex_key_refs(&q->key);
}
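
/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */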
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
                                struct task_struct *newowner,
                                struct rw_semaphore *fshared)
{
        u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
        struct futex_pi_state *pi_state = q->pi_state;
        struct task_struct *oldowner = pi_state->owner;
        u32 uval, curval, newval;
        int ret, attempt = 0;

        /* Owner died? */
        if (!pi_state->owner)
                newtid |= FUTEX_OWNER_DIED;

        /*
         * We are here either because we stole the rtmutex from the
         * pending owner or we are the pending owner which failed to
         * get the rtmutex. We have to replace the pending owner TID
         * in the user space variable. This must be atomic as we have
         * to preserve the owner died bit here.
         *
         * Note: We write the user space value _before_ changing the
         * pi_state because we can fault here. Imagine swapped out
         * pages or a fork, which was running right before we acquired
         * mmap_sem, that marked all the anonymous memory readonly for
         * cow.
         *
         * Modifying pi_state _before_ the user space value would
         * leave the pi_state in an inconsistent state when we fault
         * here, because we need to drop the hash bucket lock to
         * handle the fault. This might be observed in the PID check
         * in lookup_pi_state.
         */
retry:
        if (get_futex_value_locked(&uval, uaddr))
                goto handle_fault;

        while (1) {
                newval = (uval & FUTEX_OWNER_DIED) | newtid;

                curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

                if (curval == -EFAULT)
                        goto handle_fault;
                if (curval == uval)
                        break;
                uval = curval;
        }

        /*
         * We fixed up user space. Now we need to fix the pi_state
         * itself.
         */
        if (pi_state->owner != NULL) {
                spin_lock_irq(&pi_state->owner->pi_lock);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                spin_unlock_irq(&pi_state->owner->pi_lock);
        }

        pi_state->owner = newowner;

        spin_lock_irq(&newowner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &newowner->pi_state_list);
        spin_unlock_irq(&newowner->pi_lock);
        return 0;

        /*
         * To handle the page fault we need to drop the hash bucket
         * lock here. That gives the other task (either the pending
         * owner itself or the task which stole the rtmutex) the
         * chance to try the fixup of the pi_state. So once we are
         * back from handling the fault we need to check the pi_state
         * after reacquiring the hash bucket lock and before trying to
         * do another fixup. When the fixup has been done already we
         * simply return.
         */
handle_fault:
        spin_unlock(q->lock_ptr);

        ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);

        spin_lock(q->lock_ptr);

        /*
         * Check if someone else fixed it for us:
         */
        if (pi_state->owner != oldowner)
                return 0;

        if (ret)
                return ret;

        goto retry;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode in the 'flags' shared capability
 */
#define FLAGS_SHARED 1

static long futex_wait_restart(struct restart_block *restart);

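/*
 * Wait on the futex at uaddr until woken, provided its value still
 * equals 'val' when we queue ourselves, with an optional absolute
 * timeout and a wakeup bitset:
 */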
static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
                      u32 val, ktime_t *abs_time, u32 bitset)
{
        struct task_struct *curr = current;
        DECLARE_WAITQUEUE(wait, curr);
        struct futex_hash_bucket *hb;
        struct futex_q q;
        u32 uval;
        int ret;
        struct hrtimer_sleeper t;
        int rem = 0;

        if (!bitset)
                return -EINVAL;

        q.pi_state = NULL;
        q.bitset = bitset;
 retry:
        futex_lock_mm(fshared);

        ret = get_futex_key(uaddr, fshared, &q.key);
        if (unlikely(ret != 0))
                goto out_release_sem;

        hb = queue_lock(&q);

        /*
         * Access the page AFTER the futex is queued.
         * Order is important:
         *
         *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
         *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
         *
         * The basic logical guarantee of a futex is that it blocks ONLY
         * if cond(var) is known to be true at the time of blocking, for
         * any cond.  If we queued after testing *uaddr, that would open
         * a race condition where we could block indefinitely with
         * cond(var) false, which would violate the guarantee.
         *
         * A consequence is that futex_wait() can return zero and absorb
         * a wakeup when *uaddr != val on entry to the syscall.  This is
         * rare, but normal.
         *
         * for shared futexes, we hold the mmap semaphore, so the mapping
         * cannot have changed since we looked it up in get_futex_key.
         */
        ret = get_futex_value_locked(&uval, uaddr);

        if (unlikely(ret)) {
                queue_unlock(&q, hb);

                /*
                 * If we would have faulted, release mmap_sem, fault it
                 * in and start all over again.
                 */
                futex_unlock_mm(fshared);

                ret = get_user(uval, uaddr);

                if (!ret)
                        goto retry;
                return ret;
        }
        ret = -EWOULDBLOCK;
        if (uval != val)
                goto out_unlock_release_sem;

        /* Only actually queue if *uaddr contained val. */
        queue_me(&q, hb);

        /*
         * Now the futex is queued and we have checked the data, we
         * don't want to hold mmap_sem while we sleep.
         */
        futex_unlock_mm(fshared);

        /*
         * There might have been scheduling since the queue_me(), as we
         * cannot hold a spinlock across the get_user() in case it
         * faults, and we cannot just set TASK_INTERRUPTIBLE state when
         * queueing ourselves into the futex hash.  This code thus has to
         * rely on the futex_wake() code removing us from hash when it
         * wakes us up.
         */

        /* add_wait_queue is the barrier after __set_current_state. */
        __set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&q.waiters, &wait);
        /*
         * !plist_node_empty() is safe here without any lock.
         * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
         */
        if (likely(!plist_node_empty(&q.list))) {
                if (!abs_time)
                        schedule();
                else {
                        unsigned long slack;
                        slack = current->timer_slack_ns;
                        if (rt_task(current))
                                slack = 0;
                        hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
                                              HRTIMER_MODE_ABS);
                        hrtimer_init_sleeper(&t, current);
                        hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);

                        hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
                        if (!hrtimer_active(&t.timer))
                                t.task = NULL;

                        /*
                         * the timer could have already expired, in which
                         * case current would be flagged for rescheduling.
                         * Don't bother calling schedule.
                         */
                        if (likely(t.task))
                                schedule();

                        hrtimer_cancel(&t.timer);

                        /* Flag if a timeout occured */
                        rem = (t.task == NULL);

                        destroy_hrtimer_on_stack(&t.timer);
                }
        }
        __set_current_state(TASK_RUNNING);

        /*
         * NOTE: we don't remove ourselves from the waitqueue because
         * we are the only user of it.
         */

        /* If we were woken (and unqueued), we succeeded, whatever. */
        if (!unqueue_me(&q))
                return 0;
        if (rem)
                return -ETIMEDOUT;

        /*
         * We expect signal_pending(current), but another thread may
         * have handled it for us already.
         */
        if (!abs_time)
                return -ERESTARTSYS;
        else {
                struct restart_block *restart;
                restart = &current_thread_info()->restart_block;
                restart->fn = futex_wait_restart;
                restart->futex.uaddr = (u32 *)uaddr;
                restart->futex.val = val;
                restart->futex.time = abs_time->tv64;
                restart->futex.bitset = bitset;
                restart->futex.flags = 0;

                if (fshared)
                        restart->futex.flags |= FLAGS_SHARED;
                return -ERESTART_RESTARTBLOCK;
        }

 out_unlock_release_sem:
        queue_unlock(&q, hb);

 out_release_sem:
        futex_unlock_mm(fshared);
        return ret;
}


static long futex_wait_restart(struct restart_block *restart)
{
        u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
        struct rw_semaphore *fshared = NULL;
        ktime_t t;

        t.tv64 = restart->futex.time;
        restart->fn = do_no_restart_syscall;
        if (restart->futex.flags & FLAGS_SHARED)
                fshared = &current->mm->mmap_sem;
        return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
                                restart->futex.bitset);
}
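
/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */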
static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                         int detect, ktime_t *time, int trylock)
{
        struct hrtimer_sleeper timeout, *to = NULL;
        struct task_struct *curr = current;
        struct futex_hash_bucket *hb;
        u32 uval, newval, curval;
        struct futex_q q;
        int ret, lock_taken, ownerdied = 0, attempt = 0;

        if (refill_pi_state_cache())
                return -ENOMEM;

        if (time) {
                to = &timeout;
                hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
                                      HRTIMER_MODE_ABS);
                hrtimer_init_sleeper(to, current);
                hrtimer_set_expires(&to->timer, *time);
        }

        q.pi_state = NULL;
 retry:
        futex_lock_mm(fshared);

        ret = get_futex_key(uaddr, fshared, &q.key);
        if (unlikely(ret != 0))
                goto out_release_sem;

 retry_unlocked:
        hb = queue_lock(&q);

 retry_locked:
        ret = lock_taken = 0;

        /*
         * To avoid races, we attempt to take the lock here again
         * (by doing a 0 -> TID atomic cmpxchg), while holding all
         * the locks. It will most likely not succeed.
         */
        newval = task_pid_vnr(current);

        curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

        if (unlikely(curval == -EFAULT))
                goto uaddr_faulted;

        /*
         * Detect deadlocks: the futex is already owned by
         * this task.
         */
        if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
                ret = -EDEADLK;
                goto out_unlock_release_sem;
        }

        /*
         * Surprise - we got the lock. Just return to userspace:
         */
        if (unlikely(!curval))
                goto out_unlock_release_sem;

        uval = curval;

        /*
         * Set the WAITERS flag, so the owner will know it has someone
         * to wake at unlock time:
         */
        newval = curval | FUTEX_WAITERS;

        /*
         * There are two cases, where a futex might have no owner (the
         * owner TID is 0): OWNER_DIED. We take over the futex in this
         * case. We also do an unconditional take over, when the owner
         * of the futex died.
         *
         * This is safe as we are protected by the hash bucket lock !
         */
        if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
                /* Keep the OWNER_DIED bit */
                newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
                ownerdied = 0;
                lock_taken = 1;
        }

        curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

        if (unlikely(curval == -EFAULT))
                goto uaddr_faulted;
        if (unlikely(curval != uval))
                goto retry_locked;

        /*
         * We took the lock due to owner died take over.
         */
        if (unlikely(lock_taken))
                goto out_unlock_release_sem;

        /*
         * We dont have the lock. Look up the PI state (or create it if
         * we are the first waiter):
         */
        ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);

        if (unlikely(ret)) {
                switch (ret) {

                case -EAGAIN:
                        /*
                         * Task is exiting and we just wait for the
                         * exit to complete.
                         */
                        queue_unlock(&q, hb);
                        futex_unlock_mm(fshared);
                        cond_resched();
                        goto retry;

                case -ESRCH:
                        /*
                         * No owner found for this futex. Check if the
                         * OWNER_DIED bit is set to figure out whether
                         * this is a robust futex or not.
                         */
                        if (get_futex_value_locked(&curval, uaddr))
                                goto uaddr_faulted;

                        /*
                         * We simply start over in case of a robust
                         * futex. The code above will take the futex
                         * and return happy.
                         */
                        if (curval & FUTEX_OWNER_DIED) {
                                ownerdied = 1;
                                goto retry_locked;
                        }
                default:
                        goto out_unlock_release_sem;
                }
        }

        /*
         * Only actually queue now that the atomic ops are done:
         */
        queue_me(&q, hb);

        /*
         * Now the futex is queued and we have checked the data, we
         * don't want to hold mmap_sem while we sleep.
         */
        futex_unlock_mm(fshared);

        WARN_ON(!q.pi_state);
        /*
         * Block on the PI mutex:
         */
        if (!trylock)
                ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
        else {
                ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
                /* Fixup the trylock return value: */
                ret = ret ? 0 : -EWOULDBLOCK;
        }

        futex_lock_mm(fshared);
        spin_lock(q.lock_ptr);

        if (!ret) {
                /*
                 * Got the lock. We might not be the anticipated owner
                 * if we did a lock-steal - fix up the PI-state in
                 * that case:
                 */
                if (q.pi_state->owner != curr)
                        ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
        } else {
                /*
                 * Catch the rare case, where the lock was released
                 * when we were on the way back before we locked the
                 * hash bucket.
                 */
                if (q.pi_state->owner == curr) {
                        /*
                         * Try to get the rt_mutex now. This might
                         * fail as some other task acquired the
                         * rt_mutex after we removed ourself from the
                         * rt_mutex waiters list.
                         */
                        if (rt_mutex_trylock(&q.pi_state->pi_mutex))
                                ret = 0;
                        else {
                                /*
                                 * pi_state is incorrect, some other
                                 * task did a lock steal and we
                                 * returned due to timeout or signal
                                 * without taking the rt_mutex. Too
                                 * late. We can access the
                                 * rt_mutex_owner without locking, as
                                 * the other task is now blocked on
                                 * the hash bucket lock. Fix the state
                                 * up.
                                 */
                                struct task_struct *owner;
                                int res;

                                owner = rt_mutex_owner(&q.pi_state->pi_mutex);
                                res = fixup_pi_state_owner(uaddr, &q, owner,
                                                           fshared);

                                /* propagate -EFAULT, if the fixup failed */
                                if (res)
                                        ret = res;
                        }
                } else {
                        /*
                         * Paranoia check. If we did not take the lock
                         * in the trylock above, then we should not be
                         * the owner of the rtmutex, neither the real
                         * nor the pending one:
                         */
                        if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
                                printk(KERN_ERR "futex_lock_pi: ret = %d "
                                       "pi-mutex: %p pi-state %p\n", ret,
                                       q.pi_state->pi_mutex.owner,
                                       q.pi_state->owner);
                }
        }

        /* Unqueue and drop the lock */
        unqueue_me_pi(&q);
        futex_unlock_mm(fshared);

        if (to)
                destroy_hrtimer_on_stack(&to->timer);
        return ret != -EINTR ? ret : -ERESTARTNOINTR;

 out_unlock_release_sem:
        queue_unlock(&q, hb);

 out_release_sem:
        futex_unlock_mm(fshared);
        if (to)
                destroy_hrtimer_on_stack(&to->timer);
        return ret;

 uaddr_faulted:
        /*
         * We have to r/w *(u32 __user *)uaddr, but we can't modify it
         * non-atomically.  Therefore, if we continue to fault after
         * the get_user() below, we need to handle the fault ourselves,
         * while still holding the mmap_sem.
         *
         * ... and hb->lock. :-) --ANK
         */
        queue_unlock(&q, hb);

        if (attempt++) {
                ret = futex_handle_fault((unsigned long)uaddr, fshared,
                                         attempt);
                if (ret)
                        goto out_release_sem;
                goto retry_unlocked;
        }

        futex_unlock_mm(fshared);

        ret = get_user(uval, uaddr);
        if (!ret && (uval != -EFAULT))
                goto retry;

        if (to)
                destroy_hrtimer_on_stack(&to->timer);
        return ret;
}
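
/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */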
static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
{
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        u32 uval;
        struct plist_head *head;
        union futex_key key;
        int ret, attempt = 0;

retry:
        if (get_user(uval, uaddr))
                return -EFAULT;
        /*
         * We release only a lock we actually own:
         */
        if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
                return -EPERM;
        /*
         * First take all the futex related locks:
         */
        futex_lock_mm(fshared);

        ret = get_futex_key(uaddr, fshared, &key);
        if (unlikely(ret != 0))
                goto out;

        hb = hash_futex(&key);
retry_unlocked:
        spin_lock(&hb->lock);

        /*
         * To avoid races, try to do the TID -> 0 atomic transition
         * again. If it succeeds then we can return without waking
         * anyone else up:
         */
        if (!(uval & FUTEX_OWNER_DIED))
                uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);


        if (unlikely(uval == -EFAULT))
                goto pi_faulted;
        /*
         * Rare case: we managed to release the lock atomically,
         * no need to wake anyone else up:
         */
        if (unlikely(uval == task_pid_vnr(current)))
                goto out_unlock;

        /*
         * Ok, other tasks may need to be woken up - check waiters
         * and do the wakeup if necessary:
         */
        head = &hb->chain;

        plist_for_each_entry_safe(this, next, head, list) {
                if (!match_futex (&this->key, &key))
                        continue;
                ret = wake_futex_pi(uaddr, uval, this);
                /*
                 * The atomic access to the futex value
                 * generated a pagefault, so retry the
                 * user-access and the wakeup:
                 */
                if (ret == -EFAULT)
                        goto pi_faulted;
                goto out_unlock;
        }
        /*
         * No waiters - kernel unlocks the futex:
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
                ret = unlock_futex_pi(uaddr, uval);
                if (ret == -EFAULT)
                        goto pi_faulted;
        }

out_unlock:
        spin_unlock(&hb->lock);
out:
        futex_unlock_mm(fshared);

        return ret;

pi_faulted:
        /*
         * We have to r/w *(u32 __user *)uaddr, but we can't modify it
         * non-atomically.  Therefore, if we continue to fault after
         * the get_user() below, we need to handle the fault ourselves,
         * while still holding the mmap_sem.
         *
         * ... and hb->lock. --ANK
         */
        spin_unlock(&hb->lock);

        if (attempt++) {
                ret = futex_handle_fault((unsigned long)uaddr, fshared,
                                         attempt);
                if (ret)
                        goto out;
                uval = 0;
                goto retry_unlocked;
        }

        futex_unlock_mm(fshared);

        ret = get_user(uval, uaddr);
        if (!ret && (uval != -EFAULT))
                goto retry;

        return ret;
}
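
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
 * sys_set_robust_list - set the robust-futex list head of a task
 * @head: pointer to the list-head
 * @len: length of the list-head, as userspace expects
 */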
asmlinkage long
sys_set_robust_list(struct robust_list_head __user *head,
                    size_t len)
{
        if (!futex_cmpxchg_enabled)
                return -ENOSYS;
        /*
         * The kernel knows only one size for now:
         */
        if (unlikely(len != sizeof(*head)))
                return -EINVAL;

        current->robust_list = head;

        return 0;
}

/**
 * sys_get_robust_list - get the robust-futex list head of a task
 * @pid: pid of the process [zero for current task]
 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
 * @len_ptr: pointer to a length field, the kernel fills in the header size
 */
asmlinkage long
sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
                    size_t __user *len_ptr)
{
        struct robust_list_head __user *head;
        unsigned long ret;

        if (!futex_cmpxchg_enabled)
                return -ENOSYS;

        if (!pid)
                head = current->robust_list;
        else {
                struct task_struct *p;

                ret = -ESRCH;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (!p)
                        goto err_unlock;
                ret = -EPERM;
                if ((current->euid != p->euid) && (current->euid != p->uid) &&
                    !capable(CAP_SYS_PTRACE))
                        goto err_unlock;
                head = p->robust_list;
                rcu_read_unlock();
        }

        if (put_user(sizeof(*head), len_ptr))
                return -EFAULT;
        return put_user(head, head_ptr);

err_unlock:
        rcu_read_unlock();

        return ret;
}
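
/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */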
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
        u32 uval, nval, mval;

retry:
        if (get_user(uval, uaddr))
                return -1;

        if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
                /*
                 * Ok, this dying thread is truly holding a futex
                 * of interest. Set the OWNER_DIED bit atomically
                 * via cmpxchg, and if the value had FUTEX_WAITERS
                 * set, wake up a waiter (if any). (We have to do a
                 * futex_wake() even if OWNER_DIED is already set -
                 * to handle the rare but possible case of recursive
                 * dying on a PI futex.)
                 */
                mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
                nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);

                if (nval == -EFAULT)
                        return -1;

                if (nval != uval)
                        goto retry;

                /*
                 * Wake robust non-PI futexes here. The wakeup of
                 * PI futexes happens in exit_pi_state():
                 */
                if (!pi && (uval & FUTEX_WAITERS))
                        futex_wake(uaddr, &curr->mm->mmap_sem, 1,
                                   FUTEX_BITSET_MATCH_ANY);
        }
        return 0;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
                                     struct robust_list __user * __user *head,
                                     int *pi)
{
        unsigned long uentry;

        if (get_user(uentry, (unsigned long __user *)head))
                return -EFAULT;

        *entry = (void __user *)(uentry & ~1UL);
        *pi = uentry & 1;

        return 0;
}
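
/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */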
void exit_robust_list(struct task_struct *curr)
{
        struct robust_list_head __user *head = curr->robust_list;
        struct robust_list __user *entry, *next_entry, *pending;
        unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
        unsigned long futex_offset;
        int rc;

        if (!futex_cmpxchg_enabled)
                return;

        /*
         * Fetch the list head (which was registered earlier, via
         * sys_set_robust_list()):
         */
        if (fetch_robust_entry(&entry, &head->list.next, &pi))
                return;
        /*
         * Fetch the relative futex offset:
         */
        if (get_user(futex_offset, &head->futex_offset))
                return;
        /*
         * Fetch any possibly pending lock-add first, and handle it
         * if it exists:
         */
        if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
                return;

        next_entry = NULL;      /* avoid warning with gcc */
        while (entry != &head->list) {
                /*
                 * Fetch the next entry in the list before calling
                 * handle_futex_death:
                 */
                rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
                /*
                 * A pending lock might already be on the list, so
                 * don't process it twice:
                 */
                if (entry != pending)
                        if (handle_futex_death((void __user *)entry + futex_offset,
                                               curr, pi))
                                return;
                if (rc)
                        return;
                entry = next_entry;
                pi = next_pi;
                /*
                 * Avoid excessively long or circular lists:
                 */
                if (!--limit)
                        break;

                cond_resched();
        }

        if (pending)
                handle_futex_death((void __user *)pending + futex_offset,
                                   curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
              u32 __user *uaddr2, u32 val2, u32 val3)
{
        int ret = -ENOSYS;
        int cmd = op & FUTEX_CMD_MASK;
        struct rw_semaphore *fshared = NULL;

        if (!(op & FUTEX_PRIVATE_FLAG))
                fshared = &current->mm->mmap_sem;

        switch (cmd) {
        case FUTEX_WAIT:
                val3 = FUTEX_BITSET_MATCH_ANY;
                /* fall through */
        case FUTEX_WAIT_BITSET:
                ret = futex_wait(uaddr, fshared, val, timeout, val3);
                break;
        case FUTEX_WAKE:
                val3 = FUTEX_BITSET_MATCH_ANY;
                /* fall through */
        case FUTEX_WAKE_BITSET:
                ret = futex_wake(uaddr, fshared, val, val3);
                break;
        case FUTEX_REQUEUE:
                ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
                break;
        case FUTEX_CMP_REQUEUE:
                ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
                break;
        case FUTEX_WAKE_OP:
                ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
                break;
        case FUTEX_LOCK_PI:
                if (futex_cmpxchg_enabled)
                        ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
                break;
        case FUTEX_UNLOCK_PI:
                if (futex_cmpxchg_enabled)
                        ret = futex_unlock_pi(uaddr, fshared);
                break;
        case FUTEX_TRYLOCK_PI:
                if (futex_cmpxchg_enabled)
                        ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
                break;
        default:
                ret = -ENOSYS;
        }
        return ret;
}


asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
                          struct timespec __user *utime, u32 __user *uaddr2,
                          u32 val3)
{
        struct timespec ts;
        ktime_t t, *tp = NULL;
        u32 val2 = 0;
        int cmd = op & FUTEX_CMD_MASK;

        if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
                      cmd == FUTEX_WAIT_BITSET)) {
                if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
                        return -EFAULT;
                if (!timespec_valid(&ts))
                        return -EINVAL;

                t = timespec_to_ktime(ts);
                if (cmd == FUTEX_WAIT)
                        t = ktime_add_safe(ktime_get(), t);
                tp = &t;
        }
        /*
         * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
         * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
         */
        if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
            cmd == FUTEX_WAKE_OP)
                val2 = (u32) (unsigned long) utime;

        return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}

static int __init futex_init(void)
{
        u32 curval;
        int i;

        /*
         * This will fail and we want it. Some arch implementations do
         * runtime detection of the futex_atomic_cmpxchg_inatomic()
         * operation. A functional implementation returns -EFAULT for
         * the NULL address (which always faults), a missing one
         * returns -ENOSYS. Probing with a NULL pointer thus tells us
         * whether the cmpxchg-based operations (PI and robust futexes)
         * can be enabled.
         */
        curval = cmpxchg_futex_value_locked(NULL, 0, 0);
        if (curval == -EFAULT)
                futex_cmpxchg_enabled = 1;

        for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
                plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
                spin_lock_init(&futex_queues[i].lock);
        }

        return 0;
}
__initcall(futex_init);