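/*
 * SN Platform GRU Driver
 *
 * Fault handler for GRU-detected TLB misses.  Misses are reported to the
 * driver either via an interrupt (gru_intr) or via a user "call OS" request
 * made after polling a CB (gru_handle_user_call_os).
 */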
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
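/*
 * Test if a physical address is within the GRU address range.
 */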
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}
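/*
 * Find the vma of a GRU segment.  The caller must hold mmap_sem.
 */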
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}
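/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the gts ts_ctxlock held
 *	- NULL if vaddr is invalid or is not a valid GSEG vaddr
 */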
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_read(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		up_read(&mm->mmap_sem);
	return gts;
}

static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_write(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (gts) {
		mutex_lock(&gts->ts_ctxlock);
		downgrade_write(&mm->mmap_sem);
	} else {
		up_write(&mm->mmap_sem);
	}

	return gts;
}
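/*
 * Unlock a gts that was previously locked with gru_find_lock_gts() or
 * gru_alloc_locked_gts().
 */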
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	up_read(&current->mm->mmap_sem);
}
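/*
 * Set a CB.istatus to active using a user virtual address.  This is done
 * just prior to a TFH RESTART so that the user does not see a stale
 * cb.istatus while the restart completes.  If the cb address is not valid,
 * the get_user()/put_user() simply fail with no bad side effects.
 */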
static void gru_cb_set_istatus_active(unsigned long __user *cb)
{
	union {
		struct gru_instruction_bits bits;
		unsigned long dw;
	} u;

	if (cb) {
		get_user(u.dw, cb);
		u.bits.istatus = CBS_ACTIVE;
		put_user(u.dw, cb);
	}
}
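/*
 * Convert an interrupt IRQ to a pointer to the GRU that caused the
 * interrupt.  A blade has several GRUs and a block of consecutive IRQs
 * starting at IRQ_GRU is assigned to them, so (irq - IRQ_GRU) identifies
 * the GRU chiplet on the local blade.
 */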
static inline struct gru_state *irq_to_gru(int irq)
{
	return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
}
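/*
 * Read and clear a TFM (TLB fault map).
 *
 * Each cpu has a private fault map that may be updated asynchronously by
 * the GRU, so set bits must be cleared with an atomic xchg.  The bits that
 * were found set are returned in *map.
 */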
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *map)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		map->fault_bits[i] = k;
	}

	/*
	 * Not functionally required: the map has already been copied above.
	 * Flushing the tfm lines now writes the freshly dirtied lines back
	 * while they are still cache resident.
	 */
	gru_flush_cache(tfm);
}
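/*
 * Atomic (interrupt context) and non-atomic (user context) functions to
 * convert a vaddr into a physical address.  The size of the page is
 * returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */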
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

	/* hugetlb pages are not supported in this path */
	if (is_vm_hugetlb_page(vma))
		return -EFAULT;
	*pageshift = PAGE_SHIFT;
	if (get_user_pages
	    (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}
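/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address.  Large pages are
 * handled only on x86_64 via the pmd_large() check below.
 *
 * NOTE: mmap_sem is already held on entry to this function.  This
 * guarantees existence of the page tables.
 */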
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pud_t *pudp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	pudp = pud_offset(pgdp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	local_irq_enable();
	return 1;
}
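/*
 * gru_try_dropin
 *
 * Attempt to drop a TLB entry into the GRU.  The fault is described by the
 * info in the TFH.
 *	Input:
 *		cb - user address of the CB; NULL if not called from user context
 *	Return:
 *		  0 - dropin, exception, or switch to UPM successful
 *		  1 - a range invalidate is active
 *		< 0 - error code
 */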
static int gru_try_dropin(struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  unsigned long __user *cb)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	int pageshift, asid, write, ret;
	unsigned long paddr, gpa, vaddr;

	/*
	 * Error if the TFH state is IDLE, or is FMM mode and the request came
	 * from a user "call OS" request (cb != NULL).  Either indicates a
	 * hardware race or a stale user request; FMM is a transient state,
	 * so that case is simply ignored.
	 */
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cb)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly.  Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto failinval;

	/*
	 * Atomic lookup is faster and usually works even when called in
	 * non-atomic context.  Fall back to the sleeping lookup only if it
	 * fails and we are running in user context (cb != NULL).
	 */
	rmb();	/* ensure the range-invalidate check precedes the pagetable walk */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &pageshift);
	if (ret) {
		if (!cb)
			goto failupm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr,
					  &pageshift))
			goto failinval;
	}
	if (is_gru_paddr(paddr))
		goto failinval;

	paddr = paddr & ~((1UL << pageshift) - 1);
	gpa = uv_soc_phys_ram_to_gpa(paddr);
	gru_cb_set_istatus_active(cb);
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	STAT(tlb_dropin);
	gru_dbg(grudev,
		"%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
		ret ? "non-atomic" : "atomic", tfh, vaddr, asid,
		pageshift, gpa);
	return 0;

failnoasid:
	/* No asid assigned to the context yet - tell the caller to retry */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cb)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	return -EAGAIN;

failupm:
	/* Atomic lookup failed in interrupt context - switch the CB to UPM */
	tfh_user_polling_mode(tfh);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on a UPM call - transient state, ignore */
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failidle:
	/* TFH was idle - no miss pending */
	gru_flush_cache(tfh);
	if (cb)
		gru_flush_cache(cb);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic and non-atomic) switch the CB to EXCEPTION state */
	tfh_exception(tfh);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active - switch to UPM only if in interrupt context */
	if (!cb)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}
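/*
 * Interrupt handler for GRU TLB-miss interrupts.  This is the handler that
 * is registered with the kernel for the GRU IRQs.
 */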
irqreturn_t gru_intr(int irq, void *dev_id)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map map;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = irq_to_gru(irq);
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
			raw_smp_processor_id(), irq);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &map);
	gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid,
		map.fault_bits[0]);

	for_each_cbr_in_tfm(cbrnum, map.fault_bits) {
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);

		/*
		 * When hardware sets a bit in the fault map, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/*
		 * This is running in interrupt context.  Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */
		if (down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gru_try_dropin(gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
		}
	}
	return IRQ_HANDLED;
}
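/*
 * Handle a dropin request made from user context ("call OS").  Wait for any
 * active range invalidates to finish, then retry the dropin until it either
 * succeeds or fails permanently.
 */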
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   unsigned long __user *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);
		ret = gru_try_dropin(gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}
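/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB.  Normally it means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */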
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	unsigned long __user *cbp;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);
	gru_dbg(grudev, "address 0x%lx\n", cb);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;
	cbp = (unsigned long *)cb;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
		goto exit;
	}

	/*
	 * If a forced unload was requested, unload the context now instead
	 * of handling the fault; the next reference will fault and assign a
	 * new context.
	 */
	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_force_unload) {
		gru_unload_context(gts, 1);
	} else if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		ret = gru_user_dropin(gts, tfh, cbp);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}
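/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */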
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru) {
		ucbnum = get_cb_number((void *)excdet.cb);
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		prefetchw(cbe);
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb,
		excdet.ecause);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}
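/*
 * User request to unload a GRU context.
 */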
int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}
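/*
 * User request to flush a range of virtual addresses from the GRU TLB.
 */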
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.vaddr + req.len);
	gru_unlock_gts(gts);

	return 0;
}
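/*
 * Record the calling task (tgid) as the owner of the GSEG slice.
 */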
int gru_set_task_slice(long address)
{
	struct gru_thread_state *gts;

	STAT(set_task_slice);
	gru_dbg(grudev, "address 0x%lx\n", address);
	gts = gru_alloc_locked_gts(address);
	if (!gts)
		return -EINVAL;

	gts->ts_tgid_owner = current->tgid;
	gru_unlock_gts(gts);

	return 0;
}