mm/hmm/mirror: mirror process address space on device with HMM helpers
mm/hmm.c
/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmu_notifier.h>

#ifdef CONFIG_HMM
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @sequence: we track updates to the CPU page table with a sequence number
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct *mm;
	atomic_t sequence;
	struct list_head mirrors;
	struct mmu_notifier mmu_notifier;
	struct rw_semaphore mirrors_sem;
};

/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);
	bool cleanup = false;

	/*
	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence if one already exists we can simply return it.
	 */
	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	atomic_set(&hmm->sequence, 0);
	hmm->mmu_notifier.ops = NULL;
	hmm->mm = mm;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup) {
		mmu_notifier_unregister(&hmm->mmu_notifier, mm);
		kfree(hmm);
	}

	return mm->hmm;
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	kfree(mm->hmm);
}
#endif /* CONFIG_HMM */

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static void hmm_invalidate_range(struct hmm *hmm,
				 enum hmm_update_type action,
				 unsigned long start,
				 unsigned long end)
{
	struct hmm_mirror *mirror;

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list)
		mirror->ops->sync_cpu_device_pagetables(mirror, action,
							start, end);
	up_read(&hmm->mirrors_sem);
}

static void hmm_invalidate_range_start(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long start,
				       unsigned long end)
{
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);

	atomic_inc(&hmm->sequence);
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);

	hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.invalidate_range_start = hmm_invalidate_range_start,
	.invalidate_range_end = hmm_invalidate_range_end,
};
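
/*
 * Illustrative sketch, NOT part of this file: a minimal driver-side
 * implementation of the sync_cpu_device_pagetables() callback that the
 * loop in hmm_invalidate_range() above invokes. The names struct my_device,
 * my_device_invalidate_range() and my_mirror_ops are hypothetical; only the
 * callback signature (mirror, action, start, end), HMM_UPDATE_INVALIDATE and
 * the hmm_mirror/hmm_mirror_ops types come from include/linux/hmm.h and the
 * code above.
 */
struct my_device {
	struct hmm_mirror mirror;
	/* device specific page table state would live here */
};

/* Hypothetical helper: tear down device PTEs covering [start, end). */
static void my_device_invalidate_range(struct my_device *mydev,
				       unsigned long start,
				       unsigned long end)
{
	/* Device specific invalidation (zap entries, flush device TLB, ...). */
}

static void my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					  enum hmm_update_type action,
					  unsigned long start,
					  unsigned long end)
{
	struct my_device *mydev = container_of(mirror, struct my_device, mirror);

	switch (action) {
	case HMM_UPDATE_INVALIDATE:
		/* CPU page table changed: drop the device's stale entries. */
		my_device_invalidate_range(mydev, start, end);
		break;
	default:
		break;
	}
}

static const struct hmm_mirror_ops my_mirror_ops = {
	.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
};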

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_register(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
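
/*
 * Illustrative sketch, NOT part of this file: how a driver might call
 * hmm_mirror_register(), reusing the hypothetical struct my_device and
 * my_mirror_ops from the sketch above. The function name is made up; the
 * requirement to hold mmap_sem in write mode comes from the comment above.
 */
static int my_device_mirror_mm(struct my_device *mydev, struct mm_struct *mm)
{
	int ret;

	mydev->mirror.ops = &my_mirror_ops;

	down_write(&mm->mmap_sem);
	ret = hmm_mirror_register(&mydev->mirror, mm);
	up_write(&mm->mmap_sem);

	return ret;
}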

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
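
/*
 * Illustrative sketch, NOT part of this file: tearing down the hypothetical
 * mirror registered in the sketch above. The unregister documentation above
 * states no caller locking requirement; hmm_mirror_unregister() takes
 * mirrors_sem internally.
 */
static void my_device_unmirror_mm(struct my_device *mydev)
{
	hmm_mirror_unregister(&mydev->mirror);
}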
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */