// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
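/*
 * Note that kobjsize() reports the size of the kernel allocation backing
 * each object (mm, VMAs, regions, files, fs, sighand, task), so the totals
 * below include kernel bookkeeping overhead as well as the mapped memory
 * itself.
 */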
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	struct vm_region *region;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && refcount_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	mmap_read_unlock(mm);
}

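/*
 * Return the total size, in bytes, of all the mappings in the address
 * space, summed under the mmap read lock.
 */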
unsigned long task_vsize(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long vsize = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		vsize += vma->vm_end - vma->vm_start;
	mmap_read_unlock(mm);
	return vsize;
}

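/*
 * Compute the page counts reported in /proc/<pid>/statm: text and data
 * are derived from the mm layout, while the returned resident/total
 * figure also covers the kernel objects backing the mm, its VMAs and
 * their regions.
 */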
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	struct vm_region *region;
	unsigned long size = kobjsize(mm);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	mmap_read_unlock(mm);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

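/* Does this VMA cover the initial process stack (mm->start_stack)? */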
static int is_stack(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack". It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= mm->start_stack &&
		vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "");
	} else if (mm && is_stack(vma)) {
		seq_pad(m, ' ');
		seq_puts(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
	return nommu_vma_show(m, _p);
}

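/*
 * seq_file ->start(): pin the task and mm, take the mmap read lock and
 * return the first VMA at or after *pos.
 */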
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long addr = *pos;

	/* See m_next(). Zero at the start or after lseek. */
	if (addr == -1UL)
		return NULL;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm))
		return NULL;

	if (mmap_read_lock_killable(mm)) {
		mmput(mm);
		return ERR_PTR(-EINTR);
	}

	/* start the next element from addr */
	vma = find_vma(mm, addr);
	if (vma)
		return vma;

	mmap_read_unlock(mm);
	mmput(mm);
	return NULL;
}

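/*
 * seq_file ->stop(): drop the mmap lock and mm reference if iteration
 * handed out a VMA, then release the pinned task.
 */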
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(_vml)) {
		mmap_read_unlock(priv->mm);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

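/*
 * seq_file ->next(): advance *pos past the current VMA and return the
 * following one, if any.
 */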
static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct vm_area_struct *vma = _p;

	*pos = vma->vm_end;
	return find_vma(vma->vm_mm, vma->vm_end);
}

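/* Iterator callbacks backing /proc/<pid>/maps on nommu kernels */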
static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

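/*
 * Allocate the seq_file private state and grab a reference on the target
 * mm via proc_mem_open(); the reference is dropped again in map_release().
 */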
static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

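/*
 * Drop the mm reference taken in maps_open() and release the seq_file
 * private data.
 */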
static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};