Creation of Cybook 2416 (actually Gen4) repository
This commit is contained in:
17
fs/proc/Makefile
Normal file
17
fs/proc/Makefile
Normal file
@@ -0,0 +1,17 @@
|
||||
#
# Makefile for the Linux proc filesystem routines.
#

obj-$(CONFIG_PROC_FS) += proc.o

# Default to the nommu variants; on CONFIG_MMU=y the ':=' below
# *replaces* this list with the full MMU implementations.
proc-y := nommu.o task_nommu.o
proc-$(CONFIG_MMU) := mmu.o task_mmu.o

proc-y += inode.o root.o base.o generic.o array.o \
		proc_tty.o proc_misc.o

# Optional pieces, each gated by its own config symbol.
proc-$(CONFIG_PROC_SYSCTL) += proc_sysctl.o
proc-$(CONFIG_PROC_KCORE) += kcore.o
proc-$(CONFIG_PROC_VMCORE) += vmcore.o
proc-$(CONFIG_PROC_DEVICETREE) += proc_devtree.o
proc-$(CONFIG_PRINTK) += kmsg.o
|
||||
523
fs/proc/array.c
Normal file
523
fs/proc/array.c
Normal file
@@ -0,0 +1,523 @@
|
||||
/*
|
||||
* linux/fs/proc/array.c
|
||||
*
|
||||
* Copyright (C) 1992 by Linus Torvalds
|
||||
* based on ideas by Darren Senn
|
||||
*
|
||||
* Fixes:
|
||||
* Michael. K. Johnson: stat,statm extensions.
|
||||
* <johnsonm@stolaf.edu>
|
||||
*
|
||||
* Pauline Middelink : Made cmdline,envline only break at '\0's, to
|
||||
* make sure SET_PROCTITLE works. Also removed
|
||||
* bad '!' which forced address recalculation for
|
||||
* EVERY character on the current page.
|
||||
* <middelin@polyware.iaf.nl>
|
||||
*
|
||||
* Danny ter Haar : added cpuinfo
|
||||
* <dth@cistron.nl>
|
||||
*
|
||||
* Alessandro Rubini : profile extension.
|
||||
* <rubini@ipvvis.unipv.it>
|
||||
*
|
||||
* Jeff Tranter : added BogoMips field to cpuinfo
|
||||
* <Jeff_Tranter@Mitel.COM>
|
||||
*
|
||||
* Bruno Haible : remove 4K limit for the maps file
|
||||
* <haible@ma2s2.mathematik.uni-karlsruhe.de>
|
||||
*
|
||||
* Yves Arrouye : remove removal of trailing spaces in get_array.
|
||||
* <Yves.Arrouye@marin.fdn.fr>
|
||||
*
|
||||
* Jerome Forissier : added per-CPU time information to /proc/stat
|
||||
* and /proc/<pid>/cpu extension
|
||||
* <forissier@isia.cma.fr>
|
||||
* - Incorporation and non-SMP safe operation
|
||||
* of forissier patch in 2.1.78 by
|
||||
* Hans Marcus <crowbar@concepts.nl>
|
||||
*
|
||||
* aeb@cwi.nl : /proc/partitions
|
||||
*
|
||||
*
|
||||
* Alan Cox : security fixes.
|
||||
* <Alan.Cox@linux.org>
|
||||
*
|
||||
* Al Viro : safe handling of mm_struct
|
||||
*
|
||||
* Gerhard Wichert : added BIGMEM support
|
||||
* Siemens AG <Gerhard.Wichert@pdb.siemens.de>
|
||||
*
|
||||
* Al Viro & Jeff Garzik : moved most of the thing into base.c and
|
||||
* : proc_misc.c. The rest may eventually go into
|
||||
* : base.c too.
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/times.h>
|
||||
#include <linux/cpuset.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/delayacct.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/processor.h>
|
||||
#include "internal.h"
|
||||
|
||||
/* Gcc optimizes away "strlen(x)" for constant x */
|
||||
#define ADDBUF(buffer, string) \
|
||||
do { memcpy(buffer, string, strlen(string)); \
|
||||
buffer += strlen(string); } while (0)
|
||||
|
||||
/*
 * Emit "Name:\t<comm>\n" into @buf, escaping '\\' and '\n' so a
 * crafted command name cannot break the one-line-per-field format
 * of /proc/<pid>/status.  Returns the advanced buffer pointer.
 */
static inline char * task_name(struct task_struct *p, char * buf)
{
	int i;
	char * name;
	char tcomm[sizeof(p->comm)];

	get_task_comm(tcomm, p);

	ADDBUF(buf, "Name:\t");
	name = tcomm;
	i = sizeof(tcomm);
	do {
		unsigned char c = *name;
		name++;
		i--;
		*buf = c;
		if (!c)
			break;
		if (c == '\\') {
			/* *buf already holds the '\\'; double it */
			buf[1] = c;
			buf += 2;
			continue;
		}
		if (c == '\n') {
			/* replace the raw newline with the two chars "\n" */
			buf[0] = '\\';
			buf[1] = 'n';
			buf += 2;
			continue;
		}
		buf++;
	} while (i);
	*buf = '\n';
	return buf+1;
}
|
||||
|
||||
/*
 * The task state array is a strange "bitmap" of
 * reasons to sleep. Thus "running" is zero, and
 * you can test for combinations of others with
 * simple bit tests.
 */
static const char *task_state_array[] = {
	"R (running)",		/*  0 */
	"S (sleeping)",		/*  1 */
	"D (disk sleep)",	/*  2 */
	"T (stopped)",		/*  4 */
	"T (tracing stop)",	/*  8 */
	"Z (zombie)",		/* 16 */
	"X (dead)"		/* 32 */
};

/*
 * Map the task's state/exit_state bits to the matching string above.
 */
static inline const char * get_task_state(struct task_struct *tsk)
{
	unsigned int state = (tsk->state & (TASK_RUNNING |
					    TASK_INTERRUPTIBLE |
					    TASK_UNINTERRUPTIBLE |
					    TASK_STOPPED |
					    TASK_TRACED)) |
			(tsk->exit_state & (EXIT_ZOMBIE |
					    EXIT_DEAD));
	const char **p = &task_state_array[0];

	/* index = position of the highest set bit, plus one; state == 0
	 * stays at task_state_array[0] ("R (running)") */
	while (state) {
		p++;
		state >>= 1;
	}
	return *p;
}
|
||||
|
||||
/*
 * Append the State/Tgid/Pid/PPid/TracerPid/Uid/Gid/FDSize/Groups
 * fields of /proc/<pid>/status.  Returns the advanced buffer pointer.
 */
static inline char * task_state(struct task_struct *p, char *buffer)
{
	struct group_info *group_info;
	int g;
	struct fdtable *fdt = NULL;

	/* RCU protects the real_parent/parent dereferences below */
	rcu_read_lock();
	buffer += sprintf(buffer,
		"State:\t%s\n"
		"Tgid:\t%d\n"
		"Pid:\t%d\n"
		"PPid:\t%d\n"
		"TracerPid:\t%d\n"
		"Uid:\t%d\t%d\t%d\t%d\n"
		"Gid:\t%d\t%d\t%d\t%d\n",
		get_task_state(p),
		p->tgid, p->pid,
		pid_alive(p) ? rcu_dereference(p->real_parent)->tgid : 0,
		pid_alive(p) && p->ptrace ? rcu_dereference(p->parent)->pid : 0,
		p->uid, p->euid, p->suid, p->fsuid,
		p->gid, p->egid, p->sgid, p->fsgid);

	/* p->files is protected by task_lock() */
	task_lock(p);
	if (p->files)
		fdt = files_fdtable(p->files);
	buffer += sprintf(buffer,
		"FDSize:\t%d\n"
		"Groups:\t",
		fdt ? fdt->max_fds : 0);
	rcu_read_unlock();

	/* pin the group list so it survives task_unlock() */
	group_info = p->group_info;
	get_group_info(group_info);
	task_unlock(p);

	/* only the first NGROUPS_SMALL groups are reported */
	for (g = 0; g < min(group_info->ngroups,NGROUPS_SMALL); g++)
		buffer += sprintf(buffer, "%d ", GROUP_AT(group_info,g));
	put_group_info(group_info);

	buffer += sprintf(buffer, "\n");
	return buffer;
}
|
||||
|
||||
/*
 * Print @header followed by @set rendered as a fixed-width hex string,
 * one lower-case hex digit per four signals, highest signals first.
 * Returns the advanced buffer pointer (output is NUL-terminated).
 */
static char * render_sigset_t(const char *header, sigset_t *set, char *buffer)
{
	int i, len;

	len = strlen(header);
	memcpy(buffer, header, len);
	buffer += len;

	i = _NSIG;
	do {
		int x = 0;

		/* gather the next four signals into one nibble */
		i -= 4;
		if (sigismember(set, i+1)) x |= 1;
		if (sigismember(set, i+2)) x |= 2;
		if (sigismember(set, i+3)) x |= 4;
		if (sigismember(set, i+4)) x |= 8;
		*buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
	} while (i >= 4);

	*buffer++ = '\n';
	*buffer = 0;
	return buffer;
}
|
||||
|
||||
/*
 * Scan the task's signal handler table and record every ignored
 * signal in *ign and every signal with a user handler in *catch.
 * Caller must hold the sighand lock.
 */
static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
				    sigset_t *catch)
{
	int sig;
	struct k_sigaction *ka = p->sighand->action;

	for (sig = 1; sig <= _NSIG; sig++, ka++) {
		if (ka->sa.sa_handler == SIG_IGN)
			sigaddset(ign, sig);
		else if (ka->sa.sa_handler != SIG_DFL)
			sigaddset(catch, sig);
	}
}
|
||||
|
||||
/*
 * Append the Threads/SigQ lines and the five rendered signal-set
 * lines to /proc/<pid>/status.  Returns the advanced buffer pointer.
 */
static inline char * task_sig(struct task_struct *p, char *buffer)
{
	unsigned long flags;
	sigset_t pending, shpending, blocked, ignored, caught;
	int num_threads = 0;
	unsigned long qsize = 0;
	unsigned long qlim = 0;

	sigemptyset(&pending);
	sigemptyset(&shpending);
	sigemptyset(&blocked);
	sigemptyset(&ignored);
	sigemptyset(&caught);

	/* snapshot everything under the sighand lock so all five sets
	 * are mutually consistent; defaults above apply if the task is
	 * already exiting and the lock cannot be taken */
	rcu_read_lock();
	if (lock_task_sighand(p, &flags)) {
		pending = p->pending.signal;
		shpending = p->signal->shared_pending.signal;
		blocked = p->blocked;
		collect_sigign_sigcatch(p, &ignored, &caught);
		num_threads = atomic_read(&p->signal->count);
		qsize = atomic_read(&p->user->sigpending);
		qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
		unlock_task_sighand(p, &flags);
	}
	rcu_read_unlock();

	buffer += sprintf(buffer, "Threads:\t%d\n", num_threads);
	buffer += sprintf(buffer, "SigQ:\t%lu/%lu\n", qsize, qlim);

	/* render them all */
	buffer = render_sigset_t("SigPnd:\t", &pending, buffer);
	buffer = render_sigset_t("ShdPnd:\t", &shpending, buffer);
	buffer = render_sigset_t("SigBlk:\t", &blocked, buffer);
	buffer = render_sigset_t("SigIgn:\t", &ignored, buffer);
	buffer = render_sigset_t("SigCgt:\t", &caught, buffer);

	return buffer;
}
|
||||
|
||||
static inline char *task_cap(struct task_struct *p, char *buffer)
|
||||
{
|
||||
return buffer + sprintf(buffer, "CapInh:\t%016x\n"
|
||||
"CapPrm:\t%016x\n"
|
||||
"CapEff:\t%016x\n",
|
||||
cap_t(p->cap_inheritable),
|
||||
cap_t(p->cap_permitted),
|
||||
cap_t(p->cap_effective));
|
||||
}
|
||||
|
||||
/*
 * Top-level generator for /proc/<pid>/status.  Returns the number of
 * characters written to @buffer.
 */
int proc_pid_status(struct task_struct *task, char * buffer)
{
	char * orig = buffer;
	struct mm_struct *mm = get_task_mm(task);

	buffer = task_name(task, buffer);
	buffer = task_state(task, buffer);

	/* memory stats only exist while there is an mm (absent for
	 * kernel threads and exited tasks) */
	if (mm) {
		buffer = task_mem(mm, buffer);
		mmput(mm);
	}
	buffer = task_sig(task, buffer);
	buffer = task_cap(task, buffer);
	buffer = cpuset_task_status_allowed(task, buffer);
#if defined(CONFIG_S390)
	buffer = task_show_regs(task, buffer);
#endif
	return buffer - orig;
}
|
||||
|
||||
/*
 * User time of @p in clock ticks: scale the scheduler's precise
 * sum_exec_runtime by the utime/(utime+stime) ratio obtained from the
 * tick-based accounting.
 */
static clock_t task_utime(struct task_struct *p)
{
	clock_t utime = cputime_to_clock_t(p->utime),
		total = utime + cputime_to_clock_t(p->stime);
	u64 temp;

	/*
	 * Use CFS's precise accounting:
	 */
	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);

	/* if total is zero, the un-scaled runtime is returned as-is */
	if (total) {
		temp *= utime;
		do_div(temp, total);
	}
	utime = (clock_t)temp;

	return utime;
}
|
||||
|
||||
/*
 * System time of @p in clock ticks, derived as precise total runtime
 * minus the scaled user time from task_utime().
 */
static clock_t task_stime(struct task_struct *p)
{
	/* NOTE(review): this initial tick-based value is immediately
	 * overwritten below and only serves as a fallback-looking
	 * initializer — effectively a dead store. */
	clock_t stime = cputime_to_clock_t(p->stime);

	/*
	 * Use CFS's precise accounting. (we subtract utime from
	 * the total, to make sure the total observed by userspace
	 * grows monotonically - apps rely on that):
	 */
	stime = nsec_to_clock_t(p->se.sum_exec_runtime) - task_utime(p);

	return stime;
}
|
||||
|
||||
|
||||
/*
 * Produce the single line of /proc/<pid>/stat.  With @whole != 0 the
 * CPU-time and fault counters are aggregated over the entire thread
 * group (live threads plus the group's accumulated dead-thread
 * totals); with @whole == 0 only @task's own counters are used.
 * Returns the number of characters written to @buffer.
 */
static int do_task_stat(struct task_struct *task, char * buffer, int whole)
{
	unsigned long vsize, eip, esp, wchan = ~0UL;
	long priority, nice;
	int tty_pgrp = -1, tty_nr = 0;
	sigset_t sigign, sigcatch;
	char state;
	int res;
	pid_t ppid = 0, pgid = -1, sid = -1;
	int num_threads = 0;
	struct mm_struct *mm;
	unsigned long long start_time;
	unsigned long cmin_flt = 0, cmaj_flt = 0;
	unsigned long min_flt = 0, maj_flt = 0;
	cputime_t cutime, cstime;
	clock_t utime, stime;
	unsigned long rsslim = 0;
	char tcomm[sizeof(task->comm)];
	unsigned long flags;

	state = *get_task_state(task);
	vsize = eip = esp = 0;
	mm = get_task_mm(task);
	if (mm) {
		vsize = task_vsize(mm);
		eip = KSTK_EIP(task);
		esp = KSTK_ESP(task);
	}

	get_task_comm(tcomm, task);

	sigemptyset(&sigign);
	sigemptyset(&sigcatch);
	cutime = cstime = cputime_zero;
	utime = stime = 0;

	/* everything read from the signal struct is snapshotted under
	 * the sighand lock; the defaults above apply if the task is
	 * already past exit and the lock cannot be taken */
	rcu_read_lock();
	if (lock_task_sighand(task, &flags)) {
		struct signal_struct *sig = task->signal;

		if (sig->tty) {
			tty_pgrp = pid_nr(sig->tty->pgrp);
			tty_nr = new_encode_dev(tty_devnum(sig->tty));
		}

		num_threads = atomic_read(&sig->count);
		collect_sigign_sigcatch(task, &sigign, &sigcatch);

		cmin_flt = sig->cmin_flt;
		cmaj_flt = sig->cmaj_flt;
		cutime = sig->cutime;
		cstime = sig->cstime;
		rsslim = sig->rlim[RLIMIT_RSS].rlim_cur;

		/* add up live thread stats at the group level */
		if (whole) {
			struct task_struct *t = task;
			do {
				min_flt += t->min_flt;
				maj_flt += t->maj_flt;
				utime += task_utime(t);
				stime += task_stime(t);
				t = next_thread(t);
			} while (t != task);

			/* plus what already-exited threads left behind */
			min_flt += sig->min_flt;
			maj_flt += sig->maj_flt;
			utime += cputime_to_clock_t(sig->utime);
			stime += cputime_to_clock_t(sig->stime);
		}

		sid = signal_session(sig);
		pgid = process_group(task);
		ppid = rcu_dereference(task->real_parent)->tgid;

		unlock_task_sighand(task, &flags);
	}
	rcu_read_unlock();

	/* a group-wide wchan is only meaningful when the group has a
	 * single thread */
	if (!whole || num_threads<2)
		wchan = get_wchan(task);
	if (!whole) {
		min_flt = task->min_flt;
		maj_flt = task->maj_flt;
		utime = task_utime(task);
		stime = task_stime(task);
	}

	/* scale priority and nice values from timeslices to -20..20 */
	/* to make it look like a "normal" Unix priority/nice value */
	priority = task_prio(task);
	nice = task_nice(task);

	/* Temporary variable needed for gcc-2.96 */
	/* convert timespec -> nsec*/
	start_time = (unsigned long long)task->start_time.tv_sec * NSEC_PER_SEC
				+ task->start_time.tv_nsec;
	/* convert nsec -> ticks */
	start_time = nsec_to_clock_t(start_time);

	res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu %llu\n",
		task->pid,
		tcomm,
		state,
		ppid,
		pgid,
		sid,
		tty_nr,
		tty_pgrp,
		task->flags,
		min_flt,
		cmin_flt,
		maj_flt,
		cmaj_flt,
		utime,
		stime,
		cputime_to_clock_t(cutime),
		cputime_to_clock_t(cstime),
		priority,
		nice,
		num_threads,
		start_time,
		vsize,
		mm ? get_mm_rss(mm) : 0,
		rsslim,
		mm ? mm->start_code : 0,
		mm ? mm->end_code : 0,
		mm ? mm->start_stack : 0,
		esp,
		eip,
		/* The signal information here is obsolete.
		 * It must be decimal for Linux 2.0 compatibility.
		 * Use /proc/#/status for real-time signals.
		 */
		task->pending.signal.sig[0] & 0x7fffffffUL,
		task->blocked.sig[0] & 0x7fffffffUL,
		sigign      .sig[0] & 0x7fffffffUL,
		sigcatch    .sig[0] & 0x7fffffffUL,
		wchan,
		0UL,
		0UL,
		task->exit_signal,
		task_cpu(task),
		task->rt_priority,
		task->policy,
		(unsigned long long)delayacct_blkio_ticks(task));
	if(mm)
		mmput(mm);
	return res;
}
|
||||
|
||||
/* /proc/<pid>/task/<tid>/stat: statistics for one thread only */
int proc_tid_stat(struct task_struct *task, char * buffer)
{
	return do_task_stat(task, buffer, 0);
}
|
||||
|
||||
/* /proc/<pid>/stat: statistics aggregated over the thread group */
int proc_tgid_stat(struct task_struct *task, char * buffer)
{
	return do_task_stat(task, buffer, 1);
}
|
||||
|
||||
int proc_pid_statm(struct task_struct *task, char *buffer)
|
||||
{
|
||||
int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
|
||||
struct mm_struct *mm = get_task_mm(task);
|
||||
|
||||
if (mm) {
|
||||
size = task_statm(mm, &shared, &text, &data, &resident);
|
||||
mmput(mm);
|
||||
}
|
||||
|
||||
return sprintf(buffer,"%d %d %d %d %d %d %d\n",
|
||||
size, resident, shared, text, lib, data, 0);
|
||||
}
|
||||
2480
fs/proc/base.c
Normal file
2480
fs/proc/base.c
Normal file
File diff suppressed because it is too large
Load Diff
745
fs/proc/generic.c
Normal file
745
fs/proc/generic.c
Normal file
@@ -0,0 +1,745 @@
|
||||
/*
|
||||
* proc/fs/generic.c --- generic routines for the proc-fs
|
||||
*
|
||||
* This file contains generic proc-fs routines for handling
|
||||
* directories and files.
|
||||
*
|
||||
* Copyright (C) 1991, 1992 Linus Torvalds.
|
||||
* Copyright (C) 1997 Theodore Ts'o
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/namei.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
static ssize_t proc_file_read(struct file *file, char __user *buf,
|
||||
size_t nbytes, loff_t *ppos);
|
||||
static ssize_t proc_file_write(struct file *file, const char __user *buffer,
|
||||
size_t count, loff_t *ppos);
|
||||
static loff_t proc_file_lseek(struct file *, loff_t, int);
|
||||
|
||||
DEFINE_SPINLOCK(proc_subdir_lock);
|
||||
|
||||
static int proc_match(int len, const char *name, struct proc_dir_entry *de)
|
||||
{
|
||||
if (de->namelen != len)
|
||||
return 0;
|
||||
return !memcmp(name, de->name, len);
|
||||
}
|
||||
|
||||
/*
 * Default file operations for /proc entries driven by the legacy
 * get_info/read_proc/write_proc callbacks.
 */
static const struct file_operations proc_file_operations = {
	.llseek		= proc_file_lseek,
	.read		= proc_file_read,
	.write		= proc_file_write,
};
|
||||
|
||||
/* buffer size is one page but our output routines use some slack for overruns */
|
||||
#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
|
||||
|
||||
/*
 * Read from a legacy /proc file: repeatedly ask the entry's
 * get_info()/read_proc() callback for up to PROC_BLOCK_SIZE bytes,
 * decode the callback's *start convention (documented in the long
 * comment below) and copy the result to user space.
 */
static ssize_t
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
	       loff_t *ppos)
{
	struct inode * inode = file->f_path.dentry->d_inode;
	char *page;
	ssize_t retval=0;
	int eof=0;
	ssize_t n, count;
	char *start;
	struct proc_dir_entry * dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	if (!(page = (char*) __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->get_info) {
			/* Handle old net routines */
			n = dp->get_info(page, &start, *ppos, count);
			if (n < count)
				eof = 1;
		} else if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL.  (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer.  Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data.  If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed.  This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer.  Return the number of
			 *    bytes of data placed there.  If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start.  This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)   /* end of file */
			break;
		if (n < 0) {  /* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				printk(KERN_WARNING
				       "proc_file_read: Read count exceeded\n");
			}
		} else /* start >= page */ {
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff)) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE - startoff;
			}
			if (n > count)
				n = count;
		}

		/* copy_to_user() returns the number of bytes NOT copied */
		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		/* convention 1 above: a *start below page is an offset
		 * advance, not a byte count */
		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}
|
||||
|
||||
/*
 * Write to a legacy /proc file: forward straight to the entry's
 * write_proc() callback; the file offset is ignored entirely.
 */
static ssize_t
proc_file_write(struct file *file, const char __user *buffer,
		size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct proc_dir_entry * dp;

	dp = PDE(inode);

	if (!dp->write_proc)
		return -EIO;

	/* FIXME: does this routine need ppos?  probably... */
	return dp->write_proc(file, buffer, count, dp->data);
}
|
||||
|
||||
|
||||
static loff_t
|
||||
proc_file_lseek(struct file *file, loff_t offset, int orig)
|
||||
{
|
||||
loff_t retval = -EINVAL;
|
||||
switch (orig) {
|
||||
case 1:
|
||||
offset += file->f_pos;
|
||||
/* fallthrough */
|
||||
case 0:
|
||||
if (offset < 0 || offset > MAX_NON_LFS)
|
||||
break;
|
||||
file->f_pos = retval = offset;
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
/*
 * setattr for proc entries: apply the change to the inode, then
 * mirror uid/gid/mode into the proc_dir_entry so that a later
 * re-created inode starts out with the same attributes.
 */
static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PDE(inode);
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		goto out;

	error = inode_setattr(inode, iattr);
	if (error)
		goto out;

	de->uid = inode->i_uid;
	de->gid = inode->i_gid;
	de->mode = inode->i_mode;
out:
	return error;
}
|
||||
|
||||
/*
 * getattr for proc entries: refresh i_nlink from the entry before
 * filling in the generic attributes (a directory's link count changes
 * as subdirectories come and go).
 */
static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
			struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PROC_I(inode)->pde;
	if (de && de->nlink)
		inode->i_nlink = de->nlink;

	generic_fillattr(inode, stat);
	return 0;
}
|
||||
|
||||
/*
 * Default inode operations for regular /proc files: only attribute
 * changes (chmod/chown) need special handling.
 */
static const struct inode_operations proc_file_inode_operations = {
	.setattr	= proc_notify_change,
};
|
||||
|
||||
/*
 * This function parses a name such as "tty/driver/serial", and
 * returns the struct proc_dir_entry for "/proc/tty/driver", and
 * returns "serial" in residual.
 *
 * Returns 0 on success or -ENOENT if an intermediate directory is
 * missing.  The proc_subdir_lock is held over the whole walk.
 */
static int xlate_proc_name(const char *name,
			   struct proc_dir_entry **ret, const char **residual)
{
	const char *cp = name, *next;
	struct proc_dir_entry *de;
	int len;
	int rtn = 0;

	spin_lock(&proc_subdir_lock);
	de = &proc_root;
	while (1) {
		next = strchr(cp, '/');
		if (!next)
			break;

		/* match the component [cp, next) in the current directory */
		len = next - cp;
		for (de = de->subdir; de ; de = de->next) {
			if (proc_match(len, cp, de))
				break;
		}
		if (!de) {
			rtn = -ENOENT;
			goto out;
		}
		cp += len + 1;
	}
	*residual = cp;
	*ret = de;
out:
	spin_unlock(&proc_subdir_lock);
	return rtn;
}
|
||||
|
||||
static DEFINE_IDR(proc_inum_idr);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

#define PROC_DYNAMIC_FIRST 0xF0000000UL

/*
 * Return an inode number between PROC_DYNAMIC_FIRST and
 * 0xffffffff, or zero on failure.
 */
static unsigned int get_inode_number(void)
{
	int i, inum = 0;
	int error;

retry:
	/* preload nodes for the allocation done under the lock below */
	if (idr_pre_get(&proc_inum_idr, GFP_KERNEL) == 0)
		return 0;

	spin_lock(&proc_inum_lock);
	error = idr_get_new(&proc_inum_idr, NULL, &i);
	spin_unlock(&proc_inum_lock);
	if (error == -EAGAIN)
		goto retry;	/* someone else consumed the preloaded node */
	else if (error)
		return 0;

	inum = (i & MAX_ID_MASK) + PROC_DYNAMIC_FIRST;

	/* inum will never be more than 0xf0ffffff, so no check
	 * for overflow.
	 */

	return inum;
}
|
||||
|
||||
/*
 * Give an inode number handed out by get_inode_number() back to the
 * IDR (reversing the offset/mask transformation done there).
 */
static void release_inode_number(unsigned int inum)
{
	int id = (inum - PROC_DYNAMIC_FIRST) | ~MAX_ID_MASK;

	spin_lock(&proc_inum_lock);
	idr_remove(&proc_inum_idr, id);
	spin_unlock(&proc_inum_lock);
}
|
||||
|
||||
/*
 * follow_link for proc symlinks: the target string lives in the
 * entry's ->data, so there is nothing to release afterwards (NULL
 * cookie).
 */
static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, PDE(dentry->d_inode)->data);
	return NULL;
}
|
||||
|
||||
/*
 * Inode operations for proc symlinks.
 */
static const struct inode_operations proc_link_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= proc_follow_link,
};
|
||||
|
||||
/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries. This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(struct dentry * dentry)
{
	/* always drop the dentry as soon as it becomes unused */
	return 1;
}

static struct dentry_operations proc_dentry_operations =
{
	.d_delete	= proc_delete_dentry,
};
|
||||
|
||||
/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = NULL;
	struct proc_dir_entry * de;
	int error = -ENOENT;

	lock_kernel();
	spin_lock(&proc_subdir_lock);
	de = PDE(dir);
	if (de) {
		for (de = de->subdir; de ; de = de->next) {
			if (de->namelen != dentry->d_name.len)
				continue;
			if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
				unsigned int ino = de->low_ino;

				/* drop the spinlock around proc_get_inode(),
				 * which can block */
				spin_unlock(&proc_subdir_lock);
				error = -EINVAL;
				inode = proc_get_inode(dir->i_sb, ino, de);
				spin_lock(&proc_subdir_lock);
				break;
			}
		}
	}
	spin_unlock(&proc_subdir_lock);
	unlock_kernel();

	if (inode) {
		dentry->d_op = &proc_dentry_operations;
		d_add(dentry, inode);
		return NULL;
	}
	return ERR_PTR(error);
}
|
||||
|
||||
/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
int proc_readdir(struct file * filp,
	void * dirent, filldir_t filldir)
{
	struct proc_dir_entry * de;
	unsigned int ino;
	int i;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int ret = 0;

	lock_kernel();

	ino = inode->i_ino;
	de = PDE(inode);
	if (!de) {
		ret = -EINVAL;
		goto out;
	}
	/* f_pos 0 and 1 are "." and ".."; everything above maps to the
	 * (f_pos - 2)'th entry in the subdir list */
	i = filp->f_pos;
	switch (i) {
		case 0:
			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		case 1:
			if (filldir(dirent, "..", 2, i,
				    parent_ino(filp->f_path.dentry),
				    DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		default:
			spin_lock(&proc_subdir_lock);
			de = de->subdir;
			i -= 2;
			/* skip the entries already returned */
			for (;;) {
				if (!de) {
					ret = 1;
					spin_unlock(&proc_subdir_lock);
					goto out;
				}
				if (!i)
					break;
				de = de->next;
				i--;
			}

			do {
				/* filldir passes info to user space */
				spin_unlock(&proc_subdir_lock);
				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
					    de->low_ino, de->mode >> 12) < 0)
					goto out;
				spin_lock(&proc_subdir_lock);
				filp->f_pos++;
				de = de->next;
			} while (de);
			spin_unlock(&proc_subdir_lock);
	}
	ret = 1;
out:	unlock_kernel();
	return ret;
}
|
||||
|
||||
/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
	.read			= generic_read_dir,
	.readdir		= proc_readdir,
};

/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
	.lookup		= proc_lookup,
	.getattr	= proc_getattr,
	.setattr	= proc_notify_change,
};
|
||||
|
||||
/*
 * Allocate an inode number for @dp, link it onto the head of @dir's
 * subdir list and install default file/inode operations appropriate
 * to its mode.  Returns 0 on success, -EAGAIN if no inode number was
 * available.
 */
static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
{
	unsigned int i;

	i = get_inode_number();
	if (i == 0)
		return -EAGAIN;
	dp->low_ino = i;

	spin_lock(&proc_subdir_lock);
	dp->next = dir->subdir;
	dp->parent = dir;
	dir->subdir = dp;
	spin_unlock(&proc_subdir_lock);

	if (S_ISDIR(dp->mode)) {
		if (dp->proc_iops == NULL) {
			dp->proc_fops = &proc_dir_operations;
			dp->proc_iops = &proc_dir_inode_operations;
		}
		/* the new subdirectory's ".." raises the parent's count */
		dir->nlink++;
	} else if (S_ISLNK(dp->mode)) {
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_link_inode_operations;
	} else if (S_ISREG(dp->mode)) {
		if (dp->proc_fops == NULL)
			dp->proc_fops = &proc_file_operations;
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_file_inode_operations;
	}
	return 0;
}
|
||||
|
||||
/*
 * Kill an inode that got unregistered..
 */
static void proc_kill_inodes(struct proc_dir_entry *de)
{
	struct list_head *p;
	struct super_block *sb = proc_mnt->mnt_sb;

	/*
	 * Actually it's a partial revoke().
	 */
	file_list_lock();
	list_for_each(p, &sb->s_files) {
		struct file * filp = list_entry(p, struct file, f_u.fu_list);
		struct dentry * dentry = filp->f_path.dentry;
		struct inode * inode;
		const struct file_operations *fops;

		if (dentry->d_op != &proc_dentry_operations)
			continue;
		inode = dentry->d_inode;
		if (PDE(inode) != de)
			continue;
		/* strip f_op so further operations through this open
		 * file fail, and drop its module reference */
		fops = filp->f_op;
		filp->f_op = NULL;
		fops_put(fops);
	}
	file_list_unlock();
}
|
||||
|
||||
static struct proc_dir_entry *proc_create(struct proc_dir_entry **parent,
|
||||
const char *name,
|
||||
mode_t mode,
|
||||
nlink_t nlink)
|
||||
{
|
||||
struct proc_dir_entry *ent = NULL;
|
||||
const char *fn = name;
|
||||
int len;
|
||||
|
||||
/* make sure name is valid */
|
||||
if (!name || !strlen(name)) goto out;
|
||||
|
||||
if (!(*parent) && xlate_proc_name(name, parent, &fn) != 0)
|
||||
goto out;
|
||||
|
||||
/* At this point there must not be any '/' characters beyond *fn */
|
||||
if (strchr(fn, '/'))
|
||||
goto out;
|
||||
|
||||
len = strlen(fn);
|
||||
|
||||
ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
|
||||
if (!ent) goto out;
|
||||
|
||||
memset(ent, 0, sizeof(struct proc_dir_entry));
|
||||
memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1);
|
||||
ent->name = ((char *) ent) + sizeof(*ent);
|
||||
ent->namelen = len;
|
||||
ent->mode = mode;
|
||||
ent->nlink = nlink;
|
||||
out:
|
||||
return ent;
|
||||
}
|
||||
|
||||
struct proc_dir_entry *proc_symlink(const char *name,
|
||||
struct proc_dir_entry *parent, const char *dest)
|
||||
{
|
||||
struct proc_dir_entry *ent;
|
||||
|
||||
ent = proc_create(&parent,name,
|
||||
(S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1);
|
||||
|
||||
if (ent) {
|
||||
ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
|
||||
if (ent->data) {
|
||||
strcpy((char*)ent->data,dest);
|
||||
if (proc_register(parent, ent) < 0) {
|
||||
kfree(ent->data);
|
||||
kfree(ent);
|
||||
ent = NULL;
|
||||
}
|
||||
} else {
|
||||
kfree(ent);
|
||||
ent = NULL;
|
||||
}
|
||||
}
|
||||
return ent;
|
||||
}
|
||||
|
||||
/*
 * proc_mkdir_mode - create a /proc directory with explicit permissions.
 * @name:   directory name (may include a "dir/" prefix)
 * @mode:   permission bits; S_IFDIR is added here
 * @parent: parent directory, or NULL to resolve from @name
 *
 * Returns the new entry, or NULL if creation or registration failed.
 */
struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *de;

	de = proc_create(&parent, name, S_IFDIR | mode, 2);
	if (!de)
		return NULL;

	de->proc_fops = &proc_dir_operations;
	de->proc_iops = &proc_dir_inode_operations;

	if (proc_register(parent, de) < 0) {
		kfree(de);
		return NULL;
	}
	return de;
}
|
||||
|
||||
/*
 * proc_mkdir - create a /proc directory with the default r-xr-xr-x mode.
 * @name:   directory name (may include a "dir/" prefix)
 * @parent: parent directory, or NULL to resolve from @name
 *
 * Thin wrapper around proc_mkdir_mode().
 */
struct proc_dir_entry *proc_mkdir(const char *name,
		struct proc_dir_entry *parent)
{
	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
|
||||
|
||||
/*
 * create_proc_entry - create and register a generic /proc entry.
 * @name:   entry name (may include a "dir/" prefix)
 * @mode:   file type and permission bits; sensible defaults are
 *          filled in when the caller leaves them zero
 * @parent: parent directory, or NULL to resolve from @name
 *
 * Directories get mode r-xr-xr-x and nlink 2 by default; anything
 * else defaults to a regular file with mode r--r--r-- and nlink 1.
 * Returns the registered entry, or NULL on failure.
 */
struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
					 struct proc_dir_entry *parent)
{
	struct proc_dir_entry *de;
	nlink_t links;

	if (S_ISDIR(mode)) {
		if (!(mode & S_IALLUGO))
			mode |= S_IRUGO | S_IXUGO;
		links = 2;
	} else {
		if (!(mode & S_IFMT))
			mode |= S_IFREG;
		if (!(mode & S_IALLUGO))
			mode |= S_IRUGO;
		links = 1;
	}

	de = proc_create(&parent, name, mode, links);
	if (!de)
		return NULL;

	if (S_ISDIR(mode)) {
		de->proc_fops = &proc_dir_operations;
		de->proc_iops = &proc_dir_inode_operations;
	}
	if (proc_register(parent, de) < 0) {
		kfree(de);
		de = NULL;
	}
	return de;
}
|
||||
|
||||
void free_proc_entry(struct proc_dir_entry *de)
|
||||
{
|
||||
unsigned int ino = de->low_ino;
|
||||
|
||||
if (ino < PROC_DYNAMIC_FIRST)
|
||||
return;
|
||||
|
||||
release_inode_number(ino);
|
||||
|
||||
if (S_ISLNK(de->mode) && de->data)
|
||||
kfree(de->data);
|
||||
kfree(de);
|
||||
}
|
||||
|
||||
/*
 * Remove a /proc entry and free it if it's not currently in use.
 * If it is in use, we set the 'deleted' flag.
 *
 * @name may carry a "dir/" prefix when @parent is NULL; the parent is
 * then resolved via xlate_proc_name().  Only the first matching child
 * is removed.  Open files referencing the entry keep it alive: the
 * last de_put() performs the deferred free (see inode.c).
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *de;
	const char *fn = name;
	int len;

	if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
		goto out;
	len = strlen(fn);

	spin_lock(&proc_subdir_lock);
	/* walk via a pointer-to-pointer so unlinking is a single store */
	for (p = &parent->subdir; *p; p=&(*p)->next ) {
		if (!proc_match(len, fn, *p))
			continue;
		de = *p;
		*p = de->next;		/* unlink from the sibling list */
		de->next = NULL;
		if (S_ISDIR(de->mode))
			parent->nlink--;	/* undo proc_register()'s nlink++ */
		proc_kill_inodes(de);	/* revoke f_op on any open files */
		de->nlink = 0;
		WARN_ON(de->subdir);	/* children must be removed first */
		if (!atomic_read(&de->count))
			free_proc_entry(de);
		else {
			/* still referenced: defer the free to the last de_put() */
			de->deleted = 1;
			printk("remove_proc_entry: %s/%s busy, count=%d\n",
				parent->name, de->name, atomic_read(&de->count));
		}
		break;
	}
	spin_unlock(&proc_subdir_lock);
out:
	return;
}
|
||||
14
fs/proc/inode-alloc.txt
Normal file
14
fs/proc/inode-alloc.txt
Normal file
@@ -0,0 +1,14 @@
|
||||
Current inode allocations in the proc-fs (hex-numbers):
|
||||
|
||||
00000000 reserved
|
||||
00000001-00000fff static entries (goners)
|
||||
001 root-ino
|
||||
|
||||
00001000-00001fff unused
|
||||
0001xxxx-7fffxxxx pid-dir entries for pid 1-7fff
|
||||
80000000-efffffff unused
|
||||
f0000000-ffffffff dynamic entries
|
||||
|
||||
Goal:
|
||||
a) once we split the thing into several virtual filesystems we
   will get rid of magical ranges (and this file, BTW).
|
||||
218
fs/proc/inode.c
Normal file
218
fs/proc/inode.c
Normal file
@@ -0,0 +1,218 @@
|
||||
/*
|
||||
* linux/fs/proc/inode.c
|
||||
*
|
||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||
*/
|
||||
|
||||
#include <linux/time.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/limits.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/smp_lock.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
static inline struct proc_dir_entry * de_get(struct proc_dir_entry *de)
|
||||
{
|
||||
if (de)
|
||||
atomic_inc(&de->count);
|
||||
return de;
|
||||
}
|
||||
|
||||
/*
 * Decrements the use count and checks for deferred deletion.
 *
 * When the last reference drops and remove_proc_entry() has already
 * flagged the entry as deleted, the actual free happens here.  The
 * BKL serialises against concurrent de_put()/removal.
 */
static void de_put(struct proc_dir_entry *de)
{
	if (de) {
		lock_kernel();
		/* refcount underflow guard: complain instead of going negative */
		if (!atomic_read(&de->count)) {
			printk("de_put: entry %s already free!\n", de->name);
			unlock_kernel();
			return;
		}

		if (atomic_dec_and_test(&de->count)) {
			if (de->deleted) {
				/* unregistered while open: finish the free now */
				printk("de_put: deferred delete of %s\n",
					de->name);
				free_proc_entry(de);
			}
		}
		unlock_kernel();
	}
}
|
||||
|
||||
/*
 * Decrement the use count of the proc_dir_entry.
 *
 * Called when the inode is finally evicted: drops the pid reference,
 * the module reference pinned by proc_get_inode(), and the pde
 * reference taken there via de_get().
 */
static void proc_delete_inode(struct inode *inode)
{
	struct proc_dir_entry *de;

	truncate_inode_pages(&inode->i_data, 0);

	/* Stop tracking associated processes */
	put_pid(PROC_I(inode)->pid);

	/* Let go of any associated proc directory entry */
	de = PROC_I(inode)->pde;
	if (de) {
		if (de->owner)
			module_put(de->owner);
		de_put(de);
	}
	clear_inode(inode);
}
|
||||
|
||||
struct vfsmount *proc_mnt;
|
||||
|
||||
/*
 * proc inodes have no on-disk representation: "reading" one just
 * stamps all its timestamps with the current time.
 */
static void proc_read_inode(struct inode * inode)
{
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
}
|
||||
|
||||
static struct kmem_cache * proc_inode_cachep;
|
||||
|
||||
static struct inode *proc_alloc_inode(struct super_block *sb)
|
||||
{
|
||||
struct proc_inode *ei;
|
||||
struct inode *inode;
|
||||
|
||||
ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
|
||||
if (!ei)
|
||||
return NULL;
|
||||
ei->pid = NULL;
|
||||
ei->fd = 0;
|
||||
ei->op.proc_get_link = NULL;
|
||||
ei->pde = NULL;
|
||||
inode = &ei->vfs_inode;
|
||||
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
|
||||
return inode;
|
||||
}
|
||||
|
||||
/*
 * Counterpart of proc_alloc_inode(): return the containing
 * proc_inode to the slab cache.
 */
static void proc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}
|
||||
|
||||
/*
 * Slab constructor for proc_inode objects (pre-2.6.22 ctor-flag API):
 * initialise the embedded VFS inode exactly once per object, not on
 * debug-verify passes.
 */
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
	struct proc_inode *ei = (struct proc_inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&ei->vfs_inode);
}
|
||||
|
||||
/*
 * proc_init_inodecache - boot-time creation of the proc_inode slab.
 *
 * Returns 0 on success, -ENOMEM if the cache could not be created.
 */
int __init proc_init_inodecache(void)
{
	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
					      sizeof(struct proc_inode),
					      0,
					      SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					      init_once, NULL);
	return proc_inode_cachep ? 0 : -ENOMEM;
}
|
||||
|
||||
/*
 * Remount hook: procfs is always nodiratime, regardless of what the
 * caller asked for.
 */
static int proc_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}
|
||||
|
||||
/* procfs super_operations: slab-backed inode lifecycle, no real
 * backing store (simple_statfs), inodes dropped eagerly on last use. */
static const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.destroy_inode	= proc_destroy_inode,
	.read_inode	= proc_read_inode,
	.drop_inode	= generic_delete_inode,
	.delete_inode	= proc_delete_inode,
	.statfs		= simple_statfs,
	.remount_fs	= proc_remount,
};
|
||||
|
||||
/*
 * proc_get_inode - get/instantiate the inode for a proc entry.
 * @sb:  the procfs superblock
 * @ino: inode number to look up
 * @de:  backing proc_dir_entry, may be NULL
 *
 * Pins @de (refcount) and its owning module before touching the
 * inode, then copies mode/uid/gid/size/nlink and the entry's ops into
 * the inode.  Both references are released by proc_delete_inode() on
 * eviction, or immediately on the error paths below.
 *
 * Returns the inode, or NULL on failure.
 */
struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
				struct proc_dir_entry *de)
{
	struct inode * inode;

	/*
	 * Increment the use count so the dir entry can't disappear.
	 */
	de_get(de);

	/* instantiating an inode for an already-removed entry is a bug */
	WARN_ON(de && de->deleted);

	if (de != NULL && !try_module_get(de->owner))
		goto out_mod;

	inode = iget(sb, ino);
	if (!inode)
		goto out_ino;

	PROC_I(inode)->fd = 0;
	PROC_I(inode)->pde = de;
	if (de) {
		/* zero fields in the pde mean "keep the inode default" */
		if (de->mode) {
			inode->i_mode = de->mode;
			inode->i_uid = de->uid;
			inode->i_gid = de->gid;
		}
		if (de->size)
			inode->i_size = de->size;
		if (de->nlink)
			inode->i_nlink = de->nlink;
		if (de->proc_iops)
			inode->i_op = de->proc_iops;
		if (de->proc_fops)
			inode->i_fop = de->proc_fops;
	}

	return inode;

out_ino:
	if (de != NULL)
		module_put(de->owner);
out_mod:
	de_put(de);
	return NULL;
}
|
||||
|
||||
int proc_fill_super(struct super_block *s, void *data, int silent)
|
||||
{
|
||||
struct inode * root_inode;
|
||||
|
||||
s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
|
||||
s->s_blocksize = 1024;
|
||||
s->s_blocksize_bits = 10;
|
||||
s->s_magic = PROC_SUPER_MAGIC;
|
||||
s->s_op = &proc_sops;
|
||||
s->s_time_gran = 1;
|
||||
|
||||
root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
|
||||
if (!root_inode)
|
||||
goto out_no_root;
|
||||
root_inode->i_uid = 0;
|
||||
root_inode->i_gid = 0;
|
||||
s->s_root = d_alloc_root(root_inode);
|
||||
if (!s->s_root)
|
||||
goto out_no_root;
|
||||
return 0;
|
||||
|
||||
out_no_root:
|
||||
printk("proc_read_super: get root inode failed\n");
|
||||
iput(root_inode);
|
||||
return -ENOMEM;
|
||||
}
|
||||
MODULE_LICENSE("GPL");
|
||||
73
fs/proc/internal.h
Normal file
73
fs/proc/internal.h
Normal file
@@ -0,0 +1,73 @@
|
||||
/* internal.h: internal procfs definitions
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/proc_fs.h>
|
||||
|
||||
#ifdef CONFIG_PROC_SYSCTL
extern int proc_sys_init(void);
#else
/* Return type must match the extern declaration above (int, not void),
 * so callers compile identically with or without CONFIG_PROC_SYSCTL. */
static inline int proc_sys_init(void) { return 0; }
#endif
|
||||
|
||||
struct vmalloc_info {
|
||||
unsigned long used;
|
||||
unsigned long largest_chunk;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
|
||||
extern void get_vmalloc_info(struct vmalloc_info *vmi);
|
||||
#else
|
||||
|
||||
#define VMALLOC_TOTAL 0UL
|
||||
#define get_vmalloc_info(vmi) \
|
||||
do { \
|
||||
(vmi)->used = 0; \
|
||||
(vmi)->largest_chunk = 0; \
|
||||
} while(0)
|
||||
|
||||
extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);
|
||||
#endif
|
||||
|
||||
extern void create_seq_entry(char *name, mode_t mode, const struct file_operations *f);
extern int proc_exe_link(struct inode *, struct dentry **, struct vfsmount **);
extern int proc_tid_stat(struct task_struct *, char *);
extern int proc_tgid_stat(struct task_struct *, char *);
extern int proc_pid_status(struct task_struct *, char *);
extern int proc_pid_statm(struct task_struct *, char *);

/* duplicate declarations of these three removed */
extern const struct file_operations proc_maps_operations;
extern const struct file_operations proc_numa_maps_operations;
extern const struct file_operations proc_smaps_operations;

void free_proc_entry(struct proc_dir_entry *de);

int proc_init_inodecache(void);
|
||||
|
||||
/* Return the struct pid this proc inode is attached to (may be NULL
 * for non-pid entries). */
static inline struct pid *proc_pid(struct inode *inode)
{
	return PROC_I(inode)->pid;
}

/* Resolve the inode's pid to a task, taking a task reference; returns
 * NULL if the task is gone. Caller must put_task_struct(). */
static inline struct task_struct *get_proc_task(struct inode *inode)
{
	return get_pid_task(proc_pid(inode), PIDTYPE_PID);
}

/* File-descriptor number encoded in a /proc/<pid>/fd/<n> inode. */
static inline int proc_fd(struct inode *inode)
{
	return PROC_I(inode)->fd;
}
|
||||
402
fs/proc/kcore.c
Normal file
402
fs/proc/kcore.c
Normal file
@@ -0,0 +1,402 @@
|
||||
/*
|
||||
* fs/proc/kcore.c kernel ELF core dumper
|
||||
*
|
||||
* Modelled on fs/exec.c:aout_core_dump()
|
||||
* Jeremy Fitzhardinge <jeremy@sw.oz.au>
|
||||
* ELF version written by David Howells <David.Howells@nexor.co.uk>
|
||||
* Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
|
||||
* Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
|
||||
* Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/user.h>
|
||||
#include <linux/a.out.h>
|
||||
#include <linux/capability.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/elfcore.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#define CORE_STR "CORE"
|
||||
|
||||
static int open_kcore(struct inode * inode, struct file * filp)
|
||||
{
|
||||
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
|
||||
}
|
||||
|
||||
static ssize_t read_kcore(struct file *, char __user *, size_t, loff_t *);
|
||||
|
||||
/* file_operations for /proc/kcore: read-only ELF core image of the
 * running kernel, privileged open. */
const struct file_operations proc_kcore_operations = {
	.read		= read_kcore,
	.open		= open_kcore,
};
|
||||
|
||||
#ifndef kc_vaddr_to_offset
|
||||
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
|
||||
#endif
|
||||
#ifndef kc_offset_to_vaddr
|
||||
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
|
||||
#endif
|
||||
|
||||
/* An ELF note in memory */
|
||||
struct memelfnote
|
||||
{
|
||||
const char *name;
|
||||
int type;
|
||||
unsigned int datasz;
|
||||
void *data;
|
||||
};
|
||||
|
||||
static struct kcore_list *kclist;
|
||||
static DEFINE_RWLOCK(kclist_lock);
|
||||
|
||||
/*
 * kclist_add - register a kernel memory range for inclusion in
 * /proc/kcore.
 * @new:  caller-provided list node (never freed; there is no remove)
 * @addr: start of the range
 * @size: length in bytes
 *
 * The node is pushed on the head of the global kclist under the
 * writer side of kclist_lock.
 */
void
kclist_add(struct kcore_list *new, void *addr, size_t size)
{
	new->addr = (unsigned long)addr;
	new->size = size;

	write_lock(&kclist_lock);
	new->next = kclist;
	kclist = new;
	write_unlock(&kclist_lock);
}
|
||||
|
||||
/*
 * get_kcore_size - compute the apparent size of /proc/kcore.
 * @nphdr:      out: number of program headers (one PT_LOAD per kclist
 *              entry plus one PT_NOTE)
 * @elf_buflen: out: page-aligned size of the synthetic ELF header area
 *
 * Caller must hold kclist_lock.  Returns header area + highest
 * mapped-range end offset.
 */
static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	/* size = largest end-offset of any registered range */
	for (m=kclist; m; m=m->next) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}
	/* ELF header + phdrs + three notes (prstatus/prpsinfo/task_struct) */
	*elf_buflen =	sizeof(struct elfhdr) +
			(*nphdr + 2)*sizeof(struct elf_phdr) +
			3 * ((sizeof(struct elf_note)) +
			     roundup(sizeof(CORE_STR), 4)) +
			roundup(sizeof(struct elf_prstatus), 4) +
			roundup(sizeof(struct elf_prpsinfo), 4) +
			roundup(sizeof(struct task_struct), 4);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return size + *elf_buflen;
}
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* determine size of ELF note
|
||||
*/
|
||||
static int notesize(struct memelfnote *en)
|
||||
{
|
||||
int sz;
|
||||
|
||||
sz = sizeof(struct elf_note);
|
||||
sz += roundup((strlen(en->name) + 1), 4);
|
||||
sz += roundup(en->datasz, 4);
|
||||
|
||||
return sz;
|
||||
} /* end notesize() */
|
||||
|
||||
/*****************************************************************************/
/*
 * store a note in the header buffer
 *
 * Serialises @men as an ELF note (header, name, payload, each padded
 * to 4 bytes) at @bufp.  Returns the advanced buffer pointer; the
 * caller guarantees the buffer is large enough (see notesize()).
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

/* local helper macro: copy and advance bufp */
#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char*) roundup((unsigned long)bufp,4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */
|
||||
|
||||
/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 *
 * Layout written: ELF header, PT_NOTE phdr, one PT_LOAD phdr per
 * kclist entry, then the three notes (PRSTATUS/PRPSINFO/TASKSTRUCT).
 * @dataoff is the file offset where the memory image begins; caller
 * holds kclist_lock and guarantees the buffer is big enough
 * (get_kcore_size()).
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
	struct kcore_list *m;

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
#if defined(CONFIG_H8300)
	elf->e_flags	= ELF_FLAGS;
#else
	elf->e_flags	= 0;
#endif
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize= sizeof(struct elf_phdr);
	elf->e_phnum	= nphdr;
	elf->e_shentsize= 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;

	/* setup ELF PT_NOTE program header */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	nhdr->p_offset	= 0;	/* patched below once the offset is known */
	nhdr->p_vaddr	= 0;
	nhdr->p_paddr	= 0;
	nhdr->p_filesz	= 0;	/* accumulated as notes are stored */
	nhdr->p_memsz	= 0;
	nhdr->p_flags	= 0;
	nhdr->p_align	= 0;

	/* setup ELF PT_LOAD program header for every area */
	for (m=kclist; m; m=m->next) {
		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		phdr->p_paddr	= 0;
		phdr->p_filesz	= phdr->p_memsz	= m->size;
		phdr->p_align	= PAGE_SIZE;
	}

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status */
	notes[0].name = CORE_STR;
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(struct elf_prstatus);
	notes[0].data = &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz	= notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name	= CORE_STR;
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

	nhdr->p_filesz	+= notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name	= CORE_STR;
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= sizeof(struct task_struct);
	notes[2].data	= current;

	nhdr->p_filesz	+= notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */
|
||||
|
||||
/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 *
 * Offsets below elf_buflen are served from a freshly built synthetic
 * ELF header; beyond that the offset is translated back to a kernel
 * virtual address and copied out page by page: vmalloc ranges are
 * staged through a bounce buffer (walking vmlist to skip unmapped
 * gaps and ioremap areas), other valid addresses are copied directly,
 * and anything unmapped reads as zeroes.
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0;
	size_t size, tsz;
	size_t elf_buflen;
	int nphdr;
	unsigned long start;

	read_lock(&kclist_lock);
	proc_root_kcore->size = size = get_kcore_size(&nphdr, &elf_buflen);
	if (buflen == 0 || *fpos >= size) {
		read_unlock(&kclist_lock);
		return 0;
	}

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char * elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		/* GFP_ATOMIC: allocating under kclist_lock */
		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&kclist_lock);
			return -ENOMEM;
		}
		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
		read_unlock(&kclist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&kclist_lock);

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - elf_buflen);
	/* first chunk runs only to the next page boundary */
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	while (buflen) {
		struct kcore_list *m;

		read_lock(&kclist_lock);
		for (m=kclist; m; m=m->next) {
			if (start >= m->addr && start < (m->addr+m->size))
				break;
		}
		read_unlock(&kclist_lock);

		if (m == NULL) {
			/* offset not inside any registered range: zero-fill */
			if (clear_user(buffer, tsz))
				return -EFAULT;
		} else if ((start >= VMALLOC_START) && (start < VMALLOC_END)) {
			char * elf_buf;
			/* NOTE(review): this inner 'm' (vm_struct) shadows the
			 * outer kcore_list 'm' above — intentional but fragile */
			struct vm_struct *m;
			unsigned long curstart = start;
			unsigned long cursize = tsz;

			elf_buf = kzalloc(tsz, GFP_KERNEL);
			if (!elf_buf)
				return -ENOMEM;

			read_lock(&vmlist_lock);
			for (m=vmlist; m && cursize; m=m->next) {
				unsigned long vmstart;
				unsigned long vmsize;
				/* vm_struct size includes a guard page */
				unsigned long msize = m->size - PAGE_SIZE;

				if (((unsigned long)m->addr + msize) <
								curstart)
					continue;
				if ((unsigned long)m->addr > (curstart +
								cursize))
					break;
				/* clip the area to [curstart, curstart+cursize) */
				vmstart = (curstart < (unsigned long)m->addr ?
					(unsigned long)m->addr : curstart);
				if (((unsigned long)m->addr + msize) >
							(curstart + cursize))
					vmsize = curstart + cursize - vmstart;
				else
					vmsize = (unsigned long)m->addr +
						msize - vmstart;
				curstart = vmstart + vmsize;
				cursize -= vmsize;
				/* don't dump ioremap'd stuff! (TA) */
				if (m->flags & VM_IOREMAP)
					continue;
				memcpy(elf_buf + (vmstart - start),
						(char *)vmstart, vmsize);
			}
			read_unlock(&vmlist_lock);
			if (copy_to_user(buffer, elf_buf, tsz)) {
				kfree(elf_buf);
				return -EFAULT;
			}
			kfree(elf_buf);
		} else {
			if (kern_addr_valid(start)) {
				unsigned long n;

				n = copy_to_user(buffer, (char *)start, tsz);
				/*
				 * We cannot distingush between fault on source
				 * and fault on destination. When this happens
				 * we clear too and hope it will trigger the
				 * EFAULT again.
				 */
				if (n) {
					if (clear_user(buffer + tsz - n,
								n))
						return -EFAULT;
				}
			} else {
				/* unmapped kernel address: reads as zeroes */
				if (clear_user(buffer, tsz))
					return -EFAULT;
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		/* subsequent chunks are whole pages */
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}
|
||||
55
fs/proc/kmsg.c
Normal file
55
fs/proc/kmsg.c
Normal file
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
* linux/fs/proc/kmsg.c
|
||||
*
|
||||
* Copyright (C) 1992 by Linus Torvalds
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/fs.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
extern wait_queue_head_t log_wait;
|
||||
|
||||
extern int do_syslog(int type, char __user *bug, int count);
|
||||
|
||||
/* Open /proc/kmsg: do_syslog action 1 opens the log and performs the
 * permission check (per syslog(2) action semantics). */
static int kmsg_open(struct inode * inode, struct file * file)
{
	return do_syslog(1,NULL,0);
}
|
||||
|
||||
/* Close /proc/kmsg: do_syslog action 0 closes the log; its result is
 * deliberately ignored. */
static int kmsg_release(struct inode * inode, struct file * file)
{
	(void) do_syslog(0,NULL,0);
	return 0;
}
|
||||
|
||||
/*
 * Read from /proc/kmsg.  For non-blocking opens, first poll the number
 * of unread characters (do_syslog action 9) and bail with -EAGAIN if
 * there is nothing; otherwise action 2 blocks until data arrives and
 * copies it to @buf.
 */
static ssize_t kmsg_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
		return -EAGAIN;
	return do_syslog(2, buf, count);
}
|
||||
|
||||
/* Poll /proc/kmsg: readable whenever the log has unread characters
 * (do_syslog action 9 returns non-zero). */
static unsigned int kmsg_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &log_wait, wait);
	if (do_syslog(9, NULL, 0))
		return POLLIN | POLLRDNORM;
	return 0;
}
|
||||
|
||||
|
||||
/* file_operations for /proc/kmsg: a read/poll view of the kernel log. */
const struct file_operations proc_kmsg_operations = {
	.read		= kmsg_read,
	.poll		= kmsg_poll,
	.open		= kmsg_open,
	.release	= kmsg_release,
};
|
||||
77
fs/proc/mmu.c
Normal file
77
fs/proc/mmu.c
Normal file
@@ -0,0 +1,77 @@
|
||||
/* mmu.c: mmu memory info files
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mmzone.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/div64.h>
|
||||
#include "internal.h"
|
||||
|
||||
/*
 * get_vmalloc_info - gather usage statistics about the vmalloc arena.
 * @vmi: out: total used bytes and the largest free gap
 *
 * Walks vmlist between VMALLOC_START and VMALLOC_END, summing the
 * size of each area and tracking the largest gap between consecutive
 * areas (including the gap after the last one).
 *
 * NOTE(review): the initial !vmlist test happens before vmlist_lock
 * is taken — presumably tolerated as a benign race; confirm against
 * later upstream versions of this function.
 */
void get_vmalloc_info(struct vmalloc_info *vmi)
{
	struct vm_struct *vma;
	unsigned long free_area_size;
	unsigned long prev_end;

	vmi->used = 0;

	if (!vmlist) {
		/* empty arena: the whole range is one free chunk */
		vmi->largest_chunk = VMALLOC_TOTAL;
	}
	else {
		vmi->largest_chunk = 0;

		prev_end = VMALLOC_START;

		read_lock(&vmlist_lock);

		for (vma = vmlist; vma; vma = vma->next) {
			unsigned long addr = (unsigned long) vma->addr;

			/*
			 * Some archs keep another range for modules in vmlist
			 */
			if (addr < VMALLOC_START)
				continue;
			if (addr >= VMALLOC_END)
				break;

			vmi->used += vma->size;

			/* gap between the previous area's end and this one */
			free_area_size = addr - prev_end;
			if (vmi->largest_chunk < free_area_size)
				vmi->largest_chunk = free_area_size;

			prev_end = vma->size + addr;
		}

		/* trailing gap after the last area */
		if (VMALLOC_END - prev_end > vmi->largest_chunk)
			vmi->largest_chunk = VMALLOC_END - prev_end;

		read_unlock(&vmlist_lock);
	}
}
|
||||
144
fs/proc/nommu.c
Normal file
144
fs/proc/nommu.c
Normal file
@@ -0,0 +1,144 @@
|
||||
/* nommu.c: mmu-less memory info files
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mmzone.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/div64.h>
|
||||
#include "internal.h"
|
||||
|
||||
/*
 * display a single VMA to a sequenced file
 *
 * Emits one /proc/maps-style line: address range, permission flags,
 * file offset, device, inode, and (when file-backed) the path padded
 * into a fixed-width column.  Always returns 0.
 */
int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags, len;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	/* %n records how many characters were emitted, for column padding */
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   vma->vm_pgoff << PAGE_SHIFT,
		   MAJOR(dev), MINOR(dev), ino, &len);

	if (file) {
		/* pad the path out to a pointer-width dependent column */
		len = 25 + sizeof(void *) * 6 - len;
		if (len < 1)
			len = 1;
		seq_printf(m, "%*c", len, ' ');
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "");
	}

	seq_putc(m, '\n');
	return 0;
}
|
||||
|
||||
/*
|
||||
* display a list of all the VMAs the kernel knows about
|
||||
* - nommu kernals have a single flat list
|
||||
*/
|
||||
static int nommu_vma_list_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
|
||||
vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
|
||||
return nommu_vma_show(m, vma);
|
||||
}
|
||||
|
||||
static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
|
||||
{
|
||||
struct rb_node *_rb;
|
||||
loff_t pos = *_pos;
|
||||
void *next = NULL;
|
||||
|
||||
down_read(&nommu_vma_sem);
|
||||
|
||||
for (_rb = rb_first(&nommu_vma_tree); _rb; _rb = rb_next(_rb)) {
|
||||
if (pos == 0) {
|
||||
next = _rb;
|
||||
break;
|
||||
}
|
||||
pos--;
|
||||
}
|
||||
|
||||
return next;
|
||||
}
|
||||
|
||||
static void nommu_vma_list_stop(struct seq_file *m, void *v)
|
||||
{
|
||||
up_read(&nommu_vma_sem);
|
||||
}
|
||||
|
||||
static void *nommu_vma_list_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
{
|
||||
(*pos)++;
|
||||
return rb_next((struct rb_node *) v);
|
||||
}
|
||||
|
||||
static struct seq_operations proc_nommu_vma_list_seqop = {
|
||||
.start = nommu_vma_list_start,
|
||||
.next = nommu_vma_list_next,
|
||||
.stop = nommu_vma_list_stop,
|
||||
.show = nommu_vma_list_show
|
||||
};
|
||||
|
||||
static int proc_nommu_vma_list_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &proc_nommu_vma_list_seqop);
|
||||
}
|
||||
|
||||
static const struct file_operations proc_nommu_vma_list_operations = {
|
||||
.open = proc_nommu_vma_list_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
static int __init proc_nommu_init(void)
|
||||
{
|
||||
create_seq_entry("maps", S_IRUGO, &proc_nommu_vma_list_operations);
|
||||
return 0;
|
||||
}
|
||||
|
||||
module_init(proc_nommu_init);
|
||||
232
fs/proc/proc_devtree.c
Normal file
232
fs/proc/proc_devtree.c
Normal file
@@ -0,0 +1,232 @@
|
||||
/*
|
||||
* proc_devtree.c - handles /proc/device-tree
|
||||
*
|
||||
* Copyright 1997 Paul Mackerras
|
||||
*/
|
||||
#include <linux/errno.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#ifndef HAVE_ARCH_DEVTREE_FIXUPS
|
||||
/*
 * No-op stub: without HAVE_ARCH_DEVTREE_FIXUPS there is no per-node
 * back-pointer to its /proc entry to maintain.
 */
static inline void set_node_proc_entry(struct device_node *np,
				       struct proc_dir_entry *de) { }
|
||||
#endif
|
||||
|
||||
static struct proc_dir_entry *proc_device_tree;
|
||||
|
||||
/*
|
||||
* Supply data on a read from /proc/device-tree/node/property.
|
||||
*/
|
||||
static int property_read_proc(char *page, char **start, off_t off,
|
||||
int count, int *eof, void *data)
|
||||
{
|
||||
struct property *pp = data;
|
||||
int n;
|
||||
|
||||
if (off >= pp->length) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
n = pp->length - off;
|
||||
if (n > count)
|
||||
n = count;
|
||||
else
|
||||
*eof = 1;
|
||||
memcpy(page, pp->value + off, n);
|
||||
*start = page;
|
||||
return n;
|
||||
}
|
||||
|
||||
/*
|
||||
* For a node with a name like "gc@10", we make symlinks called "gc"
|
||||
* and "@10" to it.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Add a property to a node
|
||||
*/
|
||||
static struct proc_dir_entry *
|
||||
__proc_device_tree_add_prop(struct proc_dir_entry *de, struct property *pp,
|
||||
const char *name)
|
||||
{
|
||||
struct proc_dir_entry *ent;
|
||||
|
||||
/*
|
||||
* Unfortunately proc_register puts each new entry
|
||||
* at the beginning of the list. So we rearrange them.
|
||||
*/
|
||||
ent = create_proc_read_entry(name,
|
||||
strncmp(name, "security-", 9)
|
||||
? S_IRUGO : S_IRUSR, de,
|
||||
property_read_proc, pp);
|
||||
if (ent == NULL)
|
||||
return NULL;
|
||||
|
||||
if (!strncmp(name, "security-", 9))
|
||||
ent->size = 0; /* don't leak number of password chars */
|
||||
else
|
||||
ent->size = pp->length;
|
||||
|
||||
return ent;
|
||||
}
|
||||
|
||||
|
||||
void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop)
|
||||
{
|
||||
__proc_device_tree_add_prop(pde, prop, prop->name);
|
||||
}
|
||||
|
||||
void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
|
||||
struct property *prop)
|
||||
{
|
||||
remove_proc_entry(prop->name, pde);
|
||||
}
|
||||
|
||||
void proc_device_tree_update_prop(struct proc_dir_entry *pde,
|
||||
struct property *newprop,
|
||||
struct property *oldprop)
|
||||
{
|
||||
struct proc_dir_entry *ent;
|
||||
|
||||
for (ent = pde->subdir; ent != NULL; ent = ent->next)
|
||||
if (ent->data == oldprop)
|
||||
break;
|
||||
if (ent == NULL) {
|
||||
printk(KERN_WARNING "device-tree: property \"%s\" "
|
||||
" does not exist\n", oldprop->name);
|
||||
} else {
|
||||
ent->data = newprop;
|
||||
ent->size = newprop->length;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Various dodgy firmware might give us nodes and/or properties with
|
||||
* conflicting names. That's generally ok, except for exporting via /proc,
|
||||
* so munge names here to ensure they're unique.
|
||||
*/
|
||||
|
||||
static int duplicate_name(struct proc_dir_entry *de, const char *name)
|
||||
{
|
||||
struct proc_dir_entry *ent;
|
||||
int found = 0;
|
||||
|
||||
spin_lock(&proc_subdir_lock);
|
||||
|
||||
for (ent = de->subdir; ent != NULL; ent = ent->next) {
|
||||
if (strcmp(ent->name, name) == 0) {
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&proc_subdir_lock);
|
||||
|
||||
return found;
|
||||
}
|
||||
|
||||
static const char *fixup_name(struct device_node *np, struct proc_dir_entry *de,
|
||||
const char *name)
|
||||
{
|
||||
char *fixed_name;
|
||||
int fixup_len = strlen(name) + 2 + 1; /* name + #x + \0 */
|
||||
int i = 1, size;
|
||||
|
||||
realloc:
|
||||
fixed_name = kmalloc(fixup_len, GFP_KERNEL);
|
||||
if (fixed_name == NULL) {
|
||||
printk(KERN_ERR "device-tree: Out of memory trying to fixup "
|
||||
"name \"%s\"\n", name);
|
||||
return name;
|
||||
}
|
||||
|
||||
retry:
|
||||
size = snprintf(fixed_name, fixup_len, "%s#%d", name, i);
|
||||
size++; /* account for NULL */
|
||||
|
||||
if (size > fixup_len) {
|
||||
/* We ran out of space, free and reallocate. */
|
||||
kfree(fixed_name);
|
||||
fixup_len = size;
|
||||
goto realloc;
|
||||
}
|
||||
|
||||
if (duplicate_name(de, fixed_name)) {
|
||||
/* Multiple duplicates. Retry with a different offset. */
|
||||
i++;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
printk(KERN_WARNING "device-tree: Duplicate name in %s, "
|
||||
"renamed to \"%s\"\n", np->full_name, fixed_name);
|
||||
|
||||
return fixed_name;
|
||||
}
|
||||
|
||||
/*
|
||||
* Process a node, adding entries for its children and its properties.
|
||||
*/
|
||||
void proc_device_tree_add_node(struct device_node *np,
|
||||
struct proc_dir_entry *de)
|
||||
{
|
||||
struct property *pp;
|
||||
struct proc_dir_entry *ent;
|
||||
struct device_node *child;
|
||||
const char *p;
|
||||
|
||||
set_node_proc_entry(np, de);
|
||||
for (child = NULL; (child = of_get_next_child(np, child));) {
|
||||
/* Use everything after the last slash, or the full name */
|
||||
p = strrchr(child->full_name, '/');
|
||||
if (!p)
|
||||
p = child->full_name;
|
||||
else
|
||||
++p;
|
||||
|
||||
if (duplicate_name(de, p))
|
||||
p = fixup_name(np, de, p);
|
||||
|
||||
ent = proc_mkdir(p, de);
|
||||
if (ent == 0)
|
||||
break;
|
||||
proc_device_tree_add_node(child, ent);
|
||||
}
|
||||
of_node_put(child);
|
||||
|
||||
for (pp = np->properties; pp != 0; pp = pp->next) {
|
||||
p = pp->name;
|
||||
|
||||
if (duplicate_name(de, p))
|
||||
p = fixup_name(np, de, p);
|
||||
|
||||
ent = __proc_device_tree_add_prop(de, pp, p);
|
||||
if (ent == 0)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Called on initialization to set up the /proc/device-tree subtree
|
||||
*/
|
||||
void proc_device_tree_init(void)
|
||||
{
|
||||
struct device_node *root;
|
||||
if ( !have_of )
|
||||
return;
|
||||
proc_device_tree = proc_mkdir("device-tree", NULL);
|
||||
if (proc_device_tree == 0)
|
||||
return;
|
||||
root = of_find_node_by_path("/");
|
||||
if (root == 0) {
|
||||
printk(KERN_ERR "/proc/device-tree: can't find root\n");
|
||||
return;
|
||||
}
|
||||
proc_device_tree_add_node(root, proc_device_tree);
|
||||
of_node_put(root);
|
||||
}
|
||||
789
fs/proc/proc_misc.c
Normal file
789
fs/proc/proc_misc.c
Normal file
@@ -0,0 +1,789 @@
|
||||
/*
|
||||
* linux/fs/proc/proc_misc.c
|
||||
*
|
||||
* linux/fs/proc/array.c
|
||||
* Copyright (C) 1992 by Linus Torvalds
|
||||
* based on ideas by Darren Senn
|
||||
*
|
||||
* This used to be the part of array.c. See the rest of history and credits
|
||||
* there. I took this into a separate file and switched the thing to generic
|
||||
* proc_file_inode_operations, leaving in array.c only per-process stuff.
|
||||
* Inumbers allocation made dynamic (via create_proc_entry()). AV, May 1999.
|
||||
*
|
||||
* Changes:
|
||||
* Fulton Green : Encapsulated position metric calculations.
|
||||
* <kernel@FultonGreen.com>
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mmzone.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/times.h>
|
||||
#include <linux/profile.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/sysrq.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/crash_dump.h>
|
||||
#include <linux/pid_namespace.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/div64.h>
|
||||
#include "internal.h"
|
||||
|
||||
#define LOAD_INT(x) ((x) >> FSHIFT)
|
||||
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
|
||||
/*
|
||||
* Warning: stuff below (imported functions) assumes that its output will fit
|
||||
* into one page. For some of those functions it may be wrong. Moreover, we
|
||||
* have a way to deal with that gracefully. Right now I used straightforward
|
||||
* wrappers, but this needs further analysis wrt potential overflows.
|
||||
*/
|
||||
extern int get_hardware_list(char *);
|
||||
extern int get_stram_list(char *);
|
||||
extern int get_filesystem_list(char *);
|
||||
extern int get_exec_domain_list(char *);
|
||||
extern int get_dma_list(char *);
|
||||
extern int get_locks_status (char *, char **, off_t, int);
|
||||
|
||||
/*
 * Common epilogue for read_proc handlers that format the whole file
 * into @page: translate the caller's (@off, @count) window into the
 * chunk to hand back.  Sets *eof when this read reaches the end of the
 * @len bytes produced, points *start at the window, and returns the
 * clamped chunk length (never negative).
 */
static int proc_calc_metrics(char *page, char **start, off_t off,
			     int count, int *eof, int len)
{
	int chunk = len - off;

	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (chunk > count)
		chunk = count;
	if (chunk < 0)
		chunk = 0;
	return chunk;
}
|
||||
|
||||
/*
 * /proc/loadavg: the three fixed-point load averages from avenrun[]
 * (rounded to two decimals), runnable/total task counts, and the last
 * PID handed out in the current PID namespace.
 */
static int loadavg_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int a = avenrun[0] + (FIXED_1/200);	/* +0.005 for rounding */
	int b = avenrun[1] + (FIXED_1/200);
	int c = avenrun[2] + (FIXED_1/200);
	int len;

	len = sprintf(page, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		      LOAD_INT(a), LOAD_FRAC(a),
		      LOAD_INT(b), LOAD_FRAC(b),
		      LOAD_INT(c), LOAD_FRAC(c),
		      nr_running(), nr_threads,
		      current->nsproxy->pid_ns->last_pid);
	return proc_calc_metrics(page, start, off, count, eof, len);
}
|
||||
|
||||
/*
 * /proc/uptime: seconds since boot and idle seconds, both with two
 * decimals.  Idle time here is the init task's accumulated
 * user+system time.
 */
static int uptime_read_proc(char *page, char **start, off_t off,
			    int count, int *eof, void *data)
{
	struct timespec uptime;
	struct timespec idle;
	cputime_t idletime = cputime_add(init_task.utime, init_task.stime);
	int len;

	do_posix_clock_monotonic_gettime(&uptime);
	cputime_to_timespec(idletime, &idle);
	len = sprintf(page, "%lu.%02lu %lu.%02lu\n",
		      (unsigned long) uptime.tv_sec,
		      (uptime.tv_nsec / (NSEC_PER_SEC / 100)),
		      (unsigned long) idle.tv_sec,
		      (idle.tv_nsec / (NSEC_PER_SEC / 100)));

	return proc_calc_metrics(page, start, off, count, eof, len);
}
|
||||
|
||||
/*
 * /proc/meminfo: system-wide memory statistics in the tagged
 * "Label: <value> kB" format.  Fills @page in one shot and hands the
 * requested window back via proc_calc_metrics().
 */
static int meminfo_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	struct sysinfo i;
	int len;
	unsigned long committed;	/* address space committed system-wide */
	unsigned long allowed;		/* overcommit limit */
	struct vmalloc_info vmi;
	long cached;			/* file pages minus swap cache and buffers */

/*
 * display in kilobytes.
 */
#define K(x) ((x) << (PAGE_SHIFT - 10))
	si_meminfo(&i);
	si_swapinfo(&i);
	committed = atomic_read(&vm_committed_space);
	allowed = ((totalram_pages - hugetlb_total_pages())
		* sysctl_overcommit_ratio / 100) + total_swap_pages;

	cached = global_page_state(NR_FILE_PAGES) -
			total_swapcache_pages - i.bufferram;
	if (cached < 0)
		cached = 0;	/* clamp in case the independent counters disagree */

	get_vmalloc_info(&vmi);

	/*
	 * Tagged format, for easy grepping and expansion.
	 */
	len = sprintf(page,
		"MemTotal: %8lu kB\n"
		"MemFree: %8lu kB\n"
		"Buffers: %8lu kB\n"
		"Cached: %8lu kB\n"
		"SwapCached: %8lu kB\n"
		"Active: %8lu kB\n"
		"Inactive: %8lu kB\n"
#ifdef CONFIG_HIGHMEM
		"HighTotal: %8lu kB\n"
		"HighFree: %8lu kB\n"
		"LowTotal: %8lu kB\n"
		"LowFree: %8lu kB\n"
#endif
		"SwapTotal: %8lu kB\n"
		"SwapFree: %8lu kB\n"
		"Dirty: %8lu kB\n"
		"Writeback: %8lu kB\n"
		"AnonPages: %8lu kB\n"
		"Mapped: %8lu kB\n"
		"Slab: %8lu kB\n"
		"SReclaimable: %8lu kB\n"
		"SUnreclaim: %8lu kB\n"
		"PageTables: %8lu kB\n"
		"NFS_Unstable: %8lu kB\n"
		"Bounce: %8lu kB\n"
		"CommitLimit: %8lu kB\n"
		"Committed_AS: %8lu kB\n"
		"VmallocTotal: %8lu kB\n"
		"VmallocUsed: %8lu kB\n"
		"VmallocChunk: %8lu kB\n",
		/* arguments below pair 1:1, in order, with the lines above */
		K(i.totalram),
		K(i.freeram),
		K(i.bufferram),
		K(cached),
		K(total_swapcache_pages),
		K(global_page_state(NR_ACTIVE)),
		K(global_page_state(NR_INACTIVE)),
#ifdef CONFIG_HIGHMEM
		K(i.totalhigh),
		K(i.freehigh),
		K(i.totalram-i.totalhigh),
		K(i.freeram-i.freehigh),
#endif
		K(i.totalswap),
		K(i.freeswap),
		K(global_page_state(NR_FILE_DIRTY)),
		K(global_page_state(NR_WRITEBACK)),
		K(global_page_state(NR_ANON_PAGES)),
		K(global_page_state(NR_FILE_MAPPED)),
		K(global_page_state(NR_SLAB_RECLAIMABLE) +
				global_page_state(NR_SLAB_UNRECLAIMABLE)),
		K(global_page_state(NR_SLAB_RECLAIMABLE)),
		K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
		K(global_page_state(NR_PAGETABLE)),
		K(global_page_state(NR_UNSTABLE_NFS)),
		K(global_page_state(NR_BOUNCE)),
		K(allowed),
		K(committed),
		/* vmalloc numbers are in bytes, so shift by 10, not K() */
		(unsigned long)VMALLOC_TOTAL >> 10,
		vmi.used >> 10,
		vmi.largest_chunk >> 10
		);

	len += hugetlb_report_meminfo(page + len);

	return proc_calc_metrics(page, start, off, count, eof, len);
#undef K
}
|
||||
|
||||
extern struct seq_operations fragmentation_op;
|
||||
static int fragmentation_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
(void)inode;
|
||||
return seq_open(file, &fragmentation_op);
|
||||
}
|
||||
|
||||
static const struct file_operations fragmentation_file_operations = {
|
||||
.open = fragmentation_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
extern struct seq_operations zoneinfo_op;
|
||||
static int zoneinfo_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &zoneinfo_op);
|
||||
}
|
||||
|
||||
static const struct file_operations proc_zoneinfo_file_operations = {
|
||||
.open = zoneinfo_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
/*
 * /proc/version: the kernel banner with the current UTS namespace's
 * sysname/release/version substituted into linux_proc_banner.
 */
static int version_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int len = snprintf(page, PAGE_SIZE, linux_proc_banner,
			   utsname()->sysname,
			   utsname()->release,
			   utsname()->version);

	return proc_calc_metrics(page, start, off, count, eof, len);
}
|
||||
|
||||
|
||||
//Qisda, Asaku Chen, 2009/08/17, uboot and kernel version {
|
||||
static int qisda_kernel_version_read_proc(char *page, char **start, off_t off,
|
||||
int count, int *eof, void *data)
|
||||
{
|
||||
int len;
|
||||
|
||||
len = snprintf(page, PAGE_SIZE, qisda_kernel_proc_banner);
|
||||
return proc_calc_metrics(page, start, off, count, eof, len);
|
||||
}
|
||||
|
||||
|
||||
static int qisda_uboot_version_read_proc(char *page, char **start, off_t off,
|
||||
int count, int *eof, void *data)
|
||||
{
|
||||
int len;
|
||||
|
||||
len = snprintf(page, PAGE_SIZE, qisda_uboot_proc_banner);
|
||||
return proc_calc_metrics(page, start, off, count, eof, len);
|
||||
}
|
||||
|
||||
static int uboot_version_read_proc(char *page, char **start, off_t off,
|
||||
int count, int *eof, void *data)
|
||||
{
|
||||
int len;
|
||||
|
||||
len = snprintf(page, PAGE_SIZE, uboot_proc_banner);
|
||||
return proc_calc_metrics(page, start, off, count, eof, len);
|
||||
}
|
||||
|
||||
//Qisda, Asaku Chen, 2009/08/17, uboot and kernel version }
|
||||
|
||||
extern struct seq_operations cpuinfo_op;
|
||||
static int cpuinfo_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &cpuinfo_op);
|
||||
}
|
||||
|
||||
static const struct file_operations proc_cpuinfo_operations = {
|
||||
.open = cpuinfo_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
static int devinfo_show(struct seq_file *f, void *v)
|
||||
{
|
||||
int i = *(loff_t *) v;
|
||||
|
||||
if (i < CHRDEV_MAJOR_HASH_SIZE) {
|
||||
if (i == 0)
|
||||
seq_printf(f, "Character devices:\n");
|
||||
chrdev_show(f, i);
|
||||
}
|
||||
#ifdef CONFIG_BLOCK
|
||||
else {
|
||||
i -= CHRDEV_MAJOR_HASH_SIZE;
|
||||
if (i == 0)
|
||||
seq_printf(f, "\nBlock devices:\n");
|
||||
blkdev_show(f, i);
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *devinfo_start(struct seq_file *f, loff_t *pos)
|
||||
{
|
||||
if (*pos < (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
|
||||
return pos;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos)
|
||||
{
|
||||
(*pos)++;
|
||||
if (*pos >= (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
|
||||
return NULL;
|
||||
return pos;
|
||||
}
|
||||
|
||||
static void devinfo_stop(struct seq_file *f, void *v)
|
||||
{
|
||||
/* Nothing to do */
|
||||
}
|
||||
|
||||
static struct seq_operations devinfo_ops = {
|
||||
.start = devinfo_start,
|
||||
.next = devinfo_next,
|
||||
.stop = devinfo_stop,
|
||||
.show = devinfo_show
|
||||
};
|
||||
|
||||
static int devinfo_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
return seq_open(filp, &devinfo_ops);
|
||||
}
|
||||
|
||||
static const struct file_operations proc_devinfo_operations = {
|
||||
.open = devinfo_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
extern struct seq_operations vmstat_op;
|
||||
static int vmstat_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &vmstat_op);
|
||||
}
|
||||
static const struct file_operations proc_vmstat_file_operations = {
|
||||
.open = vmstat_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PROC_HARDWARE
|
||||
/* /proc/hardware: platform hardware list built by get_hardware_list(). */
static int hardware_read_proc(char *page, char **start, off_t off,
			      int count, int *eof, void *data)
{
	return proc_calc_metrics(page, start, off, count, eof,
				 get_hardware_list(page));
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_STRAM_PROC
|
||||
/* /proc/stram: ST-RAM usage list built by get_stram_list(). */
static int stram_read_proc(char *page, char **start, off_t off,
			   int count, int *eof, void *data)
{
	return proc_calc_metrics(page, start, off, count, eof,
				 get_stram_list(page));
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BLOCK
|
||||
extern struct seq_operations partitions_op;
|
||||
static int partitions_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &partitions_op);
|
||||
}
|
||||
static const struct file_operations proc_partitions_operations = {
|
||||
.open = partitions_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
extern struct seq_operations diskstats_op;
|
||||
static int diskstats_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &diskstats_op);
|
||||
}
|
||||
static const struct file_operations proc_diskstats_operations = {
|
||||
.open = diskstats_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
extern struct seq_operations modules_op;
|
||||
static int modules_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &modules_op);
|
||||
}
|
||||
static const struct file_operations proc_modules_operations = {
|
||||
.open = modules_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SLAB
|
||||
extern struct seq_operations slabinfo_op;
|
||||
extern ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
|
||||
static int slabinfo_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &slabinfo_op);
|
||||
}
|
||||
static const struct file_operations proc_slabinfo_operations = {
|
||||
.open = slabinfo_open,
|
||||
.read = seq_read,
|
||||
.write = slabinfo_write,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_DEBUG_SLAB_LEAK
|
||||
extern struct seq_operations slabstats_op;
|
||||
static int slabstats_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
|
||||
int ret = -ENOMEM;
|
||||
if (n) {
|
||||
ret = seq_open(file, &slabstats_op);
|
||||
if (!ret) {
|
||||
struct seq_file *m = file->private_data;
|
||||
*n = PAGE_SIZE / (2 * sizeof(unsigned long));
|
||||
m->private = n;
|
||||
n = NULL;
|
||||
}
|
||||
kfree(n);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int slabstats_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct seq_file *m = file->private_data;
|
||||
kfree(m->private);
|
||||
return seq_release(inode, file);
|
||||
}
|
||||
|
||||
static const struct file_operations proc_slabstats_operations = {
|
||||
.open = slabstats_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = slabstats_release,
|
||||
};
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
 * /proc/stat payload: aggregate and per-CPU time counters (converted to
 * clock ticks), total interrupt counts, context switches, boot time and
 * process counters.
 */
static int show_stat(struct seq_file *p, void *v)
{
	int i;
	unsigned long jif;	/* boot time, seconds since the epoch */
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	u64 sum = 0;		/* total interrupts serviced, all CPUs */

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	/* boot time: negate the wall->monotonic offset, rounding down */
	jif = - wall_to_monotonic.tv_sec;
	if (wall_to_monotonic.tv_nsec)
		--jif;

	/* accumulate over every possible (not just online) CPU */
	for_each_possible_cpu(i) {
		int j;

		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		for (j = 0 ; j < NR_IRQS ; j++)
			sum += kstat_cpu(i).irqs[j];
	}

	/* aggregate "cpu" line */
	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal));
	/* one "cpuN" line per online CPU */
	for_each_online_cpu(i) {

		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal));
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* per-IRQ counts, except on arches that skip them here */
#if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
	for (i = 0; i < NR_IRQS; i++)
		seq_printf(p, " %u", kstat_irqs(i));
#endif

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	return 0;
}
|
||||
|
||||
static int stat_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
unsigned size = 4096 * (1 + num_possible_cpus() / 32);
|
||||
char *buf;
|
||||
struct seq_file *m;
|
||||
int res;
|
||||
|
||||
/* don't ask for more than the kmalloc() max size, currently 128 KB */
|
||||
if (size > 128 * 1024)
|
||||
size = 128 * 1024;
|
||||
buf = kmalloc(size, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
res = single_open(file, show_stat, NULL);
|
||||
if (!res) {
|
||||
m = file->private_data;
|
||||
m->buf = buf;
|
||||
m->size = size;
|
||||
} else
|
||||
kfree(buf);
|
||||
return res;
|
||||
}
|
||||
static const struct file_operations proc_stat_operations = {
|
||||
.open = stat_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
/*
|
||||
* /proc/interrupts
|
||||
*/
|
||||
static void *int_seq_start(struct seq_file *f, loff_t *pos)
|
||||
{
|
||||
return (*pos <= NR_IRQS) ? pos : NULL;
|
||||
}
|
||||
|
||||
static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
|
||||
{
|
||||
(*pos)++;
|
||||
if (*pos > NR_IRQS)
|
||||
return NULL;
|
||||
return pos;
|
||||
}
|
||||
|
||||
static void int_seq_stop(struct seq_file *f, void *v)
|
||||
{
|
||||
/* Nothing to do */
|
||||
}
|
||||
|
||||
|
||||
extern int show_interrupts(struct seq_file *f, void *v); /* In arch code */
|
||||
static struct seq_operations int_seq_ops = {
|
||||
.start = int_seq_start,
|
||||
.next = int_seq_next,
|
||||
.stop = int_seq_stop,
|
||||
.show = show_interrupts
|
||||
};
|
||||
|
||||
static int interrupts_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
return seq_open(filp, &int_seq_ops);
|
||||
}
|
||||
|
||||
static const struct file_operations proc_interrupts_operations = {
|
||||
.open = interrupts_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
/* /proc/filesystems: registered filesystem types. */
static int filesystems_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	return proc_calc_metrics(page, start, off, count, eof,
				 get_filesystem_list(page));
}

/* /proc/cmdline: the kernel boot command line. */
static int cmdline_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int len = sprintf(page, "%s\n", saved_command_line);

	return proc_calc_metrics(page, start, off, count, eof, len);
}

/* /proc/locks: file locks; get_locks_status() does its own windowing. */
static int locks_read_proc(char *page, char **start, off_t off,
			   int count, int *eof, void *data)
{
	int len = get_locks_status(page, start, off, count);

	if (len < count)
		*eof = 1;
	return len;
}

/* /proc/execdomains: registered execution domains. */
static int execdomains_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	return proc_calc_metrics(page, start, off, count, eof,
				 get_exec_domain_list(page));
}
|
||||
|
||||
#ifdef CONFIG_MAGIC_SYSRQ
|
||||
/*
|
||||
* writing 'C' to /proc/sysrq-trigger is like sysrq-C
|
||||
*/
|
||||
static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
if (count) {
|
||||
char c;
|
||||
|
||||
if (get_user(c, buf))
|
||||
return -EFAULT;
|
||||
__handle_sysrq(c, NULL, 0);
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
static const struct file_operations proc_sysrq_trigger_operations = {
|
||||
.write = write_sysrq_trigger,
|
||||
};
|
||||
#endif
|
||||
|
||||
struct proc_dir_entry *proc_root_kcore;
|
||||
|
||||
/*
 * Create /proc/<name> with the given mode and wire up its
 * file_operations.  Creation failure is silently ignored — the file
 * simply will not exist.
 */
void create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
{
	struct proc_dir_entry *entry = create_proc_entry(name, mode, NULL);

	if (entry)
		entry->proc_fops = f;
}
|
||||
|
||||
/*
 * Register the miscellaneous top-level /proc files: first the simple
 * read_proc-style entries from a table, then the seq_file-backed and
 * other "trickier" entries.  Individual registration failures are
 * ignored (the file is just missing from /proc).
 */
void __init proc_misc_init(void)
{
	/* Simple entries: one read_proc callback each, no special mode. */
	static struct {
		char *name;
		int (*read_proc)(char*,char**,off_t,int,int*,void*);
	} *p, simple_ones[] = {
		{"loadavg",     loadavg_read_proc},
		{"uptime",	uptime_read_proc},
		{"meminfo",	meminfo_read_proc},
		{"version",	version_read_proc},
#ifdef CONFIG_PROC_HARDWARE
		{"hardware",	hardware_read_proc},
#endif
#ifdef CONFIG_STRAM_PROC
		{"stram",	stram_read_proc},
#endif
		{"filesystems",	filesystems_read_proc},
		{"cmdline",	cmdline_read_proc},
		{"locks",	locks_read_proc},
		{"execdomains",	execdomains_read_proc},

//Qisda, Asaku Chen, 2009/08/17, uboot and kernel version {
		{"odm_kernel_version",	qisda_kernel_version_read_proc},
		{"odm_uboot_version",	qisda_uboot_version_read_proc},
		{"uboot_version",	uboot_version_read_proc},
//Qisda, Asaku Chen, 2009/08/17, uboot and kernel version }

		{NULL,}
	};
	for (p = simple_ones; p->name; p++)
		create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL);

	/* /proc/mounts is a per-process view via the "self" symlink. */
	proc_symlink("mounts", NULL, "self/mounts");

	/* And now for trickier ones */
#ifdef CONFIG_PRINTK
	{
		struct proc_dir_entry *entry;
		/* /proc/kmsg exposes the kernel log; root-readable only. */
		entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
		if (entry)
			entry->proc_fops = &proc_kmsg_operations;
	}
#endif
	create_seq_entry("devices", 0, &proc_devinfo_operations);
	create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
#ifdef CONFIG_BLOCK
	create_seq_entry("partitions", 0, &proc_partitions_operations);
#endif
	create_seq_entry("stat", 0, &proc_stat_operations);
	create_seq_entry("interrupts", 0, &proc_interrupts_operations);
#ifdef CONFIG_SLAB
	/* slabinfo is writable so tuning parameters can be set. */
	create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
	create_seq_entry("slab_allocators", 0 ,&proc_slabstats_operations);
#endif
#endif
	create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
	create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
	create_seq_entry("zoneinfo",S_IRUGO, &proc_zoneinfo_file_operations);
#ifdef CONFIG_BLOCK
	create_seq_entry("diskstats", 0, &proc_diskstats_operations);
#endif
#ifdef CONFIG_MODULES
	create_seq_entry("modules", 0, &proc_modules_operations);
#endif
#ifdef CONFIG_SCHEDSTATS
	create_seq_entry("schedstat", 0, &proc_schedstat_operations);
#endif
#ifdef CONFIG_PROC_KCORE
	/* /proc/kcore: ELF image of kernel memory, root-readable only. */
	proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
	if (proc_root_kcore) {
		proc_root_kcore->proc_fops = &proc_kcore_operations;
		proc_root_kcore->size =
			(size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
	}
#endif
#ifdef CONFIG_PROC_VMCORE
	proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL);
	if (proc_vmcore)
		proc_vmcore->proc_fops = &proc_vmcore_operations;
#endif
#ifdef CONFIG_MAGIC_SYSRQ
	{
		struct proc_dir_entry *entry;
		/* Write-only trigger node; see write_sysrq_trigger(). */
		entry = create_proc_entry("sysrq-trigger", S_IWUSR, NULL);
		if (entry)
			entry->proc_fops = &proc_sysrq_trigger_operations;
	}
#endif
}
|
||||
479
fs/proc/proc_sysctl.c
Normal file
479
fs/proc/proc_sysctl.c
Normal file
@@ -0,0 +1,479 @@
|
||||
/*
|
||||
* /proc/sys support
|
||||
*/
|
||||
|
||||
#include <linux/sysctl.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/security.h>
|
||||
#include "internal.h"
|
||||
|
||||
static struct dentry_operations proc_sys_dentry_operations;
|
||||
static const struct file_operations proc_sys_file_operations;
|
||||
static struct inode_operations proc_sys_inode_operations;
|
||||
|
||||
static void proc_sys_refresh_inode(struct inode *inode, struct ctl_table *table)
|
||||
{
|
||||
/* Refresh the cached information bits in the inode */
|
||||
if (table) {
|
||||
inode->i_uid = 0;
|
||||
inode->i_gid = 0;
|
||||
inode->i_mode = table->mode;
|
||||
if (table->proc_handler) {
|
||||
inode->i_mode |= S_IFREG;
|
||||
inode->i_nlink = 1;
|
||||
} else {
|
||||
inode->i_mode |= S_IFDIR;
|
||||
inode->i_nlink = 0; /* It is too hard to figure out */
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Allocate a fresh inode for a sysctl entry under @dir.  The
 * proc_inode's "fd" field is reused here to record the entry's depth
 * below /proc/sys; proc_sys_ancestor() relies on it to walk back up
 * the dentry chain.  Returns NULL on allocation failure.
 */
static struct inode *proc_sys_make_inode(struct inode *dir, struct ctl_table *table)
{
	struct inode *inode;
	struct proc_inode *dir_ei, *ei;
	int depth;

	inode = new_inode(dir->i_sb);
	if (!inode)
		goto out;

	/* A directory is always one deeper than it's parent */
	dir_ei = PROC_I(dir);
	depth = dir_ei->fd + 1;

	ei = PROC_I(inode);
	ei->fd = depth;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->i_op = &proc_sys_inode_operations;
	inode->i_fop = &proc_sys_file_operations;
	inode->i_flags |= S_PRIVATE; /* tell selinux to ignore this inode */
	/* Fill mode/nlink from the table entry (no-op if table is NULL). */
	proc_sys_refresh_inode(inode, table);
out:
	return inode;
}
|
||||
|
||||
static struct dentry *proc_sys_ancestor(struct dentry *dentry, int depth)
|
||||
{
|
||||
for (;;) {
|
||||
struct proc_inode *ei;
|
||||
|
||||
ei = PROC_I(dentry->d_inode);
|
||||
if (ei->fd == depth)
|
||||
break; /* found */
|
||||
|
||||
dentry = dentry->d_parent;
|
||||
}
|
||||
return dentry;
|
||||
}
|
||||
|
||||
static struct ctl_table *proc_sys_lookup_table_one(struct ctl_table *table,
|
||||
struct qstr *name)
|
||||
{
|
||||
int len;
|
||||
for ( ; table->ctl_name || table->procname; table++) {
|
||||
|
||||
if (!table->procname)
|
||||
continue;
|
||||
|
||||
len = strlen(table->procname);
|
||||
if (len != name->len)
|
||||
continue;
|
||||
|
||||
if (memcmp(table->procname, name->name, len) != 0)
|
||||
continue;
|
||||
|
||||
/* I have a match */
|
||||
return table;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Resolve @dentry's position in the sysctl tree rooted at @table.
 * Walks one table level per unit of depth, matching each ancestor
 * dentry's name, and returns the ctl_table array corresponding to the
 * directory @dentry — or NULL if this registered tree has no such path.
 */
static struct ctl_table *proc_sys_lookup_table(struct dentry *dentry,
					       struct ctl_table *table)
{
	struct dentry *ancestor;
	struct proc_inode *ei;
	int depth, i;

	ei = PROC_I(dentry->d_inode);
	depth = ei->fd;

	/* Depth 0 is the /proc/sys root itself: the whole table. */
	if (depth == 0)
		return table;

	for (i = 1; table && (i <= depth); i++) {
		ancestor = proc_sys_ancestor(dentry, i);
		table = proc_sys_lookup_table_one(table, &ancestor->d_name);
		if (table)
			table = table->child;	/* descend one level */
	}
	return table;

}
|
||||
static struct ctl_table *proc_sys_lookup_entry(struct dentry *dparent,
|
||||
struct qstr *name,
|
||||
struct ctl_table *table)
|
||||
{
|
||||
table = proc_sys_lookup_table(dparent, table);
|
||||
if (table)
|
||||
table = proc_sys_lookup_table_one(table, name);
|
||||
return table;
|
||||
}
|
||||
|
||||
/*
 * Search every registered sysctl tree for @name under directory
 * @parent.  On a match *ptr holds the header whose use count keeps the
 * entry alive; the caller must release it with sysctl_head_finish()
 * (which also accepts NULL).  Returns the matching entry or NULL.
 */
static struct ctl_table *do_proc_sys_lookup(struct dentry *parent,
					    struct qstr *name,
					    struct ctl_table_header **ptr)
{
	struct ctl_table_header *head;
	struct ctl_table *table = NULL;

	for (head = sysctl_head_next(NULL); head;
	     head = sysctl_head_next(head)) {
		table = proc_sys_lookup_entry(parent, name, head->ctl_table);
		if (table)
			break;	/* head stays referenced for the caller */
	}
	*ptr = head;
	return table;
}
|
||||
|
||||
/*
 * ->lookup() for /proc/sys directories: resolve @dentry to its sysctl
 * entry and instantiate an inode for it.  Returns NULL on success
 * (dentry instantiated), ERR_PTR(-ENOENT) if no such sysctl exists,
 * or ERR_PTR(-ENOMEM) if the inode cannot be allocated.
 */
static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
				      struct nameidata *nd)
{
	struct ctl_table_header *head;
	struct inode *inode;
	struct dentry *err;
	struct ctl_table *table;

	err = ERR_PTR(-ENOENT);
	table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
	if (!table)
		goto out;

	err = ERR_PTR(-ENOMEM);
	inode = proc_sys_make_inode(dir, table);
	if (!inode)
		goto out;

	err = NULL;
	/* Entries must be revalidated: sysctls can be unregistered. */
	dentry->d_op = &proc_sys_dentry_operations;
	d_add(dentry, inode);

out:
	sysctl_head_finish(head);
	return err;
}
|
||||
|
||||
/*
 * read() on a /proc/sys file.  The dentry is re-resolved to its sysctl
 * entry on every call (it may have been unregistered or replaced since
 * open), the read permission re-checked, and the work delegated to the
 * entry's proc_handler.  Returns bytes produced or a negative errno.
 */
static ssize_t proc_sys_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct dentry *dentry = filp->f_dentry;
	struct ctl_table_header *head;
	struct ctl_table *table;
	ssize_t error, res;

	table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
	/* Has the sysctl entry disappeared on us? */
	error = -ENOENT;
	if (!table)
		goto out;

	/* Has the sysctl entry been replaced by a directory? */
	error = -EISDIR;
	if (!table->proc_handler)
		goto out;

	/*
	 * At this point we know that the sysctl was not unregistered
	 * and won't be until we finish.
	 */
	error = -EPERM;
	if (sysctl_perm(table, MAY_READ))
		goto out;

	/* careful: calling conventions are nasty here */
	res = count;
	/* 0 = read direction; the handler reports length through res. */
	error = table->proc_handler(table, 0, filp, buf, &res, ppos);
	if (!error)
		error = res;
out:
	sysctl_head_finish(head);

	return error;
}
|
||||
|
||||
/*
 * write() on a /proc/sys file — mirror image of proc_sys_read(): the
 * entry is re-resolved, write permission re-checked, and the write
 * delegated to the entry's proc_handler.  Returns bytes consumed or a
 * negative errno.
 */
static ssize_t proc_sys_write(struct file *filp, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct dentry *dentry = filp->f_dentry;
	struct ctl_table_header *head;
	struct ctl_table *table;
	ssize_t error, res;

	table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
	/* Has the sysctl entry disappeared on us? */
	error = -ENOENT;
	if (!table)
		goto out;

	/* Has the sysctl entry been replaced by a directory? */
	error = -EISDIR;
	if (!table->proc_handler)
		goto out;

	/*
	 * At this point we know that the sysctl was not unregistered
	 * and won't be until we finish.
	 */
	error = -EPERM;
	if (sysctl_perm(table, MAY_WRITE))
		goto out;

	/* careful: calling conventions are nasty here */
	res = count;
	/* 1 = write direction; const is cast away for the old handler ABI. */
	error = table->proc_handler(table, 1, filp, (char __user *)buf,
				    &res, ppos);
	if (!error)
		error = res;
out:
	sysctl_head_finish(head);

	return error;
}
|
||||
|
||||
|
||||
/*
 * Emit one directory entry for @table during readdir.  Duplicate names
 * across multiple registered sysctl trees are suppressed: the entry is
 * reported only if an ordinary lookup of that name resolves back to
 * this very table.  A dentry/inode pair is instantiated on demand so
 * the inode number reported to userspace is stable.  Returns the
 * filldir() result (< 0 means the caller's buffer is full).
 */
static int proc_sys_fill_cache(struct file *filp, void *dirent,
			       filldir_t filldir, struct ctl_table *table)
{
	struct ctl_table_header *head;
	struct ctl_table *child_table = NULL;
	struct dentry *child, *dir = filp->f_path.dentry;
	struct inode *inode;
	struct qstr qname;
	ino_t ino = 0;
	unsigned type = DT_UNKNOWN;
	int ret;

	qname.name = table->procname;
	qname.len  = strlen(table->procname);
	qname.hash = full_name_hash(qname.name, qname.len);

	/* Suppress duplicates.
	 * Only fill a directory entry if it is the value that
	 * an ordinary lookup of that name returns.  Hide all
	 * others.
	 *
	 * If we ever cache this translation in the dcache
	 * I should do a dcache lookup first.  But for now
	 * it is just simpler not to.
	 */
	ret = 0;
	child_table = do_proc_sys_lookup(dir, &qname, &head);
	sysctl_head_finish(head);
	if (child_table != table)
		return 0;

	child = d_lookup(dir, &qname);
	if (!child) {
		struct dentry *new;
		new = d_alloc(dir, &qname);
		if (new) {
			inode = proc_sys_make_inode(dir->d_inode, table);
			if (!inode)
				child = ERR_PTR(-ENOMEM);
			else {
				new->d_op = &proc_sys_dentry_operations;
				d_add(new, inode);
			}
			/* On inode failure drop the dentry we allocated. */
			if (child)
				dput(new);
			else
				child = new;
		}
	}
	if (!child || IS_ERR(child) || !child->d_inode)
		goto end_instantiate;
	inode = child->d_inode;
	if (inode) {
		ino = inode->i_ino;
		type = inode->i_mode >> 12;	/* mode bits -> DT_* type */
	}
	dput(child);
end_instantiate:
	if (!ino)
		ino= find_inode_number(dir, &qname);
	if (!ino)
		ino = 1;	/* any non-zero value keeps filldir happy */
	return filldir(dirent, qname.name, qname.len, filp->f_pos, ino, type);
}
|
||||
|
||||
/*
 * readdir() for /proc/sys directories.  Emits "." and "..", then scans
 * every registered sysctl tree for instances of this directory and
 * reports each named entry once (buried duplicates are filtered by
 * proc_sys_fill_cache()).  Returns 1 when the listing completed, 0 if
 * the user buffer filled up, or a negative errno.
 */
static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_dentry;
	struct inode *inode = dentry->d_inode;
	struct ctl_table_header *head = NULL;
	struct ctl_table *table;
	unsigned long pos;
	int ret;

	ret = -ENOTDIR;
	if (!S_ISDIR(inode->i_mode))
		goto out;

	ret = 0;
	/* Avoid a switch here: arm builds fail with missing __cmpdi2 */
	if (filp->f_pos == 0) {
		if (filldir(dirent, ".", 1, filp->f_pos,
				inode->i_ino, DT_DIR) < 0)
			goto out;
		filp->f_pos++;
	}
	if (filp->f_pos == 1) {
		if (filldir(dirent, "..", 2, filp->f_pos,
				parent_ino(dentry), DT_DIR) < 0)
			goto out;
		filp->f_pos++;
	}
	/* Positions 0 and 1 are "." and ".."; table entries start at 2. */
	pos = 2;

	/* - Find each instance of the directory
	 * - Read all entries in each instance
	 * - Before returning an entry to user space lookup the entry
	 *   by name and if I find a different entry don't return
	 *   this one because it means it is a buried dup.
	 * For sysctl this should only happen for directory entries.
	 */
	for (head = sysctl_head_next(NULL); head; head = sysctl_head_next(head)) {
		table = proc_sys_lookup_table(dentry, head->ctl_table);

		if (!table)
			continue;

		for (; table->ctl_name || table->procname; table++, pos++) {
			/* Can't do anything without a proc name */
			if (!table->procname)
				continue;

			/* Skip entries already returned on earlier calls. */
			if (pos < filp->f_pos)
				continue;

			if (proc_sys_fill_cache(filp, dirent, filldir, table) < 0)
				goto out;
			filp->f_pos = pos + 1;
		}
	}
	ret = 1;
out:
	sysctl_head_finish(head);
	return ret;
}
|
||||
|
||||
/*
 * ->permission() for /proc/sys.  The authoritative check is against
 * the live sysctl table entry (via sysctl_perm()); when the entry
 * cannot be found — no nameidata, or the /proc/sys root itself — the
 * mode bits cached in the inode are used instead.
 */
static int proc_sys_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	/*
	 * sysctl entries that are not writeable,
	 * are _NOT_ writeable, capabilities or not.
	 */
	struct ctl_table_header *head;
	struct ctl_table *table;
	struct dentry *dentry;
	int mode;
	int depth;
	int error;

	head = NULL;
	depth = PROC_I(inode)->fd;	/* depth below /proc/sys */

	/* First check the cached permissions, in case we don't have
	 * enough information to lookup the sysctl table entry.
	 */
	error = -EACCES;
	mode = inode->i_mode;

	/* Classic owner/group/other selection: root and group 0. */
	if (current->euid == 0)
		mode >>= 6;
	else if (in_group_p(0))
		mode >>= 3;

	if ((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask)
		error = 0;

	/* If we can't get a sysctl table entry the permission
	 * checks on the cached mode will have to be enough.
	 */
	if (!nd || !depth)
		goto out;

	dentry = nd->dentry;
	table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);

	/* If the entry does not exist deny permission */
	error = -EACCES;
	if (!table)
		goto out;

	/* Use the permissions on the sysctl table entry */
	error = sysctl_perm(table, mask);
out:
	sysctl_head_finish(head);
	return error;
}
|
||||
|
||||
static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
int error;
|
||||
|
||||
if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
|
||||
return -EPERM;
|
||||
|
||||
error = inode_change_ok(inode, attr);
|
||||
if (!error) {
|
||||
error = security_inode_setattr(dentry, attr);
|
||||
if (!error)
|
||||
error = inode_setattr(inode, attr);
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/* I'm lazy and don't distinguish between files and directories,
|
||||
* until access time.
|
||||
*/
|
||||
/* Shared file operations for every /proc/sys file and directory. */
static const struct file_operations proc_sys_file_operations = {
	.read		= proc_sys_read,
	.write		= proc_sys_write,
	.readdir	= proc_sys_readdir,
};
|
||||
|
||||
/* Shared inode operations for every /proc/sys inode. */
static struct inode_operations proc_sys_inode_operations = {
	.lookup		= proc_sys_lookup,
	.permission	= proc_sys_permission,
	.setattr	= proc_sys_setattr,
};
|
||||
|
||||
static int proc_sys_revalidate(struct dentry *dentry, struct nameidata *nd)
|
||||
{
|
||||
struct ctl_table_header *head;
|
||||
struct ctl_table *table;
|
||||
table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
|
||||
proc_sys_refresh_inode(dentry->d_inode, table);
|
||||
sysctl_head_finish(head);
|
||||
return !!table;
|
||||
}
|
||||
|
||||
/* Dentry operations: revalidate against the live sysctl tables. */
static struct dentry_operations proc_sys_dentry_operations = {
	.d_revalidate	= proc_sys_revalidate,
};
|
||||
|
||||
static struct proc_dir_entry *proc_sys_root;
|
||||
|
||||
int proc_sys_init(void)
|
||||
{
|
||||
proc_sys_root = proc_mkdir("sys", NULL);
|
||||
proc_sys_root->proc_iops = &proc_sys_inode_operations;
|
||||
proc_sys_root->proc_fops = &proc_sys_file_operations;
|
||||
proc_sys_root->nlink = 0;
|
||||
return 0;
|
||||
}
|
||||
242
fs/proc/proc_tty.c
Normal file
242
fs/proc/proc_tty.c
Normal file
@@ -0,0 +1,242 @@
|
||||
/*
|
||||
* proc_tty.c -- handles /proc/tty
|
||||
*
|
||||
* Copyright 1997, Theodore Ts'o
|
||||
*/
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/bitops.h>
|
||||
|
||||
static int tty_ldiscs_read_proc(char *page, char **start, off_t off,
|
||||
int count, int *eof, void *data);
|
||||
|
||||
/*
|
||||
* The /proc/tty directory inodes...
|
||||
*/
|
||||
static struct proc_dir_entry *proc_tty_ldisc, *proc_tty_driver;
|
||||
|
||||
/*
|
||||
* This is the handler for /proc/tty/drivers
|
||||
*/
|
||||
/*
 * Print one /proc/tty/drivers line for driver @p covering @num minors
 * starting at device @from: driver name, device node, major, minor (or
 * minor range), and a textual driver type.
 */
static void show_tty_range(struct seq_file *m, struct tty_driver *p,
			   dev_t from, int num)
{
	seq_printf(m, "%-20s ", p->driver_name ? p->driver_name : "unknown");
	seq_printf(m, "/dev/%-8s ", p->name);
	if (p->num > 1) {
		/* Multiple devices: print a minor range. */
		seq_printf(m, "%3d %d-%d ", MAJOR(from), MINOR(from),
			MINOR(from) + num - 1);
	} else {
		seq_printf(m, "%3d %7d ", MAJOR(from), MINOR(from));
	}
	switch (p->type) {
	case TTY_DRIVER_TYPE_SYSTEM:
		seq_printf(m, "system");
		if (p->subtype == SYSTEM_TYPE_TTY)
			seq_printf(m, ":/dev/tty");
		else if (p->subtype == SYSTEM_TYPE_SYSCONS)
			seq_printf(m, ":console");
		else if (p->subtype == SYSTEM_TYPE_CONSOLE)
			seq_printf(m, ":vtmaster");
		break;
	case TTY_DRIVER_TYPE_CONSOLE:
		seq_printf(m, "console");
		break;
	case TTY_DRIVER_TYPE_SERIAL:
		seq_printf(m, "serial");
		break;
	case TTY_DRIVER_TYPE_PTY:
		if (p->subtype == PTY_TYPE_MASTER)
			seq_printf(m, "pty:master");
		else if (p->subtype == PTY_TYPE_SLAVE)
			seq_printf(m, "pty:slave");
		else
			seq_printf(m, "pty");
		break;
	default:
		/* Unknown driver type: print the raw type/subtype pair. */
		seq_printf(m, "type:%d.%d", p->type, p->subtype);
	}
	seq_putc(m, '\n');
}
|
||||
|
||||
/*
 * seq_file ->show() for /proc/tty/drivers: print the static
 * pseudo-driver lines once (before the first real driver), then one
 * line per major number spanned by driver @v.
 */
static int show_tty_driver(struct seq_file *m, void *v)
{
	struct tty_driver *p = v;
	dev_t from = MKDEV(p->major, p->minor_start);
	dev_t to = from + p->num;

	/* First list entry?  Emit the fixed pseudo-driver lines first. */
	if (&p->tty_drivers == tty_drivers.next) {
		/* pseudo-drivers first */
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty");
		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0);
		seq_printf(m, "system:/dev/tty\n");
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console");
		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1);
		seq_printf(m, "system:console\n");
#ifdef CONFIG_UNIX98_PTYS
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx");
		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2);
		seq_printf(m, "system\n");
#endif
#ifdef CONFIG_VT
		seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0");
		seq_printf(m, "%3d %7d ", TTY_MAJOR, 0);
		seq_printf(m, "system:vtmaster\n");
#endif
	}

	/* A driver can span several majors: one output line per major. */
	while (MAJOR(from) < MAJOR(to)) {
		dev_t next = MKDEV(MAJOR(from)+1, 0);
		show_tty_range(m, p, from, next - from);
		from = next;
	}
	if (from != to)
		show_tty_range(m, p, from, to - from);
	return 0;
}
|
||||
|
||||
/* iterator */
|
||||
/* seq_file ->start(): return the *pos'th registered tty driver, or NULL. */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct list_head *cur;
	loff_t skip = *pos;

	list_for_each(cur, &tty_drivers) {
		if (skip-- == 0)
			return list_entry(cur, struct tty_driver, tty_drivers);
	}
	return NULL;
}
|
||||
|
||||
/* seq_file ->next(): advance to the following driver, NULL at list end. */
static void *t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *next = ((struct tty_driver *)v)->tty_drivers.next;

	(*pos)++;
	if (next == &tty_drivers)
		return NULL;
	return list_entry(next, struct tty_driver, tty_drivers);
}
|
||||
|
||||
/* seq_file ->stop(): no iterator state to release. */
static void t_stop(struct seq_file *m, void *v)
{
}
|
||||
|
||||
/* seq_file iterator for walking the global tty driver list. */
static struct seq_operations tty_drivers_op = {
	.start	= t_start,
	.next	= t_next,
	.stop	= t_stop,
	.show	= show_tty_driver
};
|
||||
|
||||
/* open() for /proc/tty/drivers: attach the driver-list iterator. */
static int tty_drivers_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &tty_drivers_op);
}
|
||||
|
||||
/* File operations for /proc/tty/drivers (standard seq_file plumbing). */
static const struct file_operations proc_tty_drivers_operations = {
	.open		= tty_drivers_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
|
||||
|
||||
/*
|
||||
* This is the handler for /proc/tty/ldiscs
|
||||
*/
|
||||
/*
 * This is the handler for /proc/tty/ldiscs: one "name number" line per
 * registered line discipline.  Uses the legacy read_proc off/count
 * protocol: accumulate into @page, track how much of the output falls
 * before @off in @begin, and return the slice the caller asked for.
 */
static int tty_ldiscs_read_proc(char *page, char **start, off_t off,
				int count, int *eof, void *data)
{
	int i;
	int len = 0;
	off_t begin = 0;
	struct tty_ldisc *ld;

	for (i=0; i < NR_LDISCS; i++) {
		/* tty_ldisc_get() takes a reference; drop it after printing. */
		ld = tty_ldisc_get(i);
		if (ld == NULL)
			continue;
		len += sprintf(page+len, "%-10s %2d\n",
			       ld->name ? ld->name : "???", i);
		tty_ldisc_put(i);
		if (len+begin > off+count)
			break;	/* produced enough for this request */
		if (len+begin < off) {
			/* Everything so far is before the requested offset. */
			begin += len;
			len = 0;
		}
	}
	if (i >= NR_LDISCS)
		*eof = 1;	/* ran off the end of the table: done */
	if (off >= len+begin)
		return 0;
	*start = page + (off-begin);
	return ((count < begin+len-off) ? count : begin+len-off);
}
|
||||
|
||||
/*
|
||||
* This function is called by tty_register_driver() to handle
|
||||
* registering the driver's /proc handler into /proc/tty/driver/<foo>
|
||||
*/
|
||||
void proc_tty_register_driver(struct tty_driver *driver)
|
||||
{
|
||||
struct proc_dir_entry *ent;
|
||||
|
||||
if ((!driver->read_proc && !driver->write_proc) ||
|
||||
!driver->driver_name ||
|
||||
driver->proc_entry)
|
||||
return;
|
||||
|
||||
ent = create_proc_entry(driver->driver_name, 0, proc_tty_driver);
|
||||
if (!ent)
|
||||
return;
|
||||
ent->read_proc = driver->read_proc;
|
||||
ent->write_proc = driver->write_proc;
|
||||
ent->owner = driver->owner;
|
||||
ent->data = driver;
|
||||
|
||||
driver->proc_entry = ent;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is called by tty_unregister_driver()
|
||||
*/
|
||||
void proc_tty_unregister_driver(struct tty_driver *driver)
|
||||
{
|
||||
struct proc_dir_entry *ent;
|
||||
|
||||
ent = driver->proc_entry;
|
||||
if (!ent)
|
||||
return;
|
||||
|
||||
remove_proc_entry(driver->driver_name, proc_tty_driver);
|
||||
|
||||
driver->proc_entry = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Called by proc_root_init() to initialize the /proc/tty subtree
|
||||
*/
|
||||
/*
 * Called by proc_root_init() to initialize the /proc/tty subtree:
 * tty/ldisc and tty/driver directories plus the ldiscs and drivers
 * files.  If the top-level "tty" directory cannot be created, the
 * whole subtree is skipped.
 */
void __init proc_tty_init(void)
{
	struct proc_dir_entry *entry;
	if (!proc_mkdir("tty", NULL))
		return;
	proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL);
	/*
	 * /proc/tty/driver/serial reveals the exact character counts for
	 * serial links which is just too easy to abuse for inferring
	 * password lengths and inter-keystroke timings during password
	 * entry.
	 */
	proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR | S_IXUSR, NULL);

	create_proc_read_entry("tty/ldiscs", 0, NULL, tty_ldiscs_read_proc, NULL);
	entry = create_proc_entry("tty/drivers", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_tty_drivers_operations;
}
|
||||
165
fs/proc/root.c
Normal file
165
fs/proc/root.c
Normal file
@@ -0,0 +1,165 @@
|
||||
/*
|
||||
* linux/fs/proc/root.c
|
||||
*
|
||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||
*
|
||||
* proc root directory handling functions
|
||||
*/
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/mount.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
struct proc_dir_entry *proc_net, *proc_net_stat, *proc_bus, *proc_root_fs, *proc_root_driver;
|
||||
|
||||
/*
 * ->get_sb() for procfs.  proc uses one shared superblock
 * (get_sb_single); once the internal kern_mount exists, its root
 * inode is seeded with pid 1 so the root directory need not be
 * special-cased in base.c.
 */
static int proc_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	if (proc_mnt) {
		/* Seed the root directory with a pid so it doesn't need
		 * to be special in base.c.  I would do this earlier but
		 * the only task alive when /proc is mounted the first time
		 * is the init_task and it doesn't have any pids.
		 */
		struct proc_inode *ei;
		ei = PROC_I(proc_mnt->mnt_sb->s_root->d_inode);
		if (!ei->pid)
			ei->pid = find_get_pid(1);
	}
	return get_sb_single(fs_type, flags, data, proc_fill_super, mnt);
}
|
||||
|
||||
/* The "proc" filesystem type registered with the VFS. */
static struct file_system_type proc_fs_type = {
	.name		= "proc",
	.get_sb		= proc_get_sb,
	.kill_sb	= kill_anon_super,
};
|
||||
|
||||
/*
 * Boot-time initialization of procfs: set up the inode cache, register
 * and kern_mount the filesystem, then create the standard top-level
 * directories and delegate to the misc/tty/devicetree/sysctl
 * sub-initializers.  Failures early on abort silently (no /proc).
 */
void __init proc_root_init(void)
{
	int err = proc_init_inodecache();
	if (err)
		return;
	err = register_filesystem(&proc_fs_type);
	if (err)
		return;
	/* Internal mount so the kernel can use /proc before userspace. */
	proc_mnt = kern_mount(&proc_fs_type);
	err = PTR_ERR(proc_mnt);
	if (IS_ERR(proc_mnt)) {
		unregister_filesystem(&proc_fs_type);
		return;
	}
	proc_misc_init();
	proc_net = proc_mkdir("net", NULL);
	proc_net_stat = proc_mkdir("net/stat", NULL);

#ifdef CONFIG_SYSVIPC
	proc_mkdir("sysvipc", NULL);
#endif
	proc_root_fs = proc_mkdir("fs", NULL);
	proc_root_driver = proc_mkdir("driver", NULL);
	proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */
#if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE)
	/* just give it a mountpoint */
	proc_mkdir("openprom", NULL);
#endif
	proc_tty_init();
#ifdef CONFIG_PROC_DEVICETREE
	proc_device_tree_init();
#endif
	proc_bus = proc_mkdir("bus", NULL);
	proc_sys_init();
}
|
||||
|
||||
/*
 * ->getattr() for the /proc root: the link count is computed as the
 * static entries plus one per live process, since the <pid>
 * subdirectories are not real proc_dir_entries.
 */
static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat
)
{
	generic_fillattr(dentry->d_inode, stat);
	stat->nlink = proc_root.nlink + nr_processes();
	return 0;
}
|
||||
|
||||
static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
|
||||
{
|
||||
if (!proc_lookup(dir, dentry, nd)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return proc_pid_lookup(dir, dentry, nd);
|
||||
}
|
||||
|
||||
/*
 * readdir() for the /proc root: positions below FIRST_PROCESS_ENTRY
 * come from the registered proc entries (under the BKL), everything
 * after that from the per-process <pid> directories.
 */
static int proc_root_readdir(struct file * filp,
	void * dirent, filldir_t filldir)
{
	unsigned int nr = filp->f_pos;
	int ret;

	lock_kernel();

	if (nr < FIRST_PROCESS_ENTRY) {
		int error = proc_readdir(filp, dirent, filldir);
		if (error <= 0) {
			unlock_kernel();
			return error;
		}
		/* Registered entries done: jump to the pid range. */
		filp->f_pos = FIRST_PROCESS_ENTRY;
	}
	unlock_kernel();

	ret = proc_pid_readdir(filp, dirent, filldir);
	return ret;
}
|
||||
|
||||
/*
|
||||
* The root /proc directory is special, as it has the
|
||||
* <pid> directories. Thus we don't use the generic
|
||||
* directory handling functions for that..
|
||||
*/
|
||||
/* File operations for the /proc root directory. */
static const struct file_operations proc_root_operations = {
	.read		 = generic_read_dir,
	.readdir	 = proc_root_readdir,
};
|
||||
|
||||
/*
|
||||
* proc root can do almost nothing..
|
||||
*/
|
||||
/* Inode operations for the /proc root directory. */
static const struct inode_operations proc_root_inode_operations = {
	.lookup		= proc_root_lookup,
	.getattr	= proc_root_getattr,
};
|
||||
|
||||
/*
|
||||
* This is the root "inode" in the /proc tree..
|
||||
*/
|
||||
/*
 * The static proc_dir_entry for the /proc root itself; its parent
 * points back at itself so upward traversal terminates here.
 */
struct proc_dir_entry proc_root = {
	.low_ino	= PROC_ROOT_INO,
	.namelen	= 5,
	.name		= "/proc",
	.mode		= S_IFDIR | S_IRUGO | S_IXUGO,
	.nlink		= 2,
	.proc_iops	= &proc_root_inode_operations,
	.proc_fops	= &proc_root_operations,
	.parent		= &proc_root,
};
|
||||
|
||||
/* Public procfs API and well-known directories, exported to modules. */
EXPORT_SYMBOL(proc_symlink);
EXPORT_SYMBOL(proc_mkdir);
EXPORT_SYMBOL(create_proc_entry);
EXPORT_SYMBOL(remove_proc_entry);
EXPORT_SYMBOL(proc_root);
EXPORT_SYMBOL(proc_root_fs);
EXPORT_SYMBOL(proc_net);
EXPORT_SYMBOL(proc_net_stat);
EXPORT_SYMBOL(proc_bus);
EXPORT_SYMBOL(proc_root_driver);
|
||||
477
fs/proc/task_mmu.c
Normal file
477
fs/proc/task_mmu.c
Normal file
@@ -0,0 +1,477 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/mempolicy.h>
|
||||
|
||||
#include <asm/elf.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include "internal.h"
|
||||
|
||||
/*
 * Append the Vm* memory statistics lines (for /proc/<pid>/status) to
 * @buffer and return the advanced buffer pointer.  All values are
 * reported in kB.
 */
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	/* text is computed in kB directly; the rest in pages until print. */
	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}
|
||||
|
||||
unsigned long task_vsize(struct mm_struct *mm)
|
||||
{
|
||||
return PAGE_SIZE * mm->total_vm;
|
||||
}
|
||||
|
||||
/*
 * Fill the /proc/<pid>/statm fields (all in pages): file-backed RSS in
 * *shared, text size in *text, non-shared VM in *data, and total RSS
 * in *resident.  Returns the total VM size in pages.
 */
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}
|
||||
|
||||
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
|
||||
{
|
||||
struct vm_area_struct * vma;
|
||||
int result = -ENOENT;
|
||||
struct task_struct *task = get_proc_task(inode);
|
||||
struct mm_struct * mm = NULL;
|
||||
|
||||
if (task) {
|
||||
mm = get_task_mm(task);
|
||||
put_task_struct(task);
|
||||
}
|
||||
if (!mm)
|
||||
goto out;
|
||||
down_read(&mm->mmap_sem);
|
||||
|
||||
vma = mm->mmap;
|
||||
while (vma) {
|
||||
if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
|
||||
break;
|
||||
vma = vma->vm_next;
|
||||
}
|
||||
|
||||
if (vma) {
|
||||
*mnt = mntget(vma->vm_file->f_path.mnt);
|
||||
*dentry = dget(vma->vm_file->f_path.dentry);
|
||||
result = 0;
|
||||
}
|
||||
|
||||
up_read(&mm->mmap_sem);
|
||||
mmput(mm);
|
||||
out:
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
 * Pad a maps line out to a fixed column (25 + 6 pointer-widths) after
 * @len characters have already been printed; always emits at least one
 * space so the following field is separated.
 */
static void pad_len_spaces(struct seq_file *m, int len)
{
	int pad = 25 + sizeof(void*) * 6 - len;

	if (pad < 1)
		pad = 1;
	seq_printf(m, "%*c", pad, ' ');
}
|
||||
|
||||
/*
 * Accumulator for the per-vma figures shown by /proc/<pid>/smaps.
 * All fields are byte counts, filled in by the smaps page-table walk.
 */
struct mem_size_stats
{
	unsigned long resident;		/* pages present in memory */
	unsigned long shared_clean;	/* mapcount >= 2, pte clean */
	unsigned long shared_dirty;	/* mapcount >= 2, pte dirty */
	unsigned long private_clean;	/* mapcount < 2, pte clean */
	unsigned long private_dirty;	/* mapcount < 2, pte dirty */
};
|
||||
|
||||
/*
 * Emit one /proc/<pid>/maps-style line for the vma @v and, when @mss is
 * non-NULL (the smaps path), append the size/RSS breakdown collected by
 * the page-table walk.  Always returns 0 (seq_file convention).
 */
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	/* file-backed mappings report the backing inode's device and number */
	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	/* trailing %n captures the printed width so the name can be column-aligned */
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				/* no mm at all: label it as the vdso */
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			   "Size: %8lu kB\n"
			   "Rss: %8lu kB\n"
			   "Shared_Clean: %8lu kB\n"
			   "Shared_Dirty: %8lu kB\n"
			   "Private_Clean: %8lu kB\n"
			   "Private_Dirty: %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   mss->resident >> 10,
			   mss->shared_clean >> 10,
			   mss->shared_dirty >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10);

	/*
	 * Record where to resume on the next read; only valid if this vma
	 * actually fitted into the seq_file buffer.  The gate vma resets
	 * the hint to 0.
	 */
	if (m->count < m->size) /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}
|
||||
|
||||
/* /proc/<pid>/maps show: one line per vma, no size statistics. */
static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}
|
||||
|
||||
/*
 * Walk the PTEs under @pmd for [addr, end) and add byte counts of
 * resident / shared / private, clean / dirty pages into @mss.
 * Takes the page-table lock for the mapped PTE page while scanning.
 */
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		ptent = *pte;
		/* only present pages count towards RSS */
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		/* special mappings have no struct page to classify */
		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* mapped more than once => counted as shared */
		if (page_mapcount(page) >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	/* walking a large vma can take a while; allow preemption here */
	cond_resched();
}
|
||||
|
||||
/* Descend from a PUD into each populated PMD within [addr, end). */
static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	unsigned long next;
	pmd_t *pmd = pmd_offset(pud, addr);

	for (; addr != end; pmd++, addr = next) {
		next = pmd_addr_end(addr, end);
		/* skip (and sanity-clear) empty or corrupt entries */
		if (pmd_none_or_clear_bad(pmd))
			continue;
		smaps_pte_range(vma, pmd, addr, next, mss);
	}
}
|
||||
|
||||
/* Descend from a PGD into each populated PUD within [addr, end). */
static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	unsigned long next;
	pud_t *pud = pud_offset(pgd, addr);

	for (; addr != end; pud++, addr = next) {
		next = pud_addr_end(addr, end);
		/* skip (and sanity-clear) empty or corrupt entries */
		if (pud_none_or_clear_bad(pud))
			continue;
		smaps_pmd_range(vma, pud, addr, next, mss);
	}
}
|
||||
|
||||
static inline void smaps_pgd_range(struct vm_area_struct *vma,
|
||||
unsigned long addr, unsigned long end,
|
||||
struct mem_size_stats *mss)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
unsigned long next;
|
||||
|
||||
pgd = pgd_offset(vma->vm_mm, addr);
|
||||
do {
|
||||
next = pgd_addr_end(addr, end);
|
||||
if (pgd_none_or_clear_bad(pgd))
|
||||
continue;
|
||||
smaps_pud_range(vma, pgd, addr, next, mss);
|
||||
} while (pgd++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
static int show_smap(struct seq_file *m, void *v)
|
||||
{
|
||||
struct vm_area_struct *vma = v;
|
||||
struct mem_size_stats mss;
|
||||
|
||||
memset(&mss, 0, sizeof mss);
|
||||
if (vma->vm_mm && !is_vm_hugetlb_page(vma))
|
||||
smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
|
||||
return show_map_internal(m, v, &mss);
|
||||
}
|
||||
|
||||
/*
 * seq_file start: pin the task and its mm, take mmap_sem for reading and
 * return the vma at position *pos (or the resume hint in m->version).
 * The iteration ends with the architecture's gate vma, if any.  The lock
 * and references taken here are released by vma_stop()/m_stop() when the
 * real-vma list is exhausted.
 */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = get_task_mm(priv->task);
	if (!mm)
		return NULL;

	priv->tail_vma = tail_vma = get_gate_vma(priv->task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	/* position just past map_count selects the gate vma; further is EOF */
	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
|
||||
|
||||
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
|
||||
{
|
||||
if (vma && vma != priv->tail_vma) {
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
up_read(&mm->mmap_sem);
|
||||
mmput(mm);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * seq_file next: advance to the following vma.  After the last real vma
 * the gate (tail) vma is produced once, then NULL.  The mm lock and
 * reference are dropped via vma_stop() as soon as the real list ends.
 */
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	/* emit the gate vma once; if we were already on it, we're done */
	return (vma != tail_vma)? tail_vma: NULL;
}
|
||||
|
||||
static void m_stop(struct seq_file *m, void *v)
|
||||
{
|
||||
struct proc_maps_private *priv = m->private;
|
||||
struct vm_area_struct *vma = v;
|
||||
|
||||
vma_stop(priv, vma);
|
||||
if (priv->task)
|
||||
put_task_struct(priv->task);
|
||||
}
|
||||
|
||||
/* seq_file iterator for /proc/<pid>/maps: plain per-vma lines. */
static struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

/* seq_file iterator for /proc/<pid>/smaps: per-vma lines plus size stats. */
static struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};
|
||||
|
||||
static int do_maps_open(struct inode *inode, struct file *file,
|
||||
struct seq_operations *ops)
|
||||
{
|
||||
struct proc_maps_private *priv;
|
||||
int ret = -ENOMEM;
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (priv) {
|
||||
priv->pid = proc_pid(inode);
|
||||
ret = seq_open(file, ops);
|
||||
if (!ret) {
|
||||
struct seq_file *m = file->private_data;
|
||||
m->private = priv;
|
||||
} else {
|
||||
kfree(priv);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* open() for /proc/<pid>/maps */
static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}
|
||||
|
||||
/* file_operations for /proc/<pid>/maps; reads go through seq_file. */
const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
extern int show_numa_map(struct seq_file *m, void *v);
|
||||
|
||||
static struct seq_operations proc_pid_numa_maps_op = {
|
||||
.start = m_start,
|
||||
.next = m_next,
|
||||
.stop = m_stop,
|
||||
.show = show_numa_map
|
||||
};
|
||||
|
||||
static int numa_maps_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return do_maps_open(inode, file, &proc_pid_numa_maps_op);
|
||||
}
|
||||
|
||||
const struct file_operations proc_numa_maps_operations = {
|
||||
.open = numa_maps_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release_private,
|
||||
};
|
||||
#endif
|
||||
|
||||
/* open() for /proc/<pid>/smaps */
static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}
|
||||
|
||||
/* file_operations for /proc/<pid>/smaps; reads go through seq_file. */
const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
|
||||
229
fs/proc/task_nommu.c
Normal file
229
fs/proc/task_nommu.c
Normal file
@@ -0,0 +1,229 @@
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include "internal.h"
|
||||
|
||||
/*
|
||||
* Logic: we've got two memory sums for each process, "shared", and
|
||||
* "non-shared". Shared memory may get counted more then once, for
|
||||
* each process that owns it. Non-shared memory is counted
|
||||
* accurately.
|
||||
*/
|
||||
/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more then once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 *
 * (no-MMU variant) Appends Mem/Slack/Shared byte totals for @mm to
 * @buffer and returns the advanced buffer position.
 */
char *task_mem(struct mm_struct *mm, char *buffer)
{
	struct vm_list_struct *vml;
	unsigned long bytes = 0, sbytes = 0, slack = 0;

	down_read(&mm->mmap_sem);
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		if (!vml->vma)
			continue;

		bytes += kobjsize(vml);
		/* an mm or vma with more than one user counts as shared */
		if (atomic_read(&mm->mm_count) > 1 ||
		    atomic_read(&vml->vma->vm_usage) > 1
		    ) {
			sbytes += kobjsize((void *) vml->vma->vm_start);
			sbytes += kobjsize(vml->vma);
		} else {
			bytes += kobjsize((void *) vml->vma->vm_start);
			bytes += kobjsize(vml->vma);
			/* slack: allocation size beyond what the vma actually spans */
			slack += kobjsize((void *) vml->vma->vm_start) -
				(vml->vma->vm_end - vml->vma->vm_start);
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && atomic_read(&current->fs->count) > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	buffer += sprintf(buffer,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
	return buffer;
}
|
||||
|
||||
unsigned long task_vsize(struct mm_struct *mm)
|
||||
{
|
||||
struct vm_list_struct *tbp;
|
||||
unsigned long vsize = 0;
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
|
||||
if (tbp->vma)
|
||||
vsize += kobjsize((void *) tbp->vma->vm_start);
|
||||
}
|
||||
up_read(&mm->mmap_sem);
|
||||
return vsize;
|
||||
}
|
||||
|
||||
/*
 * (no-MMU variant) Fill in the statm statistics for @mm.  @size sums
 * the kernel-object sizes of the mm itself, every vm_list entry, every
 * vma and its backing allocation, plus the text and data extents.
 *
 * NOTE(review): *shared is never written here, unlike the MMU variant;
 * presumably callers pre-zero it — verify against the callers.
 */
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	struct vm_list_struct *tbp;
	int size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		size += kobjsize(tbp);
		if (tbp->vma) {
			size += kobjsize(tbp->vma);
			size += kobjsize((void *) tbp->vma->vm_start);
		}
	}

	/* text/data are byte extents; they also contribute to the total */
	size += (*text = mm->end_code - mm->start_code);
	size += (*data = mm->start_stack - mm->start_data);
	up_read(&mm->mmap_sem);
	*resident = size;
	return size;
}
|
||||
|
||||
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
|
||||
{
|
||||
struct vm_list_struct *vml;
|
||||
struct vm_area_struct *vma;
|
||||
struct task_struct *task = get_proc_task(inode);
|
||||
struct mm_struct *mm = get_task_mm(task);
|
||||
int result = -ENOENT;
|
||||
|
||||
if (!mm)
|
||||
goto out;
|
||||
down_read(&mm->mmap_sem);
|
||||
|
||||
vml = mm->context.vmlist;
|
||||
vma = NULL;
|
||||
while (vml) {
|
||||
if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
|
||||
vma = vml->vma;
|
||||
break;
|
||||
}
|
||||
vml = vml->next;
|
||||
}
|
||||
|
||||
if (vma) {
|
||||
*mnt = mntget(vma->vm_file->f_path.mnt);
|
||||
*dentry = dget(vma->vm_file->f_path.dentry);
|
||||
result = 0;
|
||||
}
|
||||
|
||||
up_read(&mm->mmap_sem);
|
||||
mmput(mm);
|
||||
out:
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* display mapping lines for a particular process's /proc/pid/maps
|
||||
*/
|
||||
/*
 * display mapping lines for a particular process's /proc/pid/maps
 * (the actual formatting is delegated to nommu_vma_show())
 */
static int show_map(struct seq_file *m, void *_vml)
{
	struct vm_list_struct *vml = _vml;
	return nommu_vma_show(m, vml->vma);
}
|
||||
|
||||
/*
 * (no-MMU variant) seq_file start: pin the task and mm, take mmap_sem
 * for reading and return the Nth entry of the vm_list chain.
 *
 * NOTE(review): on a past-the-end *pos this returns NULL with mmap_sem
 * still held and the mm/task references still live; cleanup then relies
 * on seq_file always invoking m_stop() — confirm that pairing holds.
 */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_list_struct *vml;
	struct mm_struct *mm;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = get_task_mm(priv->task);
	if (!mm) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	down_read(&mm->mmap_sem);

	/* start from the Nth VMA */
	for (vml = mm->context.vmlist; vml; vml = vml->next)
		if (n-- == 0)
			return vml;
	return NULL;
}
|
||||
|
||||
/*
 * (no-MMU variant) seq_file stop: drop the lock and references taken by
 * m_start().
 *
 * NOTE(review): this reaches the mm via priv->task->mm rather than the
 * pointer get_task_mm() returned in m_start(); presumably always the
 * same object here — confirm the task cannot lose its mm in between.
 */
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (priv->task) {
		struct mm_struct *mm = priv->task->mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
		put_task_struct(priv->task);
	}
}
|
||||
|
||||
/* (no-MMU variant) seq_file next: step to the following vm_list entry. */
static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
{
	struct vm_list_struct *vml = _vml;

	++*pos;
	if (!vml)
		return NULL;
	return vml->next;
}
|
||||
|
||||
/* seq_file iterator for the no-MMU /proc/<pid>/maps */
static struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};
|
||||
|
||||
static int maps_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_maps_private *priv;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (priv) {
|
||||
priv->pid = proc_pid(inode);
|
||||
ret = seq_open(file, &proc_pid_maps_ops);
|
||||
if (!ret) {
|
||||
struct seq_file *m = file->private_data;
|
||||
m->private = priv;
|
||||
} else {
|
||||
kfree(priv);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* file_operations for the no-MMU /proc/<pid>/maps */
const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
|
||||
|
||||
670
fs/proc/vmcore.c
Normal file
670
fs/proc/vmcore.c
Normal file
@@ -0,0 +1,670 @@
|
||||
/*
|
||||
* fs/proc/vmcore.c Interface for accessing the crash
|
||||
* dump from the system's previous life.
|
||||
* Heavily borrowed from fs/proc/kcore.c
|
||||
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
|
||||
* Copyright (C) IBM Corporation, 2004. All rights reserved
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/user.h>
|
||||
#include <linux/a.out.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/elfcore.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/crash_dump.h>
|
||||
#include <linux/list.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;	/* size of the (merged) header buffer */

/* Total size of vmcore file. */
static u64 vmcore_size;

/* Stores the physical address of elf header of crash image.
 * ELFCORE_ADDR_MAX means "not set" until boot code fills it in. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

/* The /proc/vmcore directory entry (non-static: registered elsewhere). */
struct proc_dir_entry *proc_vmcore = NULL;
|
||||
|
||||
/* Reads a page from the oldmem device from given offset.
 *
 * Copies @count bytes of the previous kernel's memory starting at byte
 * position *ppos into @buf, advancing *ppos.  Returns the number of
 * bytes read, or a negative error.  @userbuf is passed through to
 * copy_oldmem_page() — presumably selecting user vs. kernel destination;
 * confirm against that helper's definition.
 */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	/* split *ppos into a page frame number and an offset within it */
	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);
	if (pfn > saved_max_pfn)
		return -EINVAL;

	/* copy page by page; only the first page may start mid-page */
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
|
||||
|
||||
/* Maps vmcore file offset to respective physical address in memroy. */
|
||||
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
|
||||
struct vmcore **m_ptr)
|
||||
{
|
||||
struct vmcore *m;
|
||||
u64 paddr;
|
||||
|
||||
list_for_each_entry(m, vc_list, list) {
|
||||
u64 start, end;
|
||||
start = m->offset;
|
||||
end = m->offset + m->size - 1;
|
||||
if (offset >= start && offset <= end) {
|
||||
paddr = m->paddr + offset - start;
|
||||
*m_ptr = m;
|
||||
return paddr;
|
||||
}
|
||||
}
|
||||
*m_ptr = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 *
 * The file layout is: merged ELF headers (elfcorebuf) followed by the
 * memory chunks on vmcore_list; this loop crosses chunk boundaries as
 * needed, copying at most one page per read_from_oldmem() call.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
				size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start, nr_bytes;
	struct vmcore *curr_m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* translate the file position into a physical address and chunk */
	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
		return -EINVAL;
	/* never cross a page boundary in one copy */
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	/* Calculate left bytes in current memory segment. */
	nr_bytes = (curr_m->size - (start - curr_m->paddr));
	if (tsz > nr_bytes)
		tsz = nr_bytes;

	while (buflen) {
		tmp = read_from_oldmem(buffer, tsz, &start, 1);
		if (tmp < 0)
			return tmp;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		/* ran off the end of this chunk: advance to the next one */
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/*EOF*/
			curr_m = list_entry(curr_m->list.next,
						struct vmcore, list);
			start = curr_m->paddr;
		}
		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
			tsz = buflen;
		/* Calculate left bytes in current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;
	}
	return acc;
}
|
||||
|
||||
/* open() for /proc/vmcore: nothing to set up. */
static int open_vmcore(struct inode *inode, struct file *filp)
{
	return 0;
}
|
||||
|
||||
/* file_operations for /proc/vmcore (read-only). */
const struct file_operations proc_vmcore_operations = {
	.read	= read_vmcore,
	.open	= open_vmcore,
};
|
||||
|
||||
/*
 * Allocate one zeroed vmcore list element; returns NULL on allocation
 * failure.  kzalloc() replaces the old kmalloc()+memset() pair — same
 * behavior, one call.
 */
static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}
|
||||
|
||||
/*
 * Compute the total vmcore file size for a 64-bit dump: the ELF header,
 * all program headers, and every segment's in-memory size.
 */
static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
	Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	u64 total = sizeof(Elf64_Ehdr) + ehdr->e_phnum * sizeof(Elf64_Phdr);
	int i;

	for (i = 0; i < ehdr->e_phnum; i++)
		total += phdr[i].p_memsz;
	return total;
}
|
||||
|
||||
/*
 * Compute the total vmcore file size for a 32-bit dump: the ELF header,
 * all program headers, and every segment's in-memory size.
 */
static u64 __init get_vmcore_size_elf32(char *elfptr)
{
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
	Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	u64 total = sizeof(Elf32_Ehdr) + ehdr->e_phnum * sizeof(Elf32_Phdr);
	int i;

	for (i = 0; i < ehdr->e_phnum; i++)
		total += phdr[i].p_memsz;
	return total;
}
|
||||
|
||||
/* Merges all the PT_NOTE headers into one.
 *
 * For every PT_NOTE segment: fetch its contents from old memory, measure
 * the real (used) note size, and record the chunk on @vc_list.  Then a
 * single merged PT_NOTE header replaces them all in the exported header
 * buffer, the surplus headers are squeezed out, and e_phnum is adjusted.
 * Returns 0 or a negative errno.
 */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		/* pull the whole notes segment out of the old kernel's memory */
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		/* walk the note records (name/desc padded to 4 bytes) to
		 * find how much of the segment is actually used */
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
|
||||
|
||||
/* Merges all the PT_NOTE headers into one.
 *
 * 32-bit counterpart of merge_note_headers_elf64(): record each PT_NOTE
 * chunk on @vc_list, emit one merged PT_NOTE header, squeeze out the
 * surplus headers and adjust e_phnum.  Returns 0 or a negative errno.
 */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		/* pull the whole notes segment out of the old kernel's memory */
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		/* walk the note records (name/desc padded to 4 bytes) to
		 * find how much of the segment is actually used */
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
|
||||
|
||||
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		/* NOTE(review): p_offset in the crash-prepared headers
		 * apparently carries the segment's physical address */
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}
|
||||
|
||||
/* 32-bit counterpart: add PT_LOAD chunks to the vmcore list and rewrite
 * the exported program headers' file offsets. */
static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		/* NOTE(review): p_offset in the crash-prepared headers
		 * apparently carries the segment's physical address */
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}
|
||||
|
||||
/* Sets offset fields of vmcore elements. */
|
||||
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
|
||||
struct list_head *vc_list)
|
||||
{
|
||||
loff_t vmcore_off;
|
||||
Elf64_Ehdr *ehdr_ptr;
|
||||
struct vmcore *m;
|
||||
|
||||
ehdr_ptr = (Elf64_Ehdr *)elfptr;
|
||||
|
||||
/* Skip Elf header and program headers. */
|
||||
vmcore_off = sizeof(Elf64_Ehdr) +
|
||||
(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);
|
||||
|
||||
list_for_each_entry(m, vc_list, list) {
|
||||
m->offset = vmcore_off;
|
||||
vmcore_off += m->size;
|
||||
}
|
||||
}
|
||||
|
||||
/* Sets offset fields of vmcore elements. */
|
||||
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
|
||||
struct list_head *vc_list)
|
||||
{
|
||||
loff_t vmcore_off;
|
||||
Elf32_Ehdr *ehdr_ptr;
|
||||
struct vmcore *m;
|
||||
|
||||
ehdr_ptr = (Elf32_Ehdr *)elfptr;
|
||||
|
||||
/* Skip Elf header and program headers. */
|
||||
vmcore_off = sizeof(Elf32_Ehdr) +
|
||||
(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);
|
||||
|
||||
list_for_each_entry(m, vc_list, list) {
|
||||
m->offset = vmcore_off;
|
||||
vmcore_off += m->size;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Read and sanity-check the ELF64 headers of the old kernel's crash dump,
 * then build the vmcore chunk list from them.
 *
 * Fills the global elfcorebuf/elfcorebuf_sz with a copy of the headers and
 * populates vmcore_list. Returns 0 on success or a negative errno; on
 * failure elfcorebuf is freed and reset to NULL.
 */
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header from the memory of the crashed kernel. */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		/* Fix: keep a space between the concatenated halves. */
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers (ELF header plus program header table). */
	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
	return 0;
fail:
	/* Single cleanup path; NULL the global so it can't dangle. */
	kfree(elfcorebuf);
	elfcorebuf = NULL;
	return rc;
}
|
||||
/*
 * Read and sanity-check the ELF32 headers of the old kernel's crash dump,
 * then build the vmcore chunk list from them.
 *
 * Fills the global elfcorebuf/elfcorebuf_sz with a copy of the headers and
 * populates vmcore_list. Returns 0 on success or a negative errno; on
 * failure elfcorebuf is freed and reset to NULL.
 */
static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header from the memory of the crashed kernel. */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		/* Fix: keep a space between the concatenated halves. */
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers (ELF header plus program header table). */
	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
	return 0;
fail:
	/* Single cleanup path; NULL the global so it can't dangle. */
	kfree(elfcorebuf);
	elfcorebuf = NULL;
	return rc;
}
|
||||
/* Peek at the dump's e_ident, then dispatch to the ELF32 or ELF64 parser
 * and record the resulting /proc/vmcore size. */
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr = elfcorehdr_addr;
	int rc;

	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		printk(KERN_WARNING "Warning: Core image elf header"
					" not found\n");
		return -EINVAL;
	}

	switch (e_ident[EI_CLASS]) {
	case ELFCLASS64:
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
		break;
	case ELFCLASS32:
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
		break;
	default:
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}
	return 0;
}
|
||||
/* Init function for vmcore module. */
|
||||
static int __init vmcore_init(void)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
/* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
|
||||
if (!(elfcorehdr_addr < ELFCORE_ADDR_MAX))
|
||||
return rc;
|
||||
rc = parse_crash_elf_headers();
|
||||
if (rc) {
|
||||
printk(KERN_WARNING "Kdump: vmcore not initialized\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Initialize /proc/vmcore size if proc is already up. */
|
||||
if (proc_vmcore)
|
||||
proc_vmcore->size = vmcore_size;
|
||||
return 0;
|
||||
}
|
||||
module_init(vmcore_init)
|
||||
Reference in New Issue
Block a user