Creation of Cybook 2416 (actually Gen4) repository

commit 76f20f4d40
authored by mlt, 2009-12-18 17:10:00 +00:00
committed by godzil
13791 changed files with 6812321 additions and 0 deletions

lib/Kconfig (new file, 114 lines)

@@ -0,0 +1,114 @@
#
# Library configuration
#
menu "Library routines"
config BITREVERSE
tristate
config CRC_CCITT
tristate "CRC-CCITT functions"
help
This option is provided for the case where no in-kernel-tree
modules require CRC-CCITT functions, but a module built outside
the kernel tree does. Such modules that use library CRC-CCITT
functions require M here.
config CRC16
tristate "CRC16 functions"
help
This option is provided for the case where no in-kernel-tree
modules require CRC16 functions, but a module built outside
the kernel tree does. Such modules that use library CRC16
functions require M here.
config CRC32
tristate "CRC32 functions"
default y
select BITREVERSE
help
This option is provided for the case where no in-kernel-tree
modules require CRC32 functions, but a module built outside the
kernel tree does. Such modules that use library CRC32 functions
require M here.
config LIBCRC32C
tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
help
This option is provided for the case where no in-kernel-tree
modules require CRC32c functions, but a module built outside the
kernel tree does. Such modules that use library CRC32c functions
require M here. See Castagnoli93.
The module will be called libcrc32c.
config AUDIT_GENERIC
bool
depends on AUDIT && !AUDIT_ARCH
default y
#
# compression support is select'ed if needed
#
config ZLIB_INFLATE
tristate
config ZLIB_DEFLATE
tristate
#
# Generic allocator support is selected if needed
#
config GENERIC_ALLOCATOR
boolean
#
# reed solomon support is select'ed if needed
#
config REED_SOLOMON
tristate
config REED_SOLOMON_ENC8
boolean
config REED_SOLOMON_DEC8
boolean
config REED_SOLOMON_ENC16
boolean
config REED_SOLOMON_DEC16
boolean
#
# Textsearch support is select'ed if needed
#
config TEXTSEARCH
boolean
config TEXTSEARCH_KMP
tristate
config TEXTSEARCH_BM
tristate
config TEXTSEARCH_FSM
tristate
#
# plist support is select'ed if needed
#
config PLIST
boolean
config HAS_IOMEM
boolean
depends on !NO_IOMEM
default y
config HAS_IOPORT
boolean
depends on HAS_IOMEM && !NO_IOPORT
default y
endmenu

lib/Kconfig.debug (new file, 460 lines)

@@ -0,0 +1,460 @@
config PRINTK_TIME
bool "Show timing information on printks"
depends on PRINTK
help
Selecting this option causes timing information to be
included in printk output. This allows you to measure
the interval between kernel operations, including bootup
operations. This is useful for identifying long delays
in kernel startup.
config ENABLE_MUST_CHECK
bool "Enable __must_check logic"
default y
help
Enable the __must_check logic in the kernel build. Disable this to
suppress the "warning: ignoring return value of 'foo', declared with
attribute warn_unused_result" messages.
config MAGIC_SYSRQ
bool "Magic SysRq key"
depends on !UML
help
If you say Y here, you will have some control over the system even
if the system crashes for example during kernel debugging (e.g., you
will be able to flush the buffer cache to disk, reboot the system
immediately or dump some status information). This is accomplished
by pressing various keys while holding SysRq (Alt+PrintScreen). It
also works on a serial console (on PC hardware at least), if you
send a BREAK and then within 5 seconds a command keypress. The
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
config UNUSED_SYMBOLS
bool "Enable unused/obsolete exported symbols"
default y if X86
help
Unused but exported symbols make the kernel needlessly bigger. For
that reason most of these unused exports will soon be removed. This
option is provided temporarily as a transition period in case
some external kernel module needs one of these symbols anyway. If you
encounter such a case in your module, consider if you are actually
using the right API. (rationale: since nobody in the kernel is using
this in a module, there is a pretty good chance it's actually the
wrong interface to use). If you really need the symbol, please send a
mail to the linux kernel mailing list mentioning the symbol and why
you really need it, and what the merge plan to the mainline kernel for
your module is.
config DEBUG_FS
bool "Debug Filesystem"
depends on SYSFS
help
debugfs is a virtual file system that kernel developers use to put
debugging files into. Enable this option to be able to read and
write to these files.
If unsure, say N.
config HEADERS_CHECK
bool "Run 'make headers_check' when building vmlinux"
depends on !UML
help
This option will extract the user-visible kernel headers whenever
building the kernel, and will run basic sanity checks on them to
ensure that exported files do not attempt to include files which
were not exported, etc.
If you're making modifications to header files which are
relevant for userspace, say 'Y', and check the headers
exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
your build tree), to make sure they're suitable.
config DEBUG_KERNEL
bool "Kernel debugging"
help
Say Y here if you are developing drivers or trying to debug and
identify kernel problems.
config DEBUG_SHIRQ
bool "Debug shared IRQ handlers"
depends on DEBUG_KERNEL && GENERIC_HARDIRQS
help
Enable this to generate a spurious interrupt as soon as a shared
interrupt handler is registered, and just before one is deregistered.
Drivers ought to be able to handle interrupts coming in at those
points; some don't and need to be caught.
config LOG_BUF_SHIFT
int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
range 12 21
default 17 if S390 || LOCKDEP
default 16 if X86_NUMAQ || IA64
default 15 if SMP
default 14
help
Select kernel log buffer size as a power of 2.
Defaults and Examples:
17 => 128 KB for S/390
16 => 64 KB for x86 NUMAQ or IA-64
15 => 32 KB for SMP
14 => 16 KB for uniprocessor
13 => 8 KB
12 => 4 KB
config DETECT_SOFTLOCKUP
bool "Detect Soft Lockups"
depends on DEBUG_KERNEL && !S390
default y
help
Say Y here to enable the kernel to detect "soft lockups",
which are bugs that cause the kernel to loop in kernel
mode for more than 10 seconds, without giving other tasks a
chance to run.
When a soft-lockup is detected, the kernel will print the
current stack trace (which you should report), but the
system will stay locked up. This feature has negligible
overhead.
(Note that "hard lockups" are separate type of bugs that
can be detected via the NMI-watchdog, on platforms that
support it.)
config SCHED_DEBUG
bool "Collect scheduler debugging info"
depends on DEBUG_KERNEL && PROC_FS
default y
help
If you say Y here, the /proc/sched_debug file will be provided
that can help debug the scheduler. The runtime overhead of this
option is minimal.
config SCHEDSTATS
bool "Collect scheduler statistics"
depends on DEBUG_KERNEL && PROC_FS
help
If you say Y here, additional code will be inserted into the
scheduler and related routines to collect statistics about
scheduler behavior and provide them in /proc/schedstat. These
stats may be useful for both tuning and debugging the scheduler.
If you aren't debugging the scheduler or trying to tune a specific
application, you can say N to avoid the very slight overhead
this adds.
config TIMER_STATS
bool "Collect kernel timers statistics"
depends on DEBUG_KERNEL && PROC_FS
help
If you say Y here, additional code will be inserted into the
timer routines to collect statistics about kernel timers being
reprogrammed. The statistics can be read from /proc/timer_stats.
The statistics collection is started by writing 1 to /proc/timer_stats,
writing 0 stops it. This feature is useful to collect information
about timer usage patterns in kernel and userspace. This feature
is lightweight if enabled in the kernel config but not activated
(it defaults to deactivated on bootup and will only be activated
if some application like powertop activates it explicitly).
config DEBUG_SLAB
bool "Debug slab memory allocations"
depends on DEBUG_KERNEL && SLAB
help
Say Y here to have the kernel do limited verification on memory
allocation as well as poisoning memory on free to catch use of freed
memory. This can make kmalloc/kfree-intensive workloads much slower.
config DEBUG_SLAB_LEAK
bool "Memory leak debugging"
depends on DEBUG_SLAB
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
default y
help
If you say Y here then the kernel will use a debug variant of the
commonly used smp_processor_id() function and will print warnings
if kernel code uses it in a preemption-unsafe way. Also, the kernel
will detect preemption count underflows.
config DEBUG_RT_MUTEXES
bool "RT Mutex debugging, deadlock detection"
depends on DEBUG_KERNEL && RT_MUTEXES
help
This allows rt mutex semantics violations and rt mutex related
deadlocks (lockups) to be detected and reported automatically.
config DEBUG_PI_LIST
bool
default y
depends on DEBUG_RT_MUTEXES
config RT_MUTEX_TESTER
bool "Built-in scriptable tester for rt-mutexes"
depends on DEBUG_KERNEL && RT_MUTEXES
help
This option enables a rt-mutex tester.
config DEBUG_SPINLOCK
bool "Spinlock and rw-lock debugging: basic checks"
depends on DEBUG_KERNEL
help
Say Y here and build SMP to catch missing spinlock initialization
and certain other kinds of spinlock errors commonly made. This is
best used in conjunction with the NMI watchdog so that spinlock
deadlocks are also debuggable.
config DEBUG_MUTEXES
bool "Mutex debugging: basic checks"
depends on DEBUG_KERNEL
help
This feature allows mutex semantics violations to be detected and
reported.
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select LOCKDEP
help
This feature will check whether any held lock (spinlock, rwlock,
mutex or rwsem) is incorrectly freed by the kernel, via any of the
memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
vfree(), etc.), whether a live lock is incorrectly reinitialized via
spin_lock_init()/mutex_init()/etc., or whether there is any lock
held during task exit.
config PROVE_LOCKING
bool "Lock debugging: prove locking correctness"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_LOCK_ALLOC
default n
help
This feature enables the kernel to prove that all locking
that occurs in the kernel runtime is mathematically
correct: that under no circumstance could an arbitrary (and
not yet triggered) combination of observed locking
sequences (on an arbitrary number of CPUs, running an
arbitrary number of tasks and interrupt contexts) cause a
deadlock.
In short, this feature enables the kernel to report locking
related deadlocks before they actually occur.
The proof does not depend on how hard and complex a
deadlock scenario would be to trigger: how many
participant CPUs, tasks and irq-contexts would be needed
for it to trigger. The proof also does not depend on
timing: if a race and a resulting deadlock is possible
theoretically (no matter how unlikely the race scenario
is), it will be proven so and will immediately be
reported by the kernel (once the event is observed that
makes the deadlock theoretically possible).
If a deadlock is impossible (i.e. the locking rules, as
observed by the kernel, are mathematically correct), the
kernel reports nothing.
NOTE: this feature can also be enabled for rwlocks, mutexes
and rwsems - in which case all dependencies between these
different locking variants are observed and mapped too, and
the proof of observed correctness is also maintained for an
arbitrary combination of these separate locking variants.
For more details, see Documentation/lockdep-design.txt.
config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select STACKTRACE
select FRAME_POINTER if !X86
select KALLSYMS
select KALLSYMS_ALL
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
help
If you say Y here, the lock dependency engine will do
additional runtime checks to debug itself, at the price
of more runtime overhead.
config TRACE_IRQFLAGS
depends on DEBUG_KERNEL
bool
default y
depends on TRACE_IRQFLAGS_SUPPORT
depends on PROVE_LOCKING
config DEBUG_SPINLOCK_SLEEP
bool "Spinlock debugging: sleep-inside-spinlock checking"
depends on DEBUG_KERNEL
help
If you say Y here, various routines which may sleep will become very
noisy if they are called with a spinlock held.
config DEBUG_LOCKING_API_SELFTESTS
bool "Locking API boot-time self-tests"
depends on DEBUG_KERNEL
help
Say Y here if you want the kernel to run a short self-test during
bootup. The self-test checks whether common types of locking bugs
are detected by debugging mechanisms or not. (If you disable
lock debugging then those bugs won't be detected, of course.)
The following locking APIs are covered: spinlocks, rwlocks,
mutexes and rwsems.
config STACKTRACE
bool
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
config DEBUG_KOBJECT
bool "kobject debugging"
depends on DEBUG_KERNEL
help
If you say Y here, some extra kobject debugging messages will be sent
to the syslog.
config DEBUG_HIGHMEM
bool "Highmem debugging"
depends on DEBUG_KERNEL && HIGHMEM
help
This option enables additional error checking for high memory systems.
Disable for production systems.
config DEBUG_BUGVERBOSE
bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
depends on BUG
depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || FRV || SUPERH || GENERIC_BUG
default !EMBEDDED
help
Say Y here to make BUG() panics output the file name and line number
of the BUG call as well as the EIP and oops trace. This aids
debugging but costs about 70-100K of memory.
config DEBUG_INFO
bool "Compile the kernel with debug info"
depends on DEBUG_KERNEL
help
If you say Y here the resulting kernel image will include
debugging info resulting in a larger kernel image.
Say Y here only if you plan to debug the kernel.
If unsure, say N.
config DEBUG_VM
bool "Debug VM"
depends on DEBUG_KERNEL
help
Enable this to turn on extended checks in the virtual-memory system
that may impact performance.
If unsure, say N.
config DEBUG_LIST
bool "Debug linked list manipulation"
depends on DEBUG_KERNEL
help
Enable this to turn on extended checks in the linked-list
walking routines.
If unsure, say N.
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH)
default y if DEBUG_INFO && UML
help
If you say Y here the resulting kernel image will be slightly larger
and slower, but it might give very useful debugging information on
some architectures or if you use external debuggers.
If you don't debug the kernel, you can say N.
config FORCED_INLINING
bool "Force gcc to inline functions marked 'inline'"
depends on DEBUG_KERNEL
default y
help
This option determines if the kernel forces gcc to inline the functions
developers have marked 'inline'. Doing so takes away freedom from gcc to
do what it thinks is best, which is desirable for the gcc 3.x series of
compilers. The gcc 4.x series has a rewritten inlining algorithm,
and disabling this option will generate a smaller kernel there.
Hopefully that algorithm is good enough that allowing gcc4 to make
the decision can become the default in the future; until then, this
option is here to test gcc for this.
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
default n
help
This option provides a kernel module that runs torture tests
on the RCU infrastructure. The kernel module may be built
after the fact on the running kernel to be tested, if desired.
Say Y here if you want RCU torture tests to start automatically
at boot time (you probably don't).
Say M if you want the RCU torture tests to build as a module.
Say N if you are unsure.
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_KERNEL
depends on KPROBES
default n
help
This module enables testing of the different dumping mechanisms by
inducing system failures at predefined crash points.
If you don't need it: say N
Choose M here to compile this code as a module. The module will be
called lkdtm.
Documentation on how to use the module can be found in
drivers/misc/lkdtm.c
config FAULT_INJECTION
bool "Fault-injection framework"
depends on DEBUG_KERNEL
help
Provide fault-injection framework.
For more details, see Documentation/fault-injection/.
config FAILSLAB
bool "Fault-injection capability for kmalloc"
depends on FAULT_INJECTION
help
Provide fault-injection capability for kmalloc.
config FAIL_PAGE_ALLOC
bool "Fault-injection capabilitiy for alloc_pages()"
depends on FAULT_INJECTION
help
Provide fault-injection capability for alloc_pages().
config FAIL_MAKE_REQUEST
bool "Fault-injection capability for disk IO"
depends on FAULT_INJECTION
help
Provide fault-injection capability for disk IO.
config FAULT_INJECTION_DEBUG_FS
bool "Debugfs entries for fault-injection capabilities"
depends on FAULT_INJECTION && SYSFS && DEBUG_FS
help
Enable configuration of fault-injection capabilities via debugfs.
config FAULT_INJECTION_STACKTRACE_FILTER
bool "stacktrace filter for fault-injection capabilities"
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
select STACKTRACE
select FRAME_POINTER
help
Provide stacktrace filter for fault-injection capabilities

lib/Makefile (new file, 73 lines)

@@ -0,0 +1,73 @@
#
# Makefile for some libs needed in the kernel.
#
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o \
idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \
sha1.o irq_regs.o reciprocal_div.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o kobject_uevent.o klist.o
obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
CFLAGS_kobject_uevent.o += -DDEBUG
endif
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_PLIST) += plist.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
endif
obj-$(CONFIG_BITREVERSE) += bitrev.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
hostprogs-y := gen_crc32table
clean-files := crc32table.h
$(obj)/crc32.o: $(obj)/crc32table.h
quiet_cmd_crc32 = GEN $@
cmd_crc32 = $< > $@
$(obj)/crc32table.h: $(obj)/gen_crc32table
$(call cmd,crc32)

lib/audit.c (new file, 55 lines)

@@ -0,0 +1,55 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
int audit_classify_syscall(int abi, unsigned syscall)
{
switch(syscall) {
case __NR_open:
return 2;
#ifdef __NR_openat
case __NR_openat:
return 3;
#endif
#ifdef __NR_socketcall
case __NR_socketcall:
return 4;
#endif
case __NR_execve:
return 5;
default:
return 0;
}
}
static int __init audit_classes_init(void)
{
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
return 0;
}
__initcall(audit_classes_init);

lib/bitmap.c (new file, 836 lines)

@@ -0,0 +1,836 @@
/*
* lib/bitmap.c
* Helper functions for bitmap.h.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
/*
* bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
*
* The possible unused bits in the last, partially used word
* of a bitmap are 'don't care'. The implementation makes
* no particular effort to keep them zero. It ensures that
* their value will not affect the results of any operation.
* The bitmap operations that return Boolean (bitmap_empty,
* for example) or scalar (bitmap_weight, for example) results
* carefully filter out these unused bits from impacting their
* results.
*
* These operations actually hold to a slightly stronger rule:
* if you don't input any bitmaps to these ops that have some
* unused bits set, then they won't output any set unused bits
* in output bitmaps.
*
* The byte ordering of bitmaps is more natural on little
* endian architectures. See the big-endian headers
* include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
* for the best explanations of this ordering.
*/
int __bitmap_empty(const unsigned long *bitmap, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap[k])
return 0;
if (bits % BITS_PER_LONG)
if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_empty);
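The "unused bits are don't-care" rule from the comment block above can be demonstrated in a few lines of userspace C. In this sketch the mask expression is re-derived locally and merely stands in for the kernel's BITMAP_LAST_WORD_MASK(); it is only valid when the bit count is not a multiple of the word size, which is exactly the case __bitmap_empty() guards with bits % BITS_PER_LONG:
/* Userspace sketch: a 10-bit bitmap with junk above bit 9 still counts
 * as empty, because the final-word mask filters the unused bits out. */
#include <stdio.h>

#define DEMO_BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))

int main(void)
{
	unsigned long map = 0xFC00UL;	/* bits 10-15 hold junk; bits 0-9 clear */
	int bits = 10;			/* logical bitmap size */
	unsigned long last_word_mask =
		~0UL >> (DEMO_BITS_PER_LONG - bits % DEMO_BITS_PER_LONG);
	printf("%d\n", !(map & last_word_mask));	/* prints 1: empty */
	return 0;
}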
int __bitmap_full(const unsigned long *bitmap, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (~bitmap[k])
return 0;
if (bits % BITS_PER_LONG)
if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_full);
int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
return 0;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_equal);
void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
if (bits % BITS_PER_LONG)
dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
}
EXPORT_SYMBOL(__bitmap_complement);
/**
* __bitmap_shift_right - logical right shift of the bits in a bitmap
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @bits : bitmap size, in bits
*
* Shifting right (dividing) means moving bits in the MS -> LS bit
* direction. Zeros are fed into the vacated MS positions and the
* LS bits shifted off the bottom are lost.
*/
void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits)
{
int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
unsigned long mask = (1UL << left) - 1;
for (k = 0; off + k < lim; ++k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take lower rem bits of
* word above and make them the top rem bits of result.
*/
if (!rem || off + k + 1 >= lim)
upper = 0;
else {
upper = src[off + k + 1];
if (off + k + 1 == lim - 1 && left)
upper &= mask;
}
lower = src[off + k];
if (left && off + k == lim - 1)
lower &= mask;
dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
if (left && k == lim - 1)
dst[k] &= mask;
}
if (off)
memset(&dst[lim - off], 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_right);
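The word-crossing arithmetic above is easiest to see with a concrete case. A minimal userspace sketch, assuming a 64-bit unsigned long: in a 128-bit bitmap with only bit 64 set, a right shift by 4 must move that bit to position 60, i.e. into the top rem bits of the lower word:
/* Userspace sketch of a non-word-aligned right shift (rem = 4). */
#include <stdio.h>

int main(void)
{
	unsigned long src[2] = { 0UL, 1UL };	/* 128-bit map, only bit 64 set */
	unsigned long dst[2];
	int rem = 4;
	/* lower rem bits of the word above become the top rem bits of dst[0] */
	dst[0] = src[1] << (64 - rem) | src[0] >> rem;
	dst[1] = src[1] >> rem;
	printf("%#lx %#lx\n", dst[0], dst[1]);	/* 0x1000000000000000 0 */
	return 0;
}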
/**
* __bitmap_shift_left - logical left shift of the bits in a bitmap
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @bits : bitmap size, in bits
*
* Shifting left (multiplying) means moving bits in the LS -> MS
* direction. Zeros are fed into the vacated LS bit positions
* and those MS bits shifted off the top are lost.
*/
void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits)
{
int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
for (k = lim - off - 1; k >= 0; --k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take upper rem bits of
* word below and make them the bottom rem bits of result.
*/
if (rem && k > 0)
lower = src[k - 1];
else
lower = 0;
upper = src[k];
if (left && k == lim - 1)
upper &= (1UL << left) - 1;
dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
if (left && k + off == lim - 1)
dst[k + off] &= (1UL << left) - 1;
}
if (off)
memset(dst, 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_left);
void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] & bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_and);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] | bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_or);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] ^ bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_xor);
void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] & ~bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_andnot);
int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
return 1;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 1;
return 0;
}
EXPORT_SYMBOL(__bitmap_intersects);
int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & ~bitmap2[k])
return 0;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_subset);
int __bitmap_weight(const unsigned long *bitmap, int bits)
{
int k, w = 0, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
if (bits % BITS_PER_LONG)
w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
return w;
}
EXPORT_SYMBOL(__bitmap_weight);
/*
* Bitmap printing & parsing functions: first version by Bill Irwin,
* second version by Paul Jackson, third by Joe Korty.
*/
#define CHUNKSZ 32
#define nbits_to_hold_value(val) fls(val)
#define unhex(c) (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
#define BASEDEC 10 /* fancier cpuset lists input in decimal */
/**
* bitmap_scnprintf - convert bitmap to an ASCII hex string.
* @buf: byte buffer into which string is placed
* @buflen: reserved size of @buf, in bytes
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
*
* Exactly @nmaskbits bits are displayed. Hex digits are grouped into
* comma-separated sets of eight digits per set.
*/
int bitmap_scnprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
{
int i, word, bit, len = 0;
unsigned long val;
const char *sep = "";
int chunksz;
u32 chunkmask;
chunksz = nmaskbits & (CHUNKSZ - 1);
if (chunksz == 0)
chunksz = CHUNKSZ;
i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ;
for (; i >= 0; i -= CHUNKSZ) {
chunkmask = ((1ULL << chunksz) - 1);
word = i / BITS_PER_LONG;
bit = i % BITS_PER_LONG;
val = (maskp[word] >> bit) & chunkmask;
len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep,
(chunksz+3)/4, val);
chunksz = CHUNKSZ;
sep = ",";
}
return len;
}
EXPORT_SYMBOL(bitmap_scnprintf);
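For a concrete picture of the output format, here is a userspace sketch hardcoded to a 40-bit, two-chunk bitmap: a leading 8-bit chunk printed with (chunksz + 3) / 4 = 2 hex digits, then one full 32-bit chunk of eight digits:
/* Userspace sketch: bits 0 and 35 of a 40-bit map print as "08,00000001". */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t map = (1ULL << 35) | 1ULL;
	int nmaskbits = 40;
	int chunksz = nmaskbits % 32;		/* width of leading chunk: 8 */
	printf("%0*llx,%08llx\n", (chunksz + 3) / 4,
	       (unsigned long long)((map >> 32) & ((1ULL << chunksz) - 1)),
	       (unsigned long long)(map & 0xffffffffULL));
	return 0;
}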
/**
* __bitmap_parse - convert an ASCII hex string into a bitmap.
* @buf: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @is_user: location of buffer, 0 indicates kernel space
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
* Commas group hex digits into chunks. Each chunk defines exactly 32
* bits of the resultant bitmask. No chunk may specify a value larger
* than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
* then leading 0-bits are prepended. %-EINVAL is returned for illegal
* characters and for grouping errors such as "1,,5", ",44", "," and "".
* Leading and trailing whitespace accepted, but not embedded whitespace.
*/
int __bitmap_parse(const char *buf, unsigned int buflen,
int is_user, unsigned long *maskp,
int nmaskbits)
{
int c, old_c, totaldigits, ndigits, nchunks, nbits;
u32 chunk;
const char __user *ubuf = buf;
bitmap_zero(maskp, nmaskbits);
nchunks = nbits = totaldigits = c = 0;
do {
chunk = ndigits = 0;
/* Get the next chunk of the bitmap */
while (buflen) {
old_c = c;
if (is_user) {
if (__get_user(c, ubuf++))
return -EFAULT;
}
else
c = *buf++;
buflen--;
if (isspace(c))
continue;
/*
* If the last character was a space and the current
* character isn't '\0', we've got embedded whitespace.
* This is a no-no, so throw an error.
*/
if (totaldigits && c && isspace(old_c))
return -EINVAL;
/* A '\0' or a ',' signal the end of the chunk */
if (c == '\0' || c == ',')
break;
if (!isxdigit(c))
return -EINVAL;
/*
* Make sure there are at least 4 free bits in 'chunk'.
* If not, this hexdigit will overflow 'chunk', so
* throw an error.
*/
if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
return -EOVERFLOW;
chunk = (chunk << 4) | unhex(c);
ndigits++; totaldigits++;
}
if (ndigits == 0)
return -EINVAL;
if (nchunks == 0 && chunk == 0)
continue;
__bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits);
*maskp |= chunk;
nchunks++;
nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
if (nbits > nmaskbits)
return -EOVERFLOW;
} while (buflen && c == ',');
return 0;
}
EXPORT_SYMBOL(__bitmap_parse);
/**
* bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
*
* @ubuf: pointer to user buffer containing string.
* @ulen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
* Wrapper for __bitmap_parse(), providing it with user buffer.
*
* We cannot have this as an inline function in bitmap.h because it needs
* linux/uaccess.h to get the access_ok() declaration and this causes
* cyclic dependencies.
*/
int bitmap_parse_user(const char __user *ubuf,
unsigned int ulen, unsigned long *maskp,
int nmaskbits)
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
}
EXPORT_SYMBOL(bitmap_parse_user);
/*
* bscnl_emit(buf, buflen, rbot, rtop, len)
*
* Helper routine for bitmap_scnlistprintf(). Write decimal number
* or range to buf, suppressing output past buf+buflen, with optional
* comma-prefix. Return len of what would be written to buf, if it
* all fit.
*/
static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
{
if (len > 0)
len += scnprintf(buf + len, buflen - len, ",");
if (rbot == rtop)
len += scnprintf(buf + len, buflen - len, "%d", rbot);
else
len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop);
return len;
}
/**
* bitmap_scnlistprintf - convert bitmap to list format ASCII string
* @buf: byte buffer into which string is placed
* @buflen: reserved size of @buf, in bytes
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
*
* Output format is a comma-separated list of decimal numbers and
* ranges. Consecutively set bits are shown as two hyphen-separated
* decimal numbers, the smallest and largest bit numbers set in
* the range. Output format is compatible with the format
* accepted as input by bitmap_parselist().
*
* The return value is the number of characters which would be
* generated for the given input, excluding the trailing '\0', as
* per ISO C99.
*/
int bitmap_scnlistprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
{
int len = 0;
/* current bit is 'cur', most recently seen range is [rbot, rtop] */
int cur, rbot, rtop;
rbot = cur = find_first_bit(maskp, nmaskbits);
while (cur < nmaskbits) {
rtop = cur;
cur = find_next_bit(maskp, nmaskbits, cur+1);
if (cur >= nmaskbits || cur > rtop + 1) {
len = bscnl_emit(buf, buflen, rbot, rtop, len);
rbot = cur;
}
}
return len;
}
EXPORT_SYMBOL(bitmap_scnlistprintf);
/**
* bitmap_parselist - convert list format ASCII string to bitmap
* @bp: read nul-terminated user string from this buffer
* @maskp: write resulting mask here
* @nmaskbits: number of bits in mask to be written
*
* Input format is a comma-separated list of decimal numbers and
* ranges. Consecutively set bits are shown as two hyphen-separated
* decimal numbers, the smallest and largest bit numbers set in
* the range.
*
* Returns 0 on success, -errno on invalid input strings.
* Error values:
* %-EINVAL: second number in range smaller than first
* %-EINVAL: invalid character in string
* %-ERANGE: bit number specified too large for mask
*/
int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
{
unsigned a, b;
bitmap_zero(maskp, nmaskbits);
do {
if (!isdigit(*bp))
return -EINVAL;
b = a = simple_strtoul(bp, (char **)&bp, BASEDEC);
if (*bp == '-') {
bp++;
if (!isdigit(*bp))
return -EINVAL;
b = simple_strtoul(bp, (char **)&bp, BASEDEC);
}
if (!(a <= b))
return -EINVAL;
if (b >= nmaskbits)
return -ERANGE;
while (a <= b) {
set_bit(a, maskp);
a++;
}
if (*bp == ',')
bp++;
} while (*bp != '\0' && *bp != '\n');
return 0;
}
EXPORT_SYMBOL(bitmap_parselist);
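The accepted list format is easy to exercise outside the kernel. A minimal sketch of the same parse loop, with a plain unsigned long standing in for a kernel bitmap and strtoul() for simple_strtoul():
/* Userspace sketch of the list format: "0,3-5" sets bits 0, 3, 4, 5. */
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

static int parselist_demo(const char *bp, unsigned long *maskp)
{
	*maskp = 0;
	do {
		if (!isdigit((unsigned char)*bp))
			return -1;		/* -EINVAL */
		unsigned long a = strtoul(bp, (char **)&bp, 10), b = a;
		if (*bp == '-') {
			bp++;
			if (!isdigit((unsigned char)*bp))
				return -1;
			b = strtoul(bp, (char **)&bp, 10);
		}
		if (a > b)
			return -1;		/* -EINVAL */
		if (b >= 8 * sizeof(*maskp))
			return -2;		/* -ERANGE */
		for (; a <= b; a++)
			*maskp |= 1UL << a;	/* set_bit() */
		if (*bp == ',')
			bp++;
	} while (*bp != '\0' && *bp != '\n');
	return 0;
}

int main(void)
{
	unsigned long mask;
	if (parselist_demo("0,3-5", &mask) == 0)
		printf("%#lx\n", mask);		/* 0x39 */
	return 0;
}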
/**
* bitmap_pos_to_ord(buf, pos, bits)
* @buf: pointer to a bitmap
* @pos: a bit position in @buf (0 <= @pos < @bits)
* @bits: number of valid bit positions in @buf
*
* Map the bit at position @pos in @buf (of length @bits) to the
* ordinal of which set bit it is. If it is not set or if @pos
* is not a valid bit position, map to -1.
*
* If for example, just bits 4 through 7 are set in @buf, then @pos
* values 4 through 7 will get mapped to 0 through 3, respectively,
* and other @pos values will get mapped to -1. When @pos value 7
* gets mapped to (returns) @ord value 3 in this example, that means
* that bit 7 is the 3rd (starting with 0th) set bit in @buf.
*
* The bit positions 0 through @bits - 1 are valid positions in @buf.
*/
static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
{
int i, ord;
if (pos < 0 || pos >= bits || !test_bit(pos, buf))
return -1;
i = find_first_bit(buf, bits);
ord = 0;
while (i < pos) {
i = find_next_bit(buf, bits, i + 1);
ord++;
}
BUG_ON(i != pos);
return ord;
}
/**
* bitmap_ord_to_pos(buf, ord, bits)
* @buf: pointer to bitmap
* @ord: ordinal bit position (n-th set bit, n >= 0)
* @bits: number of valid bit positions in @buf
*
* Map the ordinal offset of bit @ord in @buf to its position in @buf.
* Value of @ord should be in range 0 <= @ord < weight(buf), else
* results are undefined.
*
* If for example, just bits 4 through 7 are set in @buf, then @ord
* values 0 through 3 will get mapped to 4 through 7, respectively,
* and all other @ord values return undefined values. When @ord value 3
* gets mapped to (returns) @pos value 7 in this example, that means
* that the 3rd set bit (starting with 0th) is at position 7 in @buf.
*
* The bit positions 0 through @bits - 1 are valid positions in @buf.
*/
static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
{
int pos = 0;
if (ord >= 0 && ord < bits) {
int i;
for (i = find_first_bit(buf, bits);
i < bits && ord > 0;
i = find_next_bit(buf, bits, i + 1))
ord--;
if (i < bits && ord == 0)
pos = i;
}
return pos;
}
/**
* bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
* @dst: remapped result
* @src: subset to be remapped
* @old: defines domain of map
* @new: defines range of map
* @bits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
* to the n-th set bit in @new. In the more general case, allowing
* for the possibility that the weight 'w' of @new is less than the
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
* If either of the @old and @new bitmaps are empty, or if @src and
* @dst point to the same location, then this routine copies @src
* to @dst.
*
* The positions of unset bits in @old are mapped to themselves
* (the identity map).
*
* Apply the above specified mapping to @src, placing the result in
* @dst, clearing any bits previously set in @dst.
*
* For example, lets say that @old has bits 4 through 7 set, and
* @new has bits 12 through 15 set. This defines the mapping of bit
* position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
* bit positions unchanged. So if say @src comes into this routine
* with bits 1, 5 and 7 set, then @dst should leave with bits 1,
* 13 and 15 set.
*/
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new,
int bits)
{
int oldbit, w;
if (dst == src) /* following doesn't handle inplace remaps */
return;
bitmap_zero(dst, bits);
w = bitmap_weight(new, bits);
for (oldbit = find_first_bit(src, bits);
oldbit < bits;
oldbit = find_next_bit(src, bits, oldbit + 1)) {
int n = bitmap_pos_to_ord(old, oldbit, bits);
if (n < 0 || w == 0)
set_bit(oldbit, dst); /* identity map */
else
set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
}
}
EXPORT_SYMBOL(bitmap_remap);
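The worked example in the comment can be checked with a userspace sketch. It is restricted to single-word bitmaps, and the two helpers mirror bitmap_pos_to_ord() and bitmap_ord_to_pos() above:
/* Userspace sketch of bitmap_remap() on single-word bitmaps:
 * old = bits 4-7, new = bits 12-15, src = bits 1, 5, 7.
 * Expected result: bits 1, 13, 15, i.e. 0xa002. */
#include <stdio.h>

#define WORD_BITS ((int)(8 * sizeof(unsigned long)))

static int pos_to_ord(unsigned long map, int pos)
{
	if (!(map & (1UL << pos)))
		return -1;			/* not set: identity-map it */
	int ord = 0;
	for (int i = 0; i < pos; i++)
		if (map & (1UL << i))
			ord++;
	return ord;
}

static int ord_to_pos(unsigned long map, int ord)
{
	for (int i = 0; i < WORD_BITS; i++)
		if ((map & (1UL << i)) && ord-- == 0)
			return i;
	return 0;
}

int main(void)
{
	unsigned long old_map = 0xF0UL, new_map = 0xF000UL;
	unsigned long src = (1UL << 1) | (1UL << 5) | (1UL << 7), dst = 0;
	int w = 4;				/* bitmap_weight(new) */
	for (int b = 0; b < WORD_BITS; b++) {
		if (!(src & (1UL << b)))
			continue;
		int n = pos_to_ord(old_map, b);
		dst |= 1UL << (n < 0 ? b : ord_to_pos(new_map, n % w));
	}
	printf("%#lx\n", dst);			/* 0xa002 */
	return 0;
}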
/**
* bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
* @oldbit: bit position to be mapped
* @old: defines domain of map
* @new: defines range of map
* @bits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
* to the n-th set bit in @new. In the more general case, allowing
* for the possibility that the weight 'w' of @new is less than the
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
* The positions of unset bits in @old are mapped to themselves
* (the identity map).
*
* Apply the above specified mapping to bit position @oldbit, returning
* the new bit position.
*
* For example, lets say that @old has bits 4 through 7 set, and
* @new has bits 12 through 15 set. This defines the mapping of bit
* position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
* bit positions unchanged. So if say @oldbit is 5, then this routine
* returns 13.
*/
int bitmap_bitremap(int oldbit, const unsigned long *old,
const unsigned long *new, int bits)
{
int w = bitmap_weight(new, bits);
int n = bitmap_pos_to_ord(old, oldbit, bits);
if (n < 0 || w == 0)
return oldbit;
else
return bitmap_ord_to_pos(new, n % w, bits);
}
EXPORT_SYMBOL(bitmap_bitremap);
/*
* Common code for bitmap_*_region() routines.
* bitmap: array of unsigned longs corresponding to the bitmap
* pos: the beginning of the region
* order: region size (log base 2 of number of bits)
* reg_op: operation(s) to perform on that region of bitmap
*
* Can set, verify and/or release a region of bits in a bitmap,
* depending on which combination of REG_OP_* flag bits is set.
*
* A region of a bitmap is a sequence of bits in the bitmap, of
* some size '1 << order' (a power of two), aligned to that same
* '1 << order' power of two.
*
* Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
* Returns 0 for all other operations and flag combinations.
*/
enum {
REG_OP_ISFREE, /* true if region is all zero bits */
REG_OP_ALLOC, /* set all bits in region */
REG_OP_RELEASE, /* clear all bits in region */
};
static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
int nbits_reg; /* number of bits in region */
int index; /* index first long of region in bitmap */
int offset; /* bit offset region in bitmap[index] */
int nlongs_reg; /* num longs spanned by region in bitmap */
int nbitsinlong; /* num bits of region in each spanned long */
unsigned long mask; /* bitmask for one long of region */
int i; /* scans bitmap by longs */
int ret = 0; /* return value */
/*
* Either nlongs_reg == 1 (for small orders that fit in one long)
* or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
*/
nbits_reg = 1 << order;
index = pos / BITS_PER_LONG;
offset = pos - (index * BITS_PER_LONG);
nlongs_reg = BITS_TO_LONGS(nbits_reg);
nbitsinlong = min(nbits_reg, BITS_PER_LONG);
/*
* Can't do "mask = (1UL << nbitsinlong) - 1", as that
* overflows if nbitsinlong == BITS_PER_LONG.
*/
mask = (1UL << (nbitsinlong - 1));
mask += mask - 1;
mask <<= offset;
switch (reg_op) {
case REG_OP_ISFREE:
for (i = 0; i < nlongs_reg; i++) {
if (bitmap[index + i] & mask)
goto done;
}
ret = 1; /* all bits in region free (zero) */
break;
case REG_OP_ALLOC:
for (i = 0; i < nlongs_reg; i++)
bitmap[index + i] |= mask;
break;
case REG_OP_RELEASE:
for (i = 0; i < nlongs_reg; i++)
bitmap[index + i] &= ~mask;
break;
}
done:
return ret;
}
/**
* bitmap_find_free_region - find a contiguous aligned mem region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @bits: number of bits in the bitmap
* @order: region size (log base 2 of number of bits) to find
*
* Find a region of free (zero) bits in a @bitmap of @bits bits and
* allocate them (set them to one). Only consider regions of length
* a power (@order) of two, aligned to that power of two, which
* makes the search algorithm much faster.
*
* Return the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
{
int pos; /* scans bitmap by regions of size order */
for (pos = 0; pos < bits; pos += (1 << order))
if (__reg_op(bitmap, pos, order, REG_OP_ISFREE))
break;
if (pos == bits)
return -ENOMEM;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
return pos;
}
EXPORT_SYMBOL(bitmap_find_free_region);
/**
* bitmap_release_region - release allocated bitmap region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @pos: beginning of bit region to release
* @order: region size (log base 2 of number of bits) to release
*
* This is the complement to bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*
* No return value.
*/
void bitmap_release_region(unsigned long *bitmap, int pos, int order)
{
__reg_op(bitmap, pos, order, REG_OP_RELEASE);
}
EXPORT_SYMBOL(bitmap_release_region);
/**
* bitmap_allocate_region - allocate bitmap region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @pos: beginning of bit region to allocate
* @order: region size (log base 2 of number of bits) to allocate
*
* Allocate (set bits in) a specified region of a bitmap.
*
* Return 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
{
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
return -EBUSY;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
return 0;
}
EXPORT_SYMBOL(bitmap_allocate_region);
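The region operations only ever consider power-of-two-sized, identically aligned runs, which is what keeps the search cheap. A single-word userspace sketch of the REG_OP_ISFREE / REG_OP_ALLOC sequence used by bitmap_find_free_region():
/* Userspace sketch: order-2 regions are 4 bits wide and 4-bit aligned,
 * so with bits 0-3 busy the first free region starts at bit 4. */
#include <stdio.h>

int main(void)
{
	unsigned long map = 0x0FUL;		/* bits 0-3 already allocated */
	int order = 2, nbits_reg = 1 << order;
	unsigned long mask = (1UL << nbits_reg) - 1;
	for (int pos = 0; pos < (int)(8 * sizeof(map)); pos += nbits_reg) {
		if (!(map & (mask << pos))) {	/* REG_OP_ISFREE */
			map |= mask << pos;	/* REG_OP_ALLOC */
			printf("%d\n", pos);	/* prints 4 */
			break;
		}
	}
	return 0;
}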

lib/bitrev.c (new file, 58 lines)

@@ -0,0 +1,58 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/bitrev.h>
MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
MODULE_DESCRIPTION("Bit ordering reversal functions");
MODULE_LICENSE("GPL");
const u8 byte_rev_table[256] = {
0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
};
EXPORT_SYMBOL_GPL(byte_rev_table);
static __always_inline u16 bitrev16(u16 x)
{
return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
}
/**
* bitrev32 - reverse the order of bits in a u32 value
* @x: value to be bit-reversed
*/
u32 bitrev32(u32 x)
{
return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16);
}
EXPORT_SYMBOL(bitrev32);
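byte_rev_table[] is nothing more than the obvious bit-by-bit reversal, tabulated. A userspace sketch that recomputes one entry (0x01 must reverse to 0x80, matching the second table entry):
/* Userspace sketch: recompute byte_rev_table[0x01], which must be 0x80. */
#include <stdio.h>
#include <stdint.h>

static uint8_t bitrev8_slow(uint8_t x)
{
	uint8_t r = 0;
	for (int i = 0; i < 8; i++)
		if (x & (1u << i))
			r |= (uint8_t)(1u << (7 - i));
	return r;
}

int main(void)
{
	printf("%#04x\n", bitrev8_slow(0x01));	/* 0x80, matches the table */
	return 0;
}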

lib/bug.c (new file, 163 lines)

@@ -0,0 +1,163 @@
/*
Generic support for BUG()
This respects the following config options:
CONFIG_BUG - emit BUG traps. Nothing happens without this.
CONFIG_GENERIC_BUG - enable this code.
CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
(though they're generally always on).
CONFIG_GENERIC_BUG is set by each architecture using this code.
To use this, your architecture must:
1. Set up the config options:
- Enable CONFIG_GENERIC_BUG if CONFIG_BUG
2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
- Define HAVE_ARCH_BUG
- Implement BUG() to generate a faulting instruction
- NOTE: struct bug_entry does not have "file" or "line" entries
when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
the values accordingly.
3. Implement the trap
- In the illegal instruction trap handler (typically), verify
that the fault was in kernel mode, and call report_bug()
- report_bug() will return whether it was a false alarm, a warning,
or an actual bug.
- You must implement the is_valid_bugaddr(bugaddr) callback which
returns true if the eip is a real kernel address, and it points
to the expected BUG trap instruction.
Jeremy Fitzhardinge <jeremy@goop.org> 2006
*/
#include <linux/list.h>
#include <linux/module.h>
#include <linux/bug.h>
extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
#ifdef CONFIG_MODULES
static LIST_HEAD(module_bug_list);
static const struct bug_entry *module_find_bug(unsigned long bugaddr)
{
struct module *mod;
list_for_each_entry(mod, &module_bug_list, bug_list) {
const struct bug_entry *bug = mod->bug_table;
unsigned i;
for (i = 0; i < mod->num_bugs; ++i, ++bug)
if (bugaddr == bug->bug_addr)
return bug;
}
return NULL;
}
int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
char *secstrings;
unsigned int i;
mod->bug_table = NULL;
mod->num_bugs = 0;
/* Find the __bug_table section, if present */
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
continue;
mod->bug_table = (void *) sechdrs[i].sh_addr;
mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
break;
}
/*
* Strictly speaking this should have a spinlock to protect against
* traversals, but since we only traverse on BUG()s, a spinlock
* could potentially lead to deadlock and thus be counter-productive.
*/
list_add(&mod->bug_list, &module_bug_list);
return 0;
}
void module_bug_cleanup(struct module *mod)
{
list_del(&mod->bug_list);
}
#else
static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
{
return NULL;
}
#endif
const struct bug_entry *find_bug(unsigned long bugaddr)
{
const struct bug_entry *bug;
for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
if (bugaddr == bug->bug_addr)
return bug;
return module_find_bug(bugaddr);
}
enum bug_trap_type report_bug(unsigned long bugaddr)
{
const struct bug_entry *bug;
const char *file;
unsigned line, warning;
if (!is_valid_bugaddr(bugaddr))
return BUG_TRAP_TYPE_NONE;
bug = find_bug(bugaddr);
printk(KERN_EMERG "------------[ cut here ]------------\n");
file = NULL;
line = 0;
warning = 0;
if (bug) {
#ifdef CONFIG_DEBUG_BUGVERBOSE
file = bug->file;
line = bug->line;
#endif
warning = (bug->flags & BUGFLAG_WARNING) != 0;
}
if (warning) {
/* this is a WARN_ON rather than BUG/BUG_ON */
if (file)
printk(KERN_ERR "Badness at %s:%u\n",
file, line);
else
printk(KERN_ERR "Badness at %p "
"[verbose debug info unavailable]\n",
(void *)bugaddr);
dump_stack();
return BUG_TRAP_TYPE_WARN;
}
if (file)
printk(KERN_CRIT "kernel BUG at %s:%u!\n",
file, line);
else
printk(KERN_CRIT "Kernel BUG at %p "
"[verbose debug info unavailable]\n",
(void *)bugaddr);
return BUG_TRAP_TYPE_BUG;
}
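find_bug() and module_find_bug() above are both plain linear scans over bug_entry records keyed by the trapping address. A userspace sketch of that lookup, with struct bug_entry reduced to the fields the search and the verbose report touch; the table contents below are invented for illustration:
/* Userspace sketch of the lookup report_bug() performs. */
#include <stdio.h>

struct bug_entry {
	unsigned long bug_addr;
	const char *file;
	unsigned line;
};

static const struct bug_entry bug_table[] = {
	{ 0x1000, "drivers/foo.c", 42 },	/* hypothetical entries */
	{ 0x2000, "fs/bar.c", 7 },
};

static const struct bug_entry *find_bug_demo(unsigned long bugaddr)
{
	for (unsigned i = 0; i < sizeof(bug_table) / sizeof(bug_table[0]); i++)
		if (bug_table[i].bug_addr == bugaddr)
			return &bug_table[i];
	return NULL;
}

int main(void)
{
	const struct bug_entry *bug = find_bug_demo(0x2000);
	if (bug)
		printf("kernel BUG at %s:%u!\n", bug->file, bug->line);
	return 0;
}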

lib/bust_spinlocks.c (new file, 30 lines)

@@ -0,0 +1,30 @@
/*
* lib/bust_spinlocks.c
*
* Provides a minimal bust_spinlocks for architectures which don't have one of their own.
*
* bust_spinlocks() clears any spinlocks which would prevent oops, die(), BUG()
* and panic() information from reaching the user.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <linux/vt_kern.h>
void __attribute__((weak)) bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
} else {
#ifdef CONFIG_VT
unblank_screen();
#endif
oops_in_progress = 0;
wake_up_klogd();
}
}

lib/cmdline.c (new file, 153 lines)

@@ -0,0 +1,153 @@
/*
* linux/lib/cmdline.c
* Helper functions generally used for parsing kernel command line
* and module options.
*
* Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*
* GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
/*
* If a hyphen was found in get_option, this will handle the
* range of numbers, M-N. This will expand the range and insert
* the values[M, M+1, ..., N] into the ints array in get_options.
*/
static int get_range(char **str, int *pint)
{
int x, inc_counter, upper_range;
(*str)++;
upper_range = simple_strtol((*str), NULL, 0);
inc_counter = upper_range - *pint;
for (x = *pint; x < upper_range; x++)
*pint++ = x;
return inc_counter;
}
/**
* get_option - Parse integer from an option string
* @str: option string
* @pint: (output) integer value parsed from @str
*
* Read an int from an option string; if available accept a subsequent
* comma as well.
*
* Return values:
* 0 - no int in string
* 1 - int found, no subsequent comma
* 2 - int found including a subsequent comma
* 3 - hyphen found to denote a range
*/
int get_option (char **str, int *pint)
{
char *cur = *str;
if (!cur || !(*cur))
return 0;
*pint = simple_strtol (cur, str, 0);
if (cur == *str)
return 0;
if (**str == ',') {
(*str)++;
return 2;
}
if (**str == '-')
return 3;
return 1;
}
/**
* get_options - Parse a string into a list of integers
* @str: String to be parsed
* @nints: size of integer array
* @ints: integer array
*
* This function parses a string containing a comma-separated
* list of integers, a hyphen-separated range of _positive_ integers,
* or a combination of both. The parse halts when the array is
* full, or when no more numbers can be retrieved from the
* string.
*
* Return value is the character in the string which caused
* the parse to end (typically a null terminator, if @str is
* completely parseable).
*/
char *get_options(const char *str, int nints, int *ints)
{
int res, i = 1;
while (i < nints) {
res = get_option ((char **)&str, ints + i);
if (res == 0)
break;
if (res == 3) {
int range_nums;
range_nums = get_range((char **)&str, ints + i);
if (range_nums < 0)
break;
/*
* Decrement the result by one to leave out the
* last number in the range. The next iteration
* will handle the upper number in the range
*/
i += (range_nums - 1);
}
i++;
if (res == 1)
break;
}
ints[0] = i - 1;
return (char *)str;
}
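The interplay between get_option() and get_range() is subtle: get_range() deliberately parses the upper bound with a NULL end pointer and expands only M..N-1, leaving the upper number in the string for the next get_option() pass (hence the "decrement the result by one" comment above). A userspace sketch mirroring both routines, with strtol() standing in for simple_strtol():
/* Userspace sketch: parsing "1,3-5,8" yields ints[0] = 5 (the count)
 * and ints[1..5] = 1, 3, 4, 5, 8. */
#include <stdio.h>
#include <stdlib.h>

static int get_range_demo(char **str, int *pint)
{
	(*str)++;				/* skip the '-' */
	int upper = (int)strtol(*str, NULL, 0);	/* NULL: upper is reparsed later */
	int n = upper - *pint;
	for (int x = *pint; x < upper; x++)
		*pint++ = x;			/* expand M .. N-1 */
	return n;
}

static int get_option_demo(char **str, int *pint)
{
	char *cur = *str;
	if (!cur || !*cur)
		return 0;
	*pint = (int)strtol(cur, str, 0);
	if (cur == *str)
		return 0;
	if (**str == ',') {
		(*str)++;
		return 2;
	}
	return **str == '-' ? 3 : 1;
}

int main(void)
{
	char buf[] = "1,3-5,8", *str = buf;
	int ints[8], i = 1, res;

	while (i < 8) {
		res = get_option_demo(&str, ints + i);
		if (res == 0)
			break;
		if (res == 3)	/* leave the upper bound for the next pass */
			i += get_range_demo(&str, ints + i) - 1;
		i++;
		if (res == 1)
			break;
	}
	ints[0] = i - 1;
	for (int k = 0; k <= ints[0]; k++)
		printf("%d ", ints[k]);		/* 5 1 3 4 5 8 */
	printf("\n");
	return 0;
}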
/**
* memparse - parse a string with mem suffixes into a number
* @ptr: Where parse begins
* @retptr: (output) Pointer to next char after parse completes
*
* Parses a string into a number. The number stored at @ptr is
* potentially suffixed with %K (for kilobytes, or 1024 bytes),
* %M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
* 1073741824 bytes). If the number is suffixed with K, M, or G, then
* the return value is the number multiplied by one kilobyte, one
* megabyte, or one gigabyte, respectively.
*/
unsigned long long memparse (char *ptr, char **retptr)
{
unsigned long long ret = simple_strtoull (ptr, retptr, 0);
switch (**retptr) {
case 'G':
case 'g':
ret <<= 10;
case 'M':
case 'm':
ret <<= 10;
case 'K':
case 'k':
ret <<= 10;
(*retptr)++;
default:
break;
}
return ret;
}
EXPORT_SYMBOL(memparse);
EXPORT_SYMBOL(get_option);
EXPORT_SYMBOL(get_options);
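memparse()'s suffix handling relies on deliberate case fall-through: a 'G' falls through 'M' and 'K', shifting three times in total. A userspace sketch of the same switch, with strtoull() standing in for the kernel's simple_strtoull():
/* Userspace sketch of memparse()'s suffix handling. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long memparse_demo(const char *ptr, char **retptr)
{
	unsigned long long ret = strtoull(ptr, retptr, 0);

	switch (**retptr) {
	case 'G':
	case 'g':
		ret <<= 10;		/* fall through */
	case 'M':
	case 'm':
		ret <<= 10;		/* fall through */
	case 'K':
	case 'k':
		ret <<= 10;
		(*retptr)++;
	default:
		break;
	}
	return ret;
}

int main(void)
{
	char *end;
	printf("%llu %llu\n", memparse_demo("16K", &end),
	       memparse_demo("1G", &end));	/* 16384 1073741824 */
	return 0;
}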

lib/cpumask.c (new file, 28 lines)

@@ -0,0 +1,28 @@
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/module.h>
int __first_cpu(const cpumask_t *srcp)
{
return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
}
EXPORT_SYMBOL(__first_cpu);
int __next_cpu(int n, const cpumask_t *srcp)
{
return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
}
EXPORT_SYMBOL(__next_cpu);
int __any_online_cpu(const cpumask_t *mask)
{
int cpu;
for_each_cpu_mask(cpu, *mask) {
if (cpu_online(cpu))
break;
}
return cpu;
}
EXPORT_SYMBOL(__any_online_cpu);

lib/crc-ccitt.c (new file, 69 lines)

@@ -0,0 +1,69 @@
/*
* linux/lib/crc-ccitt.c
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-ccitt.h>
/*
* This mysterious table is just the CRC of each possible byte. It can be
* computed using the standard bit-at-a-time methods. The polynomial can
* be seen in entry 128, 0x8408. This corresponds to x^0 + x^5 + x^12.
* Add the implicit x^16, and you have the standard CRC-CCITT.
*/
u16 const crc_ccitt_table[256] = {
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
EXPORT_SYMBOL(crc_ccitt_table);
/**
* crc_ccitt - recompute the CRC for the data buffer
* @crc: previous CRC value
* @buffer: data pointer
* @len: number of bytes in the buffer
*/
u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
{
while (len--)
crc = crc_ccitt_byte(crc, *buffer++);
return crc;
}
EXPORT_SYMBOL(crc_ccitt);
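/*
* Illustrative sketch (not part of the original file): since @crc
* carries the running state, a buffer can be checksummed in pieces and
* must match the one-shot result. 0xffff is the conventional seed for
* this reflected CCITT variant.
*/
static u16 crc_ccitt_split_example(const u8 *buf, size_t len)
{
u16 crc = crc_ccitt(0xffff, buf, len / 2);
return crc_ccitt(crc, buf + len / 2, len - len / 2);
}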
MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");

67
lib/crc16.c Normal file
View File

@@ -0,0 +1,67 @@
/*
* crc16.c
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc16.h>
/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
u16 const crc16_table[256] = {
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};
EXPORT_SYMBOL(crc16_table);
/**
* crc16 - compute the CRC-16 for the data buffer
* @crc: previous CRC value
* @buffer: data pointer
* @len: number of bytes in the buffer
*
* Returns the updated CRC value.
*/
u16 crc16(u16 crc, u8 const *buffer, size_t len)
{
while (len--)
crc = crc16_byte(crc, *buffer++);
return crc;
}
EXPORT_SYMBOL(crc16);
MODULE_DESCRIPTION("CRC16 calculations");
MODULE_LICENSE("GPL");

501
lib/crc32.c Normal file
View File

@@ -0,0 +1,501 @@
/*
* Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
* Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
* Code was from the public domain, copyright abandoned. Code was
* subsequently included in the kernel, thus was re-licensed under the
* GNU GPL v2.
*
* Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
* Same crc32 function was used in 5 other places in the kernel.
* I made one version, and deleted the others.
* There are various incantations of crc32(). Some use a seed of 0 or ~0.
* Some xor at the end with ~0. The generic crc32() function takes
* seed as an argument, and doesn't xor at the end. Then individual
* users can do whatever they need.
* drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
* fs/jffs2 uses seed 0, doesn't xor with ~0.
* fs/partitions/efi.c uses seed ~0, xor's with ~0.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include "crc32defs.h"
#if CRC_LE_BITS == 8
#define tole(x) __constant_cpu_to_le32(x)
#define tobe(x) __constant_cpu_to_be32(x)
#else
#define tole(x) (x)
#define tobe(x) (x)
#endif
#include "crc32table.h"
MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Ethernet CRC32 calculations");
MODULE_LICENSE("GPL");
/**
* crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
* @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
* other uses, or the previous crc32 value if computing incrementally.
* @p: pointer to buffer over which CRC is run
* @len: length of buffer @p
*/
u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len);
#if CRC_LE_BITS == 1
/*
* In fact, the table-based code will work in this case, but it can be
* simplified by inlining the table in ?: form.
*/
u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
{
int i;
while (len--) {
crc ^= *p++;
for (i = 0; i < 8; i++)
crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
}
return crc;
}
#else /* Table-based approach */
u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_LE_BITS == 8
const u32 *b =(u32 *)p;
const u32 *tab = crc32table_le;
# ifdef __LITTLE_ENDIAN
# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
# else
# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
# endif
crc = __cpu_to_le32(crc);
/* Align it */
if(unlikely(((long)b)&3 && len)){
do {
u8 *p = (u8 *)b;
DO_CRC(*p++);
b = (void *)p;
} while ((--len) && ((long)b)&3 );
}
if(likely(len >= 4)){
/* load data 32 bits wide, xor data 32 bits wide. */
size_t save_len = len & 3;
len = len >> 2;
--b; /* use pre increment below(*++b) for speed */
do {
crc ^= *++b;
DO_CRC(0);
DO_CRC(0);
DO_CRC(0);
DO_CRC(0);
} while (--len);
b++; /* point to next byte(s) */
len = save_len;
}
/* And the last few bytes */
if(len){
do {
u8 *p = (u8 *)b;
DO_CRC(*p++);
b = (void *)p;
} while (--len);
}
return __le32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC
# elif CRC_LE_BITS == 4
while (len--) {
crc ^= *p++;
crc = (crc >> 4) ^ crc32table_le[crc & 15];
crc = (crc >> 4) ^ crc32table_le[crc & 15];
}
return crc;
# elif CRC_LE_BITS == 2
while (len--) {
crc ^= *p++;
crc = (crc >> 2) ^ crc32table_le[crc & 3];
crc = (crc >> 2) ^ crc32table_le[crc & 3];
crc = (crc >> 2) ^ crc32table_le[crc & 3];
crc = (crc >> 2) ^ crc32table_le[crc & 3];
}
return crc;
# endif
}
#endif
/**
* crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
* @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
* other uses, or the previous crc32 value if computing incrementally.
* @p: pointer to buffer over which CRC is run
* @len: length of buffer @p
*/
u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len);
#if CRC_BE_BITS == 1
/*
* In fact, the table-based code will work in this case, but it can be
* simplified by inlining the table in ?: form.
*/
u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
{
int i;
while (len--) {
crc ^= *p++ << 24;
for (i = 0; i < 8; i++)
crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
}
return crc;
}
#else /* Table-based approach */
u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_BE_BITS == 8
const u32 *b =(u32 *)p;
const u32 *tab = crc32table_be;
# ifdef __LITTLE_ENDIAN
# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
# else
# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
# endif
crc = __cpu_to_be32(crc);
/* Align it */
if(unlikely(((long)b)&3 && len)){
do {
u8 *p = (u8 *)b;
DO_CRC(*p++);
b = (u32 *)p;
} while ((--len) && ((long)b)&3 );
}
if(likely(len >= 4)){
/* load data 32 bits wide, xor data 32 bits wide. */
size_t save_len = len & 3;
len = len >> 2;
--b; /* use pre increment below(*++b) for speed */
do {
crc ^= *++b;
DO_CRC(0);
DO_CRC(0);
DO_CRC(0);
DO_CRC(0);
} while (--len);
b++; /* point to next byte(s) */
len = save_len;
}
/* And the last few bytes */
if(len){
do {
u8 *p = (u8 *)b;
DO_CRC(*p++);
b = (void *)p;
} while (--len);
}
return __be32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC
# elif CRC_BE_BITS == 4
while (len--) {
crc ^= *p++ << 24;
crc = (crc << 4) ^ crc32table_be[crc >> 28];
crc = (crc << 4) ^ crc32table_be[crc >> 28];
}
return crc;
# elif CRC_BE_BITS == 2
while (len--) {
crc ^= *p++ << 24;
crc = (crc << 2) ^ crc32table_be[crc >> 30];
crc = (crc << 2) ^ crc32table_be[crc >> 30];
crc = (crc << 2) ^ crc32table_be[crc >> 30];
crc = (crc << 2) ^ crc32table_be[crc >> 30];
}
return crc;
# endif
}
#endif
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(crc32_be);
/*
* A brief CRC tutorial.
*
* A CRC is a long-division remainder. You add the CRC to the message,
* and the whole thing (message+CRC) is a multiple of the given
* CRC polynomial. To check the CRC, you can either check that the
* CRC matches the recomputed value, *or* you can check that the
* remainder computed on the message+CRC is 0. This latter approach
* is used by a lot of hardware implementations, and is why so many
* protocols put the end-of-frame flag after the CRC.
*
* It's actually the same long division you learned in school, except that
* - We're working in binary, so the digits are only 0 and 1, and
* - When dividing polynomials, there are no carries. Rather than add and
* subtract, we just xor. Thus, we tend to get a bit sloppy about
* the difference between adding and subtracting.
*
* A 32-bit CRC polynomial is actually 33 bits long. But since it's
* 33 bits long, bit 32 is always going to be set, so usually the CRC
* is written in hex with the most significant bit omitted. (If you're
* familiar with the IEEE 754 floating-point format, it's the same idea.)
*
* Note that a CRC is computed over a string of *bits*, so you have
* to decide on the endianness of the bits within each byte. To get
* the best error-detecting properties, this should correspond to the
* order they're actually sent. For example, standard RS-232 serial is
* little-endian; the most significant bit (sometimes used for parity)
* is sent last. And when appending a CRC word to a message, you should
* do it in the right order, matching the endianness.
*
* Just like with ordinary division, the remainder is always smaller than
* the divisor (the CRC polynomial) you're dividing by. Each step of the
* division, you take one more digit (bit) of the dividend and append it
* to the current remainder. Then you figure out the appropriate multiple
* of the divisor to subtract to bring the remainder back into range.
* In binary, it's easy - it has to be either 0 or 1, and to make the
* XOR cancel, it's just a copy of bit 32 of the remainder.
*
* When computing a CRC, we don't care about the quotient, so we can
* throw the quotient bit away, but subtract the appropriate multiple of
* the polynomial from the remainder and we're back to where we started,
* ready to process the next bit.
*
* A big-endian CRC written this way would be coded like:
* for (i = 0; i < input_bits; i++) {
* multiple = remainder & 0x80000000 ? CRCPOLY : 0;
* remainder = (remainder << 1 | next_input_bit()) ^ multiple;
* }
* Notice how, to get at bit 32 of the shifted remainder, we look
* at bit 31 of the remainder *before* shifting it.
*
* But also notice how the next_input_bit() bits we're shifting into
* the remainder don't actually affect any decision-making until
* 32 bits later. Thus, the first 32 cycles of this are pretty boring.
* Also, to add the CRC to a message, we need a 32-bit-long hole for it at
* the end, so we have to add 32 extra cycles shifting in zeros at the
* end of every message.
*
* So the standard trick is to rearrange merging in the next_input_bit()
* until the moment it's needed. Then the first 32 cycles can be precomputed,
* and merging in the final 32 zero bits to make room for the CRC can be
* skipped entirely.
* This changes the code to:
* for (i = 0; i < input_bits; i++) {
* remainder ^= next_input_bit() << 31;
* multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
* remainder = (remainder << 1) ^ multiple;
* }
* With this optimization, the little-endian code is simpler:
* for (i = 0; i < input_bits; i++) {
* remainder ^= next_input_bit();
* multiple = (remainder & 1) ? CRCPOLY : 0;
* remainder = (remainder >> 1) ^ multiple;
* }
*
* Note that the other details of endianness have been hidden in CRCPOLY
* (which must be bit-reversed) and next_input_bit().
*
* However, as long as next_input_bit is returning the bits in a sensible
* order, we can actually do the merging 8 or more bits at a time rather
* than one bit at a time:
* for (i = 0; i < input_bytes; i++) {
* remainder ^= next_input_byte() << 24;
* for (j = 0; j < 8; j++) {
* multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
* remainder = (remainder << 1) ^ multiple;
* }
* }
* Or in little-endian:
* for (i = 0; i < input_bytes; i++) {
* remainder ^= next_input_byte();
* for (j = 0; j < 8; j++) {
* multiple = (remainder & 1) ? CRCPOLY : 0;
* remainder = (remainder << 1) ^ multiple;
* }
* }
* If the input is a multiple of 32 bits, you can even XOR in a 32-bit
* word at a time and increase the inner loop count to 32.
*
* You can also mix and match the two loop styles, for example doing the
* bulk of a message byte-at-a-time and adding bit-at-a-time processing
* for any fractional bytes at the end.
*
* The only remaining optimization is to the byte-at-a-time table method.
* Here, rather than just shifting one bit of the remainder to decide
* on the correct multiple to subtract, we can shift a byte at a time.
* This produces a 40-bit (rather than a 33-bit) intermediate remainder,
* but again the multiple of the polynomial to subtract depends only on
* the high bits, the high 8 bits in this case.
*
* The multiple we need in that case is the low 32 bits of a 40-bit
* value whose high 8 bits are given, and which is a multiple of the
* generator polynomial. This is simply the CRC-32 of the given
* one-byte message.
*
* Two more details: normally, appending zero bits to a message which
* is already a multiple of a polynomial produces a larger multiple of that
* polynomial. To enable a CRC to detect this condition, it's common to
* invert the CRC before appending it. This makes the remainder of the
* message+crc come out not as zero, but some fixed non-zero value.
*
* The same problem applies to zero bits prepended to the message, and
* a similar solution is used. Instead of starting with a remainder of
* 0, an initial remainder of all ones is used. As long as you start
* the same way on decoding, it doesn't make a difference.
*/
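/*
* Illustrative sketch (not part of the original file): the optimized
* little-endian loop from the tutorial above, one byte at a time.
* Seeding with ~0 and inverting the result gives the standard CRC-32;
* e.g. the nine bytes "123456789" come out as 0xcbf43926.
*/
static u32 crc32_le_bitwise_example(unsigned char const *p, size_t len)
{
u32 rem = ~0; /* all-ones initial remainder, per the tutorial */
int j;
while (len--) {
rem ^= *p++;
for (j = 0; j < 8; j++)
rem = (rem >> 1) ^ ((rem & 1) ? CRCPOLY_LE : 0);
}
return ~rem; /* invert before appending, per the tutorial */
}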
#ifdef UNITTEST
#include <stdlib.h>
#include <stdio.h>
#if 0 /*Not used at present */
static void
buf_dump(char const *prefix, unsigned char const *buf, size_t len)
{
fputs(prefix, stdout);
while (len--)
printf(" %02x", *buf++);
putchar('\n');
}
#endif
static void bytereverse(unsigned char *buf, size_t len)
{
while (len--) {
unsigned char x = bitrev8(*buf);
*buf++ = x;
}
}
static void random_garbage(unsigned char *buf, size_t len)
{
while (len--)
*buf++ = (unsigned char) random();
}
#if 0 /* Not used at present */
static void store_le(u32 x, unsigned char *buf)
{
buf[0] = (unsigned char) x;
buf[1] = (unsigned char) (x >> 8);
buf[2] = (unsigned char) (x >> 16);
buf[3] = (unsigned char) (x >> 24);
}
#endif
static void store_be(u32 x, unsigned char *buf)
{
buf[0] = (unsigned char) (x >> 24);
buf[1] = (unsigned char) (x >> 16);
buf[2] = (unsigned char) (x >> 8);
buf[3] = (unsigned char) x;
}
/*
* This checks that CRC(buf + CRC(buf)) = 0, and that
* CRC commutes with bit-reversal. This has the side effect
* of bytewise bit-reversing the input buffer, and returns
* the CRC of the reversed buffer.
*/
static u32 test_step(u32 init, unsigned char *buf, size_t len)
{
u32 crc1, crc2;
size_t i;
crc1 = crc32_be(init, buf, len);
store_be(crc1, buf + len);
crc2 = crc32_be(init, buf, len + 4);
if (crc2)
printf("\nCRC cancellation fail: 0x%08x should be 0\n",
crc2);
for (i = 0; i <= len + 4; i++) {
crc2 = crc32_be(init, buf, i);
crc2 = crc32_be(crc2, buf + i, len + 4 - i);
if (crc2)
printf("\nCRC split fail: 0x%08x\n", crc2);
}
/* Now swap it around for the other test */
bytereverse(buf, len + 4);
init = bitrev32(init);
crc2 = bitrev32(crc1);
if (crc1 != bitrev32(crc2))
printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n",
crc1, crc2, bitrev32(crc2));
crc1 = crc32_le(init, buf, len);
if (crc1 != crc2)
printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1,
crc2);
crc2 = crc32_le(init, buf, len + 4);
if (crc2)
printf("\nCRC cancellation fail: 0x%08x should be 0\n",
crc2);
for (i = 0; i <= len + 4; i++) {
crc2 = crc32_le(init, buf, i);
crc2 = crc32_le(crc2, buf + i, len + 4 - i);
if (crc2)
printf("\nCRC split fail: 0x%08x\n", crc2);
}
return crc1;
}
#define SIZE 64
#define INIT1 0
#define INIT2 0
int main(void)
{
unsigned char buf1[SIZE + 4];
unsigned char buf2[SIZE + 4];
unsigned char buf3[SIZE + 4];
int i, j;
u32 crc1, crc2, crc3;
for (i = 0; i <= SIZE; i++) {
printf("\rTesting length %d...", i);
fflush(stdout);
random_garbage(buf1, i);
random_garbage(buf2, i);
for (j = 0; j < i; j++)
buf3[j] = buf1[j] ^ buf2[j];
crc1 = test_step(INIT1, buf1, i);
crc2 = test_step(INIT2, buf2, i);
/* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */
crc3 = test_step(INIT1 ^ INIT2, buf3, i);
if (crc3 != (crc1 ^ crc2))
printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n",
crc3, crc1, crc2);
}
printf("\nAll test complete. No failures expected.\n");
return 0;
}
#endif /* UNITTEST */

32
lib/crc32defs.h Normal file
View File

@@ -0,0 +1,32 @@
/*
* There are multiple 16-bit CRC polynomials in common use, but this is
* *the* standard CRC-32 polynomial, first popularized by Ethernet.
* x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
*/
#define CRCPOLY_LE 0xedb88320
#define CRCPOLY_BE 0x04c11db7
/* How many bits at a time to use. Requires a table of 4<<CRC_xx_BITS bytes. */
/* For less performance-sensitive code, use 4 */
#ifndef CRC_LE_BITS
# define CRC_LE_BITS 8
#endif
#ifndef CRC_BE_BITS
# define CRC_BE_BITS 8
#endif
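/* Worked example: CRC_LE_BITS == 8 costs 4 << 8 = 1024 bytes (256 u32
* entries); CRC_LE_BITS == 4 needs only 4 << 4 = 64 bytes. */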
/*
* Little-endian CRC computation. Used with serial bit streams sent
* lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
*/
#if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1
# error CRC_LE_BITS must be a power of 2 between 1 and 8
#endif
/*
* Big-endian CRC computation. Used with serial bit streams sent
* msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
*/
#if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1
# error CRC_BE_BITS must be a power of 2 between 1 and 8
#endif

36
lib/ctype.c Normal file
View File

@@ -0,0 +1,36 @@
/*
* linux/lib/ctype.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/ctype.h>
#include <linux/module.h>
unsigned char _ctype[] = {
_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
EXPORT_SYMBOL(_ctype);
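/*
* Illustrative sketch (not part of the original file): the table above
* is what the ctype.h macros index. Note the Latin-1 rows: 0xC0 is
* flagged _U, so isupper(0xC0) is true (function name hypothetical).
*/
static int ctype_example(void)
{
return isdigit('7') && isupper('A') && isupper(0xC0); /* all true */
}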

45
lib/debug_locks.c Normal file
View File

@@ -0,0 +1,45 @@
/*
* lib/debug_locks.c
*
* Generic place for common debugging facilities for various locks:
* spinlocks, rwlocks, mutexes and rwsems.
*
* Started by Ingo Molnar:
*
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
/*
* We want to turn all lock-debugging facilities on/off at once,
* via a global flag. The reason is that once a single bug has been
* detected and reported, there might be cascade of followup bugs
* that would just muddy the log. So we report the first one and
* shut up after that.
*/
int debug_locks = 1;
/*
* The locking-testsuite uses <debug_locks_silent> to get a
* 'silent failure': nothing is printed to the console when
* a locking bug is detected.
*/
int debug_locks_silent;
/*
* Generic 'turn off all lock debugging' function:
*/
int debug_locks_off(void)
{
if (xchg(&debug_locks, 0)) {
if (!debug_locks_silent) {
console_verbose();
return 1;
}
}
return 0;
}
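/*
* Illustrative sketch (not part of the original file): callers guard
* their report with debug_locks_off() so that only the first detected
* bug is printed (function name hypothetical).
*/
static void report_lock_bug_example(void)
{
if (!debug_locks_off())
return; /* debugging already off, or silent mode: say nothing */
printk(KERN_ERR "example: lock still held at exit\n");
}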

35
lib/dec_and_lock.c Normal file
View File

@@ -0,0 +1,35 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
/*
* This is an implementation of the notion of "decrement a
* reference count, and return locked if it decremented to zero".
*
* NOTE NOTE NOTE! This is _not_ equivalent to
*
* if (atomic_dec_and_test(&atomic)) {
* spin_lock(&lock);
* return 1;
* }
* return 0;
*
* because the spin-lock and the decrement must be
* "atomic".
*/
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
#ifdef CONFIG_SMP
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
return 0;
#endif
/* Otherwise do it the slow way */
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
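/*
* Illustrative sketch (not part of the original file): the canonical
* caller pattern, dropping the last reference and unlinking the object
* under the list lock (types and names hypothetical; assumes
* <linux/list.h> and <linux/slab.h> are also included).
*/
struct example_obj {
atomic_t refcount;
struct list_head node;
};
static DEFINE_SPINLOCK(example_list_lock);
static void example_put(struct example_obj *obj)
{
if (atomic_dec_and_lock(&obj->refcount, &example_list_lock)) {
list_del(&obj->node);
spin_unlock(&example_list_lock);
kfree(obj);
}
}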

300
lib/devres.c Normal file
View File

@@ -0,0 +1,300 @@
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/module.h>
static void devm_ioremap_release(struct device *dev, void *res)
{
iounmap(*(void __iomem **)res);
}
static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
return *(void **)res == match_data;
}
/**
* devm_ioremap - Managed ioremap()
* @dev: Generic device to remap IO address for
* @offset: BUS offset to map
* @size: Size of map
*
* Managed ioremap(). Map is automatically unmapped on driver detach.
*/
void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
unsigned long size)
{
void __iomem **ptr, *addr;
ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
addr = ioremap(offset, size);
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
} else
devres_free(ptr);
return addr;
}
EXPORT_SYMBOL(devm_ioremap);
/**
* devm_ioremap_nocache - Managed ioremap_nocache()
* @dev: Generic device to remap IO address for
* @offset: BUS offset to map
* @size: Size of map
*
* Managed ioremap_nocache(). Map is automatically unmapped on driver
* detach.
*/
void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
unsigned long size)
{
void __iomem **ptr, *addr;
ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
addr = ioremap_nocache(offset, size);
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
} else
devres_free(ptr);
return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);
/**
* devm_iounmap - Managed iounmap()
* @dev: Generic device to unmap for
* @addr: Address to unmap
*
* Managed iounmap(). @addr must have been mapped using devm_ioremap*().
*/
void devm_iounmap(struct device *dev, void __iomem *addr)
{
iounmap(addr);
WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
(void *)addr));
}
EXPORT_SYMBOL(devm_iounmap);
#ifdef CONFIG_HAS_IOPORT
/*
* Generic iomap devres
*/
static void devm_ioport_map_release(struct device *dev, void *res)
{
ioport_unmap(*(void __iomem **)res);
}
static int devm_ioport_map_match(struct device *dev, void *res,
void *match_data)
{
return *(void **)res == match_data;
}
/**
* devm_ioport_map - Managed ioport_map()
* @dev: Generic device to map ioport for
* @port: Port to map
* @nr: Number of ports to map
*
* Managed ioport_map(). Map is automatically unmapped on driver
* detach.
*/
void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
unsigned int nr)
{
void __iomem **ptr, *addr;
ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
addr = ioport_map(port, nr);
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
} else
devres_free(ptr);
return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
/**
* devm_ioport_unmap - Managed ioport_unmap()
* @dev: Generic device to unmap for
* @addr: Address to unmap
*
* Managed ioport_unmap(). @addr must have been mapped using
* devm_ioport_map().
*/
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
ioport_unmap(addr);
WARN_ON(devres_destroy(dev, devm_ioport_map_release,
devm_ioport_map_match, (void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#ifdef CONFIG_PCI
/*
* PCI iomap devres
*/
#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
struct pcim_iomap_devres {
void __iomem *table[PCIM_IOMAP_MAX];
};
static void pcim_iomap_release(struct device *gendev, void *res)
{
struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
struct pcim_iomap_devres *this = res;
int i;
for (i = 0; i < PCIM_IOMAP_MAX; i++)
if (this->table[i])
pci_iounmap(dev, this->table[i]);
}
/**
* pcim_iomap_table - access iomap allocation table
* @pdev: PCI device to access iomap table for
*
* Access the iomap allocation table for @pdev. If the iomap table
* doesn't exist and @pdev is managed, it will be allocated. All
* iomaps recorded in the iomap table are automatically unmapped on
* driver detach.
*
* This function might sleep when the table is first allocated, but it
* can be safely called without context and is guaranteed to succeed
* once allocated.
*/
void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
{
struct pcim_iomap_devres *dr, *new_dr;
dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
if (dr)
return dr->table;
new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
if (!new_dr)
return NULL;
dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
/**
* pcim_iomap - Managed pcim_iomap()
* @pdev: PCI device to iomap for
* @bar: BAR to iomap
* @maxlen: Maximum length of iomap
*
* Managed pci_iomap(). Map is automatically unmapped on driver
* detach.
*/
void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
void __iomem **tbl;
BUG_ON(bar >= PCIM_IOMAP_MAX);
tbl = (void __iomem **)pcim_iomap_table(pdev);
if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
return NULL;
tbl[bar] = pci_iomap(pdev, bar, maxlen);
return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
/**
* pcim_iounmap - Managed pci_iounmap()
* @pdev: PCI device to iounmap for
* @addr: Address to unmap
*
* Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
*/
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
void __iomem **tbl;
int i;
pci_iounmap(pdev, addr);
tbl = (void __iomem **)pcim_iomap_table(pdev);
BUG_ON(!tbl);
for (i = 0; i < PCIM_IOMAP_MAX; i++)
if (tbl[i] == addr) {
tbl[i] = NULL;
return;
}
WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);
/**
* pcim_iomap_regions - Request and iomap PCI BARs
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to request and iomap
* @name: Name used when requesting regions
*
* Request and iomap regions specified by @mask.
*/
int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
{
void __iomem * const *iomap;
int i, rc;
iomap = pcim_iomap_table(pdev);
if (!iomap)
return -ENOMEM;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
unsigned long len;
if (!(mask & (1 << i)))
continue;
rc = -EINVAL;
len = pci_resource_len(pdev, i);
if (!len)
goto err_inval;
rc = pci_request_region(pdev, i, name);
if (rc)
goto err_inval;
rc = -ENOMEM;
if (!pcim_iomap(pdev, i, 0))
goto err_region;
}
return 0;
err_region:
pci_release_region(pdev, i);
err_inval:
while (--i >= 0) {
if (!(mask & (1 << i)))
continue;
pcim_iounmap(pdev, iomap[i]);
pci_release_region(pdev, i);
}
return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
#endif
#endif
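/*
* Illustrative sketch (not part of the original file): a typical probe
* path using the managed mapping above; no explicit iounmap() is
* needed, it happens on driver detach (names and addresses
* hypothetical).
*/
static int example_probe(struct device *dev)
{
void __iomem *regs = devm_ioremap(dev, 0x80000000UL, 0x1000);
if (!regs)
return -ENOMEM;
/* use readl()/writel() on regs for the life of the driver */
return 0;
}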

83
lib/div64.c Normal file
View File

@@ -0,0 +1,83 @@
/*
* Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
*
* Based on former do_div() implementation from asm-parisc/div64.h:
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*
*
* Generic C version of 64bit/32bit division and modulo, with
* 64bit result and 32bit remainder.
*
* The fast case for (n>>32 == 0) is handled inline by do_div().
*
* Code generated for this function might be very inefficient
* for some CPUs. __div64_32() can be overridden by linking arch-specific
* assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <asm/div64.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
uint64_t rem = *n;
uint64_t b = base;
uint64_t res, d = 1;
uint32_t high = rem >> 32;
/* Reduce the thing a bit first */
res = 0;
if (high >= base) {
high /= base;
res = (uint64_t) high << 32;
rem -= (uint64_t) (high*base) << 32;
}
while ((int64_t)b > 0 && b < rem) {
b = b+b;
d = d+d;
}
do {
if (rem >= b) {
rem -= b;
res += d;
}
b >>= 1;
d >>= 1;
} while (d);
*n = res;
return rem;
}
EXPORT_SYMBOL(__div64_32);
/* 64bit divisor, dividend and result. dynamic precision */
uint64_t div64_64(uint64_t dividend, uint64_t divisor)
{
uint32_t d = divisor;
if (divisor > 0xffffffffULL) {
unsigned int shift = fls(divisor >> 32);
d = divisor >> shift;
dividend >>= shift;
}
/* avoid 64 bit division if possible */
if (dividend >> 32)
do_div(dividend, d);
else
dividend = (uint32_t) dividend / d;
return dividend;
}
EXPORT_SYMBOL(div64_64);
#endif /* BITS_PER_LONG == 32 */
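/*
* Illustrative sketch (not part of the original file): do_div() divides
* a u64 in place and returns the 32-bit remainder; on 32-bit machines
* it calls __div64_32() above when the dividend's high word is nonzero.
*/
static inline uint32_t ns_to_ms_example(uint64_t *ns)
{
return do_div(*ns, 1000000); /* *ns becomes ms; returns leftover ns */
}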

15
lib/dump_stack.c Normal file
View File

@@ -0,0 +1,15 @@
/*
* Provide a default dump_stack() function for architectures
* which don't implement their own.
*/
#include <linux/kernel.h>
#include <linux/module.h>
void dump_stack(void)
{
printk(KERN_NOTICE
"This architecture does not implement dump_stack()\n");
}
EXPORT_SYMBOL(dump_stack);

74
lib/extable.c Normal file
View File

@@ -0,0 +1,74 @@
/*
* Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c.
*
* Copyright (C) 2004 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
#ifndef ARCH_HAS_SORT_EXTABLE
/*
* The exception table needs to be sorted so that the binary
* search that we use to find entries in it works properly.
* This is used both for the kernel exception table and for
* the exception tables of modules that get loaded.
*/
static int cmp_ex(const void *a, const void *b)
{
const struct exception_table_entry *x = a, *y = b;
/* avoid overflow */
if (x->insn > y->insn)
return 1;
if (x->insn < y->insn)
return -1;
return 0;
}
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish)
{
sort(start, finish - start, sizeof(struct exception_table_entry),
cmp_ex, NULL);
}
#endif
#ifndef ARCH_HAS_SEARCH_EXTABLE
/*
* Search one exception table for an entry corresponding to the
* given instruction address, and return the address of the entry,
* or NULL if none is found.
* We use a binary search, and thus we assume that the table is
* already sorted.
*/
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
const struct exception_table_entry *last,
unsigned long value)
{
while (first <= last) {
const struct exception_table_entry *mid;
mid = (last - first) / 2 + first;
/*
* careful, the distance between entries can be
* larger than 2GB:
*/
if (mid->insn < value)
first = mid + 1;
else if (mid->insn > value)
last = mid - 1;
else
return mid;
}
return NULL;
}
#endif
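/*
* Illustrative sketch (not part of the original file): an arch fault
* handler consults the sorted kernel table roughly like this. The
* section bounds are the conventional linker symbols, and the
* insn/fixup entry layout matches most, though not all, architectures.
*/
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];
static inline unsigned long extable_fixup_example(unsigned long addr)
{
const struct exception_table_entry *e;
e = search_extable(__start___ex_table, __stop___ex_table - 1, addr);
return e ? e->fixup : 0;
}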

307
lib/fault-inject.c Normal file
View File

@@ -0,0 +1,307 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/unwind.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/fault-inject.h>
/*
* setup_fault_attr() is a helper function for various __setup handlers, so it
* returns 0 on error, because that is what __setup handlers do.
*/
int __init setup_fault_attr(struct fault_attr *attr, char *str)
{
unsigned long probability;
unsigned long interval;
int times;
int space;
/* "<interval>,<probability>,<space>,<times>" */
if (sscanf(str, "%lu,%lu,%d,%d",
&interval, &probability, &space, &times) < 4) {
printk(KERN_WARNING
"FAULT_INJECTION: failed to parse arguments\n");
return 0;
}
attr->probability = probability;
attr->interval = interval;
atomic_set(&attr->times, times);
atomic_set(&attr->space, space);
return 1;
}
static void fail_dump(struct fault_attr *attr)
{
if (attr->verbose > 0)
printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure\n");
if (attr->verbose > 1)
dump_stack();
}
#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
static bool fail_task(struct fault_attr *attr, struct task_struct *task)
{
return !in_interrupt() && task->make_it_fail;
}
#define MAX_STACK_TRACE_DEPTH 32
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
static bool fail_stacktrace(struct fault_attr *attr)
{
struct stack_trace trace;
int depth = attr->stacktrace_depth;
unsigned long entries[MAX_STACK_TRACE_DEPTH];
int n;
bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
if (depth == 0)
return found;
trace.nr_entries = 0;
trace.entries = entries;
trace.max_entries = depth;
trace.skip = 1;
trace.all_contexts = 0;
save_stack_trace(&trace, NULL);
for (n = 0; n < trace.nr_entries; n++) {
if (attr->reject_start <= entries[n] &&
entries[n] < attr->reject_end)
return false;
if (attr->require_start <= entries[n] &&
entries[n] < attr->require_end)
found = true;
}
return found;
}
#else
static inline bool fail_stacktrace(struct fault_attr *attr)
{
return true;
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
/*
* This code is stolen from failmalloc-1.0
* http://www.nongnu.org/failmalloc/
*/
bool should_fail(struct fault_attr *attr, ssize_t size)
{
if (attr->task_filter && !fail_task(attr, current))
return false;
if (atomic_read(&attr->times) == 0)
return false;
if (atomic_read(&attr->space) > size) {
atomic_sub(size, &attr->space);
return false;
}
if (attr->interval > 1) {
attr->count++;
if (attr->count % attr->interval)
return false;
}
if (attr->probability <= random32() % 100)
return false;
if (!fail_stacktrace(attr))
return false;
fail_dump(attr);
if (atomic_read(&attr->times) != -1)
atomic_dec_not_zero(&attr->times);
return true;
}
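/*
* Illustrative sketch (not part of the original file): a subsystem
* wires fault injection in by asking should_fail() before doing the
* real work (the fault_attr instance and names are hypothetical;
* assumes <linux/slab.h> for kmalloc()).
*/
static struct fault_attr example_fail_alloc;
static void *example_alloc(size_t size, gfp_t gfp)
{
if (should_fail(&example_fail_alloc, size))
return NULL; /* injected failure */
return kmalloc(size, gfp);
}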
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static void debugfs_ul_set(void *data, u64 val)
{
*(unsigned long *)data = val;
}
static void debugfs_ul_set_MAX_STACK_TRACE_DEPTH(void *data, u64 val)
{
*(unsigned long *)data =
val < MAX_STACK_TRACE_DEPTH ?
val : MAX_STACK_TRACE_DEPTH;
}
static u64 debugfs_ul_get(void *data)
{
return *(unsigned long *)data;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
struct dentry *parent, unsigned long *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_ul);
}
DEFINE_SIMPLE_ATTRIBUTE(fops_ul_MAX_STACK_TRACE_DEPTH, debugfs_ul_get,
debugfs_ul_set_MAX_STACK_TRACE_DEPTH, "%llu\n");
static struct dentry *debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
const char *name, mode_t mode,
struct dentry *parent, unsigned long *value)
{
return debugfs_create_file(name, mode, parent, value,
&fops_ul_MAX_STACK_TRACE_DEPTH);
}
static void debugfs_atomic_t_set(void *data, u64 val)
{
atomic_set((atomic_t *)data, val);
}
static u64 debugfs_atomic_t_get(void *data)
{
return atomic_read((atomic_t *)data);
}
DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
debugfs_atomic_t_set, "%lld\n");
static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
struct dentry *parent, atomic_t *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
}
void cleanup_fault_attr_dentries(struct fault_attr *attr)
{
debugfs_remove(attr->dentries.probability_file);
attr->dentries.probability_file = NULL;
debugfs_remove(attr->dentries.interval_file);
attr->dentries.interval_file = NULL;
debugfs_remove(attr->dentries.times_file);
attr->dentries.times_file = NULL;
debugfs_remove(attr->dentries.space_file);
attr->dentries.space_file = NULL;
debugfs_remove(attr->dentries.verbose_file);
attr->dentries.verbose_file = NULL;
debugfs_remove(attr->dentries.task_filter_file);
attr->dentries.task_filter_file = NULL;
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
debugfs_remove(attr->dentries.stacktrace_depth_file);
attr->dentries.stacktrace_depth_file = NULL;
debugfs_remove(attr->dentries.require_start_file);
attr->dentries.require_start_file = NULL;
debugfs_remove(attr->dentries.require_end_file);
attr->dentries.require_end_file = NULL;
debugfs_remove(attr->dentries.reject_start_file);
attr->dentries.reject_start_file = NULL;
debugfs_remove(attr->dentries.reject_end_file);
attr->dentries.reject_end_file = NULL;
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
if (attr->dentries.dir)
WARN_ON(!simple_empty(attr->dentries.dir));
debugfs_remove(attr->dentries.dir);
attr->dentries.dir = NULL;
}
int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
{
mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
memset(&attr->dentries, 0, sizeof(attr->dentries));
dir = debugfs_create_dir(name, NULL);
if (!dir)
goto fail;
attr->dentries.dir = dir;
attr->dentries.probability_file =
debugfs_create_ul("probability", mode, dir, &attr->probability);
attr->dentries.interval_file =
debugfs_create_ul("interval", mode, dir, &attr->interval);
attr->dentries.times_file =
debugfs_create_atomic_t("times", mode, dir, &attr->times);
attr->dentries.space_file =
debugfs_create_atomic_t("space", mode, dir, &attr->space);
attr->dentries.verbose_file =
debugfs_create_ul("verbose", mode, dir, &attr->verbose);
attr->dentries.task_filter_file = debugfs_create_bool("task-filter",
mode, dir, &attr->task_filter);
if (!attr->dentries.probability_file || !attr->dentries.interval_file ||
!attr->dentries.times_file || !attr->dentries.space_file ||
!attr->dentries.verbose_file || !attr->dentries.task_filter_file)
goto fail;
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
attr->dentries.stacktrace_depth_file =
debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
"stacktrace-depth", mode, dir, &attr->stacktrace_depth);
attr->dentries.require_start_file =
debugfs_create_ul("require-start", mode, dir, &attr->require_start);
attr->dentries.require_end_file =
debugfs_create_ul("require-end", mode, dir, &attr->require_end);
attr->dentries.reject_start_file =
debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);
attr->dentries.reject_end_file =
debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
if (!attr->dentries.stacktrace_depth_file ||
!attr->dentries.require_start_file ||
!attr->dentries.require_end_file ||
!attr->dentries.reject_start_file ||
!attr->dentries.reject_end_file)
goto fail;
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
return 0;
fail:
cleanup_fault_attr_dentries(attr);
return -ENOMEM;
}
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

181
lib/find_next_bit.c Normal file
View File

@@ -0,0 +1,181 @@
/* find_next_bit.c: fallback find next bit implementation
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/module.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
* @size: The bitmap size in bits
* @offset: The bitnumber to start searching at
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_next_bit);
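/*
* Illustrative sketch (not part of the original file): visiting every
* set bit in a bitmap with the function above (names hypothetical).
*/
static void scan_bits_example(const unsigned long *bitmap, unsigned long nbits)
{
unsigned long bit;
for (bit = find_next_bit(bitmap, nbits, 0); bit < nbits;
bit = find_next_bit(bitmap, nbits, bit + 1))
printk(KERN_DEBUG "bit %lu is set\n", bit);
}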
/*
* This implementation of find_{first,next}_zero_bit was stolen from
* Linus' asm-alpha/bitops.h.
*/
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp |= ~0UL >> (BITS_PER_LONG - offset);
if (size < BITS_PER_LONG)
goto found_first;
if (~tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if (~(tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp |= ~0UL << size;
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found_middle:
return result + ffz(tmp);
}
EXPORT_SYMBOL(find_next_zero_bit);
#ifdef __BIG_ENDIAN
/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swabp(const unsigned long * x)
{
#if BITS_PER_LONG == 64
return (unsigned long) __swab64p((u64 *) x);
#elif BITS_PER_LONG == 32
return (unsigned long) __swab32p((u32 *) x);
#else
#error BITS_PER_LONG not defined
#endif
}
/* include/linux/byteorder doesn't support "unsigned long" type */
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}
unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned
long size, unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset &= (BITS_PER_LONG - 1UL);
if (offset) {
tmp = ext2_swabp(p++);
tmp |= (~0UL >> (BITS_PER_LONG - offset));
if (size < BITS_PER_LONG)
goto found_first;
if (~tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG - 1)) {
if (~(tmp = *(p++)))
goto found_middle_swap;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = ext2_swabp(p);
found_first:
tmp |= ~0UL << size;
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. Skip ffz */
found_middle:
return result + ffz(tmp);
found_middle_swap:
return result + ffz(ext2_swab(tmp));
}
EXPORT_SYMBOL(generic_find_next_zero_le_bit);
#endif /* __BIG_ENDIAN */

82
lib/gen_crc32table.c Normal file
View File

@@ -0,0 +1,82 @@
#include <stdio.h>
#include "crc32defs.h"
#include <inttypes.h>
#define ENTRIES_PER_LINE 4
#define LE_TABLE_SIZE (1 << CRC_LE_BITS)
#define BE_TABLE_SIZE (1 << CRC_BE_BITS)
static uint32_t crc32table_le[LE_TABLE_SIZE];
static uint32_t crc32table_be[BE_TABLE_SIZE];
/**
* crc32init_le() - initialize the LE table data
*
* crc is the crc of the byte i; other entries are filled in based on the
* fact that crctable[i^j] = crctable[i] ^ crctable[j].
*
*/
static void crc32init_le(void)
{
unsigned i, j;
uint32_t crc = 1;
crc32table_le[0] = 0;
for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) {
crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
crc32table_le[i + j] = crc ^ crc32table_le[j];
}
}
/**
* crc32init_be() - initialize the BE table data
*/
static void crc32init_be(void)
{
unsigned i, j;
uint32_t crc = 0x80000000;
crc32table_be[0] = 0;
for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
for (j = 0; j < i; j++)
crc32table_be[i + j] = crc ^ crc32table_be[j];
}
}
static void output_table(uint32_t table[], int len, char *trans)
{
int i;
for (i = 0; i < len - 1; i++) {
if (i % ENTRIES_PER_LINE == 0)
printf("\n");
printf("%s(0x%8.8xL), ", trans, table[i]);
}
printf("%s(0x%8.8xL)\n", trans, table[len - 1]);
}
int main(int argc, char** argv)
{
printf("/* this file is generated - do not edit */\n\n");
if (CRC_LE_BITS > 1) {
crc32init_le();
printf("static const u32 crc32table_le[] = {");
output_table(crc32table_le, LE_TABLE_SIZE, "tole");
printf("};\n");
}
if (CRC_BE_BITS > 1) {
crc32init_be();
printf("static const u32 crc32table_be[] = {");
output_table(crc32table_be, BE_TABLE_SIZE, "tobe");
printf("};\n");
}
return 0;
}
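/*
* Note (not part of the original file): this is a build-host helper;
* the kernel build runs it and captures its stdout as crc32table.h,
* which lib/crc32.c then #includes.
*/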

198
lib/genalloc.c Normal file
View File

@@ -0,0 +1,198 @@
/*
* Basic general purpose allocator for managing special purpose memory
* not managed by the regular kmalloc/kfree interface.
* Uses for this includes on-device special memory, uncached memory
* etc.
*
* Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/module.h>
#include <linux/genalloc.h>
/**
* gen_pool_create - create a new special memory pool
* @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
* @nid: node id of the node the pool structure should be allocated on, or -1
*
* Create a new special memory pool that can be used to manage special purpose
* memory not managed by the regular kmalloc/kfree interface.
*/
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
struct gen_pool *pool;
pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
if (pool != NULL) {
rwlock_init(&pool->lock);
INIT_LIST_HEAD(&pool->chunks);
pool->min_alloc_order = min_alloc_order;
}
return pool;
}
EXPORT_SYMBOL(gen_pool_create);
/**
* gen_pool_add - add a new chunk of special memory to the pool
* @pool: pool to add new memory chunk to
* @addr: starting address of memory chunk to add to pool
* @size: size in bytes of the memory chunk to add to pool
* @nid: node id of the node the chunk structure and bitmap should be
* allocated on, or -1
*
* Add a new chunk of special memory to the specified pool.
*/
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
int nid)
{
struct gen_pool_chunk *chunk;
int nbits = size >> pool->min_alloc_order;
int nbytes = sizeof(struct gen_pool_chunk) +
(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
if (unlikely(chunk == NULL))
return -1;
memset(chunk, 0, nbytes);
spin_lock_init(&chunk->lock);
chunk->start_addr = addr;
chunk->end_addr = addr + size;
write_lock(&pool->lock);
list_add(&chunk->next_chunk, &pool->chunks);
write_unlock(&pool->lock);
return 0;
}
EXPORT_SYMBOL(gen_pool_add);
/**
* gen_pool_destroy - destroy a special memory pool
* @pool: pool to destroy
*
* Destroy the specified special memory pool. Verifies that there are no
* outstanding allocations.
*/
void gen_pool_destroy(struct gen_pool *pool)
{
struct list_head *_chunk, *_next_chunk;
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
int bit, end_bit;
write_lock(&pool->lock);
list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
list_del(&chunk->next_chunk);
end_bit = (chunk->end_addr - chunk->start_addr) >> order;
bit = find_next_bit(chunk->bits, end_bit, 0);
BUG_ON(bit < end_bit);
kfree(chunk);
}
kfree(pool);
return;
}
EXPORT_SYMBOL(gen_pool_destroy);
/**
* gen_pool_alloc - allocate special memory from the pool
* @pool: pool to allocate from
* @size: number of bytes to allocate from the pool
*
* Allocate the requested number of bytes from the specified pool.
* Uses a first-fit algorithm.
*/
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
struct list_head *_chunk;
struct gen_pool_chunk *chunk;
unsigned long addr, flags;
int order = pool->min_alloc_order;
int nbits, bit, start_bit, end_bit;
if (size == 0)
return 0;
nbits = (size + (1UL << order) - 1) >> order;
read_lock(&pool->lock);
list_for_each(_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
end_bit = (chunk->end_addr - chunk->start_addr) >> order;
end_bit -= nbits + 1;
spin_lock_irqsave(&chunk->lock, flags);
bit = -1;
while (bit + 1 < end_bit) {
bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
if (bit >= end_bit)
break;
start_bit = bit;
if (nbits > 1) {
bit = find_next_bit(chunk->bits, bit + nbits,
bit + 1);
if (bit - start_bit < nbits)
continue;
}
addr = chunk->start_addr +
((unsigned long)start_bit << order);
while (nbits--)
__set_bit(start_bit++, chunk->bits);
spin_unlock_irqrestore(&chunk->lock, flags);
read_unlock(&pool->lock);
return addr;
}
spin_unlock_irqrestore(&chunk->lock, flags);
}
read_unlock(&pool->lock);
return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);
/**
* gen_pool_free - free allocated special memory back to the pool
* @pool: pool to free to
* @addr: starting address of memory to free back to pool
* @size: size in bytes of memory to free
*
* Free previously allocated special memory back to the specified pool.
*/
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
struct list_head *_chunk;
struct gen_pool_chunk *chunk;
unsigned long flags;
int order = pool->min_alloc_order;
int bit, nbits;
nbits = (size + (1UL << order) - 1) >> order;
read_lock(&pool->lock);
list_for_each(_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
if (addr >= chunk->start_addr && addr < chunk->end_addr) {
BUG_ON(addr + size > chunk->end_addr);
spin_lock_irqsave(&chunk->lock, flags);
bit = (addr - chunk->start_addr) >> order;
while (nbits--)
__clear_bit(bit++, chunk->bits);
spin_unlock_irqrestore(&chunk->lock, flags);
break;
}
}
BUG_ON(nbits > 0);
read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
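/*
* Illustrative sketch (not part of the original file): managing a 4KB
* region of special memory in 32-byte granules (addresses and names
* hypothetical).
*/
static unsigned long genalloc_example(void)
{
unsigned long addr;
struct gen_pool *pool = gen_pool_create(5, -1); /* 1 << 5 = 32-byte granules */
if (!pool)
return 0;
gen_pool_add(pool, 0xa0000000UL, 4096, -1); /* hand the region to the pool */
addr = gen_pool_alloc(pool, 100); /* rounded up to 4 granules, 128 bytes */
if (addr)
gen_pool_free(pool, addr, 100);
gen_pool_destroy(pool);
return addr;
}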

66
lib/halfmd4.c Normal file
View File

@@ -0,0 +1,66 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
/* F, G and H are basic MD4 functions: selection, majority, parity */
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
/*
* The generic round function. The application is so specific that
* we don't bother protecting all the arguments with parens, as is generally
* good macro practice, in favor of extra legibility.
* Rotation is separate from addition to prevent recomputation
*/
#define ROUND(f, a, b, c, d, x, s) \
(a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
#define K1 0
#define K2 013240474631UL
#define K3 015666365641UL
/*
* Basic cut-down MD4 transform. Returns only 32 bits of result.
*/
__u32 half_md4_transform(__u32 buf[4], __u32 const in[8])
{
__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
/* Round 1 */
ROUND(F, a, b, c, d, in[0] + K1, 3);
ROUND(F, d, a, b, c, in[1] + K1, 7);
ROUND(F, c, d, a, b, in[2] + K1, 11);
ROUND(F, b, c, d, a, in[3] + K1, 19);
ROUND(F, a, b, c, d, in[4] + K1, 3);
ROUND(F, d, a, b, c, in[5] + K1, 7);
ROUND(F, c, d, a, b, in[6] + K1, 11);
ROUND(F, b, c, d, a, in[7] + K1, 19);
/* Round 2 */
ROUND(G, a, b, c, d, in[1] + K2, 3);
ROUND(G, d, a, b, c, in[3] + K2, 5);
ROUND(G, c, d, a, b, in[5] + K2, 9);
ROUND(G, b, c, d, a, in[7] + K2, 13);
ROUND(G, a, b, c, d, in[0] + K2, 3);
ROUND(G, d, a, b, c, in[2] + K2, 5);
ROUND(G, c, d, a, b, in[4] + K2, 9);
ROUND(G, b, c, d, a, in[6] + K2, 13);
/* Round 3 */
ROUND(H, a, b, c, d, in[3] + K3, 3);
ROUND(H, d, a, b, c, in[7] + K3, 9);
ROUND(H, c, d, a, b, in[2] + K3, 11);
ROUND(H, b, c, d, a, in[6] + K3, 15);
ROUND(H, a, b, c, d, in[1] + K3, 3);
ROUND(H, d, a, b, c, in[5] + K3, 9);
ROUND(H, c, d, a, b, in[0] + K3, 11);
ROUND(H, b, c, d, a, in[4] + K3, 15);
buf[0] += a;
buf[1] += b;
buf[2] += c;
buf[3] += d;
return buf[1]; /* "most hashed" word */
}
EXPORT_SYMBOL(half_md4_transform);
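/*
* Illustrative sketch (not part of the original file): folding eight
* 32-bit words into a running 4-word state. The initial state here is
* the standard MD4 IV; callers such as the ext3 name-hash code pick
* their own.
*/
static __u32 half_md4_example(void)
{
__u32 state[4] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476 };
__u32 data[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
return half_md4_transform(state, data); /* 32-bit digest of the block */
}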

59
lib/hweight.c Normal file
View File

@@ -0,0 +1,59 @@
#include <linux/module.h>
#include <asm/types.h>
#include <asm/bitops.h>
/**
* hweightN - returns the Hamming weight of an N-bit word
* @x: the word to weigh
*
* The Hamming Weight of a number is the total number of bits set in it.
*/
unsigned int hweight32(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
}
EXPORT_SYMBOL(hweight32);
unsigned int hweight16(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x5555);
res = (res & 0x3333) + ((res >> 2) & 0x3333);
res = (res + (res >> 4)) & 0x0F0F;
return (res + (res >> 8)) & 0x00FF;
}
EXPORT_SYMBOL(hweight16);
unsigned int hweight8(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55);
res = (res & 0x33) + ((res >> 2) & 0x33);
return (res + (res >> 4)) & 0x0F;
}
EXPORT_SYMBOL(hweight8);
unsigned long hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
#ifdef ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
return (w * 0x0101010101010101ul) >> 56;
#else
__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
res = res + (res >> 8);
res = res + (res >> 16);
return (res + (res >> 32)) & 0x00000000000000FFul;
#endif
#endif
}
EXPORT_SYMBOL(hweight64);
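/*
* Illustrative sketch (not part of the original file): a quick sanity
* check of the identities above; 0xf0f0f0f0 has 16 set bits, so the
* 32- and 64-bit versions must agree.
*/
static int hweight_example(void)
{
return hweight32(0xf0f0f0f0) == 16 &&
hweight64(0xf0f0f0f0ULL) == 16 &&
hweight8(0xf0) == 4;
}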

475
lib/idr.c Normal file
View File

@@ -0,0 +1,475 @@
/*
* 2002-10-18 written by Jim Houston jim.houston@ccur.com
* Copyright (C) 2002 by Concurrent Computer Corporation
* Distributed under the GNU GPL license version 2.
*
* Modified by George Anzinger to reuse immediately and to use
* find bit instructions. Also removed _irq on spinlocks.
*
* Small id to pointer translation service.
*
* It uses a radix tree like structure as a sparse array indexed
* by the id to obtain the pointer. The bitmap makes allocating
* a new id quick.
*
* You call it to allocate an id (an int) and associate a pointer, or
* whatever you like, with that id; we treat it as a (void *). You can
* pass this id to a user to hand back at a later time; you then pass
* that id to this code and it returns your pointer.
* You can release ids at any time. When all ids are released, most of
* the memory is returned (we keep IDR_FREE_MAX layers in a local pool)
* so we don't need to go to the memory "store" during an id allocation,
* and so you don't need to be too concerned about locking and conflicts
* with the slab allocator.
*/
#ifndef TEST // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
static struct kmem_cache *idr_layer_cache;
static struct idr_layer *alloc_layer(struct idr *idp)
{
struct idr_layer *p;
unsigned long flags;
spin_lock_irqsave(&idp->lock, flags);
if ((p = idp->id_free)) {
idp->id_free = p->ary[0];
idp->id_free_cnt--;
p->ary[0] = NULL;
}
spin_unlock_irqrestore(&idp->lock, flags);
return(p);
}
/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
p->ary[0] = idp->id_free;
idp->id_free = p;
idp->id_free_cnt++;
}
static void free_layer(struct idr *idp, struct idr_layer *p)
{
unsigned long flags;
/*
* Depends on the return element being zeroed.
*/
spin_lock_irqsave(&idp->lock, flags);
__free_layer(idp, p);
spin_unlock_irqrestore(&idp->lock, flags);
}
/**
* idr_pre_get - reserve resources for idr allocation
* @idp: idr handle
* @gfp_mask: memory allocation flags
*
* This function should be called prior to locking and calling the
* following function. It preallocates enough memory to satisfy
* the worst possible allocation.
*
* If the system is REALLY out of memory this function returns 0,
* otherwise 1.
*/
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < IDR_FREE_MAX) {
struct idr_layer *new;
new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
if (new == NULL)
return (0);
free_layer(idp, new);
}
return 1;
}
EXPORT_SYMBOL(idr_pre_get);
static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
{
int n, m, sh;
struct idr_layer *p, *new;
struct idr_layer *pa[MAX_LEVEL];
int l, id;
long bm;
id = *starting_id;
p = idp->top;
l = idp->layers;
pa[l--] = NULL;
while (1) {
/*
* We run around this while loop until we reach the leaf node...
*/
n = (id >> (IDR_BITS*l)) & IDR_MASK;
bm = ~p->bitmap;
m = find_next_bit(&bm, IDR_SIZE, n);
if (m == IDR_SIZE) {
/* no space available, go back to the previous layer. */
l++;
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
if (!(p = pa[l])) {
*starting_id = id;
return -2;
}
continue;
}
if (m != n) {
sh = IDR_BITS*l;
id = ((id >> sh) ^ n ^ m) << sh;
}
if ((id >= MAX_ID_BIT) || (id < 0))
return -3;
if (l == 0)
break;
/*
* Create the layer below if it is missing.
*/
if (!p->ary[m]) {
if (!(new = alloc_layer(idp)))
return -1;
p->ary[m] = new;
p->count++;
}
pa[l--] = p;
p = p->ary[m];
}
/*
* We have reached the leaf node, plant the
* user's pointer and return the raw id.
*/
p->ary[m] = (struct idr_layer *)ptr;
__set_bit(m, &p->bitmap);
p->count++;
/*
* If this layer is full mark the bit in the layer above
* to show that this part of the radix tree is full.
* This may complete the layer above and require walking
* up the radix tree.
*/
n = id;
while (p->bitmap == IDR_FULL) {
if (!(p = pa[++l]))
break;
n = n >> IDR_BITS;
__set_bit((n & IDR_MASK), &p->bitmap);
}
return(id);
}
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
struct idr_layer *p, *new;
int layers, v, id;
unsigned long flags;
id = starting_id;
build_up:
p = idp->top;
layers = idp->layers;
if (unlikely(!p)) {
if (!(p = alloc_layer(idp)))
return -1;
layers = 1;
}
/*
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
layers++;
if (!p->count)
continue;
if (!(new = alloc_layer(idp))) {
/*
* The allocation failed. If we built part of
* the structure tear it down.
*/
spin_lock_irqsave(&idp->lock, flags);
for (new = p; p && p != idp->top; new = p) {
p = p->ary[0];
new->ary[0] = NULL;
new->bitmap = new->count = 0;
__free_layer(idp, new);
}
spin_unlock_irqrestore(&idp->lock, flags);
return -1;
}
new->ary[0] = p;
new->count = 1;
if (p->bitmap == IDR_FULL)
__set_bit(0, &new->bitmap);
p = new;
}
idp->top = p;
idp->layers = layers;
v = sub_alloc(idp, ptr, &id);
if (v == -2)
goto build_up;
return(v);
}
/**
* idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @starting_id: id to start search at
* @id: pointer to the allocated handle
*
* This is the allocate id function. It should be called with any
* required locks.
*
* If memory is required, it will return -EAGAIN, you should unlock
* and go back to the idr_pre_get() call. If the idr is full, it will
* return -ENOSPC.
*
* @id returns a value in the range 0 ... 0x7fffffff
*/
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
int rv;
rv = idr_get_new_above_int(idp, ptr, starting_id);
/*
* This is a cheap hack until the IDR code can be fixed to
* return proper error values.
*/
if (rv < 0) {
if (rv == -1)
return -EAGAIN;
else /* Will be -3 */
return -ENOSPC;
}
*id = rv;
return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
/**
* idr_get_new - allocate new idr entry
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @id: pointer to the allocated handle
*
* This is the allocate id function. It should be called with any
* required locks.
*
* If memory is required, it will return -EAGAIN, you should unlock
* and go back to the idr_pre_get() call. If the idr is full, it will
* return -ENOSPC.
*
* @id returns a value in the range 0 ... 0x7fffffff
*/
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
int rv;
rv = idr_get_new_above_int(idp, ptr, 0);
/*
* This is a cheap hack until the IDR code can be fixed to
* return proper error values.
*/
if (rv < 0) {
if (rv == -1)
return -EAGAIN;
else /* Will be -3 */
return -ENOSPC;
}
*id = rv;
return 0;
}
EXPORT_SYMBOL(idr_get_new);
static void idr_remove_warning(int id)
{
printk("idr_remove called for id=%d which is not allocated.\n", id);
dump_stack();
}
static void sub_remove(struct idr *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
struct idr_layer **pa[MAX_LEVEL];
struct idr_layer ***paa = &pa[0];
int n;
*paa = NULL;
*++paa = &idp->top;
while ((shift > 0) && p) {
n = (id >> shift) & IDR_MASK;
__clear_bit(n, &p->bitmap);
*++paa = &p->ary[n];
p = p->ary[n];
shift -= IDR_BITS;
}
n = id & IDR_MASK;
if (likely(p != NULL && test_bit(n, &p->bitmap))){
__clear_bit(n, &p->bitmap);
p->ary[n] = NULL;
while(*paa && ! --((**paa)->count)){
free_layer(idp, **paa);
**paa-- = NULL;
}
if (!*paa)
idp->layers = 0;
} else
idr_remove_warning(id);
}
/**
* idr_remove - remove the given id and free its slot
* @idp: idr handle
* @id: unique key
*/
void idr_remove(struct idr *idp, int id)
{
struct idr_layer *p;
/* Mask off upper bits we don't use for the search. */
id &= MAX_ID_MASK;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
idp->top->ary[0]) { // We can drop a layer
p = idp->top->ary[0];
idp->top->bitmap = idp->top->count = 0;
free_layer(idp, idp->top);
idp->top = p;
--idp->layers;
}
while (idp->id_free_cnt >= IDR_FREE_MAX) {
p = alloc_layer(idp);
kmem_cache_free(idr_layer_cache, p);
return;
}
}
EXPORT_SYMBOL(idr_remove);
/**
* idr_destroy - release all cached layers within an idr tree
* @idp: idr handle
*/
void idr_destroy(struct idr *idp)
{
while (idp->id_free_cnt) {
struct idr_layer *p = alloc_layer(idp);
kmem_cache_free(idr_layer_cache, p);
}
}
EXPORT_SYMBOL(idr_destroy);
/**
* idr_find - return pointer for given id
* @idp: idr handle
* @id: lookup key
*
* Return the pointer given the id it has been registered with. A %NULL
* return indicates that @id is not valid or you passed %NULL in
* idr_get_new().
*
* The caller must serialize idr_find() vs idr_get_new() and idr_remove().
*/
void *idr_find(struct idr *idp, int id)
{
int n;
struct idr_layer *p;
n = idp->layers * IDR_BITS;
p = idp->top;
/* Mask off upper bits we don't use for the search. */
id &= MAX_ID_MASK;
if (id >= (1 << n))
return NULL;
while (n > 0 && p) {
n -= IDR_BITS;
p = p->ary[(id >> n) & IDR_MASK];
}
return((void *)p);
}
EXPORT_SYMBOL(idr_find);
/**
* idr_replace - replace pointer for given id
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @id: lookup key
*
* Replace the pointer registered with an id and return the old value.
* A -ENOENT return indicates that @id was not found.
* A -EINVAL return indicates that @id was not within valid constraints.
*
* The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
*/
void *idr_replace(struct idr *idp, void *ptr, int id)
{
int n;
struct idr_layer *p, *old_p;
n = idp->layers * IDR_BITS;
p = idp->top;
id &= MAX_ID_MASK;
if (id >= (1 << n))
return ERR_PTR(-EINVAL);
n -= IDR_BITS;
while ((n > 0) && p) {
p = p->ary[(id >> n) & IDR_MASK];
n -= IDR_BITS;
}
n = id & IDR_MASK;
if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
return ERR_PTR(-ENOENT);
old_p = p->ary[n];
p->ary[n] = ptr;
return old_p;
}
EXPORT_SYMBOL(idr_replace);
static void idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache,
unsigned long flags)
{
memset(idr_layer, 0, sizeof(struct idr_layer));
}
static int init_id_cache(void)
{
if (!idr_layer_cache)
idr_layer_cache = kmem_cache_create("idr_layer_cache",
sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
return 0;
}
/**
* idr_init - initialize idr handle
* @idp: idr handle
*
* This function is used to set up the handle (@idp) that you will pass
* to the rest of the functions.
*/
void idr_init(struct idr *idp)
{
init_id_cache();
memset(idp, 0, sizeof(struct idr));
spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
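
/*
 * Editor's usage sketch, not part of this file, showing the retry
 * pattern the comments above describe: preallocate with idr_pre_get()
 * outside the lock, allocate under it, and loop on -EAGAIN. The names
 * my_idr/my_lock/example_store are hypothetical; <linux/idr.h> and
 * <linux/spinlock.h> are assumed.
 */
static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int example_store(void *ptr, int *id)
{
	int err;
again:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&my_lock);
	err = idr_get_new(&my_idr, ptr, id);
	spin_unlock(&my_lock);
	if (err == -EAGAIN)
		goto again;
	return err;	/* 0, or -ENOSPC if the idr is full */
}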

1212
lib/inflate.c Normal file

File diff suppressed because it is too large Load Diff

32
lib/int_sqrt.c Normal file
View File

@@ -0,0 +1,32 @@
#include <linux/kernel.h>
#include <linux/module.h>
/**
* int_sqrt - rough approximation to sqrt
* @x: integer of which to calculate the sqrt
*
* A very rough approximation to the sqrt() function.
*/
unsigned long int_sqrt(unsigned long x)
{
unsigned long op, res, one;
op = x;
res = 0;
one = 1UL << (BITS_PER_LONG - 2);
while (one > op)
one >>= 2;
while (one != 0) {
if (op >= res + one) {
op = op - (res + one);
res = res + 2 * one;
}
res /= 2;
one /= 4;
}
return res;
}
EXPORT_SYMBOL(int_sqrt);
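
/*
 * Editor's illustration, not part of this file: the result is the
 * integer floor of the square root, so every input in [n^2, (n+1)^2 - 1]
 * maps to n.
 */
static void int_sqrt_example(void)
{
	unsigned long a = int_sqrt(16);	/* 4 */
	unsigned long b = int_sqrt(17);	/* 4: truncated, not rounded */
	unsigned long c = int_sqrt(25);	/* 5 */
	(void)a; (void)b; (void)c;
}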

257
lib/iomap.c Normal file
View File

@@ -0,0 +1,257 @@
/*
* Implement the default iomap interfaces
*
* (C) Copyright 2004 Linus Torvalds
*/
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/module.h>
/*
* Read/write from/to an (offsettable) iomem cookie. It might be a PIO
* access or an MMIO access; these functions don't care. The info is
* encoded in the hardware mapping set up by the mapping functions
* (or the cookie itself, depending on implementation and hw).
*
* The generic routines don't assume any hardware mappings, and just
* encode the PIO/MMIO as part of the cookie. They coldly assume that
* the MMIO IO mappings are not in the low address range.
*
* Architectures for which this is not true can't use this generic
* implementation and should do their own copy.
*/
#ifndef HAVE_ARCH_PIO_SIZE
/*
* We encode the physical PIO addresses (0-0xffff) into the
* pointer by offsetting them with a constant (0x10000) and
* assuming that all the low addresses are always PIO. That means
* we can do some sanity checks on the low bits, and don't
* need to just take things for granted.
*/
#define PIO_OFFSET 0x10000UL
#define PIO_MASK 0x0ffffUL
#define PIO_RESERVED 0x40000UL
#endif
/*
* Ugly macros are a way of life.
*/
#define VERIFY_PIO(port) BUG_ON((port & ~PIO_MASK) != PIO_OFFSET)
#define IO_COND(addr, is_pio, is_mmio) do { \
unsigned long port = (unsigned long __force)addr; \
if (port < PIO_RESERVED) { \
VERIFY_PIO(port); \
port &= PIO_MASK; \
is_pio; \
} else { \
is_mmio; \
} \
} while (0)
#ifndef pio_read16be
#define pio_read16be(port) swab16(inw(port))
#define pio_read32be(port) swab32(inl(port))
#endif
#ifndef mmio_read16be
#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
#endif
unsigned int fastcall ioread8(void __iomem *addr)
{
IO_COND(addr, return inb(port), return readb(addr));
}
unsigned int fastcall ioread16(void __iomem *addr)
{
IO_COND(addr, return inw(port), return readw(addr));
}
unsigned int fastcall ioread16be(void __iomem *addr)
{
IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
}
unsigned int fastcall ioread32(void __iomem *addr)
{
IO_COND(addr, return inl(port), return readl(addr));
}
unsigned int fastcall ioread32be(void __iomem *addr)
{
IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif
#ifndef mmio_write16be
#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
#endif
void fastcall iowrite8(u8 val, void __iomem *addr)
{
IO_COND(addr, outb(val,port), writeb(val, addr));
}
void fastcall iowrite16(u16 val, void __iomem *addr)
{
IO_COND(addr, outw(val,port), writew(val, addr));
}
void fastcall iowrite16be(u16 val, void __iomem *addr)
{
IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
}
void fastcall iowrite32(u32 val, void __iomem *addr)
{
IO_COND(addr, outl(val,port), writel(val, addr));
}
void fastcall iowrite32be(u32 val, void __iomem *addr)
{
IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
/*
* These are the "repeat MMIO read/write" functions.
* Note the "__raw" accesses, since we don't want to
* convert to CPU byte order. We write in "IO byte
* order" (we also don't have IO barriers).
*/
#ifndef mmio_insb
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
while (--count >= 0) {
u8 data = __raw_readb(addr);
*dst = data;
dst++;
}
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
while (--count >= 0) {
u16 data = __raw_readw(addr);
*dst = data;
dst++;
}
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
while (--count >= 0) {
u32 data = __raw_readl(addr);
*dst = data;
dst++;
}
}
#endif
#ifndef mmio_outsb
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
while (--count >= 0) {
__raw_writeb(*src, addr);
src++;
}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
while (--count >= 0) {
__raw_writew(*src, addr);
src++;
}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
while (--count >= 0) {
__raw_writel(*src, addr);
src++;
}
}
#endif
void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
}
void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
}
void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);
void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
if (port > PIO_MASK)
return NULL;
return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}
void ioport_unmap(void __iomem *addr)
{
/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
unsigned long start = pci_resource_start(dev, bar);
unsigned long len = pci_resource_len(dev, bar);
unsigned long flags = pci_resource_flags(dev, bar);
if (!len || !start)
return NULL;
if (maxlen && len > maxlen)
len = maxlen;
if (flags & IORESOURCE_IO)
return ioport_map(start, len);
if (flags & IORESOURCE_MEM) {
if (flags & IORESOURCE_CACHEABLE)
return ioremap(start, len);
return ioremap_nocache(start, len);
}
/* What? */
return NULL;
}
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
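
/*
 * Editor's driver-side sketch, not part of this file: the cookie from
 * pci_iomap() works with the ioread/iowrite helpers above whether the
 * BAR is I/O- or memory-mapped. MY_REG is a hypothetical register
 * offset; a maxlen of 0 maps the whole BAR.
 */
#define MY_REG 0x10

static int example_probe(struct pci_dev *dev)
{
	void __iomem *regs = pci_iomap(dev, 0, 0);

	if (!regs)
		return -ENOMEM;
	iowrite32(0x1, regs + MY_REG);	/* same call for PIO and MMIO */
	(void) ioread32(regs + MY_REG);
	pci_iounmap(dev, regs);
	return 0;
}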

70
lib/iomap_copy.c Normal file
View File

@@ -0,0 +1,70 @@
/*
* Copyright 2006 PathScale, Inc. All Rights Reserved.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/io.h>
/**
* __iowrite32_copy - copy data to MMIO space, in 32-bit units
* @to: destination, in MMIO space (must be 32-bit aligned)
* @from: source (must be 32-bit aligned)
* @count: number of 32-bit quantities to copy
*
* Copy data from kernel space to MMIO space, in units of 32 bits at a
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
const void *from,
size_t count)
{
u32 __iomem *dst = to;
const u32 *src = from;
const u32 *end = src + count;
while (src < end)
__raw_writel(*src++, dst++);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
/**
* __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
* @to: destination, in MMIO space (must be 64-bit aligned)
* @from: source (must be 64-bit aligned)
* @count: number of 64-bit quantities to copy
*
* Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
const void *from,
size_t count)
{
#ifdef CONFIG_64BIT
u64 __iomem *dst = to;
const u64 *src = from;
const u64 *end = src + count;
while (src < end)
__raw_writeq(*src++, dst++);
#else
__iowrite32_copy(to, from, count * 2);
#endif
}
EXPORT_SYMBOL_GPL(__iowrite64_copy);
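
/*
 * Editor's usage sketch, not part of this file: post a 64-byte
 * descriptor image to device memory in 32-bit units. Note that count
 * is in 32-bit words, not bytes, and both pointers must be 32-bit
 * aligned.
 */
static void example_post(void __iomem *wq, const u32 *desc)
{
	__iowrite32_copy(wq, desc, 64 / sizeof(u32));
}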

91
lib/ioremap.c Normal file
View File

@@ -0,0 +1,91 @@
/*
* Re-map IO memory to kernel address space so that we can access it.
* This is needed for high PCI addresses that aren't mapped in the
* 640k-1MB IO memory area on PC's
*
* (C) Copyright 1995 1996 Linus Torvalds
*/
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long phys_addr, pgprot_t prot)
{
pte_t *pte;
unsigned long pfn;
pfn = phys_addr >> PAGE_SHIFT;
pte = pte_alloc_kernel(pmd, addr);
if (!pte)
return -ENOMEM;
do {
BUG_ON(!pte_none(*pte));
set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
return 0;
}
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, unsigned long phys_addr, pgprot_t prot)
{
pmd_t *pmd;
unsigned long next;
phys_addr -= addr;
pmd = pmd_alloc(&init_mm, pud, addr);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
}
static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
unsigned long end, unsigned long phys_addr, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
phys_addr -= addr;
pud = pud_alloc(&init_mm, pgd, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
return 0;
}
int ioremap_page_range(unsigned long addr,
unsigned long end, unsigned long phys_addr, pgprot_t prot)
{
pgd_t *pgd;
unsigned long start;
unsigned long next;
int err;
BUG_ON(addr >= end);
start = addr;
phys_addr -= addr;
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
if (err)
break;
} while (pgd++, addr = next, addr != end);
flush_cache_vmap(start, end);
return err;
}
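
/*
 * Editor's sketch of the caller side, not part of this file: an
 * architecture's ioremap() typically reserves a vmalloc-space region
 * and then asks ioremap_page_range() to point the kernel page tables
 * at the physical range. virt/size are assumed page-aligned, and
 * PAGE_KERNEL_NOCACHE is an arch-provided pgprot (an assumption here).
 */
static void __iomem *example_map(unsigned long phys, unsigned long size,
				 unsigned long virt)
{
	if (ioremap_page_range(virt, virt + size, phys, PAGE_KERNEL_NOCACHE))
		return NULL;
	return (void __iomem *)virt;
}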

17
lib/irq_regs.c Normal file
View File

@@ -0,0 +1,17 @@
/* saved per-CPU IRQ register pointer
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <asm/irq_regs.h>
#ifndef ARCH_HAS_OWN_IRQ_REGS
DEFINE_PER_CPU(struct pt_regs *, __irq_regs);
EXPORT_PER_CPU_SYMBOL(__irq_regs);
#endif

213
lib/kernel_lock.c Normal file
View File

@@ -0,0 +1,213 @@
/*
* lib/kernel_lock.c
*
* This is the traditional BKL - big kernel lock. Largely
* relegated to obsolescence, but used by various less
* important (or lazy) subsystems.
*/
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#ifdef CONFIG_PREEMPT_BKL
/*
* The 'big kernel semaphore'
*
* This mutex is taken and released recursively by lock_kernel()
* and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
* Note: code locked by this semaphore will only be serialized against
* other code using the same locking facility. The code guarantees that
* the task remains on the same CPU.
*
* Don't use in new code.
*/
static DECLARE_MUTEX(kernel_sem);
/*
* Re-acquire the kernel semaphore.
*
* This function is called with preemption off.
*
* We are executing in schedule() so the code must be extremely careful
* about recursion, both due to the down() and due to the enabling of
* preemption. schedule() will re-check the preemption flag after
* reacquiring the semaphore.
*/
int __lockfunc __reacquire_kernel_lock(void)
{
struct task_struct *task = current;
int saved_lock_depth = task->lock_depth;
BUG_ON(saved_lock_depth < 0);
task->lock_depth = -1;
preempt_enable_no_resched();
down(&kernel_sem);
preempt_disable();
task->lock_depth = saved_lock_depth;
return 0;
}
void __lockfunc __release_kernel_lock(void)
{
up(&kernel_sem);
}
/*
* Getting the big kernel semaphore.
*/
void __lockfunc lock_kernel(void)
{
struct task_struct *task = current;
int depth = task->lock_depth + 1;
if (likely(!depth))
/*
* No recursion worries - we set up lock_depth _after_
*/
down(&kernel_sem);
task->lock_depth = depth;
}
void __lockfunc unlock_kernel(void)
{
struct task_struct *task = current;
BUG_ON(task->lock_depth < 0);
if (likely(--task->lock_depth < 0))
up(&kernel_sem);
}
#else
/*
* The 'big kernel lock'
*
* This spinlock is taken and released recursively by lock_kernel()
* and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
* Don't use in new code.
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
/*
* Acquire/release the underlying lock from the scheduler.
*
* This is called with preemption disabled, and should
* return an error value if it cannot get the lock and
* TIF_NEED_RESCHED gets set.
*
* If it successfully gets the lock, it should increment
* the preemption count like any spinlock does.
*
* (This works on UP too - _raw_spin_trylock will never
* return false in that case)
*/
int __lockfunc __reacquire_kernel_lock(void)
{
while (!_raw_spin_trylock(&kernel_flag)) {
if (test_thread_flag(TIF_NEED_RESCHED))
return -EAGAIN;
cpu_relax();
}
preempt_disable();
return 0;
}
void __lockfunc __release_kernel_lock(void)
{
_raw_spin_unlock(&kernel_flag);
preempt_enable_no_resched();
}
/*
* These are the BKL spinlocks - we try to be polite about preemption.
* If SMP is not on (i.e. UP preemption), this all goes away because the
* _raw_spin_trylock() will always succeed.
*/
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
preempt_disable();
if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
/*
* If preemption was disabled even before this
* was called, there's nothing we can be polite
* about - just spin.
*/
if (preempt_count() > 1) {
_raw_spin_lock(&kernel_flag);
return;
}
/*
* Otherwise, let's wait for the kernel lock
* with preemption enabled..
*/
do {
preempt_enable();
while (spin_is_locked(&kernel_flag))
cpu_relax();
preempt_disable();
} while (!_raw_spin_trylock(&kernel_flag));
}
}
#else
/*
* Non-preemption case - just get the spinlock
*/
static inline void __lock_kernel(void)
{
_raw_spin_lock(&kernel_flag);
}
#endif
static inline void __unlock_kernel(void)
{
/*
* the BKL is not covered by lockdep, so we open-code the
* unlocking sequence (and thus avoid the dep-chain ops):
*/
_raw_spin_unlock(&kernel_flag);
preempt_enable();
}
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously, so we only need to
* worry about other CPU's.
*/
void __lockfunc lock_kernel(void)
{
int depth = current->lock_depth+1;
if (likely(!depth))
__lock_kernel();
current->lock_depth = depth;
}
void __lockfunc unlock_kernel(void)
{
BUG_ON(current->lock_depth < 0);
if (likely(--current->lock_depth < 0))
__unlock_kernel();
}
#endif
EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
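
/*
 * Editor's usage sketch, not part of this file: the BKL nests, so a
 * function may take it without knowing whether its caller already
 * holds it; the lock_depth counting above makes the recursion safe.
 */
static void example_legacy_path(void)
{
	lock_kernel();
	/* ... legacy code that still relies on BKL serialization ... */
	unlock_kernel();
}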

287
lib/klist.c Normal file
View File

@@ -0,0 +1,287 @@
/*
* klist.c - Routines for manipulating klists.
*
*
* This klist interface provides a couple of structures that wrap around
* struct list_head to provide explicit list "head" (struct klist) and
* list "node" (struct klist_node) objects. For struct klist, a spinlock
* is included that protects access to the actual list itself. struct
* klist_node provides a pointer to the klist that owns it and a kref
* reference count that indicates the number of current users of that node
* in the list.
*
* The entire point is to provide an interface for iterating over a list
* that is safe and allows for modification of the list during the
* iteration (e.g. insertion and removal), including modification of the
* current node on the list.
*
* It works using a 3rd object type - struct klist_iter - that is declared
* and initialized before an iteration. klist_next() is used to acquire the
* next element in the list. It returns NULL if there are no more items.
* Internally, that routine takes the klist's lock, decrements the reference
* count of the previous klist_node and increments the count of the next
* klist_node. It then drops the lock and returns.
*
* There are primitives for adding and removing nodes to/from a klist.
* When deleting, klist_del() will simply decrement the reference count.
* Only when the count goes to 0 is the node removed from the list.
* klist_remove() will try to delete the node from the list and block
* until it is actually removed. This is useful for objects (like devices)
* that have been removed from the system and must be freed (but must wait
* until all accessors have finished).
*
* Copyright (C) 2005 Patrick Mochel
*
* This file is released under the GPL v2.
*/
#include <linux/klist.h>
#include <linux/module.h>
/**
* klist_init - Initialize a klist structure.
* @k: The klist we're initializing.
* @get: The get function for the embedding object (NULL if none)
* @put: The put function for the embedding object (NULL if none)
*
* Initialises the klist structure. If the klist_node structures are
* going to be embedded in refcounted objects (necessary for safe
* deletion) then the get/put arguments are used to initialise
* functions that take and release references on the embedding
* objects.
*/
void klist_init(struct klist * k, void (*get)(struct klist_node *),
void (*put)(struct klist_node *))
{
INIT_LIST_HEAD(&k->k_list);
spin_lock_init(&k->k_lock);
k->get = get;
k->put = put;
}
EXPORT_SYMBOL_GPL(klist_init);
static void add_head(struct klist * k, struct klist_node * n)
{
spin_lock(&k->k_lock);
list_add(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
static void add_tail(struct klist * k, struct klist_node * n)
{
spin_lock(&k->k_lock);
list_add_tail(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
static void klist_node_init(struct klist * k, struct klist_node * n)
{
INIT_LIST_HEAD(&n->n_node);
init_completion(&n->n_removed);
kref_init(&n->n_ref);
n->n_klist = k;
if (k->get)
k->get(n);
}
/**
* klist_add_head - Initialize a klist_node and add it to front.
* @n: node we're adding.
* @k: klist it's going on.
*/
void klist_add_head(struct klist_node * n, struct klist * k)
{
klist_node_init(k, n);
add_head(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_head);
/**
* klist_add_tail - Initialize a klist_node and add it to back.
* @n: node we're adding.
* @k: klist it's going on.
*/
void klist_add_tail(struct klist_node * n, struct klist * k)
{
klist_node_init(k, n);
add_tail(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_tail);
static void klist_release(struct kref * kref)
{
struct klist_node * n = container_of(kref, struct klist_node, n_ref);
list_del(&n->n_node);
complete(&n->n_removed);
n->n_klist = NULL;
}
static int klist_dec_and_del(struct klist_node * n)
{
return kref_put(&n->n_ref, klist_release);
}
/**
* klist_del - Decrement the reference count of node and try to remove.
* @n: node we're deleting.
*/
void klist_del(struct klist_node * n)
{
struct klist * k = n->n_klist;
void (*put)(struct klist_node *) = k->put;
spin_lock(&k->k_lock);
if (!klist_dec_and_del(n))
put = NULL;
spin_unlock(&k->k_lock);
if (put)
put(n);
}
EXPORT_SYMBOL_GPL(klist_del);
/**
* klist_remove - Decrement the refcount of node and wait for it to go away.
* @n: node we're removing.
*/
void klist_remove(struct klist_node * n)
{
klist_del(n);
wait_for_completion(&n->n_removed);
}
EXPORT_SYMBOL_GPL(klist_remove);
/**
* klist_node_attached - Say whether a node is bound to a list or not.
* @n: Node that we're testing.
*/
int klist_node_attached(struct klist_node * n)
{
return (n->n_klist != NULL);
}
EXPORT_SYMBOL_GPL(klist_node_attached);
/**
* klist_iter_init_node - Initialize a klist_iter structure.
* @k: klist we're iterating.
* @i: klist_iter we're filling.
* @n: node to start with.
*
* Similar to klist_iter_init(), but starts the action off with @n,
* instead of with the list head.
*/
void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n)
{
i->i_klist = k;
i->i_head = &k->k_list;
i->i_cur = n;
if (n)
kref_get(&n->n_ref);
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);
/**
* klist_iter_init - Initialize a klist_iter structure.
* @k: klist we're iterating.
* @i: klist_iter structure we're filling.
*
* Similar to klist_iter_init_node(), but starts with the list head.
*/
void klist_iter_init(struct klist * k, struct klist_iter * i)
{
klist_iter_init_node(k, i, NULL);
}
EXPORT_SYMBOL_GPL(klist_iter_init);
/**
* klist_iter_exit - Finish a list iteration.
* @i: Iterator structure.
*
* Must be called when done iterating over the list, as it decrements the
* refcount of the current node. Necessary in case iteration exited before
* the end of the list was reached, and always good form.
*/
void klist_iter_exit(struct klist_iter * i)
{
if (i->i_cur) {
klist_del(i->i_cur);
i->i_cur = NULL;
}
}
EXPORT_SYMBOL_GPL(klist_iter_exit);
static struct klist_node * to_klist_node(struct list_head * n)
{
return container_of(n, struct klist_node, n_node);
}
/**
* klist_next - Ante up next node in list.
* @i: Iterator structure.
*
* First grab list lock. Decrement the reference count of the previous
* node, if there was one. Grab the next node, increment its reference
* count, drop the lock, and return that next node.
*/
struct klist_node * klist_next(struct klist_iter * i)
{
struct list_head * next;
struct klist_node * lnode = i->i_cur;
struct klist_node * knode = NULL;
void (*put)(struct klist_node *) = i->i_klist->put;
spin_lock(&i->i_klist->k_lock);
if (lnode) {
next = lnode->n_node.next;
if (!klist_dec_and_del(lnode))
put = NULL;
} else
next = i->i_head->next;
if (next != i->i_head) {
knode = to_klist_node(next);
kref_get(&knode->n_ref);
}
i->i_cur = knode;
spin_unlock(&i->i_klist->k_lock);
if (put && lnode)
put(lnode);
return knode;
}
EXPORT_SYMBOL_GPL(klist_next);
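
/*
 * Editor's iteration sketch, not part of this file, following the
 * pattern described in the header comment: initialize the iterator,
 * walk with klist_next(), and always call klist_iter_exit() so the
 * last node visited has its reference dropped.
 */
static void example_walk(struct klist *k)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(k, &iter);
	while ((n = klist_next(&iter))) {
		/* n is pinned here even if others modify the list */
	}
	klist_iter_exit(&iter);
}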

704
lib/kobject.c Normal file
View File

@@ -0,0 +1,704 @@
/*
* kobject.c - library routines for handling generic kernel objects
*
* Copyright (c) 2002-2003 Patrick Mochel <mochel@osdl.org>
*
* This file is released under the GPLv2.
*
*
* Please see the file Documentation/kobject.txt for critical information
* about using the kobject interface.
*/
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
/**
* populate_dir - populate directory with attributes.
* @kobj: object we're working on.
*
* Most subsystems have a set of default attributes that
* are associated with an object that registers with them.
* This is a helper called during object registration that
* loops through the default attributes of the subsystem
* and creates attribute files for them in sysfs.
*
*/
static int populate_dir(struct kobject * kobj)
{
struct kobj_type * t = get_ktype(kobj);
struct attribute * attr;
int error = 0;
int i;
if (t && t->default_attrs) {
for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
if ((error = sysfs_create_file(kobj,attr)))
break;
}
}
return error;
}
static int create_dir(struct kobject * kobj, struct dentry *shadow_parent)
{
int error = 0;
if (kobject_name(kobj)) {
error = sysfs_create_dir(kobj, shadow_parent);
if (!error) {
if ((error = populate_dir(kobj)))
sysfs_remove_dir(kobj);
}
}
return error;
}
static inline struct kobject * to_kobj(struct list_head * entry)
{
return container_of(entry,struct kobject,entry);
}
static int get_kobj_path_length(struct kobject *kobj)
{
int length = 1;
struct kobject * parent = kobj;
/* walk up the ancestors until we hit the one pointing to the
* root.
* Add 1 to strlen for leading '/' of each level.
*/
do {
if (kobject_name(parent) == NULL)
return 0;
length += strlen(kobject_name(parent)) + 1;
parent = parent->parent;
} while (parent);
return length;
}
static void fill_kobj_path(struct kobject *kobj, char *path, int length)
{
struct kobject * parent;
--length;
for (parent = kobj; parent; parent = parent->parent) {
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
strncpy (path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
pr_debug("%s: path = '%s'\n",__FUNCTION__,path);
}
/**
* kobject_get_path - generate and return the path associated with a given kobj and kset pair.
*
* @kobj: kobject in question, with which to build the path
* @gfp_mask: the allocation type used to allocate the path
*
* The result must be freed by the caller with kfree().
*/
char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
{
char *path;
int len;
len = get_kobj_path_length(kobj);
if (len == 0)
return NULL;
path = kzalloc(len, gfp_mask);
if (!path)
return NULL;
fill_kobj_path(kobj, path, len);
return path;
}
EXPORT_SYMBOL_GPL(kobject_get_path);
/**
* kobject_init - initialize object.
* @kobj: object in question.
*/
void kobject_init(struct kobject * kobj)
{
if (!kobj)
return;
kref_init(&kobj->kref);
INIT_LIST_HEAD(&kobj->entry);
init_waitqueue_head(&kobj->poll);
kobj->kset = kset_get(kobj->kset);
}
/**
* unlink - remove kobject from kset list.
* @kobj: kobject.
*
* Remove the kobject from the kset list and decrement
* its parent's refcount.
* This is separated out, so we can use it in both
* kobject_del() and kobject_add() on error.
*/
static void unlink(struct kobject * kobj)
{
if (kobj->kset) {
spin_lock(&kobj->kset->list_lock);
list_del_init(&kobj->entry);
spin_unlock(&kobj->kset->list_lock);
}
kobject_put(kobj);
}
/**
* kobject_shadow_add - add an object to the hierarchy.
* @kobj: object.
* @shadow_parent: sysfs directory to add to.
*/
int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent)
{
int error = 0;
struct kobject * parent;
if (!(kobj = kobject_get(kobj)))
return -ENOENT;
if (!kobj->k_name)
kobj->k_name = kobj->name;
if (!*kobj->k_name) {
pr_debug("kobject attempted to be registered with no name!\n");
WARN_ON(1);
return -EINVAL;
}
parent = kobject_get(kobj->parent);
pr_debug("kobject %s: registering. parent: %s, set: %s\n",
kobject_name(kobj), parent ? kobject_name(parent) : "<NULL>",
kobj->kset ? kobj->kset->kobj.name : "<NULL>" );
if (kobj->kset) {
spin_lock(&kobj->kset->list_lock);
if (!parent)
parent = kobject_get(&kobj->kset->kobj);
list_add_tail(&kobj->entry,&kobj->kset->list);
spin_unlock(&kobj->kset->list_lock);
}
kobj->parent = parent;
error = create_dir(kobj, shadow_parent);
if (error) {
/* unlink does the kobject_put() for us */
unlink(kobj);
kobject_put(parent);
/* be noisy on error issues */
if (error == -EEXIST)
printk("kobject_add failed for %s with -EEXIST, "
"don't try to register things with the "
"same name in the same directory.\n",
kobject_name(kobj));
else
printk("kobject_add failed for %s (%d)\n",
kobject_name(kobj), error);
dump_stack();
}
return error;
}
/**
* kobject_add - add an object to the hierarchy.
* @kobj: object.
*/
int kobject_add(struct kobject * kobj)
{
return kobject_shadow_add(kobj, NULL);
}
/**
* kobject_register - initialize and add an object.
* @kobj: object in question.
*/
int kobject_register(struct kobject * kobj)
{
int error = -EINVAL;
if (kobj) {
kobject_init(kobj);
error = kobject_add(kobj);
if (!error)
kobject_uevent(kobj, KOBJ_ADD);
}
return error;
}
/**
* kobject_set_name - Set the name of an object
* @kobj: object.
* @fmt: format string used to build the name
*
* If strlen(name) >= KOBJ_NAME_LEN, then use a dynamically allocated
* string that @kobj->k_name points to. Otherwise, use the static
* @kobj->name array.
*/
int kobject_set_name(struct kobject * kobj, const char * fmt, ...)
{
int error = 0;
int limit = KOBJ_NAME_LEN;
int need;
va_list args;
char * name;
/*
* First, try the static array
*/
va_start(args,fmt);
need = vsnprintf(kobj->name,limit,fmt,args);
va_end(args);
if (need < limit)
name = kobj->name;
else {
/*
* Need more space? Allocate it and try again
*/
limit = need + 1;
name = kmalloc(limit,GFP_KERNEL);
if (!name) {
error = -ENOMEM;
goto Done;
}
va_start(args,fmt);
need = vsnprintf(name,limit,fmt,args);
va_end(args);
/* Still? Give up. */
if (need >= limit) {
kfree(name);
error = -EFAULT;
goto Done;
}
}
/* Free the old name, if necessary. */
if (kobj->k_name && kobj->k_name != kobj->name)
kfree(kobj->k_name);
/* Now, set the new name */
kobj->k_name = name;
Done:
return error;
}
EXPORT_SYMBOL(kobject_set_name);
/**
* kobject_rename - change the name of an object
* @kobj: object in question.
* @new_name: object's new name
*/
int kobject_rename(struct kobject * kobj, const char *new_name)
{
int error = 0;
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
if (!kobj->parent)
return -EINVAL;
error = sysfs_rename_dir(kobj, kobj->parent->dentry, new_name);
kobject_put(kobj);
return error;
}
/**
* kobject_shadow_rename - change the name of an object
* @kobj: object in question.
* @new_parent: object's new parent
* @new_name: object's new name
*/
int kobject_shadow_rename(struct kobject * kobj, struct dentry *new_parent,
const char *new_name)
{
int error = 0;
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
error = sysfs_rename_dir(kobj, new_parent, new_name);
kobject_put(kobj);
return error;
}
/**
* kobject_move - move object to another parent
* @kobj: object in question.
* @new_parent: object's new parent (can be NULL)
*/
int kobject_move(struct kobject *kobj, struct kobject *new_parent)
{
int error;
struct kobject *old_parent;
const char *devpath = NULL;
char *devpath_string = NULL;
char *envp[2];
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
new_parent = kobject_get(new_parent);
if (!new_parent) {
if (kobj->kset)
new_parent = kobject_get(&kobj->kset->kobj);
}
/* old object path */
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
error = -ENOMEM;
goto out;
}
devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
if (!devpath_string) {
error = -ENOMEM;
goto out;
}
sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
envp[0] = devpath_string;
envp[1] = NULL;
error = sysfs_move_dir(kobj, new_parent);
if (error)
goto out;
old_parent = kobj->parent;
kobj->parent = new_parent;
new_parent = NULL;
kobject_put(old_parent);
kobject_uevent_env(kobj, KOBJ_MOVE, envp);
out:
kobject_put(new_parent);
kobject_put(kobj);
kfree(devpath_string);
kfree(devpath);
return error;
}
/**
* kobject_del - unlink kobject from hierarchy.
* @kobj: object.
*/
void kobject_del(struct kobject * kobj)
{
if (!kobj)
return;
sysfs_remove_dir(kobj);
unlink(kobj);
}
/**
* kobject_unregister - remove object from hierarchy and decrement refcount.
* @kobj: object going away.
*/
void kobject_unregister(struct kobject * kobj)
{
if (!kobj)
return;
pr_debug("kobject %s: unregistering\n",kobject_name(kobj));
kobject_uevent(kobj, KOBJ_REMOVE);
kobject_del(kobj);
kobject_put(kobj);
}
/**
* kobject_get - increment refcount for object.
* @kobj: object.
*/
struct kobject * kobject_get(struct kobject * kobj)
{
if (kobj)
kref_get(&kobj->kref);
return kobj;
}
/**
* kobject_cleanup - free kobject resources.
* @kobj: object.
*/
void kobject_cleanup(struct kobject * kobj)
{
struct kobj_type * t = get_ktype(kobj);
struct kset * s = kobj->kset;
struct kobject * parent = kobj->parent;
pr_debug("kobject %s: cleaning up\n",kobject_name(kobj));
if (kobj->k_name != kobj->name)
kfree(kobj->k_name);
kobj->k_name = NULL;
if (t && t->release)
t->release(kobj);
if (s)
kset_put(s);
kobject_put(parent);
}
static void kobject_release(struct kref *kref)
{
kobject_cleanup(container_of(kref, struct kobject, kref));
}
/**
* kobject_put - decrement refcount for object.
* @kobj: object.
*
* Decrement the refcount, and if 0, call kobject_cleanup().
*/
void kobject_put(struct kobject * kobj)
{
if (kobj)
kref_put(&kobj->kref, kobject_release);
}
static void dir_release(struct kobject *kobj)
{
kfree(kobj);
}
static struct kobj_type dir_ktype = {
.release = dir_release,
.sysfs_ops = NULL,
.default_attrs = NULL,
};
/**
* kobject_add_dir - add sub directory of object.
* @parent: object in which a directory is created.
* @name: directory name.
*
* Add a plain directory object as child of given object.
*/
struct kobject *kobject_add_dir(struct kobject *parent, const char *name)
{
struct kobject *k;
int ret;
if (!parent)
return NULL;
k = kzalloc(sizeof(*k), GFP_KERNEL);
if (!k)
return NULL;
k->parent = parent;
k->ktype = &dir_ktype;
kobject_set_name(k, name);
ret = kobject_register(k);
if (ret < 0) {
printk(KERN_WARNING "kobject_add_dir: "
"kobject_register error: %d\n", ret);
kobject_del(k);
return NULL;
}
return k;
}
/**
* kset_init - initialize a kset for use
* @k: kset
*/
void kset_init(struct kset * k)
{
kobject_init(&k->kobj);
INIT_LIST_HEAD(&k->list);
spin_lock_init(&k->list_lock);
}
/**
* kset_add - add a kset object to the hierarchy.
* @k: kset.
*
* Simply, this adds the kset's embedded kobject to the
* hierarchy.
* We also try to make sure that the kset's embedded kobject
* has a parent before it is added. We only care if the embedded
* kobject is not part of a kset itself, since kobject_add()
* assigns a parent in that case.
* If that is the case, and the kset has a controlling subsystem,
* then we set the kset's parent to be said subsystem.
*/
int kset_add(struct kset * k)
{
if (!k->kobj.parent && !k->kobj.kset && k->subsys)
k->kobj.parent = &k->subsys->kset.kobj;
return kobject_add(&k->kobj);
}
/**
* kset_register - initialize and add a kset.
* @k: kset.
*/
int kset_register(struct kset * k)
{
if (!k)
return -EINVAL;
kset_init(k);
return kset_add(k);
}
/**
* kset_unregister - remove a kset.
* @k: kset.
*/
void kset_unregister(struct kset * k)
{
if (!k)
return;
kobject_unregister(&k->kobj);
}
/**
* kset_find_obj - search for object in kset.
* @kset: kset we're looking in.
* @name: object's name.
*
* Lock kset via @kset->subsys, and iterate over @kset->list,
* looking for a matching kobject. If a matching object is found,
* take a reference and return the object.
*/
struct kobject * kset_find_obj(struct kset * kset, const char * name)
{
struct list_head * entry;
struct kobject * ret = NULL;
spin_lock(&kset->list_lock);
list_for_each(entry,&kset->list) {
struct kobject * k = to_kobj(entry);
if (kobject_name(k) && !strcmp(kobject_name(k),name)) {
ret = kobject_get(k);
break;
}
}
spin_unlock(&kset->list_lock);
return ret;
}
void subsystem_init(struct subsystem * s)
{
init_rwsem(&s->rwsem);
kset_init(&s->kset);
}
/**
* subsystem_register - register a subsystem.
* @s: the subsystem we're registering.
*
* Once we register the subsystem, we want to make sure that
* the kset points back to this subsystem for correct usage of
* the rwsem.
*/
int subsystem_register(struct subsystem * s)
{
int error;
if (!s)
return -EINVAL;
subsystem_init(s);
pr_debug("subsystem %s: registering\n",s->kset.kobj.name);
if (!(error = kset_add(&s->kset))) {
if (!s->kset.subsys)
s->kset.subsys = s;
}
return error;
}
void subsystem_unregister(struct subsystem * s)
{
if (!s)
return;
pr_debug("subsystem %s: unregistering\n",s->kset.kobj.name);
kset_unregister(&s->kset);
}
/**
* subsystem_create_file - export sysfs attribute file.
* @s: subsystem.
* @a: subsystem attribute descriptor.
*/
int subsys_create_file(struct subsystem * s, struct subsys_attribute * a)
{
int error = 0;
if (!s || !a)
return -EINVAL;
if (subsys_get(s)) {
error = sysfs_create_file(&s->kset.kobj,&a->attr);
subsys_put(s);
}
return error;
}
/**
* subsys_remove_file - remove sysfs attribute file.
* @s: subsystem.
* @a: attribute descriptor.
*/
#if 0
void subsys_remove_file(struct subsystem * s, struct subsys_attribute * a)
{
if (subsys_get(s)) {
sysfs_remove_file(&s->kset.kobj,&a->attr);
subsys_put(s);
}
}
#endif /* 0 */
EXPORT_SYMBOL(kobject_init);
EXPORT_SYMBOL(kobject_register);
EXPORT_SYMBOL(kobject_unregister);
EXPORT_SYMBOL(kobject_get);
EXPORT_SYMBOL(kobject_put);
EXPORT_SYMBOL(kobject_add);
EXPORT_SYMBOL(kobject_del);
EXPORT_SYMBOL(kset_register);
EXPORT_SYMBOL(kset_unregister);
EXPORT_SYMBOL(subsystem_register);
EXPORT_SYMBOL(subsystem_unregister);
EXPORT_SYMBOL(subsys_create_file);

310
lib/kobject_uevent.c Normal file
View File

@@ -0,0 +1,310 @@
/*
* kernel userspace event delivery
*
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004 Novell, Inc. All rights reserved.
* Copyright (C) 2004 IBM, Inc. All rights reserved.
*
* Licensed under the GNU GPL v2.
*
* Authors:
* Robert Love <rml@novell.com>
* Kay Sievers <kay.sievers@vrfy.org>
* Arjan van de Ven <arjanv@redhat.com>
* Greg Kroah-Hartman <greg@kroah.com>
*/
#include <linux/spinlock.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <net/sock.h>
#define BUFFER_SIZE 2048 /* buffer for the variables */
#define NUM_ENVP 32 /* number of env pointers */
#if defined(CONFIG_HOTPLUG)
u64 uevent_seqnum;
char uevent_helper[UEVENT_HELPER_PATH_LEN] = "/sbin/hotplug";
static DEFINE_SPINLOCK(sequence_lock);
#if defined(CONFIG_NET)
static struct sock *uevent_sock;
#endif
static char *action_to_string(enum kobject_action action)
{
switch (action) {
case KOBJ_ADD:
return "add";
case KOBJ_REMOVE:
return "remove";
case KOBJ_CHANGE:
return "change";
case KOBJ_MOUNT:
return "mount";
case KOBJ_UMOUNT:
return "umount";
case KOBJ_OFFLINE:
return "offline";
case KOBJ_ONLINE:
return "online";
case KOBJ_MOVE:
return "move";
default:
return NULL;
}
}
/**
* kobject_uevent_env - send an uevent with environmental data
*
* @kobj: struct kobject that the action is happening to
* @action: action that is happening (usually KOBJ_MOVE)
* @envp_ext: pointer to environmental data
*
* Returns 0 if the uevent completes successfully, or the
* corresponding error when it fails.
*/
int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
char *envp_ext[])
{
char **envp;
char *buffer;
char *scratch;
const char *action_string;
const char *devpath = NULL;
const char *subsystem;
struct kobject *top_kobj;
struct kset *kset;
struct kset_uevent_ops *uevent_ops;
u64 seq;
char *seq_buff;
int i = 0;
int retval = 0;
int j;
pr_debug("%s\n", __FUNCTION__);
action_string = action_to_string(action);
if (!action_string) {
pr_debug("kobject attempted to send uevent without action_string!\n");
return -EINVAL;
}
/* search the kset we belong to */
top_kobj = kobj;
if (!top_kobj->kset && top_kobj->parent) {
do {
top_kobj = top_kobj->parent;
} while (!top_kobj->kset && top_kobj->parent);
}
if (!top_kobj->kset) {
pr_debug("kobject attempted to send uevent without kset!\n");
return -EINVAL;
}
kset = top_kobj->kset;
uevent_ops = kset->uevent_ops;
/* skip the event, if the filter returns zero. */
if (uevent_ops && uevent_ops->filter)
if (!uevent_ops->filter(kset, kobj)) {
pr_debug("kobject filter function caused the event to drop!\n");
return 0;
}
/* environment index */
envp = kzalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL);
if (!envp)
return -ENOMEM;
/* environment values */
buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
if (!buffer) {
retval = -ENOMEM;
goto exit;
}
/* complete object path */
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
retval = -ENOENT;
goto exit;
}
/* originating subsystem */
if (uevent_ops && uevent_ops->name)
subsystem = uevent_ops->name(kset, kobj);
else
subsystem = kobject_name(&kset->kobj);
/* event environment for the helper process only */
envp[i++] = "HOME=/";
envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
/* default keys */
scratch = buffer;
envp [i++] = scratch;
scratch += sprintf(scratch, "ACTION=%s", action_string) + 1;
envp [i++] = scratch;
scratch += sprintf (scratch, "DEVPATH=%s", devpath) + 1;
envp [i++] = scratch;
scratch += sprintf(scratch, "SUBSYSTEM=%s", subsystem) + 1;
for (j = 0; envp_ext && envp_ext[j]; j++)
envp[i++] = envp_ext[j];
/* just reserve the space, overwrite it after kset call has returned */
envp[i++] = seq_buff = scratch;
scratch += strlen("SEQNUM=18446744073709551616") + 1;
/* let the kset specific function add its stuff */
if (uevent_ops && uevent_ops->uevent) {
retval = uevent_ops->uevent(kset, kobj,
&envp[i], NUM_ENVP - i, scratch,
BUFFER_SIZE - (scratch - buffer));
if (retval) {
pr_debug ("%s - uevent() returned %d\n",
__FUNCTION__, retval);
goto exit;
}
}
/* we will send an event, request a new sequence number */
spin_lock(&sequence_lock);
seq = ++uevent_seqnum;
spin_unlock(&sequence_lock);
sprintf(seq_buff, "SEQNUM=%llu", (unsigned long long)seq);
#if defined(CONFIG_NET)
/* send netlink message */
if (uevent_sock) {
struct sk_buff *skb;
size_t len;
/* allocate message with the maximum possible size */
len = strlen(action_string) + strlen(devpath) + 2;
skb = alloc_skb(len + BUFFER_SIZE, GFP_KERNEL);
if (skb) {
/* add header */
scratch = skb_put(skb, len);
sprintf(scratch, "%s@%s", action_string, devpath);
/* copy keys to our continuous event payload buffer */
for (i = 2; envp[i]; i++) {
len = strlen(envp[i]) + 1;
scratch = skb_put(skb, len);
strcpy(scratch, envp[i]);
}
NETLINK_CB(skb).dst_group = 1;
netlink_broadcast(uevent_sock, skb, 0, 1, GFP_KERNEL);
}
}
#endif
/* call uevent_helper, usually only enabled during early boot */
if (uevent_helper[0]) {
char *argv [3];
argv [0] = uevent_helper;
argv [1] = (char *)subsystem;
argv [2] = NULL;
call_usermodehelper (argv[0], argv, envp, 0);
}
exit:
kfree(devpath);
kfree(buffer);
kfree(envp);
return retval;
}
EXPORT_SYMBOL_GPL(kobject_uevent_env);
/**
* kobject_uevent - notify userspace by sending an uevent
*
* @kobj: struct kobject that the action is happening to
* @action: action that is happening (usually KOBJ_ADD or KOBJ_REMOVE)
*
* Returns 0 if the uevent completes successfully, or the
* corresponding error when it fails.
*/
int kobject_uevent(struct kobject *kobj, enum kobject_action action)
{
return kobject_uevent_env(kobj, action, NULL);
}
EXPORT_SYMBOL_GPL(kobject_uevent);
/**
* add_uevent_var - helper for creating event variables
* @envp: Pointer to table of environment variables, as passed into
* uevent() method.
* @num_envp: Number of environment variable slots available, as
* passed into uevent() method.
* @cur_index: Pointer to current index into @envp. It should be
* initialized to 0 before the first call to add_uevent_var(),
* and will be incremented on success.
* @buffer: Pointer to buffer for environment variables, as passed
* into uevent() method.
* @buffer_size: Length of @buffer, as passed into uevent() method.
* @cur_len: Pointer to current length of space used in @buffer.
* Should be initialized to 0 before the first call to
* add_uevent_var(), and will be incremented on success.
* @format: Format for creating environment variable (of the form
* "XXX=%x") for snprintf().
*
* Returns 0 if environment variable was added successfully or -ENOMEM
* if no space was available.
*/
int add_uevent_var(char **envp, int num_envp, int *cur_index,
char *buffer, int buffer_size, int *cur_len,
const char *format, ...)
{
va_list args;
/*
* We check against num_envp - 1 to make sure there is at
* least one slot left after we return, since kobject_uevent()
* needs to set the last slot to NULL.
*/
if (*cur_index >= num_envp - 1)
return -ENOMEM;
envp[*cur_index] = buffer + *cur_len;
va_start(args, format);
*cur_len += vsnprintf(envp[*cur_index],
max(buffer_size - *cur_len, 0),
format, args) + 1;
va_end(args);
if (*cur_len > buffer_size)
return -ENOMEM;
(*cur_index)++;
return 0;
}
EXPORT_SYMBOL_GPL(add_uevent_var);
#if defined(CONFIG_NET)
static int __init kobject_uevent_init(void)
{
uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL,
THIS_MODULE);
if (!uevent_sock) {
printk(KERN_ERR
"kobject_uevent: unable to create netlink socket!\n");
return -ENODEV;
}
return 0;
}
postcore_initcall(kobject_uevent_init);
#endif
#endif /* CONFIG_HOTPLUG */
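
/*
 * Editor's sketch of a kset uevent() method, not part of this file,
 * using add_uevent_var() as its kernel-doc above describes: one
 * running index and one running length, NULL-terminating the extra
 * entries as in-tree methods do. example_uevent and MYVAR are
 * hypothetical names.
 */
static int example_uevent(struct kset *kset, struct kobject *kobj,
			  char **envp, int num_envp, char *buffer,
			  int buffer_size)
{
	int i = 0;
	int len = 0;
	int err;

	err = add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
			     &len, "MYVAR=%s", kobject_name(kobj));
	if (err)
		return err;
	envp[i] = NULL;
	return 0;
}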

64
lib/kref.c Normal file
View File

@@ -0,0 +1,64 @@
/*
* kref.c - library routines for handling generic reference counted objects
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Corp.
*
* based on lib/kobject.c which was:
* Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
*
* This file is released under the GPLv2.
*
*/
#include <linux/kref.h>
#include <linux/module.h>
/**
* kref_init - initialize object.
* @kref: object in question.
*/
void kref_init(struct kref *kref)
{
atomic_set(&kref->refcount,1);
}
/**
* kref_get - increment refcount for object.
* @kref: object.
*/
void kref_get(struct kref *kref)
{
WARN_ON(!atomic_read(&kref->refcount));
atomic_inc(&kref->refcount);
}
/**
* kref_put - decrement refcount for object.
* @kref: object.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function.
*
* Decrement the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
* function returns 0, you still can not count on the kref from remaining in
* memory. Only use the return value if you want to see if the kref is now
* gone, not present.
*/
int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
WARN_ON(release == NULL);
WARN_ON(release == (void (*)(struct kref *))kfree);
if (atomic_dec_and_test(&kref->refcount)) {
release(kref);
return 1;
}
return 0;
}
EXPORT_SYMBOL(kref_init);
EXPORT_SYMBOL(kref_get);
EXPORT_SYMBOL(kref_put);
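/*
 * Usage sketch, assuming only the API above (plus <linux/slab.h> for
 * kmalloc()/kfree()): a hypothetical refcounted object. demo_obj,
 * demo_release and demo_create are invented names.
 */
struct demo_obj {
	struct kref kref;
	int payload;
};

static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	kfree(obj);	/* free via a wrapper; never pass kfree itself */
}

static struct demo_obj *demo_create(void)
{
	struct demo_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->kref);	/* refcount starts at 1 */
	return obj;
}
/* Each additional user takes kref_get(&obj->kref) and drops its
 * reference with kref_put(&obj->kref, demo_release). */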

200
lib/libcrc32c.c Normal file
View File

@@ -0,0 +1,200 @@
/*
* CRC32C
 * @Article{castagnoli-crc,
 * author = { Guy Castagnoli and Stefan Braeuer and Martin Herrmann},
* title = {{Optimization of Cyclic Redundancy-Check Codes with 24
* and 32 Parity Bits}},
 * journal = {IEEE Transactions on Communications},
* year = {1993},
* volume = {41},
* number = {6},
* pages = {},
* month = {June},
*}
 * Used by the iSCSI driver, possibly others, and derived from
 * the iscsi-crc.c module of the linux-iscsi driver at
* http://linux-iscsi.sourceforge.net.
*
 * Following the example of lib/crc32, this function is intended to be
 * flexible and useful for all users. Modules that currently have their
 * own crc32c but may be able to use this one instead are:
 *  net/sctp (please add your documentation here if you switch to
 *            this implementation!)
* <endoflist>
*
* Copyright (c) 2004 Cisco Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/crc32c.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <asm/byteorder.h>
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
MODULE_LICENSE("GPL");
#define CRC32C_POLY_BE 0x1EDC6F41
#define CRC32C_POLY_LE 0x82F63B78
#ifndef CRC_LE_BITS
# define CRC_LE_BITS 8
#endif
/*
* Haven't generated a big-endian table yet, but the bit-wise version
* should at least work.
*/
#if defined CRC_BE_BITS && CRC_BE_BITS != 1
#undef CRC_BE_BITS
#endif
#ifndef CRC_BE_BITS
# define CRC_BE_BITS 1
#endif
EXPORT_SYMBOL(crc32c_le);
#if CRC_LE_BITS == 1
/*
* Compute things bit-wise, as done in crc32.c. We could share the tight
* loop below with crc32 and vary the POLY if we don't find value in terms
* of space and maintainability in keeping the two modules separate.
*/
u32 __attribute_pure__
crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
int i;
while (len--) {
crc ^= *p++;
for (i = 0; i < 8; i++)
crc = (crc >> 1) ^ ((crc & 1) ? CRC32C_POLY_LE : 0);
}
return crc;
}
#else
/*
* This is the CRC-32C table
* Generated with:
* width = 32 bits
* poly = 0x1EDC6F41
* reflect input bytes = true
* reflect output bytes = true
*/
static const u32 crc32c_table[256] = {
0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L,
0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L,
0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L,
0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL,
0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL,
0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L,
0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL,
0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L,
0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL,
0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L,
0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L,
0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L,
0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L,
0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L,
0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L,
0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L,
0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L,
0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L,
0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L,
0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L,
0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L,
0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L,
0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L,
0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL,
0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L,
0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL,
0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L,
0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL,
0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL,
0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L,
0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL,
0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL,
0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L,
0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L,
0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L,
0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL,
0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L,
0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL,
0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L,
0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL,
0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L,
0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL,
0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL,
0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L,
0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L,
0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L,
0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL,
0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL,
0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L
};
/*
 * Steps through the buffer one byte at a time, calculating the
 * reflected CRC using the table.
*/
u32 __attribute_pure__
crc32c_le(u32 seed, unsigned char const *data, size_t length)
{
u32 crc = __cpu_to_le32(seed);
while (length--)
crc =
crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8);
return __le32_to_cpu(crc);
}
#endif /* CRC_LE_BITS == 1 */
EXPORT_SYMBOL(crc32c_be);
#if CRC_BE_BITS == 1
u32 __attribute_pure__
crc32c_be(u32 crc, unsigned char const *p, size_t len)
{
int i;
while (len--) {
crc ^= *p++ << 24;
for (i = 0; i < 8; i++)
crc =
(crc << 1) ^ ((crc & 0x80000000) ? CRC32C_POLY_BE :
0);
}
return crc;
}
#endif
/*
* Unit test
*
* A small unit test suite is implemented as part of the crypto suite.
* Select CRYPTO_CRC32C and use the tcrypt module to run the tests.
*/
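/*
 * Usage sketch: checksumming a buffer. Seeding with ~0 and inverting
 * the result follows common CRC-32C practice; it is a convention of
 * the caller, not a requirement of crc32c_le() itself.
 */
static inline u32 demo_crc32c(unsigned char const *buf, size_t len)
{
	return crc32c_le(~0, buf, len) ^ ~0;
}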

78
lib/list_debug.c Normal file
View File

@@ -0,0 +1,78 @@
/*
* Copyright 2006, Red Hat, Inc., Dave Jones
* Released under the General Public License (GPL).
*
* This file contains the linked list implementations for
* DEBUG_LIST.
*/
#include <linux/module.h>
#include <linux/list.h>
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
if (unlikely(next->prev != prev)) {
printk(KERN_ERR "list_add corruption. next->prev should be "
"prev (%p), but was %p. (next=%p).\n",
prev, next->prev, next);
BUG();
}
if (unlikely(prev->next != next)) {
printk(KERN_ERR "list_add corruption. prev->next should be "
"next (%p), but was %p. (prev=%p).\n",
next, prev->next, prev);
BUG();
}
next->prev = new;
new->next = next;
new->prev = prev;
prev->next = new;
}
EXPORT_SYMBOL(__list_add);
/**
* list_add - add a new entry
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}
EXPORT_SYMBOL(list_add);
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
 * Note: list_empty() on @entry does not return true after this; the entry
 * is in an undefined state.
*/
void list_del(struct list_head *entry)
{
if (unlikely(entry->prev->next != entry)) {
printk(KERN_ERR "list_del corruption. prev->next should be %p, "
"but was %p\n", entry, entry->prev->next);
BUG();
}
if (unlikely(entry->next->prev != entry)) {
printk(KERN_ERR "list_del corruption. next->prev should be %p, "
"but was %p\n", entry, entry->next->prev);
BUG();
}
__list_del(entry->prev, entry->next);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}
EXPORT_SYMBOL(list_del);
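/*
 * Usage sketch: the checked paths above are reached through the normal
 * list API. demo_entry and demo_list are invented names; assumes
 * <linux/slab.h> for kmalloc()/kfree().
 */
struct demo_entry {
	struct list_head link;
	int value;
};

static LIST_HEAD(demo_list);

static void demo_list_usage(void)
{
	struct demo_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return;
	e->value = 1;
	list_add(&e->link, &demo_list);	/* consistency-checked insert */
	list_del(&e->link);	/* checked delete; poisons next/prev */
	kfree(e);
}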

9
lib/locking-selftest-hardirq.h Normal file
View File

@@ -0,0 +1,9 @@
#undef IRQ_DISABLE
#undef IRQ_ENABLE
#undef IRQ_ENTER
#undef IRQ_EXIT
#define IRQ_ENABLE HARDIRQ_ENABLE
#define IRQ_DISABLE HARDIRQ_DISABLE
#define IRQ_ENTER HARDIRQ_ENTER
#define IRQ_EXIT HARDIRQ_EXIT

11
lib/locking-selftest-mutex.h Normal file
View File

@@ -0,0 +1,11 @@
#undef LOCK
#define LOCK ML
#undef UNLOCK
#define UNLOCK MU
#undef RLOCK
#undef WLOCK
#undef INIT
#define INIT MI

2
lib/locking-selftest-rlock-hardirq.h Normal file
View File

@@ -0,0 +1,2 @@
#include "locking-selftest-rlock.h"
#include "locking-selftest-hardirq.h"

2
lib/locking-selftest-rlock-softirq.h Normal file
View File

@@ -0,0 +1,2 @@
#include "locking-selftest-rlock.h"
#include "locking-selftest-softirq.h"

14
lib/locking-selftest-rlock.h Normal file
View File

@@ -0,0 +1,14 @@
#undef LOCK
#define LOCK RL
#undef UNLOCK
#define UNLOCK RU
#undef RLOCK
#define RLOCK RL
#undef WLOCK
#define WLOCK WL
#undef INIT
#define INIT RWI

14
lib/locking-selftest-rsem.h Normal file
View File

@@ -0,0 +1,14 @@
#undef LOCK
#define LOCK RSL
#undef UNLOCK
#define UNLOCK RSU
#undef RLOCK
#define RLOCK RSL
#undef WLOCK
#define WLOCK WSL
#undef INIT
#define INIT RWSI

9
lib/locking-selftest-softirq.h Normal file
View File

@@ -0,0 +1,9 @@
#undef IRQ_DISABLE
#undef IRQ_ENABLE
#undef IRQ_ENTER
#undef IRQ_EXIT
#define IRQ_DISABLE SOFTIRQ_DISABLE
#define IRQ_ENABLE SOFTIRQ_ENABLE
#define IRQ_ENTER SOFTIRQ_ENTER
#define IRQ_EXIT SOFTIRQ_EXIT

2
lib/locking-selftest-spin-hardirq.h Normal file
View File

@@ -0,0 +1,2 @@
#include "locking-selftest-spin.h"
#include "locking-selftest-hardirq.h"

2
lib/locking-selftest-spin-softirq.h Normal file
View File

@@ -0,0 +1,2 @@
#include "locking-selftest-spin.h"
#include "locking-selftest-softirq.h"

11
lib/locking-selftest-spin.h Normal file
View File

@@ -0,0 +1,11 @@
#undef LOCK
#define LOCK L
#undef UNLOCK
#define UNLOCK U
#undef RLOCK
#undef WLOCK
#undef INIT
#define INIT SI

2
lib/locking-selftest-wlock-hardirq.h Normal file
View File

@@ -0,0 +1,2 @@
#include "locking-selftest-wlock.h"
#include "locking-selftest-hardirq.h"

2
lib/locking-selftest-wlock-softirq.h Normal file
View File

@@ -0,0 +1,2 @@
#include "locking-selftest-wlock.h"
#include "locking-selftest-softirq.h"

14
lib/locking-selftest-wlock.h Normal file
View File

@@ -0,0 +1,14 @@
#undef LOCK
#define LOCK WL
#undef UNLOCK
#define UNLOCK WU
#undef RLOCK
#define RLOCK RL
#undef WLOCK
#define WLOCK WL
#undef INIT
#define INIT RWI

14
lib/locking-selftest-wsem.h Normal file
View File

@@ -0,0 +1,14 @@
#undef LOCK
#define LOCK WSL
#undef UNLOCK
#define UNLOCK WSU
#undef RLOCK
#define RLOCK RSL
#undef WLOCK
#define WLOCK WSL
#undef INIT
#define INIT RWSI

1218
lib/locking-selftest.c Normal file

File diff suppressed because it is too large

220
lib/parser.c Normal file
View File

@@ -0,0 +1,220 @@
/*
* lib/parser.c - simple parser for mount, etc. options.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/string.h>
/**
* match_one: - Determines if a string matches a simple pattern
 * @s: the string to examine for presence of the pattern
* @p: the string containing the pattern
* @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
* locations.
*
* Description: Determines if the pattern @p is present in string @s. Can only
* match extremely simple token=arg style patterns. If the pattern is found,
* the location(s) of the arguments will be returned in the @args array.
*/
static int match_one(char *s, char *p, substring_t args[])
{
char *meta;
int argc = 0;
if (!p)
return 1;
while(1) {
int len = -1;
meta = strchr(p, '%');
if (!meta)
return strcmp(p, s) == 0;
if (strncmp(p, s, meta-p))
return 0;
s += meta - p;
p = meta + 1;
if (isdigit(*p))
len = simple_strtoul(p, &p, 10);
else if (*p == '%') {
if (*s++ != '%')
return 0;
p++;
continue;
}
if (argc >= MAX_OPT_ARGS)
return 0;
args[argc].from = s;
switch (*p++) {
case 's':
if (strlen(s) == 0)
return 0;
else if (len == -1 || len > strlen(s))
len = strlen(s);
args[argc].to = s + len;
break;
case 'd':
simple_strtol(s, &args[argc].to, 0);
goto num;
case 'u':
simple_strtoul(s, &args[argc].to, 0);
goto num;
case 'o':
simple_strtoul(s, &args[argc].to, 8);
goto num;
case 'x':
simple_strtoul(s, &args[argc].to, 16);
num:
if (args[argc].to == args[argc].from)
return 0;
break;
default:
return 0;
}
s = args[argc].to;
argc++;
}
}
/**
* match_token: - Find a token (and optional args) in a string
* @s: the string to examine for token/argument pairs
* @table: match_table_t describing the set of allowed option tokens and the
* arguments that may be associated with them. Must be terminated with a
* &struct match_token whose pattern is set to the NULL pointer.
* @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
* locations.
*
 * Description: Detects which, if any, of a set of token strings has been
 * passed to it. Tokens can include up to MAX_OPT_ARGS instances of basic C-style
* format identifiers which will be taken into account when matching the
* tokens, and whose locations will be returned in the @args array.
*/
int match_token(char *s, match_table_t table, substring_t args[])
{
struct match_token *p;
for (p = table; !match_one(s, p->pattern, args); p++)
;
return p->token;
}
/**
* match_number: scan a number in the given base from a substring_t
* @s: substring to be scanned
* @result: resulting integer on success
* @base: base to use when converting string
*
* Description: Given a &substring_t and a base, attempts to parse the substring
* as a number in that base. On success, sets @result to the integer represented
* by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure.
*/
static int match_number(substring_t *s, int *result, int base)
{
char *endp;
char *buf;
int ret;
buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, s->from, s->to - s->from);
buf[s->to - s->from] = '\0';
*result = simple_strtol(buf, &endp, base);
ret = 0;
if (endp == buf)
ret = -EINVAL;
kfree(buf);
return ret;
}
/**
* match_int: - scan a decimal representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
* Description: Attempts to parse the &substring_t @s as a decimal integer. On
* success, sets @result to the integer represented by the string and returns 0.
* Returns either -ENOMEM or -EINVAL on failure.
*/
int match_int(substring_t *s, int *result)
{
return match_number(s, result, 0);
}
/**
* match_octal: - scan an octal representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
* Description: Attempts to parse the &substring_t @s as an octal integer. On
* success, sets @result to the integer represented by the string and returns
* 0. Returns either -ENOMEM or -EINVAL on failure.
*/
int match_octal(substring_t *s, int *result)
{
return match_number(s, result, 8);
}
/**
* match_hex: - scan a hex representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
* Description: Attempts to parse the &substring_t @s as a hexadecimal integer.
* On success, sets @result to the integer represented by the string and
* returns 0. Returns either -ENOMEM or -EINVAL on failure.
*/
int match_hex(substring_t *s, int *result)
{
return match_number(s, result, 16);
}
/**
* match_strcpy: - copies the characters from a substring_t to a string
* @to: string to copy characters to.
* @s: &substring_t to copy
*
* Description: Copies the set of characters represented by the given
* &substring_t @s to the c-style string @to. Caller guarantees that @to is
* large enough to hold the characters of @s.
*/
void match_strcpy(char *to, substring_t *s)
{
memcpy(to, s->from, s->to - s->from);
to[s->to - s->from] = '\0';
}
/**
* match_strdup: - allocate a new string with the contents of a substring_t
* @s: &substring_t to copy
*
* Description: Allocates and returns a string filled with the contents of
* the &substring_t @s. The caller is responsible for freeing the returned
* string with kfree().
*/
char *match_strdup(substring_t *s)
{
char *p = kmalloc(s->to - s->from + 1, GFP_KERNEL);
if (p)
match_strcpy(p, s);
return p;
}
EXPORT_SYMBOL(match_token);
EXPORT_SYMBOL(match_int);
EXPORT_SYMBOL(match_octal);
EXPORT_SYMBOL(match_hex);
EXPORT_SYMBOL(match_strcpy);
EXPORT_SYMBOL(match_strdup);
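/*
 * Usage sketch, assuming only the interfaces above: a hypothetical
 * option table and parser for "mode=%o" / "size=%d" / "name=%s" style
 * mount options. The demo_* names are invented.
 */
enum { Opt_mode, Opt_size, Opt_name, Opt_err };

static match_table_t demo_tokens = {
	{Opt_mode, "mode=%o"},
	{Opt_size, "size=%d"},
	{Opt_name, "name=%s"},
	{Opt_err, NULL}		/* NULL pattern terminates the table */
};

static int demo_parse_option(char *option)
{
	substring_t args[MAX_OPT_ARGS];
	int value;
	char *name;

	switch (match_token(option, demo_tokens, args)) {
	case Opt_mode:
		if (match_octal(&args[0], &value))
			return -EINVAL;
		return 0;	/* ... use value ... */
	case Opt_size:
		if (match_int(&args[0], &value))
			return -EINVAL;
		return 0;
	case Opt_name:
		name = match_strdup(&args[0]);
		if (!name)
			return -ENOMEM;
		kfree(name);	/* ... or keep it; the caller owns the copy */
		return 0;
	default:
		return -EINVAL;
	}
}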

46
lib/percpu_counter.c Normal file
View File

@@ -0,0 +1,46 @@
/*
* Fast batching percpu counters.
*/
#include <linux/percpu_counter.h>
#include <linux/module.h>
void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
long count;
s32 *pcount;
int cpu = get_cpu();
pcount = per_cpu_ptr(fbc->counters, cpu);
count = *pcount + amount;
if (count >= FBC_BATCH || count <= -FBC_BATCH) {
spin_lock(&fbc->lock);
fbc->count += count;
*pcount = 0;
spin_unlock(&fbc->lock);
} else {
*pcount = count;
}
put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);
/*
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
s64 percpu_counter_sum(struct percpu_counter *fbc)
{
s64 ret;
int cpu;
spin_lock(&fbc->lock);
ret = fbc->count;
for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
spin_unlock(&fbc->lock);
return ret < 0 ? 0 : ret;
}
EXPORT_SYMBOL(percpu_counter_sum);
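/*
 * Usage sketch, assuming a counter set up elsewhere with
 * percpu_counter_init() from <linux/percpu_counter.h>.
 */
static struct percpu_counter demo_counter;

static void demo_count_event(void)
{
	percpu_counter_mod(&demo_counter, 1);	/* cheap, usually per-cpu only */
}

static s64 demo_count_total(void)
{
	return percpu_counter_sum(&demo_counter);	/* slow but accurate */
}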

118
lib/plist.c Normal file
View File

@@ -0,0 +1,118 @@
/*
* lib/plist.c
*
* Descending-priority-sorted double-linked list
*
* (C) 2002-2003 Intel Corp
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
*
* 2001-2005 (c) MontaVista Software, Inc.
* Daniel Walker <dwalker@mvista.com>
*
* (C) 2005 Thomas Gleixner <tglx@linutronix.de>
*
* Simplifications of the original code by
* Oleg Nesterov <oleg@tv-sign.ru>
*
* Licensed under the FSF's GNU Public License v2 or later.
*
* Based on simple lists (include/linux/list.h).
*
* This file contains the add / del functions which are considered to
* be too large to inline. See include/linux/plist.h for further
* information.
*/
#include <linux/plist.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_PI_LIST
static void plist_check_prev_next(struct list_head *t, struct list_head *p,
struct list_head *n)
{
if (n->prev != p || p->next != n) {
printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
WARN_ON(1);
}
}
static void plist_check_list(struct list_head *top)
{
struct list_head *prev = top, *next = top->next;
plist_check_prev_next(top, prev, next);
while (next != top) {
prev = next;
next = prev->next;
plist_check_prev_next(top, prev, next);
}
}
static void plist_check_head(struct plist_head *head)
{
WARN_ON(!head->lock);
if (head->lock)
WARN_ON_SMP(!spin_is_locked(head->lock));
plist_check_list(&head->prio_list);
plist_check_list(&head->node_list);
}
#else
# define plist_check_head(h) do { } while (0)
#endif
/**
* plist_add - add @node to @head
*
* @node: &struct plist_node pointer
* @head: &struct plist_head pointer
*/
void plist_add(struct plist_node *node, struct plist_head *head)
{
struct plist_node *iter;
plist_check_head(head);
WARN_ON(!plist_node_empty(node));
list_for_each_entry(iter, &head->prio_list, plist.prio_list) {
if (node->prio < iter->prio)
goto lt_prio;
else if (node->prio == iter->prio) {
iter = list_entry(iter->plist.prio_list.next,
struct plist_node, plist.prio_list);
goto eq_prio;
}
}
lt_prio:
list_add_tail(&node->plist.prio_list, &iter->plist.prio_list);
eq_prio:
list_add_tail(&node->plist.node_list, &iter->plist.node_list);
plist_check_head(head);
}
/**
* plist_del - Remove a @node from plist.
*
* @node: &struct plist_node pointer - entry to be removed
* @head: &struct plist_head pointer - list head
*/
void plist_del(struct plist_node *node, struct plist_head *head)
{
plist_check_head(head);
if (!list_empty(&node->plist.prio_list)) {
struct plist_node *next = plist_first(&node->plist);
list_move_tail(&next->plist.prio_list, &node->plist.prio_list);
list_del_init(&node->plist.prio_list);
}
list_del_init(&node->plist.node_list);
plist_check_head(head);
}
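/*
 * Usage sketch, assuming the plist_head_init()/plist_node_init()
 * helpers of this tree's <linux/plist.h>. The head's lock must be
 * held around plist_add()/plist_del().
 */
static DEFINE_SPINLOCK(demo_lock);
static struct plist_head demo_head;

static void demo_plist_usage(void)
{
	struct plist_node node;

	plist_head_init(&demo_head, &demo_lock);
	plist_node_init(&node, 10);	/* priority value 10 */

	spin_lock(&demo_lock);
	plist_add(&node, &demo_head);
	plist_del(&node, &demo_head);
	spin_unlock(&demo_lock);
}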

484
lib/prio_tree.c Normal file
View File

@@ -0,0 +1,484 @@
/*
* lib/prio_tree.c - priority search tree
*
* Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu>
*
* This file is released under the GPL v2.
*
* Based on the radix priority search tree proposed by Edward M. McCreight
* SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985
*
* 02Feb2004 Initial version
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/prio_tree.h>
/*
* A clever mix of heap and radix trees forms a radix priority search tree (PST)
 * which is useful for storing intervals, e.g., we can consider a vma as a closed
* interval of file pages [offset_begin, offset_end], and store all vmas that
* map a file in a PST. Then, using the PST, we can answer a stabbing query,
* i.e., selecting a set of stored intervals (vmas) that overlap with (map) a
* given input interval X (a set of consecutive file pages), in "O(log n + m)"
* time where 'log n' is the height of the PST, and 'm' is the number of stored
* intervals (vmas) that overlap (map) with the input interval X (the set of
* consecutive file pages).
*
* In our implementation, we store closed intervals of the form [radix_index,
* heap_index]. We assume that always radix_index <= heap_index. McCreight's PST
* is designed for storing intervals with unique radix indices, i.e., each
 * interval has a different radix_index. However, this limitation can be easily
* overcome by using the size, i.e., heap_index - radix_index, as part of the
* index, so we index the tree using [(radix_index,size), heap_index].
*
* When the above-mentioned indexing scheme is used, theoretically, in a 32 bit
* machine, the maximum height of a PST can be 64. We can use a balanced version
* of the priority search tree to optimize the tree height, but the balanced
* tree proposed by McCreight is too complex and memory-hungry for our purpose.
*/
/*
* The following macros are used for implementing prio_tree for i_mmap
*/
#define RADIX_INDEX(vma) ((vma)->vm_pgoff)
#define VMA_SIZE(vma) (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT)
/* avoid overflow */
#define HEAP_INDEX(vma) ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1))
static void get_index(const struct prio_tree_root *root,
const struct prio_tree_node *node,
unsigned long *radix, unsigned long *heap)
{
if (root->raw) {
struct vm_area_struct *vma = prio_tree_entry(
node, struct vm_area_struct, shared.prio_tree_node);
*radix = RADIX_INDEX(vma);
*heap = HEAP_INDEX(vma);
}
else {
*radix = node->start;
*heap = node->last;
}
}
static unsigned long index_bits_to_maxindex[BITS_PER_LONG];
void __init prio_tree_init(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(index_bits_to_maxindex) - 1; i++)
index_bits_to_maxindex[i] = (1UL << (i + 1)) - 1;
index_bits_to_maxindex[ARRAY_SIZE(index_bits_to_maxindex) - 1] = ~0UL;
}
/*
* Maximum heap_index that can be stored in a PST with index_bits bits
*/
static inline unsigned long prio_tree_maxindex(unsigned int bits)
{
return index_bits_to_maxindex[bits - 1];
}
/*
* Extend a priority search tree so that it can store a node with heap_index
* max_heap_index. In the worst case, this algorithm takes O((log n)^2).
* However, this function is used rarely and the common case performance is
* not bad.
*/
static struct prio_tree_node *prio_tree_expand(struct prio_tree_root *root,
struct prio_tree_node *node, unsigned long max_heap_index)
{
struct prio_tree_node *first = NULL, *prev, *last = NULL;
if (max_heap_index > prio_tree_maxindex(root->index_bits))
root->index_bits++;
while (max_heap_index > prio_tree_maxindex(root->index_bits)) {
root->index_bits++;
if (prio_tree_empty(root))
continue;
if (first == NULL) {
first = root->prio_tree_node;
prio_tree_remove(root, root->prio_tree_node);
INIT_PRIO_TREE_NODE(first);
last = first;
} else {
prev = last;
last = root->prio_tree_node;
prio_tree_remove(root, root->prio_tree_node);
INIT_PRIO_TREE_NODE(last);
prev->left = last;
last->parent = prev;
}
}
INIT_PRIO_TREE_NODE(node);
if (first) {
node->left = first;
first->parent = node;
} else
last = node;
if (!prio_tree_empty(root)) {
last->left = root->prio_tree_node;
last->left->parent = last;
}
root->prio_tree_node = node;
return node;
}
/*
* Replace a prio_tree_node with a new node and return the old node
*/
struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
struct prio_tree_node *old, struct prio_tree_node *node)
{
INIT_PRIO_TREE_NODE(node);
if (prio_tree_root(old)) {
BUG_ON(root->prio_tree_node != old);
/*
* We can reduce root->index_bits here. However, it is complex
* and does not help much to improve performance (IMO).
*/
node->parent = node;
root->prio_tree_node = node;
} else {
node->parent = old->parent;
if (old->parent->left == old)
old->parent->left = node;
else
old->parent->right = node;
}
if (!prio_tree_left_empty(old)) {
node->left = old->left;
old->left->parent = node;
}
if (!prio_tree_right_empty(old)) {
node->right = old->right;
old->right->parent = node;
}
return old;
}
/*
* Insert a prio_tree_node @node into a radix priority search tree @root. The
* algorithm typically takes O(log n) time where 'log n' is the number of bits
* required to represent the maximum heap_index. In the worst case, the algo
* can take O((log n)^2) - check prio_tree_expand.
*
* If a prior node with same radix_index and heap_index is already found in
* the tree, then returns the address of the prior node. Otherwise, inserts
* @node into the tree and returns @node.
*/
struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
struct prio_tree_node *node)
{
struct prio_tree_node *cur, *res = node;
unsigned long radix_index, heap_index;
unsigned long r_index, h_index, index, mask;
int size_flag = 0;
get_index(root, node, &radix_index, &heap_index);
if (prio_tree_empty(root) ||
heap_index > prio_tree_maxindex(root->index_bits))
return prio_tree_expand(root, node, heap_index);
cur = root->prio_tree_node;
mask = 1UL << (root->index_bits - 1);
while (mask) {
get_index(root, cur, &r_index, &h_index);
if (r_index == radix_index && h_index == heap_index)
return cur;
if (h_index < heap_index ||
(h_index == heap_index && r_index > radix_index)) {
struct prio_tree_node *tmp = node;
node = prio_tree_replace(root, cur, node);
cur = tmp;
/* swap indices */
index = r_index;
r_index = radix_index;
radix_index = index;
index = h_index;
h_index = heap_index;
heap_index = index;
}
if (size_flag)
index = heap_index - radix_index;
else
index = radix_index;
if (index & mask) {
if (prio_tree_right_empty(cur)) {
INIT_PRIO_TREE_NODE(node);
cur->right = node;
node->parent = cur;
return res;
} else
cur = cur->right;
} else {
if (prio_tree_left_empty(cur)) {
INIT_PRIO_TREE_NODE(node);
cur->left = node;
node->parent = cur;
return res;
} else
cur = cur->left;
}
mask >>= 1;
if (!mask) {
mask = 1UL << (BITS_PER_LONG - 1);
size_flag = 1;
}
}
/* Should not reach here */
BUG();
return NULL;
}
/*
* Remove a prio_tree_node @node from a radix priority search tree @root. The
* algorithm takes O(log n) time where 'log n' is the number of bits required
* to represent the maximum heap_index.
*/
void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node)
{
struct prio_tree_node *cur;
unsigned long r_index, h_index_right, h_index_left;
cur = node;
while (!prio_tree_left_empty(cur) || !prio_tree_right_empty(cur)) {
if (!prio_tree_left_empty(cur))
get_index(root, cur->left, &r_index, &h_index_left);
else {
cur = cur->right;
continue;
}
if (!prio_tree_right_empty(cur))
get_index(root, cur->right, &r_index, &h_index_right);
else {
cur = cur->left;
continue;
}
/* h_index_left and h_index_right cannot both be 0 */
if (h_index_left >= h_index_right)
cur = cur->left;
else
cur = cur->right;
}
if (prio_tree_root(cur)) {
BUG_ON(root->prio_tree_node != cur);
__INIT_PRIO_TREE_ROOT(root, root->raw);
return;
}
if (cur->parent->right == cur)
cur->parent->right = cur->parent;
else
cur->parent->left = cur->parent;
while (cur != node)
cur = prio_tree_replace(root, cur->parent, cur);
}
/*
* Following functions help to enumerate all prio_tree_nodes in the tree that
* overlap with the input interval X [radix_index, heap_index]. The enumeration
* takes O(log n + m) time where 'log n' is the height of the tree (which is
* proportional to # of bits required to represent the maximum heap_index) and
* 'm' is the number of prio_tree_nodes that overlap the interval X.
*/
static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter,
unsigned long *r_index, unsigned long *h_index)
{
if (prio_tree_left_empty(iter->cur))
return NULL;
get_index(iter->root, iter->cur->left, r_index, h_index);
if (iter->r_index <= *h_index) {
iter->cur = iter->cur->left;
iter->mask >>= 1;
if (iter->mask) {
if (iter->size_level)
iter->size_level++;
} else {
if (iter->size_level) {
BUG_ON(!prio_tree_left_empty(iter->cur));
BUG_ON(!prio_tree_right_empty(iter->cur));
iter->size_level++;
iter->mask = ULONG_MAX;
} else {
iter->size_level = 1;
iter->mask = 1UL << (BITS_PER_LONG - 1);
}
}
return iter->cur;
}
return NULL;
}
static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter,
unsigned long *r_index, unsigned long *h_index)
{
unsigned long value;
if (prio_tree_right_empty(iter->cur))
return NULL;
if (iter->size_level)
value = iter->value;
else
value = iter->value | iter->mask;
if (iter->h_index < value)
return NULL;
get_index(iter->root, iter->cur->right, r_index, h_index);
if (iter->r_index <= *h_index) {
iter->cur = iter->cur->right;
iter->mask >>= 1;
iter->value = value;
if (iter->mask) {
if (iter->size_level)
iter->size_level++;
} else {
if (iter->size_level) {
BUG_ON(!prio_tree_left_empty(iter->cur));
BUG_ON(!prio_tree_right_empty(iter->cur));
iter->size_level++;
iter->mask = ULONG_MAX;
} else {
iter->size_level = 1;
iter->mask = 1UL << (BITS_PER_LONG - 1);
}
}
return iter->cur;
}
return NULL;
}
static struct prio_tree_node *prio_tree_parent(struct prio_tree_iter *iter)
{
iter->cur = iter->cur->parent;
if (iter->mask == ULONG_MAX)
iter->mask = 1UL;
else if (iter->size_level == 1)
iter->mask = 1UL;
else
iter->mask <<= 1;
if (iter->size_level)
iter->size_level--;
if (!iter->size_level && (iter->value & iter->mask))
iter->value ^= iter->mask;
return iter->cur;
}
static inline int overlap(struct prio_tree_iter *iter,
unsigned long r_index, unsigned long h_index)
{
return iter->h_index >= r_index && iter->r_index <= h_index;
}
/*
* prio_tree_first:
*
* Get the first prio_tree_node that overlaps with the interval [radix_index,
 * heap_index]. Note that radix_index <= heap_index always holds. We do a pre-order
* traversal of the tree.
*/
static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter)
{
struct prio_tree_root *root;
unsigned long r_index, h_index;
INIT_PRIO_TREE_ITER(iter);
root = iter->root;
if (prio_tree_empty(root))
return NULL;
get_index(root, root->prio_tree_node, &r_index, &h_index);
if (iter->r_index > h_index)
return NULL;
iter->mask = 1UL << (root->index_bits - 1);
iter->cur = root->prio_tree_node;
while (1) {
if (overlap(iter, r_index, h_index))
return iter->cur;
if (prio_tree_left(iter, &r_index, &h_index))
continue;
if (prio_tree_right(iter, &r_index, &h_index))
continue;
break;
}
return NULL;
}
/*
* prio_tree_next:
*
* Get the next prio_tree_node that overlaps with the input interval in iter
*/
struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter)
{
unsigned long r_index, h_index;
if (iter->cur == NULL)
return prio_tree_first(iter);
repeat:
while (prio_tree_left(iter, &r_index, &h_index))
if (overlap(iter, r_index, h_index))
return iter->cur;
while (!prio_tree_right(iter, &r_index, &h_index)) {
while (!prio_tree_root(iter->cur) &&
iter->cur->parent->right == iter->cur)
prio_tree_parent(iter);
if (prio_tree_root(iter->cur))
return NULL;
prio_tree_parent(iter);
}
if (overlap(iter, r_index, h_index))
return iter->cur;
goto repeat;
}
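/*
 * Usage sketch: enumerating every stored interval that overlaps
 * [start, last], assuming the prio_tree_iter_init() helper from
 * <linux/prio_tree.h> and a non-raw tree whose nodes carry
 * start/last directly.
 */
static void demo_stabbing_query(struct prio_tree_root *root,
				unsigned long start, unsigned long last)
{
	struct prio_tree_iter iter;
	struct prio_tree_node *node;

	prio_tree_iter_init(&iter, root, start, last);
	while ((node = prio_tree_next(&iter)) != NULL) {
		/* [node->start, node->last] overlaps [start, last] */
	}
}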

1026
lib/radix-tree.c Normal file

File diff suppressed because it is too large

143
lib/random32.c Normal file
View File

@@ -0,0 +1,143 @@
/*
This is a maximally equidistributed combined Tausworthe generator
based on code from GNU Scientific Library 1.5 (30 Jun 2004)
x_n = (s1_n ^ s2_n ^ s3_n)
s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19))
s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25))
s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11))
The period of this generator is about 2^88.
From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
Generators", Mathematics of Computation, 65, 213 (1996), 203--213.
This is available on the net from L'Ecuyer's home page,
http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
There is an erratum in the paper "Tables of Maximally
Equidistributed Combined LFSR Generators", Mathematics of
Computation, 68, 225 (1999), 261--269:
http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
... the k_j most significant bits of z_j must be non-
zero, for each j. (Note: this restriction also applies to the
computer code given in [4], but was mistakenly not mentioned in
that paper.)
This affects the seeding procedure by imposing the requirement
s1 > 1, s2 > 7, s3 > 15.
*/
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/random.h>
struct rnd_state {
u32 s1, s2, s3;
};
static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
static u32 __random32(struct rnd_state *state)
{
#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
return (state->s1 ^ state->s2 ^ state->s3);
}
static void __set_random32(struct rnd_state *state, unsigned long s)
{
if (s == 0)
s = 1; /* default seed is 1 */
#define LCG(n) (69069 * n)
state->s1 = LCG(s);
state->s2 = LCG(state->s1);
state->s3 = LCG(state->s2);
/* "warm it up" */
__random32(state);
__random32(state);
__random32(state);
__random32(state);
__random32(state);
__random32(state);
}
/**
* random32 - pseudo random number generator
*
* A 32 bit pseudo-random number is generated using a fast
* algorithm suitable for simulation. This algorithm is NOT
* considered safe for cryptographic use.
*/
u32 random32(void)
{
unsigned long r;
struct rnd_state *state = &get_cpu_var(net_rand_state);
r = __random32(state);
put_cpu_var(state);
return r;
}
EXPORT_SYMBOL(random32);
/**
* srandom32 - add entropy to pseudo random number generator
 * @entropy: entropy value to mix into the pool
 *
 * Add some additional seeding to the random32() pool.
 * Note: the pool is per-CPU, so this only affects the current CPU.
*/
void srandom32(u32 entropy)
{
struct rnd_state *state = &get_cpu_var(net_rand_state);
__set_random32(state, state->s1 ^ entropy);
put_cpu_var(state);
}
EXPORT_SYMBOL(srandom32);
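/*
 * Usage sketch: non-cryptographic pseudo-random numbers, e.g. for
 * simulation or load spreading. The modulo bias is acceptable there.
 */
static u32 demo_pick(u32 n)
{
	return random32() % n;	/* value in [0, n) */
}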
/*
 * Generate some initially weak seeding values so that the
 * random32() engine can start.
*/
static int __init random32_init(void)
{
int i;
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state,i);
__set_random32(state, i + jiffies);
}
return 0;
}
core_initcall(random32_init);
/*
 * Generate better values after the random number generator
 * is fully initialized.
*/
static int __init random32_reseed(void)
{
int i;
unsigned long seed;
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state,i);
get_random_bytes(&seed, sizeof(seed));
__set_random32(state, seed);
}
return 0;
}
late_initcall(random32_reseed);

397
lib/rbtree.c Normal file
View File

@@ -0,0 +1,397 @@
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/lib/rbtree.c
*/
#include <linux/rbtree.h>
#include <linux/module.h>
static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
{
struct rb_node *right = node->rb_right;
struct rb_node *parent = rb_parent(node);
if ((node->rb_right = right->rb_left))
rb_set_parent(right->rb_left, node);
right->rb_left = node;
rb_set_parent(right, parent);
if (parent)
{
if (node == parent->rb_left)
parent->rb_left = right;
else
parent->rb_right = right;
}
else
root->rb_node = right;
rb_set_parent(node, right);
}
static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
{
struct rb_node *left = node->rb_left;
struct rb_node *parent = rb_parent(node);
if ((node->rb_left = left->rb_right))
rb_set_parent(left->rb_right, node);
left->rb_right = node;
rb_set_parent(left, parent);
if (parent)
{
if (node == parent->rb_right)
parent->rb_right = left;
else
parent->rb_left = left;
}
else
root->rb_node = left;
rb_set_parent(node, left);
}
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
struct rb_node *parent, *gparent;
while ((parent = rb_parent(node)) && rb_is_red(parent))
{
gparent = rb_parent(parent);
if (parent == gparent->rb_left)
{
{
register struct rb_node *uncle = gparent->rb_right;
if (uncle && rb_is_red(uncle))
{
rb_set_black(uncle);
rb_set_black(parent);
rb_set_red(gparent);
node = gparent;
continue;
}
}
if (parent->rb_right == node)
{
register struct rb_node *tmp;
__rb_rotate_left(parent, root);
tmp = parent;
parent = node;
node = tmp;
}
rb_set_black(parent);
rb_set_red(gparent);
__rb_rotate_right(gparent, root);
} else {
{
register struct rb_node *uncle = gparent->rb_left;
if (uncle && rb_is_red(uncle))
{
rb_set_black(uncle);
rb_set_black(parent);
rb_set_red(gparent);
node = gparent;
continue;
}
}
if (parent->rb_left == node)
{
register struct rb_node *tmp;
__rb_rotate_right(parent, root);
tmp = parent;
parent = node;
node = tmp;
}
rb_set_black(parent);
rb_set_red(gparent);
__rb_rotate_left(gparent, root);
}
}
rb_set_black(root->rb_node);
}
EXPORT_SYMBOL(rb_insert_color);
static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
struct rb_root *root)
{
struct rb_node *other;
while ((!node || rb_is_black(node)) && node != root->rb_node)
{
if (parent->rb_left == node)
{
other = parent->rb_right;
if (rb_is_red(other))
{
rb_set_black(other);
rb_set_red(parent);
__rb_rotate_left(parent, root);
other = parent->rb_right;
}
if ((!other->rb_left || rb_is_black(other->rb_left)) &&
(!other->rb_right || rb_is_black(other->rb_right)))
{
rb_set_red(other);
node = parent;
parent = rb_parent(node);
}
else
{
if (!other->rb_right || rb_is_black(other->rb_right))
{
struct rb_node *o_left;
if ((o_left = other->rb_left))
rb_set_black(o_left);
rb_set_red(other);
__rb_rotate_right(other, root);
other = parent->rb_right;
}
rb_set_color(other, rb_color(parent));
rb_set_black(parent);
if (other->rb_right)
rb_set_black(other->rb_right);
__rb_rotate_left(parent, root);
node = root->rb_node;
break;
}
}
else
{
other = parent->rb_left;
if (rb_is_red(other))
{
rb_set_black(other);
rb_set_red(parent);
__rb_rotate_right(parent, root);
other = parent->rb_left;
}
if ((!other->rb_left || rb_is_black(other->rb_left)) &&
(!other->rb_right || rb_is_black(other->rb_right)))
{
rb_set_red(other);
node = parent;
parent = rb_parent(node);
}
else
{
if (!other->rb_left || rb_is_black(other->rb_left))
{
register struct rb_node *o_right;
if ((o_right = other->rb_right))
rb_set_black(o_right);
rb_set_red(other);
__rb_rotate_left(other, root);
other = parent->rb_left;
}
rb_set_color(other, rb_color(parent));
rb_set_black(parent);
if (other->rb_left)
rb_set_black(other->rb_left);
__rb_rotate_right(parent, root);
node = root->rb_node;
break;
}
}
}
if (node)
rb_set_black(node);
}
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *child, *parent;
int color;
if (!node->rb_left)
child = node->rb_right;
else if (!node->rb_right)
child = node->rb_left;
else
{
struct rb_node *old = node, *left;
node = node->rb_right;
while ((left = node->rb_left) != NULL)
node = left;
child = node->rb_right;
parent = rb_parent(node);
color = rb_color(node);
if (child)
rb_set_parent(child, parent);
if (parent == old) {
parent->rb_right = child;
parent = node;
} else
parent->rb_left = child;
node->rb_parent_color = old->rb_parent_color;
node->rb_right = old->rb_right;
node->rb_left = old->rb_left;
if (rb_parent(old))
{
if (rb_parent(old)->rb_left == old)
rb_parent(old)->rb_left = node;
else
rb_parent(old)->rb_right = node;
} else
root->rb_node = node;
rb_set_parent(old->rb_left, node);
if (old->rb_right)
rb_set_parent(old->rb_right, node);
goto color;
}
parent = rb_parent(node);
color = rb_color(node);
if (child)
rb_set_parent(child, parent);
if (parent)
{
if (parent->rb_left == node)
parent->rb_left = child;
else
parent->rb_right = child;
}
else
root->rb_node = child;
color:
if (color == RB_BLACK)
__rb_erase_color(child, parent, root);
}
EXPORT_SYMBOL(rb_erase);
/*
* This function returns the first node (in sort order) of the tree.
*/
struct rb_node *rb_first(struct rb_root *root)
{
struct rb_node *n;
n = root->rb_node;
if (!n)
return NULL;
while (n->rb_left)
n = n->rb_left;
return n;
}
EXPORT_SYMBOL(rb_first);
struct rb_node *rb_last(struct rb_root *root)
{
struct rb_node *n;
n = root->rb_node;
if (!n)
return NULL;
while (n->rb_right)
n = n->rb_right;
return n;
}
EXPORT_SYMBOL(rb_last);
struct rb_node *rb_next(struct rb_node *node)
{
struct rb_node *parent;
if (rb_parent(node) == node)
return NULL;
/* If we have a right-hand child, go down and then left as far
as we can. */
if (node->rb_right) {
node = node->rb_right;
while (node->rb_left)
node=node->rb_left;
return node;
}
/* No right-hand children. Everything down and left is
smaller than us, so any 'next' node must be in the general
direction of our parent. Go up the tree; any time the
ancestor is a right-hand child of its parent, keep going
up. First time it's a left-hand child of its parent, said
parent is our 'next' node. */
while ((parent = rb_parent(node)) && node == parent->rb_right)
node = parent;
return parent;
}
EXPORT_SYMBOL(rb_next);
struct rb_node *rb_prev(struct rb_node *node)
{
struct rb_node *parent;
if (rb_parent(node) == node)
return NULL;
/* If we have a left-hand child, go down and then right as far
as we can. */
if (node->rb_left) {
node = node->rb_left;
while (node->rb_right)
node=node->rb_right;
return node;
}
/* No left-hand children. Go up till we find an ancestor which
is a right-hand child of its parent */
while ((parent = rb_parent(node)) && node == parent->rb_left)
node = parent;
return parent;
}
EXPORT_SYMBOL(rb_prev);
void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root)
{
struct rb_node *parent = rb_parent(victim);
/* Set the surrounding nodes to point to the replacement */
if (parent) {
if (victim == parent->rb_left)
parent->rb_left = new;
else
parent->rb_right = new;
} else {
root->rb_node = new;
}
if (victim->rb_left)
rb_set_parent(victim->rb_left, new);
if (victim->rb_right)
rb_set_parent(victim->rb_right, new);
/* Copy the pointers/colour from the victim to the replacement */
*new = *victim;
}
EXPORT_SYMBOL(rb_replace_node);
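/*
 * Usage sketch: rbtree deliberately leaves search and insert to the
 * caller, who knows the key. A typical pair looks like this (mytype
 * and its key are invented names; cf. Documentation/rbtree.txt).
 */
struct mytype {
	struct rb_node node;
	long key;
};

static struct mytype *my_search(struct rb_root *root, long key)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct mytype *data = rb_entry(n, struct mytype, node);

		if (key < data->key)
			n = n->rb_left;
		else if (key > data->key)
			n = n->rb_right;
		else
			return data;
	}
	return NULL;
}

static int my_insert(struct rb_root *root, struct mytype *data)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct mytype *this = rb_entry(*new, struct mytype, node);

		parent = *new;
		if (data->key < this->key)
			new = &(*new)->rb_left;
		else if (data->key > this->key)
			new = &(*new)->rb_right;
		else
			return -EEXIST;	/* duplicate key */
	}
	rb_link_node(&data->node, parent, new);	/* link as a red leaf */
	rb_insert_color(&data->node, root);	/* rebalance */
	return 0;
}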

9
lib/reciprocal_div.c Normal file
View File

@@ -0,0 +1,9 @@
#include <asm/div64.h>
#include <linux/reciprocal_div.h>
u32 reciprocal_value(u32 k)
{
u64 val = (1LL << 32) + (k - 1);
do_div(val, k);
return (u32)val;
}
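/*
 * Usage sketch: dividing many values by one constant, assuming the
 * reciprocal_divide() helper in <linux/reciprocal_div.h> of this tree.
 */
static u32 demo_div_all(const u32 *vals, int n, u32 divisor)
{
	u32 recip = reciprocal_value(divisor);	/* precompute once */
	u32 sum = 0;
	int i;

	for (i = 0; i < n; i++)
		sum += reciprocal_divide(vals[i], recip); /* == vals[i] / divisor */
	return sum;
}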

6
lib/reed_solomon/Makefile Normal file
View File

@@ -0,0 +1,6 @@
#
# This is a modified version of the reed solomon lib.
#
obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o

272
lib/reed_solomon/decode_rs.c Normal file
View File

@@ -0,0 +1,272 @@
/*
* lib/reed_solomon/decode_rs.c
*
* Overview:
* Generic Reed Solomon encoder / decoder library
*
* Copyright 2002, Phil Karn, KA9Q
* May be used under the terms of the GNU General Public License (GPL)
*
 * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de)
*
* $Id: decode_rs.c,v 1.1.1.1 2007/06/12 07:27:13 eyryu Exp $
*
*/
/* Generic data width independent code which is included by the
* wrappers.
*/
{
int deg_lambda, el, deg_omega;
int i, j, r, k, pad;
int nn = rs->nn;
int nroots = rs->nroots;
int fcr = rs->fcr;
int prim = rs->prim;
int iprim = rs->iprim;
uint16_t *alpha_to = rs->alpha_to;
uint16_t *index_of = rs->index_of;
uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
/* Err+Eras locator poly and syndrome poly. The maximum value
 * of nroots is 8, so the necessary stack size will be about
 * 220 bytes max.
*/
uint16_t lambda[nroots + 1], syn[nroots];
uint16_t b[nroots + 1], t[nroots + 1], omega[nroots + 1];
uint16_t root[nroots], reg[nroots + 1], loc[nroots];
int count = 0;
uint16_t msk = (uint16_t) rs->nn;
/* Check length parameter for validity */
pad = nn - nroots - len;
if (pad < 0 || pad >= nn)
return -ERANGE;
/* Does the caller provide the syndrome ? */
if (s != NULL)
goto decode;
/* form the syndromes; i.e., evaluate data(x) at roots of
* g(x) */
for (i = 0; i < nroots; i++)
syn[i] = (((uint16_t) data[0]) ^ invmsk) & msk;
for (j = 1; j < len; j++) {
for (i = 0; i < nroots; i++) {
if (syn[i] == 0) {
syn[i] = (((uint16_t) data[j]) ^
invmsk) & msk;
} else {
syn[i] = ((((uint16_t) data[j]) ^
invmsk) & msk) ^
alpha_to[rs_modnn(rs, index_of[syn[i]] +
(fcr + i) * prim)];
}
}
}
for (j = 0; j < nroots; j++) {
for (i = 0; i < nroots; i++) {
if (syn[i] == 0) {
syn[i] = ((uint16_t) par[j]) & msk;
} else {
syn[i] = (((uint16_t) par[j]) & msk) ^
alpha_to[rs_modnn(rs, index_of[syn[i]] +
(fcr+i)*prim)];
}
}
}
s = syn;
/* Convert syndromes to index form, checking for nonzero condition */
syn_error = 0;
for (i = 0; i < nroots; i++) {
syn_error |= s[i];
s[i] = index_of[s[i]];
}
if (!syn_error) {
/* if syndrome is zero, data[] is a codeword and there are no
* errors to correct. So return data[] unmodified
*/
count = 0;
goto finish;
}
decode:
memset(&lambda[1], 0, nroots * sizeof(lambda[0]));
lambda[0] = 1;
if (no_eras > 0) {
/* Init lambda to be the erasure locator polynomial */
lambda[1] = alpha_to[rs_modnn(rs,
prim * (nn - 1 - eras_pos[0]))];
for (i = 1; i < no_eras; i++) {
u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
for (j = i + 1; j > 0; j--) {
tmp = index_of[lambda[j - 1]];
if (tmp != nn) {
lambda[j] ^=
alpha_to[rs_modnn(rs, u + tmp)];
}
}
}
}
for (i = 0; i < nroots + 1; i++)
b[i] = index_of[lambda[i]];
/*
* Begin Berlekamp-Massey algorithm to determine error+erasure
* locator polynomial
*/
r = no_eras;
el = no_eras;
while (++r <= nroots) { /* r is the step number */
/* Compute discrepancy at the r-th step in poly-form */
discr_r = 0;
for (i = 0; i < r; i++) {
if ((lambda[i] != 0) && (s[r - i - 1] != nn)) {
discr_r ^=
alpha_to[rs_modnn(rs,
index_of[lambda[i]] +
s[r - i - 1])];
}
}
discr_r = index_of[discr_r]; /* Index form */
if (discr_r == nn) {
/* 2 lines below: B(x) <-- x*B(x) */
memmove (&b[1], b, nroots * sizeof (b[0]));
b[0] = nn;
} else {
/* 7 lines below: T(x) <-- lambda(x)-discr_r*x*b(x) */
t[0] = lambda[0];
for (i = 0; i < nroots; i++) {
if (b[i] != nn) {
t[i + 1] = lambda[i + 1] ^
alpha_to[rs_modnn(rs, discr_r +
b[i])];
} else
t[i + 1] = lambda[i + 1];
}
if (2 * el <= r + no_eras - 1) {
el = r + no_eras - el;
/*
* 2 lines below: B(x) <-- inv(discr_r) *
* lambda(x)
*/
for (i = 0; i <= nroots; i++) {
b[i] = (lambda[i] == 0) ? nn :
rs_modnn(rs, index_of[lambda[i]]
- discr_r + nn);
}
} else {
/* 2 lines below: B(x) <-- x*B(x) */
memmove(&b[1], b, nroots * sizeof(b[0]));
b[0] = nn;
}
memcpy(lambda, t, (nroots + 1) * sizeof(t[0]));
}
}
/* Convert lambda to index form and compute deg(lambda(x)) */
deg_lambda = 0;
for (i = 0; i < nroots + 1; i++) {
lambda[i] = index_of[lambda[i]];
if (lambda[i] != nn)
deg_lambda = i;
}
/* Find roots of error+erasure locator polynomial by Chien search */
memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0]));
count = 0; /* Number of roots of lambda(x) */
for (i = 1, k = iprim - 1; i <= nn; i++, k = rs_modnn(rs, k + iprim)) {
q = 1; /* lambda[0] is always 1 (index form 0) */
for (j = deg_lambda; j > 0; j--) {
if (reg[j] != nn) {
reg[j] = rs_modnn(rs, reg[j] + j);
q ^= alpha_to[reg[j]];
}
}
if (q != 0)
continue; /* Not a root */
/* store root (index-form) and error location number */
root[count] = i;
loc[count] = k;
/* If we've already found max possible roots,
* abort the search to save time
*/
if (++count == deg_lambda)
break;
}
if (deg_lambda != count) {
/*
* deg(lambda) unequal to number of roots => uncorrectable
* error detected
*/
count = -1;
goto finish;
}
/*
* Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
 * x**nroots), in index form. Also find deg(omega).
*/
deg_omega = deg_lambda - 1;
for (i = 0; i <= deg_omega; i++) {
tmp = 0;
for (j = i; j >= 0; j--) {
if ((s[i - j] != nn) && (lambda[j] != nn))
tmp ^=
alpha_to[rs_modnn(rs, s[i - j] + lambda[j])];
}
omega[i] = index_of[tmp];
}
/*
* Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
* inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form
*/
for (j = count - 1; j >= 0; j--) {
num1 = 0;
for (i = deg_omega; i >= 0; i--) {
if (omega[i] != nn)
num1 ^= alpha_to[rs_modnn(rs, omega[i] +
i * root[j])];
}
num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)];
den = 0;
/* lambda[i+1] for i even is the formal derivative
* lambda_pr of lambda[i] */
for (i = min(deg_lambda, nroots - 1) & ~1; i >= 0; i -= 2) {
if (lambda[i + 1] != nn) {
den ^= alpha_to[rs_modnn(rs, lambda[i + 1] +
i * root[j])];
}
}
/* Apply error to data */
if (num1 != 0 && loc[j] >= pad) {
uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] +
index_of[num2] +
nn - index_of[den])];
/* Store the error correction pattern, if a
* correction buffer is available */
if (corr) {
corr[j] = cor;
} else {
/* If a data buffer is given and the
* error is inside the message,
* correct it */
if (data && (loc[j] < (nn - nroots)))
data[loc[j] - pad] ^= cor;
}
}
}
finish:
if (eras_pos != NULL) {
for (i = 0; i < count; i++)
eras_pos[i] = loc[i] - pad;
}
return count;
}

54
lib/reed_solomon/encode_rs.c Normal file
View File

@@ -0,0 +1,54 @@
/*
* lib/reed_solomon/encode_rs.c
*
* Overview:
* Generic Reed Solomon encoder / decoder library
*
* Copyright 2002, Phil Karn, KA9Q
* May be used under the terms of the GNU General Public License (GPL)
*
 * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de)
*
* $Id: encode_rs.c,v 1.1.1.1 2007/06/12 07:27:13 eyryu Exp $
*
*/
/* Generic data width independent code which is included by the
* wrappers.
* int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par)
*/
{
int i, j, pad;
int nn = rs->nn;
int nroots = rs->nroots;
uint16_t *alpha_to = rs->alpha_to;
uint16_t *index_of = rs->index_of;
uint16_t *genpoly = rs->genpoly;
uint16_t fb;
uint16_t msk = (uint16_t) rs->nn;
/* Check length parameter for validity */
pad = nn - nroots - len;
if (pad < 0 || pad >= nn)
return -ERANGE;
for (i = 0; i < len; i++) {
fb = index_of[((((uint16_t) data[i])^invmsk) & msk) ^ par[0]];
/* feedback term is non-zero */
if (fb != nn) {
for (j = 1; j < nroots; j++) {
par[j] ^= alpha_to[rs_modnn(rs, fb +
genpoly[nroots - j])];
}
}
/* Shift */
memmove(&par[0], &par[1], sizeof(uint16_t) * (nroots - 1));
if (fb != nn) {
par[nroots - 1] = alpha_to[rs_modnn(rs,
fb + genpoly[0])];
} else {
par[nroots - 1] = 0;
}
}
return 0;
}

329
lib/reed_solomon/reed_solomon.c Normal file
View File

@@ -0,0 +1,329 @@
/*
* lib/reed_solomon/reed_solomon.c
*
* Overview:
* Generic Reed Solomon encoder / decoder library
*
* Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
*
* Reed Solomon code lifted from reed solomon library written by Phil Karn
* Copyright 2002 Phil Karn, KA9Q
*
* $Id: reed_solomon.c,v 1.1.1.1 2007/06/12 07:27:13 eyryu Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Description:
*
* The generic Reed Solomon library provides runtime configurable
* encoding / decoding of RS codes.
* Each user must call init_rs to get a pointer to a rs_control
* structure for the given rs parameters. This structure is either
 * generated, or an already available matching control structure is used.
 * If a structure is generated then the polynomial arrays for
 * fast encoding / decoding are built. This can take some time so
 * make sure not to call this function from a time-critical path.
* Usually a module / driver should initialize the necessary
* rs_control structure on module / driver init and release it
* on exit.
* The encoding puts the calculated syndrome into a given syndrome
* buffer.
* The decoding is a two step process. The first step calculates
* the syndrome over the received (data + syndrome) and calls the
* second stage, which does the decoding / error correction itself.
* Many hw encoders provide a syndrome calculation over the received
* data + syndrome and can call the second stage directly.
*
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <asm/semaphore.h>
/* This list holds all currently allocated rs control structures */
static LIST_HEAD (rslist);
/* Protection for the list */
static DEFINE_MUTEX(rslistlock);
/**
* rs_init - Initialize a Reed-Solomon codec
* @symsize: symbol size, bits (1-8)
* @gfpoly: Field generator polynomial coefficients
* @fcr: first root of RS code generator polynomial, index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
*
 * Allocate a control structure and the polynomial arrays for faster
* en/decoding. Fill the arrays according to the given parameters.
*/
static struct rs_control *rs_init(int symsize, int gfpoly, int fcr,
int prim, int nroots)
{
struct rs_control *rs;
int i, j, sr, root, iprim;
/* Allocate the control structure */
rs = kmalloc(sizeof (struct rs_control), GFP_KERNEL);
if (rs == NULL)
return NULL;
INIT_LIST_HEAD(&rs->list);
rs->mm = symsize;
rs->nn = (1 << symsize) - 1;
rs->fcr = fcr;
rs->prim = prim;
rs->nroots = nroots;
rs->gfpoly = gfpoly;
/* Allocate the arrays */
rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
if (rs->alpha_to == NULL)
goto errrs;
rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
if (rs->index_of == NULL)
goto erralp;
rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), GFP_KERNEL);
if(rs->genpoly == NULL)
goto erridx;
/* Generate Galois field lookup tables */
rs->index_of[0] = rs->nn; /* log(zero) = -inf */
rs->alpha_to[rs->nn] = 0; /* alpha**-inf = 0 */
sr = 1;
for (i = 0; i < rs->nn; i++) {
rs->index_of[sr] = i;
rs->alpha_to[i] = sr;
sr <<= 1;
if (sr & (1 << symsize))
sr ^= gfpoly;
sr &= rs->nn;
}
/* If it's not primitive, exit */
if(sr != 1)
goto errpol;
/* Find prim-th root of 1, used in decoding */
for(iprim = 1; (iprim % prim) != 0; iprim += rs->nn);
/* prim-th root of 1, index form */
rs->iprim = iprim / prim;
/* Form RS code generator polynomial from its roots */
rs->genpoly[0] = 1;
for (i = 0, root = fcr * prim; i < nroots; i++, root += prim) {
rs->genpoly[i + 1] = 1;
/* Multiply rs->genpoly[] by @**(root + x) */
for (j = i; j > 0; j--) {
if (rs->genpoly[j] != 0) {
rs->genpoly[j] = rs->genpoly[j -1] ^
rs->alpha_to[rs_modnn(rs,
rs->index_of[rs->genpoly[j]] + root)];
} else
rs->genpoly[j] = rs->genpoly[j - 1];
}
/* rs->genpoly[0] can never be zero */
rs->genpoly[0] =
rs->alpha_to[rs_modnn(rs,
rs->index_of[rs->genpoly[0]] + root)];
}
/* convert rs->genpoly[] to index form for quicker encoding */
for (i = 0; i <= nroots; i++)
rs->genpoly[i] = rs->index_of[rs->genpoly[i]];
return rs;
/* Error exit */
errpol:
kfree(rs->genpoly);
erridx:
kfree(rs->index_of);
erralp:
kfree(rs->alpha_to);
errrs:
kfree(rs);
return NULL;
}
/**
* free_rs - Free the rs control structure, if it is no longer used
 * @rs: the control structure which is no longer used by the
 * caller
*/
void free_rs(struct rs_control *rs)
{
mutex_lock(&rslistlock);
rs->users--;
if(!rs->users) {
list_del(&rs->list);
kfree(rs->alpha_to);
kfree(rs->index_of);
kfree(rs->genpoly);
kfree(rs);
}
mutex_unlock(&rslistlock);
}
/**
* init_rs - Find a matching or allocate a new rs control structure
* @symsize: the symbol size (number of bits)
* @gfpoly: the extended Galois field generator polynomial coefficients,
* with the 0th coefficient in the low order bit. The polynomial
* must be primitive;
* @fcr: the first consecutive root of the rs code generator polynomial
* in index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
*/
struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
int nroots)
{
struct list_head *tmp;
struct rs_control *rs;
/* Sanity checks */
if (symsize < 1)
return NULL;
if (fcr < 0 || fcr >= (1<<symsize))
return NULL;
if (prim <= 0 || prim >= (1<<symsize))
return NULL;
if (nroots < 0 || nroots >= (1<<symsize))
return NULL;
mutex_lock(&rslistlock);
/* Walk through the list and look for a matching entry */
list_for_each(tmp, &rslist) {
rs = list_entry(tmp, struct rs_control, list);
if (symsize != rs->mm)
continue;
if (gfpoly != rs->gfpoly)
continue;
if (fcr != rs->fcr)
continue;
if (prim != rs->prim)
continue;
if (nroots != rs->nroots)
continue;
/* We have a matching one already */
rs->users++;
goto out;
}
/* Create a new one */
rs = rs_init(symsize, gfpoly, fcr, prim, nroots);
if (rs) {
rs->users = 1;
list_add(&rs->list, &rslist);
}
out:
mutex_unlock(&rslistlock);
return rs;
}
#ifdef CONFIG_REED_SOLOMON_ENC8
/**
* encode_rs8 - Calculate the parity for data values (8bit data width)
* @rs: the rs control structure
* @data: data field of a given type
* @len: data length
* @par: parity data, must be initialized by caller (usually all 0)
* @invmsk: invert data mask (will be xored on data)
*
* The parity uses a uint16_t data type to enable
 * symbol sizes > 8. The calling code must take care of encoding the
 * resulting parity for storage itself.
*/
int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par,
uint16_t invmsk)
{
#include "encode_rs.c"
}
EXPORT_SYMBOL_GPL(encode_rs8);
#endif
#ifdef CONFIG_REED_SOLOMON_DEC8
/**
* decode_rs8 - Decode codeword (8bit data width)
* @rs: the rs control structure
* @data: data field of a given type
* @par: received parity data field
* @len: data length
* @s: syndrome data field (if NULL, syndrome is calculated)
* @no_eras: number of erasures
* @eras_pos: position of erasures, can be NULL
* @invmsk: invert data mask (will be xored on data, not on parity!)
* @corr: buffer to store correction bitmask on eras_pos
*
 * The syndrome and parity use a uint16_t data type to enable
* symbol size > 8. The calling code must take care of decoding of the
* syndrome result and the received parity before calling this code.
*/
int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len,
uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
uint16_t *corr)
{
#include "decode_rs.c"
}
EXPORT_SYMBOL_GPL(decode_rs8);
#endif
#ifdef CONFIG_REED_SOLOMON_ENC16
/**
* encode_rs16 - Calculate the parity for data values (16bit data width)
* @rs: the rs control structure
* @data: data field of a given type
* @len: data length
* @par: parity data, must be initialized by caller (usually all 0)
* @invmsk: invert data mask (will be xored on data, not on parity!)
*
* Each field in the data array contains up to symbol size bits of valid data.
*/
int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par,
uint16_t invmsk)
{
#include "encode_rs.c"
}
EXPORT_SYMBOL_GPL(encode_rs16);
#endif
#ifdef CONFIG_REED_SOLOMON_DEC16
/**
* decode_rs16 - Decode codeword (16bit data width)
* @rs: the rs control structure
* @data: data field of a given type
* @par: received parity data field
* @len: data length
* @s: syndrome data field (if NULL, syndrome is calculated)
* @no_eras: number of erasures
* @eras_pos: position of erasures, can be NULL
* @invmsk: invert data mask (will be xored on data, not on parity!)
* @corr: buffer to store correction bitmask on eras_pos
*
* Each field in the data array contains up to symbol size bits of valid data.
*/
int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
uint16_t *corr)
{
#include "decode_rs.c"
}
EXPORT_SYMBOL_GPL(decode_rs16);
#endif
EXPORT_SYMBOL_GPL(init_rs);
EXPORT_SYMBOL_GPL(free_rs);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Reed Solomon encoder/decoder");
MODULE_AUTHOR("Phil Karn, Thomas Gleixner");
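/*
 * Illustrative usage sketch (not part of the original file). Assumes
 * CONFIG_REED_SOLOMON_ENC8/DEC8 are enabled; the field polynomial 0x11d
 * and the RS(255,239)-style layout below are example values only.
 */
#if 0
static int example_rs_roundtrip(uint8_t *data /* 239 bytes */)
{
	struct rs_control *rs;
	uint16_t par[16];
	int corrected = -ENOMEM;

	/* GF(2^8), x^8 + x^4 + x^3 + x^2 + 1, fcr = 0, prim = 1, 16 roots */
	rs = init_rs(8, 0x11d, 0, 1, 16);
	if (!rs)
		return corrected;

	memset(par, 0, sizeof(par));	/* parity must start out zeroed */
	encode_rs8(rs, data, 239, par, 0);
	/* ... data and par travel through a noisy medium ... */
	corrected = decode_rs8(rs, data, par, 239, NULL, 0, NULL, 0, NULL);

	free_rs(rs);
	return corrected;	/* corrected symbols, negative if uncorrectable */
}
#endif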

316
lib/rwsem-spinlock.c Normal file

@@ -0,0 +1,316 @@
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
* generic spinlock implementation
*
* Copyright (c) 2001 David Howells (dhowells@redhat.com).
* - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
* - Derived also from comments by Linus
*/
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>
struct rwsem_waiter {
struct list_head list;
struct task_struct *task;
unsigned int flags;
#define RWSEM_WAITING_FOR_READ 0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
/*
* initialise the semaphore
*/
void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held semaphore:
*/
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->activity = 0;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
/*
 * handle the lock release when there are processes blocked on it that can now run
* - if we come here, then:
* - the 'active count' _reached_ zero
* - the 'waiting count' is non-zero
* - the spinlock must be held by the caller
* - woken process blocks are discarded from the list after having task zeroed
* - writers are only woken if wakewrite is non-zero
*/
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
int woken;
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
if (!wakewrite) {
if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
goto out;
goto dont_wake_writers;
}
/* if we are allowed to wake writers, try to grant a single write lock
* if there's a writer at the front of the queue
* - we leave the 'waiting count' incremented to signify potential
* contention
*/
if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
sem->activity = -1;
list_del(&waiter->list);
tsk = waiter->task;
/* Don't touch waiter after ->task has been NULLed */
smp_mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
goto out;
}
/* grant an infinite number of read locks to the front of the queue */
dont_wake_writers:
woken = 0;
while (waiter->flags & RWSEM_WAITING_FOR_READ) {
struct list_head *next = waiter->list.next;
list_del(&waiter->list);
tsk = waiter->task;
smp_mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
woken++;
if (list_empty(&sem->wait_list))
break;
waiter = list_entry(next, struct rwsem_waiter, list);
}
sem->activity += woken;
out:
return sem;
}
/*
* wake a single writer
*/
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
sem->activity = -1;
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
list_del(&waiter->list);
tsk = waiter->task;
smp_mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
return sem;
}
/*
* get a read lock on the semaphore
*/
void fastcall __sched __down_read(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
spin_lock_irq(&sem->wait_lock);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
spin_unlock_irq(&sem->wait_lock);
goto out;
}
tsk = current;
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
waiter.task = tsk;
waiter.flags = RWSEM_WAITING_FOR_READ;
get_task_struct(tsk);
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
if (!waiter.task)
break;
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
tsk->state = TASK_RUNNING;
out:
;
}
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
ret = 1;
}
spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
/*
* get a write lock on the semaphore
* - we increment the waiting count anyway to indicate an exclusive lock
*/
void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
spin_lock_irq(&sem->wait_lock);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
spin_unlock_irq(&sem->wait_lock);
goto out;
}
tsk = current;
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
waiter.task = tsk;
waiter.flags = RWSEM_WAITING_FOR_WRITE;
get_task_struct(tsk);
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
if (!waiter.task)
break;
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
tsk->state = TASK_RUNNING;
out:
;
}
void fastcall __sched __down_write(struct rw_semaphore *sem)
{
__down_write_nested(sem, 0);
}
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
ret = 1;
}
spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
/*
* release a read lock on the semaphore
*/
void fastcall __up_read(struct rw_semaphore *sem)
{
unsigned long flags;
spin_lock_irqsave(&sem->wait_lock, flags);
if (--sem->activity == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
* release a write lock on the semaphore
*/
void fastcall __up_write(struct rw_semaphore *sem)
{
unsigned long flags;
spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 0;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
* downgrade a write lock into a read lock
* - just wake up any readers at the front of the queue
*/
void fastcall __downgrade_write(struct rw_semaphore *sem)
{
unsigned long flags;
spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 1;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
spin_unlock_irqrestore(&sem->wait_lock, flags);
}
EXPORT_SYMBOL(__init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write_nested);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);
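/*
 * Illustrative caller-side sketch (not part of the original file): the
 * slow paths above back the generic rwsem API from <linux/rwsem.h>.
 */
#if 0
static DECLARE_RWSEM(example_sem);
static int example_value;

int example_read(void)
{
	int v;

	down_read(&example_sem);	/* shared; many readers may hold it */
	v = example_value;
	up_read(&example_sem);
	return v;
}

void example_write(int v)
{
	down_write(&example_sem);	/* exclusive; sleeps until granted */
	example_value = v;
	up_write(&example_sem);
}
#endif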

257
lib/rwsem.c Normal file

@@ -0,0 +1,257 @@
/* rwsem.c: R/W semaphores: contention handling functions
*
* Written by David Howells (dhowells@redhat.com).
* Derived from arch/i386/kernel/semaphore.c
*/
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
/*
* Initialize an rwsem:
*/
void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held semaphore:
*/
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
struct rwsem_waiter {
struct list_head list;
struct task_struct *task;
unsigned int flags;
#define RWSEM_WAITING_FOR_READ 0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
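/*
 * Illustration (not part of the original file; assumed typical 32-bit
 * values from <asm/rwsem.h>): the counter packs an "active" part into the
 * low 16 bits and a "waiting" part into the high 16 bits:
 *
 *	RWSEM_UNLOCKED_VALUE	0x00000000
 *	RWSEM_ACTIVE_BIAS	0x00000001
 *	RWSEM_ACTIVE_MASK	0x0000ffff
 *	RWSEM_WAITING_BIAS	(-0x00010000)
 *	RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * One active reader leaves the count at 0x00000001; a writer holding the
 * lock leaves it at 0xffff0001 (i.e. -0x0000ffff); each queued waiter
 * contributes a further -0x00010000.
 */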
/*
 * handle the lock release when there are processes blocked on it that can now run
* - if we come here from up_xxxx(), then:
* - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
* - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
* - there must be someone on the queue
* - the spinlock must be held by the caller
* - woken process blocks are discarded from the list after having task zeroed
* - writers are only woken if downgrading is false
*/
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
struct list_head *next;
signed long oldcount, woken, loop;
if (downgrading)
goto dont_wake_writers;
/* if we came through an up_xxxx() call, we only wake someone up
* if we can transition the active part of the count from 0 -> 1
*/
try_again:
oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
- RWSEM_ACTIVE_BIAS;
if (oldcount & RWSEM_ACTIVE_MASK)
goto undo;
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
/* try to grant a single write lock if there's a writer at the front
* of the queue - note we leave the 'active part' of the count
* incremented by 1 and the waiting part incremented by 0x00010000
*/
if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
goto readers_only;
/* We must be careful not to touch 'waiter' after we set ->task = NULL.
 * It is allocated on the waiter's stack and may become invalid at
* any time after that point (due to a wakeup from another source).
*/
list_del(&waiter->list);
tsk = waiter->task;
smp_mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
goto out;
/* don't want to wake any writers */
dont_wake_writers:
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
goto out;
/* grant an infinite number of read locks to the readers at the front
* of the queue
* - note we increment the 'active part' of the count by the number of
* readers before waking any processes up
*/
readers_only:
woken = 0;
do {
woken++;
if (waiter->list.next == &sem->wait_list)
break;
waiter = list_entry(waiter->list.next,
struct rwsem_waiter, list);
} while (waiter->flags & RWSEM_WAITING_FOR_READ);
loop = woken;
woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
if (!downgrading)
/* we'd already done one increment earlier */
woken -= RWSEM_ACTIVE_BIAS;
rwsem_atomic_add(woken, sem);
next = sem->wait_list.next;
for (; loop > 0; loop--) {
waiter = list_entry(next, struct rwsem_waiter, list);
next = waiter->list.next;
tsk = waiter->task;
smp_mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
}
sem->wait_list.next = next;
next->prev = &sem->wait_list;
out:
return sem;
/* undo the change to count, but check for a transition 1->0 */
undo:
if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
goto out;
goto try_again;
}
/*
* wait for a lock to be granted
*/
static struct rw_semaphore *
rwsem_down_failed_common(struct rw_semaphore *sem,
struct rwsem_waiter *waiter, signed long adjustment)
{
struct task_struct *tsk = current;
signed long count;
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
spin_lock_irq(&sem->wait_lock);
waiter->task = tsk;
get_task_struct(tsk);
list_add_tail(&waiter->list, &sem->wait_list);
/* we're now waiting on the lock, but no longer actively read-locking */
count = rwsem_atomic_update(adjustment, sem);
/* if there are no active locks, wake the front queued process(es) up */
if (!(count & RWSEM_ACTIVE_MASK))
sem = __rwsem_do_wake(sem, 0);
spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
if (!waiter->task)
break;
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
tsk->state = TASK_RUNNING;
return sem;
}
/*
* wait for the read lock to be granted
*/
struct rw_semaphore fastcall __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
waiter.flags = RWSEM_WAITING_FOR_READ;
rwsem_down_failed_common(sem, &waiter,
RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
return sem;
}
/*
* wait for the write lock to be granted
*/
struct rw_semaphore fastcall __sched *
rwsem_down_write_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
waiter.flags = RWSEM_WAITING_FOR_WRITE;
rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
return sem;
}
/*
* handle waking up a waiter on the semaphore
* - up_read/up_write has decremented the active part of count if we come here
*/
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
/*
* downgrade a write lock into a read lock
* - caller incremented waiting part of count and discovered it still negative
* - just wake up any readers at the front of the queue
*/
struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);

176
lib/semaphore-sleepers.c Normal file

@@ -0,0 +1,176 @@
/*
* i386 and x86-64 semaphore implementation.
*
* (C) Copyright 1999 Linus Torvalds
*
* Portions Copyright 1999 Red Hat, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
*/
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to acquire the semaphore, while the "sleeping"
* variable is a count of such acquires.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
 * the increment operation).
*
* "sleeping" and the contention routine ordering is protected
* by the spinlock in the semaphore's waitqueue head.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
/*
* Logic:
* - only on a boundary condition do we need to care. When we go
* from a negative count to a non-negative, we wake people up.
 * - when we go from a non-negative count to a negative, we
* (a) synchronize with the "sleeper" count and (b) make sure
* that we're on the wakeup list before we synchronize so that
* we cannot lose wakeup events.
*/
fastcall void __up(struct semaphore *sem)
{
wake_up(&sem->wait);
}
fastcall void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_UNINTERRUPTIBLE;
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock in
* the wait_queue_head.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_UNINTERRUPTIBLE;
}
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
}
fastcall int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_INTERRUPTIBLE;
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* With signals pending, this turns into
* the trylock failure case - we won't be
 * sleeping, and we can't get the lock as
* it has contention. Just correct the count
* and exit.
*/
if (signal_pending(current)) {
retval = -EINTR;
sem->sleepers = 0;
atomic_add(sleepers, &sem->count);
break;
}
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock in
* wait_queue_head. The "-1" is because we're
* still hoping to get the semaphore.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_INTERRUPTIBLE;
}
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
return retval;
}
/*
* Trylock failed - make sure we correct for
* having decremented the count.
*
* We could have done the trylock with a
* single "cmpxchg" without failure cases,
* but then it wouldn't work on a 386.
*/
fastcall int __down_trylock(struct semaphore * sem)
{
int sleepers;
unsigned long flags;
spin_lock_irqsave(&sem->wait.lock, flags);
sleepers = sem->sleepers + 1;
sem->sleepers = 0;
/*
* Add "everybody else" and us into it. They aren't
* playing, because we own the spinlock in the
* wait_queue_head.
*/
if (!atomic_add_negative(sleepers, &sem->count)) {
wake_up_locked(&sem->wait);
}
spin_unlock_irqrestore(&sem->wait.lock, flags);
return 1;
}
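/*
 * Illustrative caller-side sketch (not part of the original file):
 * typical use of the contention paths above through down()/up() from
 * <asm/semaphore.h>.
 */
#if 0
static DECLARE_MUTEX(example_sem);	/* counting semaphore, count = 1 */

int example_critical_section(void)
{
	if (down_interruptible(&example_sem))
		return -EINTR;		/* woken by a signal, lock not taken */
	/* ... exclusive work ... */
	up(&example_sem);
	return 0;
}
#endif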

95
lib/sha1.c Normal file

@@ -0,0 +1,95 @@
/*
* SHA transform algorithm, originally taken from code written by
* Peter Gutmann, and placed in the public domain.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
/* The SHA f()-functions. */
#define f1(x,y,z) (z ^ (x & (y ^ z))) /* x ? y : z */
#define f2(x,y,z) (x ^ y ^ z) /* XOR */
#define f3(x,y,z) ((x & y) + (z & (x ^ y))) /* majority */
/* The SHA Mysterious Constants */
#define K1 0x5A827999L /* Rounds 0-19: sqrt(2) * 2^30 */
#define K2 0x6ED9EBA1L /* Rounds 20-39: sqrt(3) * 2^30 */
#define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */
#define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */
/**
* sha_transform - single block SHA1 transform
*
* @digest: 160 bit digest to update
 * @in: 512 bits of data to hash
* @W: 80 words of workspace (see note)
*
* This function generates a SHA1 digest for a single 512-bit block.
 * Be warned, it does not handle padding or finalization; do not
* confuse it with the full FIPS 180-1 digest algorithm for variable
* length messages.
*
* Note: If the hash is security sensitive, the caller should be sure
* to clear the workspace. This is left to the caller to avoid
* unnecessary clears between chained hashing operations.
*/
void sha_transform(__u32 *digest, const char *in, __u32 *W)
{
__u32 a, b, c, d, e, t, i;
for (i = 0; i < 16; i++)
W[i] = be32_to_cpu(((const __be32 *)in)[i]);
for (i = 0; i < 64; i++)
W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1);
a = digest[0];
b = digest[1];
c = digest[2];
d = digest[3];
e = digest[4];
for (i = 0; i < 20; i++) {
t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i];
e = d; d = c; c = rol32(b, 30); b = a; a = t;
}
for (; i < 40; i ++) {
t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i];
e = d; d = c; c = rol32(b, 30); b = a; a = t;
}
for (; i < 60; i ++) {
t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i];
e = d; d = c; c = rol32(b, 30); b = a; a = t;
}
for (; i < 80; i ++) {
t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i];
e = d; d = c; c = rol32(b, 30); b = a; a = t;
}
digest[0] += a;
digest[1] += b;
digest[2] += c;
digest[3] += d;
digest[4] += e;
}
EXPORT_SYMBOL(sha_transform);
/**
* sha_init - initialize the vectors for a SHA1 digest
* @buf: vector to initialize
*/
void sha_init(__u32 *buf)
{
buf[0] = 0x67452301;
buf[1] = 0xefcdab89;
buf[2] = 0x98badcfe;
buf[3] = 0x10325476;
buf[4] = 0xc3d2e1f0;
}
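/*
 * Illustrative sketch (not part of the original file): hashing the 3-byte
 * message "abc", which fits in one padded 512-bit block. Padding follows
 * FIPS 180-1: a 0x80 byte, zeroes, then the bit length big endian.
 */
#if 0
static void example_sha1(void)
{
	__u32 digest[5], W[80];
	char block[64];

	memset(block, 0, sizeof(block));
	memcpy(block, "abc", 3);
	block[3] = (char)0x80;	/* the mandatory '1' padding bit */
	block[63] = 3 * 8;	/* message length in bits (fits in one byte) */

	sha_init(digest);
	sha_transform(digest, block, W);
	memset(W, 0, sizeof(W));	/* clear workspace, see note above */
	/* digest[] now holds a9993e36 4706816a ba3e2571 7850c26c 9cd0d89d */
}
#endif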

56
lib/smp_processor_id.c Normal file

@@ -0,0 +1,56 @@
/*
* lib/smp_processor_id.c
*
* DEBUG_PREEMPT variant of smp_processor_id().
*/
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
unsigned int debug_smp_processor_id(void)
{
unsigned long preempt_count = preempt_count();
int this_cpu = raw_smp_processor_id();
cpumask_t this_mask;
if (likely(preempt_count))
goto out;
if (irqs_disabled())
goto out;
/*
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
this_mask = cpumask_of_cpu(this_cpu);
if (cpus_equal(current->cpus_allowed, this_mask))
goto out;
/*
* It is valid to assume CPU-locality during early bootup:
*/
if (system_state != SYSTEM_RUNNING)
goto out;
/*
* Avoid recursion:
*/
preempt_disable();
if (!printk_ratelimit())
goto out_enable;
printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
out_enable:
preempt_enable_no_resched();
out:
return this_cpu;
}
EXPORT_SYMBOL(debug_smp_processor_id);
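/*
 * Illustrative sketch (not part of the original file): the pattern the
 * check above flags, and the usual fix.
 */
#if 0
int cpu;

cpu = smp_processor_id();	/* BUG in preemptible code: may migrate */

cpu = get_cpu();		/* disables preemption around the access */
/* ... touch per-cpu data for 'cpu' ... */
put_cpu();
#endif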

121
lib/sort.c Normal file

@@ -0,0 +1,121 @@
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
*
* Jan 23 2005 Matt Mackall <mpm@selenic.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/slab.h>
static void u32_swap(void *a, void *b, int size)
{
u32 t = *(u32 *)a;
*(u32 *)a = *(u32 *)b;
*(u32 *)b = t;
}
static void generic_swap(void *a, void *b, int size)
{
char t;
do {
t = *(char *)a;
*(char *)a++ = *(char *)b;
*(char *)b++ = t;
} while (--size > 0);
}
/**
* sort - sort an array of elements
* @base: pointer to data to sort
* @num: number of elements
* @size: size of each element
* @cmp: pointer to comparison function
* @swap: pointer to swap function or NULL
*
* This function does a heapsort on the given array. You may provide a
* swap function optimized to your element type.
*
* Sorting time is O(n log n) both on average and worst-case. While
* qsort is about 20% faster on average, it suffers from exploitable
* O(n*n) worst-case behavior and extra memory requirements that make
* it less suitable for kernel use.
*/
void sort(void *base, size_t num, size_t size,
int (*cmp)(const void *, const void *),
void (*swap)(void *, void *, int size))
{
/* pre-scale counters for performance */
int i = (num/2 - 1) * size, n = num * size, c, r;
if (!swap)
swap = (size == 4 ? u32_swap : generic_swap);
/* heapify */
for ( ; i >= 0; i -= size) {
for (r = i; r * 2 + size < n; r = c) {
c = r * 2 + size;
if (c < n - size && cmp(base + c, base + c + size) < 0)
c += size;
if (cmp(base + r, base + c) >= 0)
break;
swap(base + r, base + c, size);
}
}
/* sort */
for (i = n - size; i >= 0; i -= size) {
swap(base, base + i, size);
for (r = 0; r * 2 + size < i; r = c) {
c = r * 2 + size;
if (c < i - size && cmp(base + c, base + c + size) < 0)
c += size;
if (cmp(base + r, base + c) >= 0)
break;
swap(base + r, base + c, size);
}
}
}
EXPORT_SYMBOL(sort);
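/*
 * Worked example of the pre-scaled counters above (illustrative values):
 * for num = 5 elements of size = 4 bytes, n = 20 and heapify starts at
 * i = (5/2 - 1) * 4 = 4, the byte offset of element 1 -- the last parent
 * in the implicit heap. Child offsets are c = 2*r + size, the byte-scaled
 * form of the usual 2*i + 1.
 */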
#if 0
/* a simple boot-time regression test */
int cmpint(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
static int sort_test(void)
{
int *a, i, r = 1;
a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
BUG_ON(!a);
printk("testing sort()\n");
for (i = 0; i < 1000; i++) {
r = (r * 725861) % 6599;
a[i] = r;
}
sort(a, 1000, sizeof(int), cmpint, NULL);
for (i = 0; i < 999; i++)
if (a[i] > a[i+1]) {
printk("sort() failed!\n");
break;
}
kfree(a);
return 0;
}
module_init(sort_test);
#endif

297
lib/spinlock_debug.c Normal file

@@ -0,0 +1,297 @@
/*
* Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*
* This file contains the spinlock/rwlock implementations for
* DEBUG_SPINLOCK.
*/
#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/module.h>
void __spin_lock_init(spinlock_t *lock, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__rwlock_init);
static void spin_bug(spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
if (!debug_locks_off())
return;
if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
owner = lock->owner;
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, current->pid);
printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
".owner_cpu: %d\n",
lock, lock->magic,
owner ? owner->comm : "<none>",
owner ? owner->pid : -1,
lock->owner_cpu);
dump_stack();
}
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
debug_spin_lock_before(spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
lock, "cpu recursion");
}
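/*
 * Illustrative sketch (not part of the original file): the kind of bug
 * the checks above catch. Re-taking a spinlock on the same CPU would
 * deadlock silently; with DEBUG_SPINLOCK the second attempt reports
 * "BUG: spinlock recursion" and dumps a stack trace.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

void example_bug(void)
{
	spin_lock(&example_lock);
	spin_lock(&example_lock);	/* SPIN_BUG_ON(lock->owner == current) fires */
}
#endif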
static inline void debug_spin_lock_after(spinlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
static inline void debug_spin_unlock(spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
static void __spin_lock_debug(spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
int print_once = 1;
for (;;) {
for (i = 0; i < loops; i++) {
if (__raw_spin_trylock(&lock->raw_lock))
return;
__delay(1);
}
/* lockup suspected: */
if (print_once) {
print_once = 0;
printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
"%s/%d, %p\n",
raw_smp_processor_id(), current->comm,
current->pid, lock);
dump_stack();
#ifdef CONFIG_SMP
trigger_all_cpu_backtrace();
#endif
}
}
}
void _raw_spin_lock(spinlock_t *lock)
{
debug_spin_lock_before(lock);
if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
__spin_lock_debug(lock);
debug_spin_lock_after(lock);
}
int _raw_spin_trylock(spinlock_t *lock)
{
int ret = __raw_spin_trylock(&lock->raw_lock);
if (ret)
debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
/*
* Must not happen on UP:
*/
SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
return ret;
}
void _raw_spin_unlock(spinlock_t *lock)
{
debug_spin_unlock(lock);
__raw_spin_unlock(&lock->raw_lock);
}
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
return;
printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
msg, raw_smp_processor_id(), current->comm,
current->pid, lock);
dump_stack();
}
#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
#if 0 /* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
int print_once = 1;
for (;;) {
for (i = 0; i < loops; i++) {
if (__raw_read_trylock(&lock->raw_lock))
return;
__delay(1);
}
/* lockup suspected: */
if (print_once) {
print_once = 0;
printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
"%s/%d, %p\n",
raw_smp_processor_id(), current->comm,
current->pid, lock);
dump_stack();
}
}
}
#endif
void _raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
__raw_read_lock(&lock->raw_lock);
}
int _raw_read_trylock(rwlock_t *lock)
{
int ret = __raw_read_trylock(&lock->raw_lock);
#ifndef CONFIG_SMP
/*
* Must not happen on UP:
*/
RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
return ret;
}
void _raw_read_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
__raw_read_unlock(&lock->raw_lock);
}
static inline void debug_write_lock_before(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
lock, "cpu recursion");
}
static inline void debug_write_lock_after(rwlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
static inline void debug_write_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
#if 0 /* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
int print_once = 1;
for (;;) {
for (i = 0; i < loops; i++) {
if (__raw_write_trylock(&lock->raw_lock))
return;
__delay(1);
}
/* lockup suspected: */
if (print_once) {
print_once = 0;
printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
"%s/%d, %p\n",
raw_smp_processor_id(), current->comm,
current->pid, lock);
dump_stack();
}
}
}
#endif
void _raw_write_lock(rwlock_t *lock)
{
debug_write_lock_before(lock);
__raw_write_lock(&lock->raw_lock);
debug_write_lock_after(lock);
}
int _raw_write_trylock(rwlock_t *lock)
{
int ret = __raw_write_trylock(&lock->raw_lock);
if (ret)
debug_write_lock_after(lock);
#ifndef CONFIG_SMP
/*
* Must not happen on UP:
*/
RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
return ret;
}
void _raw_write_unlock(rwlock_t *lock)
{
debug_write_unlock(lock);
__raw_write_unlock(&lock->raw_lock);
}

634
lib/string.c Normal file

@@ -0,0 +1,634 @@
/*
* linux/lib/string.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* stupid library routines.. The optimized versions should generally be found
* as inline code in <asm-xx/string.h>
*
* These are buggy as well..
*
* * Fri Jun 25 1999, Ingo Oeser <ioe@informatik.tu-chemnitz.de>
* - Added strsep() which will replace strtok() soon (because strsep() is
* reentrant and should be faster). Use only strsep() in new code, please.
*
* * Sat Feb 09 2002, Jason Thomas <jason@topic.com.au>,
* Matthew Hawkins <matt@mh.dropbear.id.au>
* - Kissed strtok() goodbye
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/module.h>
#ifndef __HAVE_ARCH_STRNICMP
/**
* strnicmp - Case insensitive, length-limited string comparison
* @s1: One string
* @s2: The other string
* @len: the maximum number of characters to compare
*/
int strnicmp(const char *s1, const char *s2, size_t len)
{
/* Yes, Virginia, it had better be unsigned */
unsigned char c1, c2;
c1 = c2 = 0;
if (len) {
do {
c1 = *s1;
c2 = *s2;
s1++;
s2++;
if (!c1)
break;
if (!c2)
break;
if (c1 == c2)
continue;
c1 = tolower(c1);
c2 = tolower(c2);
if (c1 != c2)
break;
} while (--len);
}
return (int)c1 - (int)c2;
}
EXPORT_SYMBOL(strnicmp);
#endif
#ifndef __HAVE_ARCH_STRCPY
/**
* strcpy - Copy a %NUL terminated string
* @dest: Where to copy the string to
* @src: Where to copy the string from
*/
#undef strcpy
char *strcpy(char *dest, const char *src)
{
char *tmp = dest;
while ((*dest++ = *src++) != '\0')
/* nothing */;
return tmp;
}
EXPORT_SYMBOL(strcpy);
#endif
#ifndef __HAVE_ARCH_STRNCPY
/**
* strncpy - Copy a length-limited, %NUL-terminated string
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @count: The maximum number of bytes to copy
*
* The result is not %NUL-terminated if the source exceeds
* @count bytes.
*
 * In the case where the length of @src is less than @count, the
 * remainder of @dest will be padded with %NUL.
*
*/
char *strncpy(char *dest, const char *src, size_t count)
{
char *tmp = dest;
while (count) {
if ((*tmp = *src) != 0)
src++;
tmp++;
count--;
}
return dest;
}
EXPORT_SYMBOL(strncpy);
#endif
#ifndef __HAVE_ARCH_STRLCPY
/**
* strlcpy - Copy a %NUL terminated string into a sized buffer
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @size: size of destination buffer
*
* Compatible with *BSD: the result is always a valid
* NUL-terminated string that fits in the buffer (unless,
* of course, the buffer size is zero). It does not pad
* out the result like strncpy() does.
*/
size_t strlcpy(char *dest, const char *src, size_t size)
{
size_t ret = strlen(src);
if (size) {
size_t len = (ret >= size) ? size - 1 : ret;
memcpy(dest, src, len);
dest[len] = '\0';
}
return ret;
}
EXPORT_SYMBOL(strlcpy);
#endif
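/*
 * Illustrative comparison (not part of the original file; assumed buffer):
 * unlike strncpy(), strlcpy() always terminates and reports the full
 * source length so truncation can be detected.
 */
#if 0
char buf[8];

strncpy(buf, "toolongstring", sizeof(buf));	/* buf NOT NUL-terminated */
strlcpy(buf, "toolongstring", sizeof(buf));	/* buf = "toolong", returns 13 */
#endif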
#ifndef __HAVE_ARCH_STRCAT
/**
* strcat - Append one %NUL-terminated string to another
* @dest: The string to be appended to
* @src: The string to append to it
*/
#undef strcat
char *strcat(char *dest, const char *src)
{
char *tmp = dest;
while (*dest)
dest++;
while ((*dest++ = *src++) != '\0')
;
return tmp;
}
EXPORT_SYMBOL(strcat);
#endif
#ifndef __HAVE_ARCH_STRNCAT
/**
* strncat - Append a length-limited, %NUL-terminated string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @count: The maximum numbers of bytes to copy
*
* Note that in contrast to strncpy(), strncat() ensures the result is
* terminated.
*/
char *strncat(char *dest, const char *src, size_t count)
{
char *tmp = dest;
if (count) {
while (*dest)
dest++;
while ((*dest++ = *src++) != 0) {
if (--count == 0) {
*dest = '\0';
break;
}
}
}
return tmp;
}
EXPORT_SYMBOL(strncat);
#endif
#ifndef __HAVE_ARCH_STRLCAT
/**
* strlcat - Append a length-limited, %NUL-terminated string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @count: The size of the destination buffer.
*/
size_t strlcat(char *dest, const char *src, size_t count)
{
size_t dsize = strlen(dest);
size_t len = strlen(src);
size_t res = dsize + len;
/* This would be a bug */
BUG_ON(dsize >= count);
dest += dsize;
count -= dsize;
if (len >= count)
len = count-1;
memcpy(dest, src, len);
dest[len] = 0;
return res;
}
EXPORT_SYMBOL(strlcat);
#endif
#ifndef __HAVE_ARCH_STRCMP
/**
* strcmp - Compare two strings
* @cs: One string
* @ct: Another string
*/
#undef strcmp
int strcmp(const char *cs, const char *ct)
{
signed char __res;
while (1) {
if ((__res = *cs - *ct++) != 0 || !*cs++)
break;
}
return __res;
}
EXPORT_SYMBOL(strcmp);
#endif
#ifndef __HAVE_ARCH_STRNCMP
/**
* strncmp - Compare two length-limited strings
* @cs: One string
* @ct: Another string
* @count: The maximum number of bytes to compare
*/
int strncmp(const char *cs, const char *ct, size_t count)
{
signed char __res = 0;
while (count) {
if ((__res = *cs - *ct++) != 0 || !*cs++)
break;
count--;
}
return __res;
}
EXPORT_SYMBOL(strncmp);
#endif
#ifndef __HAVE_ARCH_STRCHR
/**
* strchr - Find the first occurrence of a character in a string
* @s: The string to be searched
* @c: The character to search for
*/
char *strchr(const char *s, int c)
{
for (; *s != (char)c; ++s)
if (*s == '\0')
return NULL;
return (char *)s;
}
EXPORT_SYMBOL(strchr);
#endif
#ifndef __HAVE_ARCH_STRRCHR
/**
* strrchr - Find the last occurrence of a character in a string
* @s: The string to be searched
* @c: The character to search for
*/
char *strrchr(const char *s, int c)
{
const char *p = s + strlen(s);
do {
if (*p == (char)c)
return (char *)p;
} while (--p >= s);
return NULL;
}
EXPORT_SYMBOL(strrchr);
#endif
#ifndef __HAVE_ARCH_STRNCHR
/**
* strnchr - Find a character in a length limited string
* @s: The string to be searched
* @count: The number of characters to be searched
* @c: The character to search for
*/
char *strnchr(const char *s, size_t count, int c)
{
for (; count-- && *s != '\0'; ++s)
if (*s == (char)c)
return (char *)s;
return NULL;
}
EXPORT_SYMBOL(strnchr);
#endif
/**
* strstrip - Removes leading and trailing whitespace from @s.
* @s: The string to be stripped.
*
* Note that the first trailing whitespace is replaced with a %NUL-terminator
* in the given string @s. Returns a pointer to the first non-whitespace
* character in @s.
*/
char *strstrip(char *s)
{
size_t size;
char *end;
size = strlen(s);
if (!size)
return s;
end = s + size - 1;
while (end >= s && isspace(*end))
end--;
*(end + 1) = '\0';
while (*s && isspace(*s))
s++;
return s;
}
EXPORT_SYMBOL(strstrip);
#ifndef __HAVE_ARCH_STRLEN
/**
* strlen - Find the length of a string
* @s: The string to be sized
*/
size_t strlen(const char *s)
{
const char *sc;
for (sc = s; *sc != '\0'; ++sc)
/* nothing */;
return sc - s;
}
EXPORT_SYMBOL(strlen);
#endif
#ifndef __HAVE_ARCH_STRNLEN
/**
* strnlen - Find the length of a length-limited string
* @s: The string to be sized
* @count: The maximum number of bytes to search
*/
size_t strnlen(const char *s, size_t count)
{
const char *sc;
for (sc = s; count-- && *sc != '\0'; ++sc)
/* nothing */;
return sc - s;
}
EXPORT_SYMBOL(strnlen);
#endif
#ifndef __HAVE_ARCH_STRSPN
/**
 * strspn - Calculate the length of the initial substring of @s which only contains characters from @accept
* @s: The string to be searched
* @accept: The string to search for
*/
size_t strspn(const char *s, const char *accept)
{
const char *p;
const char *a;
size_t count = 0;
for (p = s; *p != '\0'; ++p) {
for (a = accept; *a != '\0'; ++a) {
if (*p == *a)
break;
}
if (*a == '\0')
return count;
++count;
}
return count;
}
EXPORT_SYMBOL(strspn);
#endif
#ifndef __HAVE_ARCH_STRCSPN
/**
 * strcspn - Calculate the length of the initial substring of @s which does not contain characters from @reject
* @s: The string to be searched
* @reject: The string to avoid
*/
size_t strcspn(const char *s, const char *reject)
{
const char *p;
const char *r;
size_t count = 0;
for (p = s; *p != '\0'; ++p) {
for (r = reject; *r != '\0'; ++r) {
if (*p == *r)
return count;
}
++count;
}
return count;
}
EXPORT_SYMBOL(strcspn);
#endif
#ifndef __HAVE_ARCH_STRPBRK
/**
* strpbrk - Find the first occurrence of a set of characters
* @cs: The string to be searched
* @ct: The characters to search for
*/
char *strpbrk(const char *cs, const char *ct)
{
const char *sc1, *sc2;
for (sc1 = cs; *sc1 != '\0'; ++sc1) {
for (sc2 = ct; *sc2 != '\0'; ++sc2) {
if (*sc1 == *sc2)
return (char *)sc1;
}
}
return NULL;
}
EXPORT_SYMBOL(strpbrk);
#endif
#ifndef __HAVE_ARCH_STRSEP
/**
* strsep - Split a string into tokens
* @s: The string to be searched
* @ct: The characters to search for
*
* strsep() updates @s to point after the token, ready for the next call.
*
* It returns empty tokens, too, behaving exactly like the libc function
* of that name. In fact, it was stolen from glibc2 and de-fancy-fied.
* Same semantics, slimmer shape. ;)
*/
char *strsep(char **s, const char *ct)
{
char *sbegin = *s;
char *end;
if (sbegin == NULL)
return NULL;
end = strpbrk(sbegin, ct);
if (end)
*end++ = '\0';
*s = end;
return sbegin;
}
EXPORT_SYMBOL(strsep);
#endif
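/*
 * Illustrative sketch (not part of the original file): splitting a
 * comma-separated option string; note the empty token for ",,".
 */
#if 0
char buf[] = "eth0,eth1,,eth2";
char *s = buf, *tok;

while ((tok = strsep(&s, ",")) != NULL)
	printk("token: '%s'\n", tok);	/* eth0, eth1, "" (empty), eth2 */
#endif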
#ifndef __HAVE_ARCH_MEMSET
/**
* memset - Fill a region of memory with the given value
* @s: Pointer to the start of the area.
* @c: The byte to fill the area with
* @count: The size of the area.
*
* Do not use memset() to access IO space, use memset_io() instead.
*/
void *memset(void *s, int c, size_t count)
{
char *xs = s;
while (count--)
*xs++ = c;
return s;
}
EXPORT_SYMBOL(memset);
#endif
#ifndef __HAVE_ARCH_MEMCPY
/**
* memcpy - Copy one area of memory to another
* @dest: Where to copy to
* @src: Where to copy from
* @count: The size of the area.
*
* You should not use this function to access IO space, use memcpy_toio()
* or memcpy_fromio() instead.
*/
void *memcpy(void *dest, const void *src, size_t count)
{
char *tmp = dest;
const char *s = src;
while (count--)
*tmp++ = *s++;
return dest;
}
EXPORT_SYMBOL(memcpy);
#endif
#ifndef __HAVE_ARCH_MEMMOVE
/**
* memmove - Copy one area of memory to another
* @dest: Where to copy to
* @src: Where to copy from
* @count: The size of the area.
*
* Unlike memcpy(), memmove() copes with overlapping areas.
*/
void *memmove(void *dest, const void *src, size_t count)
{
char *tmp;
const char *s;
if (dest <= src) {
tmp = dest;
s = src;
while (count--)
*tmp++ = *s++;
} else {
tmp = dest;
tmp += count;
s = src;
s += count;
while (count--)
*--tmp = *--s;
}
return dest;
}
EXPORT_SYMBOL(memmove);
#endif
#ifndef __HAVE_ARCH_MEMCMP
/**
* memcmp - Compare two areas of memory
* @cs: One area of memory
* @ct: Another area of memory
* @count: The size of the area.
*/
#undef memcmp
int memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res = 0;
for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
if ((res = *su1 - *su2) != 0)
break;
return res;
}
EXPORT_SYMBOL(memcmp);
#endif
#ifndef __HAVE_ARCH_MEMSCAN
/**
* memscan - Find a character in an area of memory.
* @addr: The memory area
* @c: The byte to search for
* @size: The size of the area.
*
* returns the address of the first occurrence of @c, or 1 byte past
* the area if @c is not found
*/
void *memscan(void *addr, int c, size_t size)
{
unsigned char *p = addr;
while (size) {
if (*p == c)
return (void *)p;
p++;
size--;
}
return (void *)p;
}
EXPORT_SYMBOL(memscan);
#endif
#ifndef __HAVE_ARCH_STRSTR
/**
* strstr - Find the first substring in a %NUL terminated string
* @s1: The string to be searched
* @s2: The string to search for
*/
char *strstr(const char *s1, const char *s2)
{
int l1, l2;
l2 = strlen(s2);
if (!l2)
return (char *)s1;
l1 = strlen(s1);
while (l1 >= l2) {
l1--;
if (!memcmp(s1, s2, l2))
return (char *)s1;
s1++;
}
return NULL;
}
EXPORT_SYMBOL(strstr);
#endif
#ifndef __HAVE_ARCH_MEMCHR
/**
* memchr - Find a character in an area of memory.
* @s: The memory area
* @c: The byte to search for
* @n: The size of the area.
*
* returns the address of the first occurrence of @c, or %NULL
* if @c is not found
*/
void *memchr(const void *s, int c, size_t n)
{
const unsigned char *p = s;
while (n-- != 0) {
if ((unsigned char)c == *p++) {
return (void *)(p - 1);
}
}
return NULL;
}
EXPORT_SYMBOL(memchr);
#endif

795
lib/swiotlb.c Normal file

@@ -0,0 +1,795 @@
/*
* Dynamic DMA mapping support.
*
* This implementation is a fallback for platforms that do not support
* I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
* Copyright (C) 2000, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
* 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
* unnecessary i-cache flushing.
* 04/07/.. ak Better overflow handling. Assorted fixes.
* 05/09/10 linville Add support for syncing ranges, support syncing for
* DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
*/
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
/*
* Maximum allowable number of contiguous slabs to map,
 * must be a power of 2. What is the appropriate value?
* The complexity of {map,unmap}_single is linearly dependent on this value.
*/
#define IO_TLB_SEGSIZE 128
/*
* log of the size of each IO TLB slab. The number of slabs is command line
* controllable.
*/
#define IO_TLB_SHIFT 11
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
/*
* Minimum IO TLB size to bother booting with. Systems with mainly
* 64bit capable cards will only lightly use the swiotlb. If we can't
* allocate a contiguous 1MB, we're probably in trouble anyway.
*/
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
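/*
 * Worked example of the constants above (illustrative): a slab is
 * 1 << IO_TLB_SHIFT = 2048 bytes, so IO_TLB_SEGSIZE = 128 caps a single
 * mapping at 256KB; IO_TLB_MIN_SLABS = 512 slabs is the 1MB floor; and
 * the 64MB default used by swiotlb_init() comes to 32768 slabs.
 */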
/*
* Enumeration for sync targets
*/
enum dma_sync_target {
SYNC_FOR_CPU = 0,
SYNC_FOR_DEVICE = 1,
};
int swiotlb_force;
/*
* Used to do a quick range check in swiotlb_unmap_single and
* swiotlb_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
static char *io_tlb_start, *io_tlb_end;
/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
* io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
*/
static unsigned long io_tlb_nslabs;
/*
* When the IOMMU overflows we return a fallback buffer. This sets the size.
*/
static unsigned long io_tlb_overflow = 32*1024;
void *io_tlb_overflow_buffer;
/*
* This is a free list describing the number of free entries available from
* each index
*/
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
/*
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
static unsigned char **io_tlb_orig_addr;
/*
* Protect the above data structures in the map and unmap calls
*/
static DEFINE_SPINLOCK(io_tlb_lock);
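/*
 * Illustration of the free-list encoding (assumed tiny pool, not part of
 * the original file): with IO_TLB_SEGSIZE = 4 and everything free,
 * io_tlb_list reads 4 3 2 1 4 3 2 1 ...; after allocating two slots at
 * index 0 it reads 0 0 2 1 4 3 2 1, so the search loop can tell at a
 * glance how many contiguous free slots start at any index.
 */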
static int __init
setup_io_tlb_npages(char *str)
{
if (isdigit(*str)) {
io_tlb_nslabs = simple_strtoul(str, &str, 0);
/* avoid tail segment of size < IO_TLB_SEGSIZE */
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
if (*str == ',')
++str;
if (!strcmp(str, "force"))
swiotlb_force = 1;
return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
*/
void __init
swiotlb_init_with_default_size(size_t default_size)
{
unsigned long i, bytes;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/*
* Get IO TLB memory from the low pages
*/
io_tlb_start = alloc_bootmem_low_pages(bytes);
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
io_tlb_end = io_tlb_start + bytes;
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
/*
* Get the overflow emergency buffer
*/
io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
if (!io_tlb_overflow_buffer)
panic("Cannot allocate SWIOTLB overflow buffer!\n");
printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
}
void __init
swiotlb_init(void)
{
swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
}
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
* This should be just like above, but with some error catching.
*/
int
swiotlb_late_init_with_default_size(size_t default_size)
{
unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
unsigned int order;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
/*
* Get IO TLB memory from the low pages
*/
order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
io_tlb_nslabs = SLABS_PER_PAGE << order;
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
order);
if (io_tlb_start)
break;
order--;
}
if (!io_tlb_start)
goto cleanup1;
if (order != get_order(bytes)) {
printk(KERN_WARNING "Warning: only able to allocate %ld MB "
"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
}
io_tlb_end = io_tlb_start + bytes;
memset(io_tlb_start, 0, bytes);
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs * sizeof(int)));
if (!io_tlb_list)
goto cleanup2;
for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs * sizeof(char *)));
if (!io_tlb_orig_addr)
goto cleanup3;
memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
/*
* Get the overflow emergency buffer
*/
io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
get_order(io_tlb_overflow));
if (!io_tlb_overflow_buffer)
goto cleanup4;
printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
"0x%lx\n", bytes >> 20,
virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
return 0;
cleanup4:
free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
sizeof(char *)));
io_tlb_orig_addr = NULL;
cleanup3:
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
io_tlb_list = NULL;
cleanup2:
io_tlb_end = NULL;
free_pages((unsigned long)io_tlb_start, order);
io_tlb_start = NULL;
cleanup1:
io_tlb_nslabs = req_nslabs;
return -ENOMEM;
}
static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
dma_addr_t mask = 0xffffffff;
/* If the device has a mask, use it, otherwise default to 32 bits */
if (hwdev && hwdev->dma_mask)
mask = *hwdev->dma_mask;
return (addr & ~mask) != 0;
}
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
static void *
map_single(struct device *hwdev, char *buffer, size_t size, int dir)
{
unsigned long flags;
char *dma_addr;
unsigned int nslots, stride, index, wrap;
int i;
/*
* For mappings greater than a page, we limit the stride (and
* hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
if (size > PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
BUG_ON(!nslots);
/*
* Find suitable number of IO TLB entries size that will fit this
* request and allocate a buffer from that IO TLB pool.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
{
wrap = index = ALIGN(io_tlb_index, stride);
if (index >= io_tlb_nslabs)
wrap = index = 0;
do {
/*
* If we find a slot that indicates we have 'nslots'
* number of contiguous buffers, we allocate the
* buffers from that slot and mark the entries as '0'
* indicating unavailable.
*/
if (io_tlb_list[index] >= nslots) {
int count = 0;
for (i = index; i < (int) (index + nslots); i++)
io_tlb_list[i] = 0;
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
/*
* Update the indices to avoid searching in
* the next round.
*/
io_tlb_index = ((index + nslots) < io_tlb_nslabs
? (index + nslots) : 0);
goto found;
}
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
} while (index != wrap);
spin_unlock_irqrestore(&io_tlb_lock, flags);
return NULL;
}
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
/*
* Save away the mapping from the original address to the DMA address.
* This is needed when we sync the memory. Then we sync the buffer if
* needed.
*/
io_tlb_orig_addr[index] = buffer;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
memcpy(dma_addr, buffer, size);
return dma_addr;
}
/*
* dma_addr is the kernel virtual address of the bounce buffer to unmap.
*/
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
char *buffer = io_tlb_orig_addr[index];
/*
* First, sync the memory before unmapping the entry
*/
if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
/*
 * bounce... copy the data back into the original buffer
 * and delete the bounce buffer.
*/
memcpy(buffer, dma_addr, size);
/*
* Return the buffer to the free list by setting the corresponding
 * entries to indicate the number of contiguous entries available.
* While returning the entries to the free list, we merge the entries
* with slots below and above the pool being returned.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
{
count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
io_tlb_list[index + nslots] : 0);
/*
* Step 1: return the slots to the free list, merging the
* slots with succeeding slots
*/
for (i = index + nslots - 1; i >= index; i--)
io_tlb_list[i] = ++count;
/*
* Step 2: merge the returned slots with the preceding slots,
* if available (non zero)
*/
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
int dir, int target)
{
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
char *buffer = io_tlb_orig_addr[index];
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
memcpy(buffer, dma_addr, size);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
memcpy(dma_addr, buffer, size);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
default:
BUG();
}
}
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
dma_addr_t dev_addr;
void *ret;
int order = get_order(size);
/*
* XXX fix me: the DMA API should pass us an explicit DMA mask
* instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
* bit range instead of a 16MB one).
*/
flags |= GFP_DMA;
ret = (void *)__get_free_pages(flags, order);
if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
/*
* The allocated memory isn't reachable by the device.
* Fall back on swiotlb_map_single().
*/
free_pages((unsigned long) ret, order);
ret = NULL;
}
if (!ret) {
/*
* We are either out of memory or the device can't DMA
* to GFP_DMA memory; fall back on
* swiotlb_map_single(), which will grab memory from
* the lowest available address range.
*/
dma_addr_t handle;
handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
if (swiotlb_dma_mapping_error(handle))
return NULL;
ret = bus_to_virt(handle);
}
memset(ret, 0, size);
dev_addr = virt_to_bus(ret);
/* Confirm address can be DMA'd by device */
if (address_needs_mapping(hwdev, dev_addr)) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)*hwdev->dma_mask,
(unsigned long long)dev_addr);
panic("swiotlb_alloc_coherent: allocated memory is out of "
"range for device");
}
*dma_handle = dev_addr;
return ret;
}
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
if (!(vaddr >= (void *)io_tlb_start
&& vaddr < (void *)io_tlb_end))
free_pages((unsigned long) vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
}
static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
/*
* Ran out of IOMMU space for this operation. This is very bad.
* Unfortunately, drivers cannot handle this failure properly
* unless they check for dma_mapping_error() (most don't).
* When the mapping is small enough, return a static buffer to limit
* the damage, or panic when the transfer is too big.
*/
printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
"device %s\n", size, dev ? dev->bus_id : "?");
if (size > io_tlb_overflow && do_panic) {
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
panic("DMA: Memory would be corrupted\n");
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
panic("DMA: Random memory would be DMAed\n");
}
}
/*
* Map a single buffer of the indicated size for DMA in streaming mode. The
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
* either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
*/
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
dma_addr_t dev_addr = virt_to_bus(ptr);
void *map;
BUG_ON(dir == DMA_NONE);
/*
* If the pointer passed in happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
return dev_addr;
/*
* Oh well, have to allocate and map a bounce buffer.
*/
map = map_single(hwdev, ptr, size, dir);
if (!map) {
swiotlb_full(hwdev, size, dir, 1);
map = io_tlb_overflow_buffer;
}
dev_addr = virt_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
*/
if (address_needs_mapping(hwdev, dev_addr))
panic("map_single: bounce buffer is not DMA'ble");
return dev_addr;
}
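/*
* A minimal usage sketch (not built): how a driver might pair the
* streaming-mode calls above. The device, buffer and length are
* hypothetical.
*/
#if 0
static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
dma_addr_t handle = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
if (swiotlb_dma_mapping_error(handle))
return -ENOMEM; /* fell back to the overflow buffer */
/* ... program the device with 'handle' and wait for completion ... */
swiotlb_unmap_single(dev, handle, len, DMA_TO_DEVICE);
return 0;
}
#endif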
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
* match what was provided for in a previous swiotlb_map_single call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
int dir)
{
char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
unmap_single(hwdev, dma_addr, size, dir);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
}
/*
* Make physical memory consistent for a single streaming mode DMA translation
* after a transfer.
*
* If you perform a swiotlb_map_single() but wish to interrogate the buffer
* using the cpu, yet do not wish to tear down the dma mapping, you must
* call this function before doing so. At the next point you give the dma
* address back to the card, you must first perform a
* swiotlb_sync_single_for_device(), and then the device again owns the buffer.
*/
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, int target)
{
char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
}
void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir)
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir)
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
/*
* Same as above, but for a sub-range of the mapping.
*/
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size,
int dir, int target)
{
char *dma_addr = bus_to_virt(dev_addr) + offset;
BUG_ON(dir == DMA_NONE);
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
}
void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size, int dir)
{
swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
SYNC_FOR_CPU);
}
void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size, int dir)
{
swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
SYNC_FOR_DEVICE);
}
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
* This is the scatter-gather version of the above swiotlb_map_single
* interface. Here the scatter gather list elements are each tagged with the
* appropriate dma address and length. They are obtained via
* sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for swiotlb_map_single are the
* same here.
*/
int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
int dir)
{
void *addr;
dma_addr_t dev_addr;
int i;
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++) {
addr = SG_ENT_VIRT_ADDRESS(sg);
dev_addr = virt_to_bus(addr);
if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
void *map = map_single(hwdev, addr, sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
swiotlb_full(hwdev, sg->length, dir, 0);
swiotlb_unmap_sg(hwdev, sg - i, i, dir);
sg[0].dma_length = 0;
return 0;
}
sg->dma_address = virt_to_bus(map);
} else
sg->dma_address = dev_addr;
sg->dma_length = sg->length;
}
return nelems;
}
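/*
* A minimal usage sketch (not built) honouring the error contract
* above: a return of 0 means the partial mappings were already
* unwound. The names are hypothetical.
*/
#if 0
static int example_map_sg(struct device *dev, struct scatterlist *sg,
int nents)
{
int n = swiotlb_map_sg(dev, sg, nents, DMA_TO_DEVICE);
if (n == 0)
return -ENOMEM; /* swiotlb_map_sg() already cleaned up */
/* ... perform the transfer ... */
swiotlb_unmap_sg(dev, sg, n, DMA_TO_DEVICE);
return 0;
}
#endif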
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
* concerning calls here are the same as for swiotlb_unmap_single() above.
*/
void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
int dir)
{
int i;
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
unmap_single(hwdev, bus_to_virt(sg->dma_address),
sg->dma_length, dir);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}
/*
* Make physical memory consistent for a set of streaming mode DMA translations
* after a transfer.
*
* The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
* and usage.
*/
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
int nelems, int dir, int target)
{
int i;
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
sync_single(hwdev, bus_to_virt(sg->dma_address),
sg->dma_length, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}
void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int dir)
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int dir)
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
int
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24 bits
* during bus mastering, then you would pass 0x00ffffff as the mask to
* this function.
*/
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return virt_to_bus(io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_init);
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);

316
lib/textsearch.c Normal file
View File

@@ -0,0 +1,316 @@
/*
* lib/textsearch.c Generic text search interface
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Thomas Graf <tgraf@suug.ch>
* Pablo Neira Ayuso <pablo@eurodev.net>
*
* ==========================================================================
*
* INTRODUCTION
*
* The textsearch infrastructure provides text searching facilities for
* both linear and non-linear data. Individual search algorithms are
* implemented in modules and chosen by the user.
*
* ARCHITECTURE
*
* User
* +----------------+
* | finish()|<--------------(6)-----------------+
* |get_next_block()|<--------------(5)---------------+ |
* | | Algorithm | |
* | | +------------------------------+
* | | | init() find() destroy() |
* | | +------------------------------+
* | | Core API ^ ^ ^
* | | +---------------+ (2) (4) (8)
* | (1)|----->| prepare() |---+ | |
* | (3)|----->| find()/next() |-----------+ |
* | (7)|----->| destroy() |----------------------+
* +----------------+ +---------------+
*
* (1) User configures a search by calling _prepare() specifying the
* search parameters such as the pattern and algorithm name.
* (2) Core requests the algorithm to allocate and initialize a search
* configuration according to the specified parameters.
* (3) User starts the search(es) by calling _find() or _next() to
* fetch subsequent occurrences. A state variable is provided
* to the algorithm to store persistent variables.
* (4) Core eventually resets the search offset and forwards the find()
* request to the algorithm.
* (5) Algorithm calls get_next_block() provided by the user continuously
* to fetch the data to be searched, block by block.
* (6) Algorithm invokes finish() after the last call to get_next_block
* to clean up any leftovers from get_next_block. (Optional)
* (7) User destroys the configuration by calling _destroy().
* (8) Core notifies the algorithm to destroy algorithm specific
* allocations. (Optional)
*
* USAGE
*
* Before a search can be performed, a configuration must be created
* by calling textsearch_prepare(), specifying the search algorithm and
* the pattern to look for. The returned configuration may then be reused
* an arbitrary number of times, and even in parallel, as long as a
* separate struct ts_state variable is provided to every instance.
*
* The actual search is performed either by calling
* textsearch_find_continuous() for linear data or by providing your own
* get_next_block() implementation and calling textsearch_find(). Both
* functions return the position of the first occurrence of the pattern,
* or UINT_MAX if no match was found. Subsequent occurrences can be found
* by calling
* textsearch_next() regardless of the linearity of the data.
*
* Once you're done using a configuration, it must be given back via
* textsearch_destroy().
*
* EXAMPLE
*
* int pos;
* struct ts_config *conf;
* struct ts_state state;
* const char *pattern = "chicken";
* const char *example = "We dance the funky chicken";
*
* conf = textsearch_prepare("kmp", pattern, strlen(pattern),
* GFP_KERNEL, TS_AUTOLOAD);
* if (IS_ERR(conf)) {
* err = PTR_ERR(conf);
* goto errout;
* }
*
* pos = textsearch_find_continuous(conf, &state, example, strlen(example));
* if (pos != UINT_MAX)
* panic("Oh my god, dancing chickens at %d\n", pos);
*
* textsearch_destroy(conf);
*
* ==========================================================================
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/err.h>
#include <linux/textsearch.h>
static LIST_HEAD(ts_ops);
static DEFINE_SPINLOCK(ts_mod_lock);
static inline struct ts_ops *lookup_ts_algo(const char *name)
{
struct ts_ops *o;
rcu_read_lock();
list_for_each_entry_rcu(o, &ts_ops, list) {
if (!strcmp(name, o->name)) {
if (!try_module_get(o->owner))
o = NULL;
rcu_read_unlock();
return o;
}
}
rcu_read_unlock();
return NULL;
}
/**
* textsearch_register - register a textsearch module
* @ops: operations lookup table
*
* This function must be called by textsearch modules to announce
* their presence. The specified &ops must have %name set to a
* unique identifier and the callbacks find(), init(), get_pattern(),
* and get_pattern_len() must be implemented.
*
* Returns 0, or -EEXIST if another module has already registered
* with the same name.
*/
int textsearch_register(struct ts_ops *ops)
{
int err = -EEXIST;
struct ts_ops *o;
if (ops->name == NULL || ops->find == NULL || ops->init == NULL ||
ops->get_pattern == NULL || ops->get_pattern_len == NULL)
return -EINVAL;
spin_lock(&ts_mod_lock);
list_for_each_entry(o, &ts_ops, list) {
if (!strcmp(ops->name, o->name))
goto errout;
}
list_add_tail_rcu(&ops->list, &ts_ops);
err = 0;
errout:
spin_unlock(&ts_mod_lock);
return err;
}
/**
* textsearch_unregister - unregister a textsearch module
* @ops: operations lookup table
*
* This function must be called by textsearch modules to announce
* their disappearance, for example when the module gets unloaded.
* The &ops parameter must be the same as the one during the
* registration.
*
* Returns 0 on success or -ENOENT if no matching textsearch
* registration was found.
*/
int textsearch_unregister(struct ts_ops *ops)
{
int err = 0;
struct ts_ops *o;
spin_lock(&ts_mod_lock);
list_for_each_entry(o, &ts_ops, list) {
if (o == ops) {
list_del_rcu(&o->list);
goto out;
}
}
err = -ENOENT;
out:
spin_unlock(&ts_mod_lock);
return err;
}
struct ts_linear_state
{
unsigned int len;
const void *data;
};
static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
struct ts_config *conf,
struct ts_state *state)
{
struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
if (likely(consumed < st->len)) {
*dst = st->data + consumed;
return st->len - consumed;
}
return 0;
}
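/*
* A sketch (not built) of a get_next_block() for data split across two
* chunks, mirroring get_linear_data() above. The struct is illustrative
* and assumed to fit within state->cb.
*/
#if 0
struct ts_two_chunk_state
{
const u8 *chunk[2];
unsigned int len[2];
};
static unsigned int get_two_chunk_data(unsigned int consumed, const u8 **dst,
struct ts_config *conf,
struct ts_state *state)
{
struct ts_two_chunk_state *st = (struct ts_two_chunk_state *) state->cb;
if (consumed < st->len[0]) {
*dst = st->chunk[0] + consumed;
return st->len[0] - consumed;
}
consumed -= st->len[0];
if (consumed < st->len[1]) {
*dst = st->chunk[1] + consumed;
return st->len[1] - consumed;
}
return 0;
}
#endif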
/**
* textsearch_find_continuous - search a pattern in continuous/linear data
* @conf: search configuration
* @state: search state
* @data: data to search in
* @len: length of data
*
* A simplified version of textsearch_find() for continuous/linear data.
* Call textsearch_next() to retrieve subsequent matches.
*
* Returns the position of the first occurrence of the pattern or
* %UINT_MAX if no occurrence was found.
*/
unsigned int textsearch_find_continuous(struct ts_config *conf,
struct ts_state *state,
const void *data, unsigned int len)
{
struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
conf->get_next_block = get_linear_data;
st->data = data;
st->len = len;
return textsearch_find(conf, state);
}
/**
* textsearch_prepare - Prepare a search
* @algo: name of search algorithm
* @pattern: pattern data
* @len: length of pattern
* @gfp_mask: allocation mask
* @flags: search flags
*
* Looks up the search algorithm module and creates a new textsearch
* configuration for the specified pattern. Upon completion all
* necessary refcnts are held and the configuration must be given back
* using textsearch_destroy() after usage.
*
* Note: The format of the pattern may not be compatible between
* the various search algorithms.
*
* Returns a new textsearch configuration according to the specified
* parameters or an ERR_PTR().
*/
struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
unsigned int len, gfp_t gfp_mask, int flags)
{
int err = -ENOENT;
struct ts_config *conf;
struct ts_ops *ops;
ops = lookup_ts_algo(algo);
#ifdef CONFIG_KMOD
/*
* Why not always autoload, you may ask. Some users are
* in a situation where requesting a module may deadlock,
* especially when the module is located on an NFS mount.
*/
if (ops == NULL && flags & TS_AUTOLOAD) {
request_module("ts_%s", algo);
ops = lookup_ts_algo(algo);
}
#endif
if (ops == NULL)
goto errout;
conf = ops->init(pattern, len, gfp_mask);
if (IS_ERR(conf)) {
err = PTR_ERR(conf);
goto errout;
}
conf->ops = ops;
return conf;
errout:
if (ops)
module_put(ops->owner);
return ERR_PTR(err);
}
/**
* textsearch_destroy - destroy a search configuration
* @conf: search configuration
*
* Releases all references of the configuration and frees
* up the memory.
*/
void textsearch_destroy(struct ts_config *conf)
{
if (conf->ops) {
if (conf->ops->destroy)
conf->ops->destroy(conf);
module_put(conf->ops->owner);
}
kfree(conf);
}
EXPORT_SYMBOL(textsearch_register);
EXPORT_SYMBOL(textsearch_unregister);
EXPORT_SYMBOL(textsearch_prepare);
EXPORT_SYMBOL(textsearch_find_continuous);
EXPORT_SYMBOL(textsearch_destroy);

193
lib/ts_bm.c Normal file
View File

@@ -0,0 +1,193 @@
/*
* lib/ts_bm.c Boyer-Moore text search implementation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Pablo Neira Ayuso <pablo@eurodev.net>
*
* ==========================================================================
*
* Implements Boyer-Moore string matching algorithm:
*
* [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
* Communications of the Association for Computing Machinery,
* 20(10), 1977, pp. 762-772.
* http://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
*
* [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
* http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
*
* Note: Since Boyer-Moore (BM) performs its matching from right to
* left, a match may be spread over multiple blocks, in which case this
* algorithm will not find it.
*
* If you need to ensure that such a case can never happen, use the
* Knuth-Morris-Pratt (KMP) implementation instead. In short, choose
* the proper string search algorithm depending on your setting.
*
* Say you're using the textsearch infrastructure for filtering, NIDS or
* any similar security-focused purpose, then go KMP. Otherwise, if you
* really care about performance, say you're classifying packets to apply
* Quality of Service (QoS) policies, and you don't mind possible
* matches being spread over multiple fragments, then go BM.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/textsearch.h>
/* Alphabet size, use ASCII */
#define ASIZE 256
#if 0
#define DEBUGP printk
#else
#define DEBUGP(args, format...)
#endif
struct ts_bm
{
u8 * pattern;
unsigned int patlen;
unsigned int bad_shift[ASIZE];
unsigned int good_shift[0];
};
static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
{
struct ts_bm *bm = ts_config_priv(conf);
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
int shift = bm->patlen, bs;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
if (unlikely(text_len == 0))
break;
while (shift < text_len) {
DEBUGP("Searching in position %d (%c)\n",
shift, text[shift]);
for (i = 0; i < bm->patlen; i++)
if (text[shift-i] != bm->pattern[bm->patlen-1-i])
goto next;
/* London calling... */
DEBUGP("found!\n");
return consumed += (shift-(bm->patlen-1));
next: bs = bm->bad_shift[text[shift-i]];
/* Now jumping to... */
shift = max_t(int, shift-i+bs, shift+bm->good_shift[i]);
}
consumed += text_len;
}
return UINT_MAX;
}
static int subpattern(u8 *pattern, int i, int j, int g)
{
int x = i+g-1, y = j+g-1, ret = 0;
while(pattern[x--] == pattern[y--]) {
if (y < 0) {
ret = 1;
break;
}
if (--g == 0) {
ret = pattern[i-1] != pattern[j-1];
break;
}
}
return ret;
}
static void compute_prefix_tbl(struct ts_bm *bm)
{
int i, j, g;
for (i = 0; i < ASIZE; i++)
bm->bad_shift[i] = bm->patlen;
for (i = 0; i < bm->patlen - 1; i++)
bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
/* Compute the good shift array, used to match recurrences
* of a subpattern */
bm->good_shift[0] = 1;
for (i = 1; i < bm->patlen; i++)
bm->good_shift[i] = bm->patlen;
for (i = bm->patlen-1, g = 1; i > 0; g++, i--) {
for (j = i-1; j >= 1-g ; j--)
if (subpattern(bm->pattern, i, j, g)) {
bm->good_shift[g] = bm->patlen-j-g;
break;
}
}
}
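/*
* A worked example of the tables above: for the pattern "abc"
* (patlen = 3) the bad shift array holds bad_shift['a'] = 2,
* bad_shift['b'] = 1 and 3 (the full pattern length) for every other
* byte, so a mismatch on a character absent from the pattern skips
* the whole pattern at once.
*/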
static struct ts_config *bm_init(const void *pattern, unsigned int len,
gfp_t gfp_mask)
{
struct ts_config *conf;
struct ts_bm *bm;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
conf = alloc_ts_config(priv_size, gfp_mask);
if (IS_ERR(conf))
return conf;
bm = ts_config_priv(conf);
bm->patlen = len;
bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
memcpy(bm->pattern, pattern, len);
compute_prefix_tbl(bm);
return conf;
}
static void *bm_get_pattern(struct ts_config *conf)
{
struct ts_bm *bm = ts_config_priv(conf);
return bm->pattern;
}
static unsigned int bm_get_pattern_len(struct ts_config *conf)
{
struct ts_bm *bm = ts_config_priv(conf);
return bm->patlen;
}
static struct ts_ops bm_ops = {
.name = "bm",
.find = bm_find,
.init = bm_init,
.get_pattern = bm_get_pattern,
.get_pattern_len = bm_get_pattern_len,
.owner = THIS_MODULE,
.list = LIST_HEAD_INIT(bm_ops.list)
};
static int __init init_bm(void)
{
return textsearch_register(&bm_ops);
}
static void __exit exit_bm(void)
{
textsearch_unregister(&bm_ops);
}
MODULE_LICENSE("GPL");
module_init(init_bm);
module_exit(exit_bm);

337
lib/ts_fsm.c Normal file
View File

@@ -0,0 +1,337 @@
/*
* lib/ts_fsm.c A naive finite state machine text search approach
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Thomas Graf <tgraf@suug.ch>
*
* ==========================================================================
*
* A finite state machine consists of n states (struct ts_fsm_token)
* representing the pattern as a finite automaton. The data is read
* sequentially on an octet basis. Every state token specifies the number
* of recurrences and the type of value accepted, which can be either a
* specific character or a ctype-based set of characters. The available
* types of recurrence are 1, (0|1), [0 n], and [1 n].
*
* The algorithm distinguishes between strict and non-strict mode, which
* specifies whether the pattern has to start at the first octet.
* is enabled by default and can be disabled by inserting
* TS_FSM_HEAD_IGNORE as the first token in the chain.
*
* The runtime performance of the algorithm should be around O(n);
* in strict mode the average runtime can be better.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/textsearch.h>
#include <linux/textsearch_fsm.h>
struct ts_fsm
{
unsigned int ntokens;
struct ts_fsm_token tokens[0];
};
/* other values derived from ctype.h */
#define _A 0x100 /* ascii */
#define _W 0x200 /* wildcard */
/* Map to _ctype flags and some magic numbers */
static const u16 token_map[TS_FSM_TYPE_MAX+1] = {
[TS_FSM_SPECIFIC] = 0,
[TS_FSM_WILDCARD] = _W,
[TS_FSM_CNTRL] = _C,
[TS_FSM_LOWER] = _L,
[TS_FSM_UPPER] = _U,
[TS_FSM_PUNCT] = _P,
[TS_FSM_SPACE] = _S,
[TS_FSM_DIGIT] = _D,
[TS_FSM_XDIGIT] = _D | _X,
[TS_FSM_ALPHA] = _U | _L,
[TS_FSM_ALNUM] = _U | _L | _D,
[TS_FSM_PRINT] = _P | _U | _L | _D | _SP,
[TS_FSM_GRAPH] = _P | _U | _L | _D,
[TS_FSM_ASCII] = _A,
};
static const u16 token_lookup_tbl[256] = {
_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */
_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */
_W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */
_W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C, _W|_A|_C, /* 12- 15 */
_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 16- 19 */
_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 20- 23 */
_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 24- 27 */
_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 28- 31 */
_W|_A|_S|_SP, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 32- 35 */
_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 36- 39 */
_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 40- 43 */
_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 44- 47 */
_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 48- 51 */
_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 52- 55 */
_W|_A|_D, _W|_A|_D, _W|_A|_P, _W|_A|_P, /* 56- 59 */
_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 60- 63 */
_W|_A|_P, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, /* 64- 67 */
_W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U, /* 68- 71 */
_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 72- 75 */
_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 76- 79 */
_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 80- 83 */
_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 84- 87 */
_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_P, /* 88- 91 */
_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 92- 95 */
_W|_A|_P, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, /* 96- 99 */
_W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L, /* 100-103 */
_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 104-107 */
_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 108-111 */
_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 112-115 */
_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 116-119 */
_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_P, /* 120-123 */
_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_C, /* 124-127 */
_W, _W, _W, _W, /* 128-131 */
_W, _W, _W, _W, /* 132-135 */
_W, _W, _W, _W, /* 136-139 */
_W, _W, _W, _W, /* 140-143 */
_W, _W, _W, _W, /* 144-147 */
_W, _W, _W, _W, /* 148-151 */
_W, _W, _W, _W, /* 152-155 */
_W, _W, _W, _W, /* 156-159 */
_W|_S|_SP, _W|_P, _W|_P, _W|_P, /* 160-163 */
_W|_P, _W|_P, _W|_P, _W|_P, /* 164-167 */
_W|_P, _W|_P, _W|_P, _W|_P, /* 168-171 */
_W|_P, _W|_P, _W|_P, _W|_P, /* 172-175 */
_W|_P, _W|_P, _W|_P, _W|_P, /* 176-179 */
_W|_P, _W|_P, _W|_P, _W|_P, /* 180-183 */
_W|_P, _W|_P, _W|_P, _W|_P, /* 184-187 */
_W|_P, _W|_P, _W|_P, _W|_P, /* 188-191 */
_W|_U, _W|_U, _W|_U, _W|_U, /* 192-195 */
_W|_U, _W|_U, _W|_U, _W|_U, /* 196-199 */
_W|_U, _W|_U, _W|_U, _W|_U, /* 200-203 */
_W|_U, _W|_U, _W|_U, _W|_U, /* 204-207 */
_W|_U, _W|_U, _W|_U, _W|_U, /* 208-211 */
_W|_U, _W|_U, _W|_U, _W|_P, /* 212-215 */
_W|_U, _W|_U, _W|_U, _W|_U, /* 216-219 */
_W|_U, _W|_U, _W|_U, _W|_L, /* 220-223 */
_W|_L, _W|_L, _W|_L, _W|_L, /* 224-227 */
_W|_L, _W|_L, _W|_L, _W|_L, /* 228-231 */
_W|_L, _W|_L, _W|_L, _W|_L, /* 232-235 */
_W|_L, _W|_L, _W|_L, _W|_L, /* 236-239 */
_W|_L, _W|_L, _W|_L, _W|_L, /* 240-243 */
_W|_L, _W|_L, _W|_L, _W|_P, /* 244-247 */
_W|_L, _W|_L, _W|_L, _W|_L, /* 248-251 */
_W|_L, _W|_L, _W|_L, _W|_L}; /* 252-255 */
static inline int match_token(struct ts_fsm_token *t, u8 d)
{
if (t->type)
return (token_lookup_tbl[d] & t->type) != 0;
else
return t->value == d;
}
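/*
* A sketch (not built) of a token chain for "skip any leading junk,
* then match one or more digits"; the values are illustrative. Note
* that TS_FSM_HEAD_IGNORE must be the first and not the last token,
* as enforced by fsm_init() below.
*/
#if 0
static struct ts_fsm_token example_tokens[] = {
{ .type = TS_FSM_WILDCARD, .recur = TS_FSM_HEAD_IGNORE },
{ .type = TS_FSM_DIGIT, .recur = TS_FSM_MULTI },
};
/* conf = textsearch_prepare("fsm", example_tokens,
* sizeof(example_tokens), GFP_KERNEL, 0); */
#endif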
static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state)
{
struct ts_fsm *fsm = ts_config_priv(conf);
struct ts_fsm_token *cur = NULL, *next;
unsigned int match_start, block_idx = 0, tok_idx;
unsigned block_len = 0, strict, consumed = state->offset;
const u8 *data;
#define GET_NEXT_BLOCK() \
({ consumed += block_idx; \
block_idx = 0; \
block_len = conf->get_next_block(consumed, &data, conf, state); })
#define TOKEN_MISMATCH() \
do { \
if (strict) \
goto no_match; \
block_idx++; \
goto startover; \
} while(0)
#define end_of_data() unlikely(block_idx >= block_len && !GET_NEXT_BLOCK())
if (end_of_data())
goto no_match;
strict = fsm->tokens[0].recur != TS_FSM_HEAD_IGNORE;
startover:
match_start = consumed + block_idx;
for (tok_idx = 0; tok_idx < fsm->ntokens; tok_idx++) {
cur = &fsm->tokens[tok_idx];
if (likely(tok_idx < (fsm->ntokens - 1)))
next = &fsm->tokens[tok_idx + 1];
else
next = NULL;
switch (cur->recur) {
case TS_FSM_SINGLE:
if (end_of_data())
goto no_match;
if (!match_token(cur, data[block_idx]))
TOKEN_MISMATCH();
break;
case TS_FSM_PERHAPS:
if (end_of_data() ||
!match_token(cur, data[block_idx]))
continue;
break;
case TS_FSM_MULTI:
if (end_of_data())
goto no_match;
if (!match_token(cur, data[block_idx]))
TOKEN_MISMATCH();
block_idx++;
/* fall through */
case TS_FSM_ANY:
if (next == NULL)
goto found_match;
if (end_of_data())
continue;
while (!match_token(next, data[block_idx])) {
if (!match_token(cur, data[block_idx]))
TOKEN_MISMATCH();
block_idx++;
if (end_of_data())
goto no_match;
}
continue;
/*
* Optimization: Prefer small local loop over jumping
* back and forth until garbage at head is munched.
*/
case TS_FSM_HEAD_IGNORE:
if (end_of_data())
continue;
while (!match_token(next, data[block_idx])) {
/*
* Special case, don't start over upon
* a mismatch, give the user the
* chance to specify the type of data
* allowed to be ignored.
*/
if (!match_token(cur, data[block_idx]))
goto no_match;
block_idx++;
if (end_of_data())
goto no_match;
}
match_start = consumed + block_idx;
continue;
}
block_idx++;
}
if (end_of_data())
goto found_match;
no_match:
return UINT_MAX;
found_match:
state->offset = consumed + block_idx;
return match_start;
}
static struct ts_config *fsm_init(const void *pattern, unsigned int len,
gfp_t gfp_mask)
{
int i, err = -EINVAL;
struct ts_config *conf;
struct ts_fsm *fsm;
struct ts_fsm_token *tokens = (struct ts_fsm_token *) pattern;
unsigned int ntokens = len / sizeof(*tokens);
size_t priv_size = sizeof(*fsm) + len;
if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
goto errout;
for (i = 0; i < ntokens; i++) {
struct ts_fsm_token *t = &tokens[i];
if (t->type > TS_FSM_TYPE_MAX || t->recur > TS_FSM_RECUR_MAX)
goto errout;
if (t->recur == TS_FSM_HEAD_IGNORE &&
(i != 0 || i == (ntokens - 1)))
goto errout;
}
conf = alloc_ts_config(priv_size, gfp_mask);
if (IS_ERR(conf))
return conf;
fsm = ts_config_priv(conf);
fsm->ntokens = ntokens;
memcpy(fsm->tokens, pattern, len);
for (i = 0; i < fsm->ntokens; i++) {
struct ts_fsm_token *t = &fsm->tokens[i];
t->type = token_map[t->type];
}
return conf;
errout:
return ERR_PTR(err);
}
static void *fsm_get_pattern(struct ts_config *conf)
{
struct ts_fsm *fsm = ts_config_priv(conf);
return fsm->tokens;
}
static unsigned int fsm_get_pattern_len(struct ts_config *conf)
{
struct ts_fsm *fsm = ts_config_priv(conf);
return fsm->ntokens * sizeof(struct ts_fsm_token);
}
static struct ts_ops fsm_ops = {
.name = "fsm",
.find = fsm_find,
.init = fsm_init,
.get_pattern = fsm_get_pattern,
.get_pattern_len = fsm_get_pattern_len,
.owner = THIS_MODULE,
.list = LIST_HEAD_INIT(fsm_ops.list)
};
static int __init init_fsm(void)
{
return textsearch_register(&fsm_ops);
}
static void __exit exit_fsm(void)
{
textsearch_unregister(&fsm_ops);
}
MODULE_LICENSE("GPL");
module_init(init_fsm);
module_exit(exit_fsm);

144
lib/ts_kmp.c Normal file
View File

@@ -0,0 +1,144 @@
/*
* lib/ts_kmp.c Knuth-Morris-Pratt text search implementation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Thomas Graf <tgraf@suug.ch>
*
* ==========================================================================
*
* Implements a linear-time string-matching algorithm due to Knuth,
* Morris, and Pratt [1]. Their algorithm avoids the explicit
* computation of the transition function DELTA altogether. Its
* matching time is O(n), for n being length(text), using just an
* auxiliary function PI[1..m], for m being length(pattern),
* precomputed from the pattern in time O(m). The array PI allows
* the transition function DELTA to be computed efficiently
* "on the fly" as needed. Roughly speaking, for any state
* "q" = 0,1,...,m and any character "a" in SIGMA, the value
* PI["q"] contains the information that is independent of "a" and
* is needed to compute DELTA("q", "a") [2]. Since the array PI
* has only m entries, whereas DELTA has O(m|SIGMA|) entries, we
* save a factor of |SIGMA| in the preprocessing time by computing
* PI rather than DELTA.
*
* [1] Cormen, Leiserson, Rivest, Stein
* Introduction to Algorithms, 2nd Edition, MIT Press
* [2] See finite automaton theory
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/textsearch.h>
struct ts_kmp
{
u8 * pattern;
unsigned int pattern_len;
unsigned int prefix_tbl[0];
};
static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
{
struct ts_kmp *kmp = ts_config_priv(conf);
unsigned int i, q = 0, text_len, consumed = state->offset;
const u8 *text;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
if (unlikely(text_len == 0))
break;
for (i = 0; i < text_len; i++) {
while (q > 0 && kmp->pattern[q] != text[i])
q = kmp->prefix_tbl[q - 1];
if (kmp->pattern[q] == text[i])
q++;
if (unlikely(q == kmp->pattern_len)) {
state->offset = consumed + i + 1;
return state->offset - kmp->pattern_len;
}
}
consumed += text_len;
}
return UINT_MAX;
}
static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
unsigned int *prefix_tbl)
{
unsigned int k, q;
for (k = 0, q = 1; q < len; q++) {
while (k > 0 && pattern[k] != pattern[q])
k = prefix_tbl[k-1];
if (pattern[k] == pattern[q])
k++;
prefix_tbl[q] = k;
}
}
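/*
* A worked example of the loop above: for the pattern "ababaca" it
* computes prefix_tbl[1..6] = { 0, 1, 2, 3, 0, 1 }; entry 0 is never
* written here and is assumed to be zero from a zeroed allocation in
* alloc_ts_config().
*/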
static struct ts_config *kmp_init(const void *pattern, unsigned int len,
gfp_t gfp_mask)
{
struct ts_config *conf;
struct ts_kmp *kmp;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
conf = alloc_ts_config(priv_size, gfp_mask);
if (IS_ERR(conf))
return conf;
kmp = ts_config_priv(conf);
kmp->pattern_len = len;
compute_prefix_tbl(pattern, len, kmp->prefix_tbl);
kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
memcpy(kmp->pattern, pattern, len);
return conf;
}
static void *kmp_get_pattern(struct ts_config *conf)
{
struct ts_kmp *kmp = ts_config_priv(conf);
return kmp->pattern;
}
static unsigned int kmp_get_pattern_len(struct ts_config *conf)
{
struct ts_kmp *kmp = ts_config_priv(conf);
return kmp->pattern_len;
}
static struct ts_ops kmp_ops = {
.name = "kmp",
.find = kmp_find,
.init = kmp_init,
.get_pattern = kmp_get_pattern,
.get_pattern_len = kmp_get_pattern_len,
.owner = THIS_MODULE,
.list = LIST_HEAD_INIT(kmp_ops.list)
};
static int __init init_kmp(void)
{
return textsearch_register(&kmp_ops);
}
static void __exit exit_kmp(void)
{
textsearch_unregister(&kmp_ops);
}
MODULE_LICENSE("GPL");
module_init(init_kmp);
module_exit(exit_kmp);

873
lib/vsprintf.c Normal file
View File

@@ -0,0 +1,873 @@
/*
* linux/lib/vsprintf.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
/*
* Wirzenius wrote this portably, Torvalds fucked it up :-)
*/
/*
* Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* - changed to provide snprintf and vsnprintf functions
* So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
* - scnprintf and vscnprintf
*/
#include <stdarg.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/div64.h>
/**
* simple_strtoul - convert a string to an unsigned long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
{
unsigned long result = 0,value;
if (!base) {
base = 10;
if (*cp == '0') {
base = 8;
cp++;
if ((toupper(*cp) == 'X') && isxdigit(cp[1])) {
cp++;
base = 16;
}
}
} else if (base == 16) {
if (cp[0] == '0' && toupper(cp[1]) == 'X')
cp += 2;
}
while (isxdigit(*cp) &&
(value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) {
result = result*base + value;
cp++;
}
if (endp)
*endp = (char *)cp;
return result;
}
EXPORT_SYMBOL(simple_strtoul);
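/*
* With base == 0 the prefix selects the radix, e.g.
* simple_strtoul("0x1a", NULL, 0) == 26,
* simple_strtoul("017", NULL, 0) == 15 and
* simple_strtoul("17", NULL, 0) == 17.
*/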
/**
* simple_strtol - convert a string to a signed long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
long simple_strtol(const char *cp,char **endp,unsigned int base)
{
if(*cp=='-')
return -simple_strtoul(cp+1,endp,base);
return simple_strtoul(cp,endp,base);
}
EXPORT_SYMBOL(simple_strtol);
/**
* simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base)
{
unsigned long long result = 0,value;
if (!base) {
base = 10;
if (*cp == '0') {
base = 8;
cp++;
if ((toupper(*cp) == 'X') && isxdigit(cp[1])) {
cp++;
base = 16;
}
}
} else if (base == 16) {
if (cp[0] == '0' && toupper(cp[1]) == 'X')
cp += 2;
}
while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp)
? toupper(*cp) : *cp)-'A'+10) < base) {
result = result*base + value;
cp++;
}
if (endp)
*endp = (char *)cp;
return result;
}
EXPORT_SYMBOL(simple_strtoull);
/**
* simple_strtoll - convert a string to a signed long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
long long simple_strtoll(const char *cp,char **endp,unsigned int base)
{
if(*cp=='-')
return -simple_strtoull(cp+1,endp,base);
return simple_strtoull(cp,endp,base);
}
static int skip_atoi(const char **s)
{
int i=0;
while (isdigit(**s))
i = i*10 + *((*s)++) - '0';
return i;
}
#define ZEROPAD 1 /* pad with zero */
#define SIGN 2 /* unsigned/signed long */
#define PLUS 4 /* show plus */
#define SPACE 8 /* space if plus */
#define LEFT 16 /* left justified */
#define SPECIAL 32 /* 0x */
#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
static char * number(char * buf, char * end, unsigned long long num, int base, int size, int precision, int type)
{
char c,sign,tmp[66];
const char *digits;
static const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
static const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
int i;
digits = (type & LARGE) ? large_digits : small_digits;
if (type & LEFT)
type &= ~ZEROPAD;
if (base < 2 || base > 36)
return NULL;
c = (type & ZEROPAD) ? '0' : ' ';
sign = 0;
if (type & SIGN) {
if ((signed long long) num < 0) {
sign = '-';
num = - (signed long long) num;
size--;
} else if (type & PLUS) {
sign = '+';
size--;
} else if (type & SPACE) {
sign = ' ';
size--;
}
}
if (type & SPECIAL) {
if (base == 16)
size -= 2;
else if (base == 8)
size--;
}
i = 0;
if (num == 0)
tmp[i++]='0';
else while (num != 0)
tmp[i++] = digits[do_div(num,base)];
if (i > precision)
precision = i;
size -= precision;
if (!(type&(ZEROPAD+LEFT))) {
while(size-->0) {
if (buf < end)
*buf = ' ';
++buf;
}
}
if (sign) {
if (buf < end)
*buf = sign;
++buf;
}
if (type & SPECIAL) {
if (base==8) {
if (buf < end)
*buf = '0';
++buf;
} else if (base==16) {
if (buf < end)
*buf = '0';
++buf;
if (buf < end)
*buf = digits[33];
++buf;
}
}
if (!(type & LEFT)) {
while (size-- > 0) {
if (buf < end)
*buf = c;
++buf;
}
}
while (i < precision--) {
if (buf < end)
*buf = '0';
++buf;
}
while (i-- > 0) {
if (buf < end)
*buf = tmp[i];
++buf;
}
while (size-- > 0) {
if (buf < end)
*buf = ' ';
++buf;
}
return buf;
}
/**
* vsnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing
* '\0', as per ISO C99. If you want to have the exact
* number of characters written into @buf as return value
* (not including the trailing '\0'), use vscnprintf(). If the
* return is greater than or equal to @size, the resulting
* string is truncated.
*
* Call this function if you are already dealing with a va_list.
* You probably want snprintf() instead.
*/
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int len;
unsigned long long num;
int i, base;
char *str, *end, c;
const char *s;
int flags; /* flags to number() */
int field_width; /* width of output field */
int precision; /* min. # of digits for integers; max
number of chars for strings */
int qualifier; /* 'h', 'l', or 'L' for integer fields */
/* 'z' support added 23/7/1999 S.H. */
/* 'z' changed to 'Z' --davidm 1/25/99 */
/* 't' added for ptrdiff_t */
/* Reject out-of-range values early. Large positive sizes are
used for unknown buffer sizes. */
if (unlikely((int) size < 0)) {
/* There can be only one.. */
static int warn = 1;
WARN_ON(warn);
warn = 0;
return 0;
}
str = buf;
end = buf + size;
/* Make sure end is always >= buf */
if (end < buf) {
end = ((void *)-1);
size = end - buf;
}
for (; *fmt ; ++fmt) {
if (*fmt != '%') {
if (str < end)
*str = *fmt;
++str;
continue;
}
/* process flags */
flags = 0;
repeat:
++fmt; /* this also skips first '%' */
switch (*fmt) {
case '-': flags |= LEFT; goto repeat;
case '+': flags |= PLUS; goto repeat;
case ' ': flags |= SPACE; goto repeat;
case '#': flags |= SPECIAL; goto repeat;
case '0': flags |= ZEROPAD; goto repeat;
}
/* get field width */
field_width = -1;
if (isdigit(*fmt))
field_width = skip_atoi(&fmt);
else if (*fmt == '*') {
++fmt;
/* it's the next argument */
field_width = va_arg(args, int);
if (field_width < 0) {
field_width = -field_width;
flags |= LEFT;
}
}
/* get the precision */
precision = -1;
if (*fmt == '.') {
++fmt;
if (isdigit(*fmt))
precision = skip_atoi(&fmt);
else if (*fmt == '*') {
++fmt;
/* it's the next argument */
precision = va_arg(args, int);
}
if (precision < 0)
precision = 0;
}
/* get the conversion qualifier */
qualifier = -1;
if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
*fmt =='Z' || *fmt == 'z' || *fmt == 't') {
qualifier = *fmt;
++fmt;
if (qualifier == 'l' && *fmt == 'l') {
qualifier = 'L';
++fmt;
}
}
/* default base */
base = 10;
switch (*fmt) {
case 'c':
if (!(flags & LEFT)) {
while (--field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
}
c = (unsigned char) va_arg(args, int);
if (str < end)
*str = c;
++str;
while (--field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
continue;
case 's':
s = va_arg(args, char *);
if ((unsigned long)s < PAGE_SIZE)
s = "<NULL>";
len = strnlen(s, precision);
if (!(flags & LEFT)) {
while (len < field_width--) {
if (str < end)
*str = ' ';
++str;
}
}
for (i = 0; i < len; ++i) {
if (str < end)
*str = *s;
++str; ++s;
}
while (len < field_width--) {
if (str < end)
*str = ' ';
++str;
}
continue;
case 'p':
if (field_width == -1) {
field_width = 2*sizeof(void *);
flags |= ZEROPAD;
}
str = number(str, end,
(unsigned long) va_arg(args, void *),
16, field_width, precision, flags);
continue;
case 'n':
/* FIXME:
* What does C99 say about the overflow case here? */
if (qualifier == 'l') {
long * ip = va_arg(args, long *);
*ip = (str - buf);
} else if (qualifier == 'Z' || qualifier == 'z') {
size_t * ip = va_arg(args, size_t *);
*ip = (str - buf);
} else {
int * ip = va_arg(args, int *);
*ip = (str - buf);
}
continue;
case '%':
if (str < end)
*str = '%';
++str;
continue;
/* integer number formats - set up the flags and "break" */
case 'o':
base = 8;
break;
case 'X':
flags |= LARGE;
case 'x':
base = 16;
break;
case 'd':
case 'i':
flags |= SIGN;
case 'u':
break;
default:
if (str < end)
*str = '%';
++str;
if (*fmt) {
if (str < end)
*str = *fmt;
++str;
} else {
--fmt;
}
continue;
}
if (qualifier == 'L')
num = va_arg(args, long long);
else if (qualifier == 'l') {
num = va_arg(args, unsigned long);
if (flags & SIGN)
num = (signed long) num;
} else if (qualifier == 'Z' || qualifier == 'z') {
num = va_arg(args, size_t);
} else if (qualifier == 't') {
num = va_arg(args, ptrdiff_t);
} else if (qualifier == 'h') {
num = (unsigned short) va_arg(args, int);
if (flags & SIGN)
num = (signed short) num;
} else {
num = va_arg(args, unsigned int);
if (flags & SIGN)
num = (signed int) num;
}
str = number(str, end, num, base,
field_width, precision, flags);
}
if (size > 0) {
if (str < end)
*str = '\0';
else
end[-1] = '\0';
}
/* the trailing null byte doesn't count towards the total */
return str-buf;
}
EXPORT_SYMBOL(vsnprintf);
/**
* vscnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The return value is the number of characters which have been written into
* the @buf not including the trailing '\0'. If @size is <= 0 the function
* returns 0.
*
* Call this function if you are already dealing with a va_list.
* You probably want scnprintf() instead.
*/
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
i=vsnprintf(buf,size,fmt,args);
return (i >= size) ? (size - 1) : i;
}
EXPORT_SYMBOL(vscnprintf);
/**
* snprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The return value is the number of characters which would be
* generated for the given input, excluding the trailing null,
* as per ISO C99. If the return is greater than or equal to
* @size, the resulting string is truncated.
*/
int snprintf(char * buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i=vsnprintf(buf,size,fmt,args);
va_end(args);
return i;
}
EXPORT_SYMBOL(snprintf);
/**
* scnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The return value is the number of characters written into @buf not including
* the trailing '\0'. If @size is <= 0 the function returns 0.
*/
int scnprintf(char * buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsnprintf(buf, size, fmt, args);
va_end(args);
return (i >= size) ? (size - 1) : i;
}
EXPORT_SYMBOL(scnprintf);
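/*
* A worked example of the difference: with char buf[8],
* snprintf(buf, sizeof(buf), "%s", "truncated") returns 9 (the
* untruncated length, per C99), while scnprintf() returns 7, the
* number of characters actually stored before the trailing '\0'.
*/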
/**
* vsprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The function returns the number of characters written
* into @buf. Use vsnprintf() or vscnprintf() in order to avoid
* buffer overflows.
*
* Call this function if you are already dealing with a va_list.
* You probably want sprintf() instead.
*/
int vsprintf(char *buf, const char *fmt, va_list args)
{
return vsnprintf(buf, INT_MAX, fmt, args);
}
EXPORT_SYMBOL(vsprintf);
/**
* sprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The function returns the number of characters written
* into @buf. Use snprintf() or scnprintf() in order to avoid
* buffer overflows.
*/
int sprintf(char * buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i=vsnprintf(buf, INT_MAX, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(sprintf);
/**
* vsscanf - Unformat a buffer into a list of arguments
* @buf: input buffer
* @fmt: format of buffer
* @args: arguments
*/
int vsscanf(const char * buf, const char * fmt, va_list args)
{
const char *str = buf;
char *next;
char digit;
int num = 0;
int qualifier;
int base;
int field_width;
int is_sign = 0;
while(*fmt && *str) {
/* skip any white space in format */
* white space in format matches any amount of
* white space, including none, in the input.
*/
if (isspace(*fmt)) {
while (isspace(*fmt))
++fmt;
while (isspace(*str))
++str;
}
/* anything that is not a conversion must match exactly */
if (*fmt != '%' && *fmt) {
if (*fmt++ != *str++)
break;
continue;
}
if (!*fmt)
break;
++fmt;
/* skip this conversion.
* advance both strings to next white space
*/
if (*fmt == '*') {
while (!isspace(*fmt) && *fmt)
fmt++;
while (!isspace(*str) && *str)
str++;
continue;
}
/* get field width */
field_width = -1;
if (isdigit(*fmt))
field_width = skip_atoi(&fmt);
/* get conversion qualifier */
qualifier = -1;
if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
*fmt == 'Z' || *fmt == 'z') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'h') {
qualifier = 'H';
fmt++;
} else if (qualifier == 'l') {
qualifier = 'L';
fmt++;
}
}
}
base = 10;
is_sign = 0;
if (!*fmt || !*str)
break;
switch(*fmt++) {
case 'c':
{
char *s = (char *) va_arg(args,char*);
if (field_width == -1)
field_width = 1;
do {
*s++ = *str++;
} while (--field_width > 0 && *str);
num++;
}
continue;
case 's':
{
char *s = (char *) va_arg(args, char *);
if(field_width == -1)
field_width = INT_MAX;
/* first, skip leading white space in buffer */
while (isspace(*str))
str++;
/* now copy until next white space */
while (*str && !isspace(*str) && field_width--) {
*s++ = *str++;
}
*s = '\0';
num++;
}
continue;
case 'n':
/* return number of characters read so far */
{
int *i = (int *)va_arg(args,int*);
*i = str - buf;
}
continue;
case 'o':
base = 8;
break;
case 'x':
case 'X':
base = 16;
break;
case 'i':
base = 0;
case 'd':
is_sign = 1;
case 'u':
break;
case '%':
/* looking for '%' in str */
if (*str++ != '%')
return num;
continue;
default:
/* invalid format; stop here */
return num;
}
/* have some sort of integer conversion.
* first, skip white space in buffer.
*/
while (isspace(*str))
str++;
digit = *str;
if (is_sign && digit == '-')
digit = *(str + 1);
if (!digit
|| (base == 16 && !isxdigit(digit))
|| (base == 10 && !isdigit(digit))
|| (base == 8 && (!isdigit(digit) || digit > '7'))
|| (base == 0 && !isdigit(digit)))
break;
switch(qualifier) {
case 'H': /* that's 'hh' in format */
if (is_sign) {
signed char *s = (signed char *) va_arg(args,signed char *);
*s = (signed char) simple_strtol(str,&next,base);
} else {
unsigned char *s = (unsigned char *) va_arg(args, unsigned char *);
*s = (unsigned char) simple_strtoul(str, &next, base);
}
break;
case 'h':
if (is_sign) {
short *s = (short *) va_arg(args,short *);
*s = (short) simple_strtol(str,&next,base);
} else {
unsigned short *s = (unsigned short *) va_arg(args, unsigned short *);
*s = (unsigned short) simple_strtoul(str, &next, base);
}
break;
case 'l':
if (is_sign) {
long *l = (long *) va_arg(args,long *);
*l = simple_strtol(str,&next,base);
} else {
unsigned long *l = (unsigned long*) va_arg(args,unsigned long*);
*l = simple_strtoul(str,&next,base);
}
break;
case 'L':
if (is_sign) {
long long *l = (long long*) va_arg(args,long long *);
*l = simple_strtoll(str,&next,base);
} else {
unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*);
*l = simple_strtoull(str,&next,base);
}
break;
case 'Z':
case 'z':
{
size_t *s = (size_t*) va_arg(args,size_t*);
*s = (size_t) simple_strtoul(str,&next,base);
}
break;
default:
if (is_sign) {
int *i = (int *) va_arg(args, int*);
*i = (int) simple_strtol(str,&next,base);
} else {
unsigned int *i = (unsigned int*) va_arg(args, unsigned int*);
*i = (unsigned int) simple_strtoul(str,&next,base);
}
break;
}
num++;
if (!next)
break;
str = next;
}
return num;
}
EXPORT_SYMBOL(vsscanf);
/**
* sscanf - Unformat a buffer into a list of arguments
* @buf: input buffer
* @fmt: formatting of buffer
* @...: resulting arguments
*/
int sscanf(const char * buf, const char * fmt, ...)
{
va_list args;
int i;
va_start(args,fmt);
i = vsscanf(buf,fmt,args);
va_end(args);
return i;
}
EXPORT_SYMBOL(sscanf);
/* Simplified asprintf. */
char *kasprintf(gfp_t gfp, const char *fmt, ...)
{
va_list ap;
unsigned int len;
char *p;
va_start(ap, fmt);
len = vsnprintf(NULL, 0, fmt, ap);
va_end(ap);
p = kmalloc(len+1, gfp);
if (!p)
return NULL;
va_start(ap, fmt);
vsnprintf(p, len+1, fmt, ap);
va_end(ap);
return p;
}
EXPORT_SYMBOL(kasprintf);
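/*
* A minimal usage sketch (not built); the names are hypothetical.
*/
#if 0
char *label = kasprintf(GFP_KERNEL, "slab-%d", index);
if (!label)
return -ENOMEM;
/* ... use label ... */
kfree(label);
#endif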

11
lib/zlib_deflate/Makefile Normal file
View File

@@ -0,0 +1,11 @@
#
# This is a modified version of zlib, which does all memory
# allocation ahead of time.
#
# This is the compression code, see zlib_inflate for the
# decompression code.
#
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate.o
zlib_deflate-objs := deflate.o deftree.o deflate_syms.o

1253
lib/zlib_deflate/deflate.c Normal file

File diff suppressed because it is too large Load Diff

18
lib/zlib_deflate/deflate_syms.c Normal file
View File

@@ -0,0 +1,18 @@
/*
* linux/lib/zlib_deflate/deflate_syms.c
*
* Exported symbols for the deflate functionality.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/zlib.h>
EXPORT_SYMBOL(zlib_deflate_workspacesize);
EXPORT_SYMBOL(zlib_deflate);
EXPORT_SYMBOL(zlib_deflateInit2);
EXPORT_SYMBOL(zlib_deflateEnd);
EXPORT_SYMBOL(zlib_deflateReset);
MODULE_LICENSE("GPL");

1113
lib/zlib_deflate/deftree.c Normal file

File diff suppressed because it is too large Load Diff

334
lib/zlib_deflate/defutil.h Normal file
View File

@@ -0,0 +1,334 @@
#define Assert(err, str)
#define Trace(dummy)
#define Tracev(dummy)
#define Tracecv(err, dummy)
#define Tracevv(dummy)
#define LENGTH_CODES 29
/* number of length codes, not counting the special END_BLOCK code */
#define LITERALS 256
/* number of literal bytes 0..255 */
#define L_CODES (LITERALS+1+LENGTH_CODES)
/* number of Literal or Length codes, including the END_BLOCK code */
#define D_CODES 30
/* number of distance codes */
#define BL_CODES 19
/* number of codes used to transfer the bit lengths */
#define HEAP_SIZE (2*L_CODES+1)
/* maximum heap size */
#define MAX_BITS 15
/* All codes must not exceed MAX_BITS bits */
#define INIT_STATE 42
#define BUSY_STATE 113
#define FINISH_STATE 666
/* Stream status */
/* Data structure describing a single value and its code string. */
typedef struct ct_data_s {
union {
ush freq; /* frequency count */
ush code; /* bit string */
} fc;
union {
ush dad; /* father node in Huffman tree */
ush len; /* length of bit string */
} dl;
} ct_data;
#define Freq fc.freq
#define Code fc.code
#define Dad dl.dad
#define Len dl.len
typedef struct static_tree_desc_s static_tree_desc;
typedef struct tree_desc_s {
ct_data *dyn_tree; /* the dynamic tree */
int max_code; /* largest code with non zero frequency */
static_tree_desc *stat_desc; /* the corresponding static tree */
} tree_desc;
typedef ush Pos;
typedef unsigned IPos;
/* A Pos is an index in the character window. We use short instead of int to
* save space in the various tables. IPos is used only for parameter passing.
*/
typedef struct deflate_state {
z_streamp strm; /* pointer back to this zlib stream */
int status; /* as the name implies */
Byte *pending_buf; /* output still pending */
ulg pending_buf_size; /* size of pending_buf */
Byte *pending_out; /* next pending byte to output to the stream */
int pending; /* nb of bytes in the pending buffer */
int noheader; /* suppress zlib header and adler32 */
Byte data_type; /* UNKNOWN, BINARY or ASCII */
Byte method; /* STORED (for zip only) or DEFLATED */
int last_flush; /* value of flush param for previous deflate call */
/* used by deflate.c: */
uInt w_size; /* LZ77 window size (32K by default) */
uInt w_bits; /* log2(w_size) (8..16) */
uInt w_mask; /* w_size - 1 */
Byte *window;
/* Sliding window. Input bytes are read into the second half of the window,
* and move to the first half later to keep a dictionary of at least wSize
* bytes. With this organization, matches are limited to a distance of
* wSize-MAX_MATCH bytes, but this ensures that IO is always
* performed with a length multiple of the block size. Also, it limits
* the window size to 64K, which is quite useful on MSDOS.
* To do: use the user input buffer as sliding window.
*/
ulg window_size;
/* Actual size of window: 2*wSize, except when the user input buffer
* is directly used as sliding window.
*/
Pos *prev;
/* Link to older string with same hash index. To limit the size of this
* array to 64K, this link is maintained only for the last 32K strings.
* An index in this array is thus a window index modulo 32K.
*/
Pos *head; /* Heads of the hash chains or NIL. */
uInt ins_h; /* hash index of string to be inserted */
uInt hash_size; /* number of elements in hash table */
uInt hash_bits; /* log2(hash_size) */
uInt hash_mask; /* hash_size-1 */
uInt hash_shift;
/* Number of bits by which ins_h must be shifted at each input
* step. It must be such that after MIN_MATCH steps, the oldest
* byte no longer takes part in the hash key, that is:
* hash_shift * MIN_MATCH >= hash_bits
*/
long block_start;
/* Window position at the beginning of the current output block. Gets
* negative when the window is moved backwards.
*/
uInt match_length; /* length of best match */
IPos prev_match; /* previous match */
int match_available; /* set if previous match exists */
uInt strstart; /* start of string to insert */
uInt match_start; /* start of matching string */
uInt lookahead; /* number of valid bytes ahead in window */
uInt prev_length;
/* Length of the best match at previous step. Matches not greater than this
* are discarded. This is used in the lazy match evaluation.
*/
uInt max_chain_length;
/* To speed up deflation, hash chains are never searched beyond this
* length. A higher limit improves compression ratio but degrades the
* speed.
*/
uInt max_lazy_match;
/* Attempt to find a better match only when the current match is strictly
* smaller than this value. This mechanism is used only for compression
* levels >= 4.
*/
# define max_insert_length max_lazy_match
/* Insert new strings in the hash table only if the match length is not
* greater than this length. This saves time but degrades compression.
* max_insert_length is used only for compression levels <= 3.
*/
int level; /* compression level (1..9) */
int strategy; /* favor or force Huffman coding*/
uInt good_match;
/* Use a faster search when the previous match is longer than this */
int nice_match; /* Stop searching when current match exceeds this */
/* used by trees.c: */
/* Didn't use ct_data typedef below to suppress compiler warning */
struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
struct tree_desc_s l_desc; /* desc. for literal tree */
struct tree_desc_s d_desc; /* desc. for distance tree */
struct tree_desc_s bl_desc; /* desc. for bit length tree */
ush bl_count[MAX_BITS+1];
/* number of codes at each bit length for an optimal tree */
int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
int heap_len; /* number of elements in the heap */
int heap_max; /* element of largest frequency */
/* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
* The same heap array is used to build all trees.
*/
uch depth[2*L_CODES+1];
/* Depth of each subtree used as tie breaker for trees of equal frequency
*/
uch *l_buf; /* buffer for literals or lengths */
uInt lit_bufsize;
/* Size of match buffer for literals/lengths. There are 4 reasons for
* limiting lit_bufsize to 64K:
* - frequencies can be kept in 16 bit counters
* - if compression is not successful for the first block, all input
* data is still in the window so we can still emit a stored block even
* when input comes from standard input. (This can also be done for
* all blocks if lit_bufsize is not greater than 32K.)
* - if compression is not successful for a file smaller than 64K, we can
* even emit a stored file instead of a stored block (saving 5 bytes).
* This is applicable only for zip (not gzip or zlib).
* - creating new Huffman trees less frequently may not provide fast
* adaptation to changes in the input data statistics. (Take for
* example a binary file with poorly compressible code followed by
* a highly compressible string table.) Smaller buffer sizes give
* fast adaptation but have of course the overhead of transmitting
* trees more frequently.
* - I can't count above 4
*/
uInt last_lit; /* running index in l_buf */
ush *d_buf;
/* Buffer for distances. To simplify the code, d_buf and l_buf have
* the same number of elements. To use different lengths, an extra flag
* array would be necessary.
*/
ulg opt_len; /* bit length of current block with optimal trees */
ulg static_len; /* bit length of current block with static trees */
ulg compressed_len; /* total bit length of compressed file */
uInt matches; /* number of string matches in current block */
int last_eob_len; /* bit length of EOB code for last block */
#ifdef DEBUG_ZLIB
ulg bits_sent; /* bit length of the compressed data */
#endif
ush bi_buf;
/* Output buffer. bits are inserted starting at the bottom (least
* significant bits).
*/
int bi_valid;
/* Number of valid bits in bi_buf. All bits above the last valid bit
* are always zero.
*/
} deflate_state;
typedef struct deflate_workspace {
/* State memory for the deflator */
deflate_state deflate_memory;
Byte window_memory[2 * (1 << MAX_WBITS)];
Pos prev_memory[1 << MAX_WBITS];
Pos head_memory[1 << (MAX_MEM_LEVEL + 7)];
char overlay_memory[(1 << (MAX_MEM_LEVEL + 6)) * (sizeof(ush)+2)];
} deflate_workspace;
/* Output a byte on the stream.
* IN assertion: there is enough room in pending_buf.
*/
#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/* Minimum amount of lookahead, except at the end of the input file.
* See deflate.c for comments about the MIN_MATCH+1.
*/
#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
/* In order to simplify the code, particularly on 16 bit machines, match
* distances are limited to MAX_DIST instead of WSIZE.
*/
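/* Worked example (illustrative): with the default 32K window and the usual
 * MAX_MATCH = 258, MIN_MATCH = 3, MIN_LOOKAHEAD is 258+3+1 = 262, so
 * MAX_DIST(s) = 32768 - 262 = 32506.
 */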
/* in trees.c */
void zlib_tr_init (deflate_state *s);
int zlib_tr_tally (deflate_state *s, unsigned dist, unsigned lc);
ulg zlib_tr_flush_block (deflate_state *s, char *buf, ulg stored_len,
int eof);
void zlib_tr_align (deflate_state *s);
void zlib_tr_stored_block (deflate_state *s, char *buf, ulg stored_len,
int eof);
void zlib_tr_stored_type_only (deflate_state *);
/* ===========================================================================
* Output a short LSB first on the stream.
* IN assertion: there is enough room in pendingBuf.
*/
#define put_short(s, w) { \
put_byte(s, (uch)((w) & 0xff)); \
put_byte(s, (uch)((ush)(w) >> 8)); \
}
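/* Worked example (illustrative): put_short(s, 0x1234) appends 0x34 first,
 * then 0x12 -- least significant byte first.
 */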
/* ===========================================================================
* Reverse the first len bits of a code, using straightforward code (a faster
* method would use a table)
* IN assertion: 1 <= len <= 15
*/
static inline unsigned bi_reverse(unsigned code, /* the value to invert */
int len) /* its bit length */
{
register unsigned res = 0;
do {
res |= code & 1;
code >>= 1, res <<= 1;
} while (--len > 0);
return res >> 1;
}
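/* Worked example (illustrative): bi_reverse(0x06, 5) reverses the five bits
 * 00110 into 01100 and returns 0x0c.
 */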
/* ===========================================================================
* Flush the bit buffer, keeping at most 7 bits in it.
*/
static inline void bi_flush(deflate_state *s)
{
if (s->bi_valid == 16) {
put_short(s, s->bi_buf);
s->bi_buf = 0;
s->bi_valid = 0;
} else if (s->bi_valid >= 8) {
put_byte(s, (Byte)s->bi_buf);
s->bi_buf >>= 8;
s->bi_valid -= 8;
}
}
/* ===========================================================================
* Flush the bit buffer and align the output on a byte boundary
*/
static inline void bi_windup(deflate_state *s)
{
if (s->bi_valid > 8) {
put_short(s, s->bi_buf);
} else if (s->bi_valid > 0) {
put_byte(s, (Byte)s->bi_buf);
}
s->bi_buf = 0;
s->bi_valid = 0;
#ifdef DEBUG_ZLIB
s->bits_sent = (s->bits_sent+7) & ~7;
#endif
}

19
lib/zlib_inflate/Makefile Normal file

@@ -0,0 +1,19 @@
#
# This is a modified version of zlib, which does all memory
# allocation ahead of time.
#
# This is only the decompression code, see zlib_deflate for
# the compression code.
#
# Decompression needs to be serialized for each memory
# allocation.
#
# (The upside of the simplification is that you can't get into
# any nasty situations wrt memory management, and that the
# decompression can be done without blocking on allocation).
#
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate.o
zlib_inflate-objs := inffast.o inflate.o \
inftrees.o inflate_syms.o

312
lib/zlib_inflate/inffast.c Normal file

@@ -0,0 +1,312 @@
/* inffast.c -- fast decoding
* Copyright (C) 1995-2004 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include <linux/zutil.h>
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
#ifndef ASMINF
/* Allow machine dependent optimization for post-increment or pre-increment.
Based on testing to date,
Pre-increment preferred for:
- PowerPC G3 (Adler)
- MIPS R5000 (Randers-Pehrson)
Post-increment preferred for:
- none
No measurable difference:
- Pentium III (Anderson)
- M68060 (Nikl)
*/
#ifdef POSTINC
# define OFF 0
# define PUP(a) *(a)++
#else
# define OFF 1
# define PUP(a) *++(a)
#endif
/*
Decode literal, length, and distance codes and write out the resulting
literal and match bytes until either not enough input or output is
available, an end-of-block is encountered, or a data error is encountered.
When large enough input and output buffers are supplied to inflate(), for
example, a 16K input buffer and a 64K output buffer, more than 95% of the
inflate execution time is spent in this routine.
Entry assumptions:
state->mode == LEN
strm->avail_in >= 6
strm->avail_out >= 258
start >= strm->avail_out
state->bits < 8
On return, state->mode is one of:
LEN -- ran out of enough output space or enough available input
TYPE -- reached end of block code, inflate() to interpret next block
BAD -- error in block data
Notes:
- The maximum input bits used by a length/distance pair is 15 bits for the
length code, 5 bits for the length extra, 15 bits for the distance code,
and 13 bits for the distance extra. This totals 48 bits, or six bytes.
Therefore if strm->avail_in >= 6, then there is enough input to avoid
checking for available input while decoding.
- The maximum bytes that a single length/distance pair can output is 258
bytes, which is the maximum length that can be coded. inflate_fast()
requires strm->avail_out >= 258 for each loop to avoid checking for
output space.
- @start: inflate()'s starting value for strm->avail_out
*/
void inflate_fast(z_streamp strm, unsigned start)
{
struct inflate_state *state;
unsigned char *in; /* local strm->next_in */
unsigned char *last; /* while in < last, enough input available */
unsigned char *out; /* local strm->next_out */
unsigned char *beg; /* inflate()'s initial strm->next_out */
unsigned char *end; /* while out < end, enough space available */
#ifdef INFLATE_STRICT
unsigned dmax; /* maximum distance from zlib header */
#endif
unsigned wsize; /* window size or zero if not using window */
unsigned whave; /* valid bytes in the window */
unsigned write; /* window write index */
unsigned char *window; /* allocated sliding window, if wsize != 0 */
unsigned long hold; /* local strm->hold */
unsigned bits; /* local strm->bits */
code const *lcode; /* local strm->lencode */
code const *dcode; /* local strm->distcode */
unsigned lmask; /* mask for first level of length codes */
unsigned dmask; /* mask for first level of distance codes */
code this; /* retrieved table entry */
unsigned op; /* code bits, operation, extra bits, or */
/* window position, window bytes to copy */
unsigned len; /* match length, unused bytes */
unsigned dist; /* match distance */
unsigned char *from; /* where to copy match from */
/* copy state to local variables */
state = (struct inflate_state *)strm->state;
in = strm->next_in - OFF;
last = in + (strm->avail_in - 5);
out = strm->next_out - OFF;
beg = out - (start - strm->avail_out);
end = out + (strm->avail_out - 257);
#ifdef INFLATE_STRICT
dmax = state->dmax;
#endif
wsize = state->wsize;
whave = state->whave;
write = state->write;
window = state->window;
hold = state->hold;
bits = state->bits;
lcode = state->lencode;
dcode = state->distcode;
lmask = (1U << state->lenbits) - 1;
dmask = (1U << state->distbits) - 1;
/* decode literals and length/distances until end-of-block or not enough
input data or output space */
do {
if (bits < 15) {
hold += (unsigned long)(PUP(in)) << bits;
bits += 8;
hold += (unsigned long)(PUP(in)) << bits;
bits += 8;
}
this = lcode[hold & lmask];
dolen:
op = (unsigned)(this.bits);
hold >>= op;
bits -= op;
op = (unsigned)(this.op);
if (op == 0) { /* literal */
PUP(out) = (unsigned char)(this.val);
}
else if (op & 16) { /* length base */
len = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
hold += (unsigned long)(PUP(in)) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
hold >>= op;
bits -= op;
}
if (bits < 15) {
hold += (unsigned long)(PUP(in)) << bits;
bits += 8;
hold += (unsigned long)(PUP(in)) << bits;
bits += 8;
}
this = dcode[hold & dmask];
dodist:
op = (unsigned)(this.bits);
hold >>= op;
bits -= op;
op = (unsigned)(this.op);
if (op & 16) { /* distance base */
dist = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (bits < op) {
hold += (unsigned long)(PUP(in)) << bits;
bits += 8;
if (bits < op) {
hold += (unsigned long)(PUP(in)) << bits;
bits += 8;
}
}
dist += (unsigned)hold & ((1U << op) - 1);
#ifdef INFLATE_STRICT
if (dist > dmax) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#endif
hold >>= op;
bits -= op;
op = (unsigned)(out - beg); /* max distance in output */
if (dist > op) { /* see if copy from window */
op = dist - op; /* distance back in window */
if (op > whave) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
from = window - OFF;
if (write == 0) { /* very common case */
from += wsize - op;
if (op < len) { /* some from window */
len -= op;
do {
PUP(out) = PUP(from);
} while (--op);
from = out - dist; /* rest from output */
}
}
else if (write < op) { /* wrap around window */
from += wsize + write - op;
op -= write;
if (op < len) { /* some from end of window */
len -= op;
do {
PUP(out) = PUP(from);
} while (--op);
from = window - OFF;
if (write < len) { /* some from start of window */
op = write;
len -= op;
do {
PUP(out) = PUP(from);
} while (--op);
from = out - dist; /* rest from output */
}
}
}
else { /* contiguous in window */
from += write - op;
if (op < len) { /* some from window */
len -= op;
do {
PUP(out) = PUP(from);
} while (--op);
from = out - dist; /* rest from output */
}
}
while (len > 2) {
PUP(out) = PUP(from);
PUP(out) = PUP(from);
PUP(out) = PUP(from);
len -= 3;
}
if (len) {
PUP(out) = PUP(from);
if (len > 1)
PUP(out) = PUP(from);
}
}
else {
from = out - dist; /* copy direct from output */
do { /* minimum length is three */
PUP(out) = PUP(from);
PUP(out) = PUP(from);
PUP(out) = PUP(from);
len -= 3;
} while (len > 2);
if (len) {
PUP(out) = PUP(from);
if (len > 1)
PUP(out) = PUP(from);
}
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
this = dcode[this.val + (hold & ((1U << op) - 1))];
goto dodist;
}
else {
strm->msg = (char *)"invalid distance code";
state->mode = BAD;
break;
}
}
else if ((op & 64) == 0) { /* 2nd level length code */
this = lcode[this.val + (hold & ((1U << op) - 1))];
goto dolen;
}
else if (op & 32) { /* end-of-block */
state->mode = TYPE;
break;
}
else {
strm->msg = (char *)"invalid literal/length code";
state->mode = BAD;
break;
}
} while (in < last && out < end);
/* return unused bytes (on entry, bits < 8, so in won't go too far back) */
len = bits >> 3;
in -= len;
bits -= len << 3;
hold &= (1U << bits) - 1;
/* update state and return */
strm->next_in = in + OFF;
strm->next_out = out + OFF;
strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
strm->avail_out = (unsigned)(out < end ?
257 + (end - out) : 257 - (out - end));
state->hold = hold;
state->bits = bits;
return;
}
/*
inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe):
- Using bit fields for code structure
- Different op definition to avoid & for extra bits (do & for table bits)
- Three separate decoding do-loops for direct, window, and write == 0
- Special case for distance > 1 copies to do overlapped load and store copy
- Explicit branch predictions (based on measured branch probabilities)
- Deferring the match copy and interspersing it with decoding subsequent codes
- Swapping literal/length else
- Swapping window/direct else
- Larger unrolled copy loops (three is about right)
- Moving len -= 3 statement into middle of loop
*/
#endif /* !ASMINF */

11
lib/zlib_inflate/inffast.h Normal file

@@ -0,0 +1,11 @@
/* inffast.h -- header to use inffast.c
* Copyright (C) 1995-2003 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
part of the implementation of the compression library and is
subject to change. Applications should only use zlib.h.
*/
void inflate_fast (z_streamp strm, unsigned start);

94
lib/zlib_inflate/inffixed.h Normal file

@@ -0,0 +1,94 @@
/* inffixed.h -- table for decoding fixed codes
* Generated automatically by makefixed().
*/
/* WARNING: this file should *not* be used by applications. It
is part of the implementation of the compression library and
is subject to change. Applications should only use zlib.h.
*/
static const code lenfix[512] = {
{96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48},
{0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128},
{0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59},
{0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176},
{0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20},
{21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100},
{0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8},
{0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216},
{18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76},
{0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114},
{0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2},
{0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148},
{20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42},
{0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86},
{0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15},
{0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236},
{16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62},
{0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142},
{0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31},
{0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162},
{0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25},
{0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105},
{0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4},
{0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202},
{17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69},
{0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125},
{0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13},
{0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195},
{19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35},
{0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91},
{0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19},
{0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246},
{16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55},
{0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135},
{0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99},
{0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190},
{0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16},
{20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96},
{0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6},
{0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209},
{17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72},
{0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116},
{0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4},
{0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153},
{20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44},
{0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82},
{0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11},
{0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229},
{16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58},
{0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138},
{0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51},
{0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173},
{0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30},
{0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110},
{0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0},
{0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195},
{16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65},
{0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121},
{0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9},
{0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258},
{19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37},
{0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93},
{0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23},
{0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251},
{16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51},
{0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131},
{0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67},
{0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183},
{0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23},
{64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103},
{0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9},
{0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223},
{18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79},
{0,9,255}
};
static const code distfix[32] = {
{16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025},
{21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193},
{18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385},
{19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577},
{16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073},
{22,5,193},{64,5,0}
};

918
lib/zlib_inflate/inflate.c Normal file

@@ -0,0 +1,918 @@
/* inflate.c -- zlib decompression
* Copyright (C) 1995-2005 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Based on zlib 1.2.3 but modified for the Linux Kernel by
* Richard Purdie <richard@openedhand.com>
*
* Changes mainly for static instead of dynamic memory allocation
*
*/
#include <linux/zutil.h>
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
#include "infutil.h"
int zlib_inflate_workspacesize(void)
{
return sizeof(struct inflate_workspace);
}
int zlib_inflateReset(z_streamp strm)
{
struct inflate_state *state;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
strm->total_in = strm->total_out = state->total = 0;
strm->msg = NULL;
strm->adler = 1; /* to support ill-conceived Java test suite */
state->mode = HEAD;
state->last = 0;
state->havedict = 0;
state->dmax = 32768U;
state->hold = 0;
state->bits = 0;
state->lencode = state->distcode = state->next = state->codes;
/* Initialise Window */
state->wsize = 1U << state->wbits;
state->write = 0;
state->whave = 0;
return Z_OK;
}
#if 0
int zlib_inflatePrime(z_streamp strm, int bits, int value)
{
struct inflate_state *state;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR;
value &= (1L << bits) - 1;
state->hold += value << state->bits;
state->bits += bits;
return Z_OK;
}
#endif
int zlib_inflateInit2(z_streamp strm, int windowBits)
{
struct inflate_state *state;
if (strm == NULL) return Z_STREAM_ERROR;
strm->msg = NULL; /* in case we return an error */
state = &WS(strm)->inflate_state;
strm->state = (struct internal_state *)state;
if (windowBits < 0) {
state->wrap = 0;
windowBits = -windowBits;
}
else {
state->wrap = (windowBits >> 4) + 1;
}
if (windowBits < 8 || windowBits > 15) {
return Z_STREAM_ERROR;
}
state->wbits = (unsigned)windowBits;
state->window = &WS(strm)->working_window[0];
return zlib_inflateReset(strm);
}
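/*
 * Illustrative caller sketch, not part of the original file: as with the
 * deflate side, the caller supplies the workspace up front. Assumes
 * <linux/vmalloc.h> and <linux/errno.h>; the function name is invented
 * for demonstration.
 */
static int __maybe_unused example_inflate(void *dst, unsigned int dlen,
					  void *src, unsigned int slen)
{
	z_stream strm;
	int ret;

	strm.workspace = vmalloc(zlib_inflate_workspacesize());
	if (!strm.workspace)
		return -ENOMEM;
	if (zlib_inflateInit2(&strm, MAX_WBITS) != Z_OK) {
		vfree(strm.workspace);
		return -EINVAL;
	}
	strm.next_in = src;
	strm.avail_in = slen;
	strm.next_out = dst;
	strm.avail_out = dlen;
	/* single shot: all input present and the output buffer large enough */
	ret = zlib_inflate(&strm, Z_FINISH);
	zlib_inflateEnd(&strm);
	vfree(strm.workspace);
	return ret == Z_STREAM_END ? (int)strm.total_out : -EIO;
}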
/*
Return state with length and distance decoding tables and index sizes set to
fixed code decoding. This returns fixed tables from inffixed.h.
*/
static void zlib_fixedtables(struct inflate_state *state)
{
# include "inffixed.h"
state->lencode = lenfix;
state->lenbits = 9;
state->distcode = distfix;
state->distbits = 5;
}
/*
Update the window with the last wsize (normally 32K) bytes written before
returning. This is only called when a window is already in use, or when
output has been written during this inflate call, but the end of the deflate
stream has not been reached yet. It is also called to window dictionary data
when a dictionary is loaded.
Providing output buffers larger than 32K to inflate() should provide a speed
advantage, since only the last 32K of output is copied to the sliding window
upon return from inflate(), and since all distances after the first 32K of
output will fall in the output data, making match copies simpler and faster.
The advantage may be dependent on the size of the processor's data caches.
*/
static void zlib_updatewindow(z_streamp strm, unsigned out)
{
struct inflate_state *state;
unsigned copy, dist;
state = (struct inflate_state *)strm->state;
/* copy state->wsize or less output bytes into the circular window */
copy = out - strm->avail_out;
if (copy >= state->wsize) {
memcpy(state->window, strm->next_out - state->wsize, state->wsize);
state->write = 0;
state->whave = state->wsize;
}
else {
dist = state->wsize - state->write;
if (dist > copy) dist = copy;
memcpy(state->window + state->write, strm->next_out - copy, dist);
copy -= dist;
if (copy) {
memcpy(state->window, strm->next_out - copy, copy);
state->write = copy;
state->whave = state->wsize;
}
else {
state->write += dist;
if (state->write == state->wsize) state->write = 0;
if (state->whave < state->wsize) state->whave += dist;
}
}
}
/*
* At the end of a Deflate-compressed PPP packet, we expect to have seen
* a `stored' block type value but not the (zero) length bytes.
*/
/*
Returns true if inflate is currently at the end of a block generated by
Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
implementation to provide an additional safety check. PPP uses
Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
block. When decompressing, PPP checks that at the end of input packet,
inflate is waiting for these length bytes.
*/
static int zlib_inflateSyncPacket(z_streamp strm)
{
struct inflate_state *state;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (state->mode == STORED && state->bits == 0) {
state->mode = TYPE;
return Z_OK;
}
return Z_DATA_ERROR;
}
/* Macros for inflate(): */
/* check function to use adler32() for zlib or crc32() for gzip */
#define UPDATE(check, buf, len) zlib_adler32(check, buf, len)
/* Load registers with state in inflate() for speed */
#define LOAD() \
do { \
put = strm->next_out; \
left = strm->avail_out; \
next = strm->next_in; \
have = strm->avail_in; \
hold = state->hold; \
bits = state->bits; \
} while (0)
/* Restore state from registers in inflate() */
#define RESTORE() \
do { \
strm->next_out = put; \
strm->avail_out = left; \
strm->next_in = next; \
strm->avail_in = have; \
state->hold = hold; \
state->bits = bits; \
} while (0)
/* Clear the input bit accumulator */
#define INITBITS() \
do { \
hold = 0; \
bits = 0; \
} while (0)
/* Get a byte of input into the bit accumulator, or return from inflate()
if there is no input available. */
#define PULLBYTE() \
do { \
if (have == 0) goto inf_leave; \
have--; \
hold += (unsigned long)(*next++) << bits; \
bits += 8; \
} while (0)
/* Assure that there are at least n bits in the bit accumulator. If there is
not enough available input to do that, then return from inflate(). */
#define NEEDBITS(n) \
do { \
while (bits < (unsigned)(n)) \
PULLBYTE(); \
} while (0)
/* Return the low n bits of the bit accumulator (n < 16) */
#define BITS(n) \
((unsigned)hold & ((1U << (n)) - 1))
/* Remove n bits from the bit accumulator */
#define DROPBITS(n) \
do { \
hold >>= (n); \
bits -= (unsigned)(n); \
} while (0)
/* Remove zero to seven bits as needed to go to a byte boundary */
#define BYTEBITS() \
do { \
hold >>= bits & 7; \
bits -= bits & 7; \
} while (0)
/* Reverse the bytes in a 32-bit value */
#define REVERSE(q) \
((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
(((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
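/* Worked example (illustrative): REVERSE(0x12345678) yields 0x78563412. */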
/*
inflate() uses a state machine to process as much input data and generate as
much output data as possible before returning. The state machine is
structured roughly as follows:
for (;;) switch (state) {
...
case STATEn:
if (not enough input data or output space to make progress)
return;
... make progress ...
state = STATEm;
break;
...
}
so when inflate() is called again, the same case is attempted again, and
if the appropriate resources are provided, the machine proceeds to the
next state. The NEEDBITS() macro is usually the way the state evaluates
whether it can proceed or should return. NEEDBITS() does the return if
the requested bits are not available. The typical use of the BITS macros
is:
NEEDBITS(n);
... do something with BITS(n) ...
DROPBITS(n);
where NEEDBITS(n) either returns from inflate() if there isn't enough
input left to load n bits into the accumulator, or it continues. BITS(n)
gives the low n bits in the accumulator. When done, DROPBITS(n) drops
the low n bits off the accumulator. INITBITS() clears the accumulator
and sets the number of available bits to zero. BYTEBITS() discards just
enough bits to put the accumulator on a byte boundary. After BYTEBITS()
and a NEEDBITS(8), then BITS(8) would return the next byte in the stream.
NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return
if there is no input available. The decoding of variable length codes uses
PULLBYTE() directly in order to pull just enough bytes to decode the next
code, and no more.
Some states loop until they get enough input, making sure that enough
state information is maintained to continue the loop where it left off
if NEEDBITS() returns in the loop. For example, want, need, and keep
would all have to actually be part of the saved state in case NEEDBITS()
returns:
case STATEw:
while (want < need) {
NEEDBITS(n);
keep[want++] = BITS(n);
DROPBITS(n);
}
state = STATEx;
case STATEx:
As shown above, if the next state is also the next case, then the break
is omitted.
A state may also return if there is not enough output space available to
complete that state. Those states are copying stored data, writing a
literal byte, and copying a matching string.
When returning, a "goto inf_leave" is used to update the total counters,
update the check value, and determine whether any progress has been made
during that inflate() call in order to return the proper return code.
Progress is defined as a change in either strm->avail_in or strm->avail_out.
When there is a window, goto inf_leave will update the window with the last
output written. If a goto inf_leave occurs in the middle of decompression
and there is no window currently, goto inf_leave will create one and copy
output to the window for the next call of inflate().
In this implementation, the flush parameter of inflate() only affects the
return code (per zlib.h). inflate() always writes as much as possible to
strm->next_out, given the space available and the provided input--the effect
documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers
the allocation of and copying into a sliding window until necessary, which
provides the effect documented in zlib.h for Z_FINISH when the entire input
stream is available. So the only thing the flush parameter actually does is:
when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it
will return Z_BUF_ERROR if it has not reached the end of the stream.
*/
int zlib_inflate(z_streamp strm, int flush)
{
struct inflate_state *state;
unsigned char *next; /* next input */
unsigned char *put; /* next output */
unsigned have, left; /* available input and output */
unsigned long hold; /* bit buffer */
unsigned bits; /* bits in bit buffer */
unsigned in, out; /* save starting available input and output */
unsigned copy; /* number of stored or match bytes to copy */
unsigned char *from; /* where to copy match bytes from */
code this; /* current decoding table entry */
code last; /* parent table entry */
unsigned len; /* length to copy for repeats, bits to drop */
int ret; /* return code */
static const unsigned short order[19] = /* permutation of code lengths */
{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
/* Do not check for strm->next_out == NULL here as ppc zImage
inflates to strm->next_out = 0 */
if (strm == NULL || strm->state == NULL ||
(strm->next_in == NULL && strm->avail_in != 0))
return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
LOAD();
in = have;
out = left;
ret = Z_OK;
for (;;)
switch (state->mode) {
case HEAD:
if (state->wrap == 0) {
state->mode = TYPEDO;
break;
}
NEEDBITS(16);
if (
((BITS(8) << 8) + (hold >> 8)) % 31) {
strm->msg = (char *)"incorrect header check";
state->mode = BAD;
break;
}
if (BITS(4) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
state->mode = BAD;
break;
}
DROPBITS(4);
len = BITS(4) + 8;
if (len > state->wbits) {
strm->msg = (char *)"invalid window size";
state->mode = BAD;
break;
}
state->dmax = 1U << len;
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = hold & 0x200 ? DICTID : TYPE;
INITBITS();
break;
case DICTID:
NEEDBITS(32);
strm->adler = state->check = REVERSE(hold);
INITBITS();
state->mode = DICT;
case DICT:
if (state->havedict == 0) {
RESTORE();
return Z_NEED_DICT;
}
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = TYPE;
case TYPE:
if (flush == Z_BLOCK) goto inf_leave;
case TYPEDO:
if (state->last) {
BYTEBITS();
state->mode = CHECK;
break;
}
NEEDBITS(3);
state->last = BITS(1);
DROPBITS(1);
switch (BITS(2)) {
case 0: /* stored block */
state->mode = STORED;
break;
case 1: /* fixed block */
zlib_fixedtables(state);
state->mode = LEN; /* decode codes */
break;
case 2: /* dynamic block */
state->mode = TABLE;
break;
case 3:
strm->msg = (char *)"invalid block type";
state->mode = BAD;
}
DROPBITS(2);
break;
case STORED:
BYTEBITS(); /* go to byte boundary */
NEEDBITS(32);
if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
strm->msg = (char *)"invalid stored block lengths";
state->mode = BAD;
break;
}
state->length = (unsigned)hold & 0xffff;
INITBITS();
state->mode = COPY;
case COPY:
copy = state->length;
if (copy) {
if (copy > have) copy = have;
if (copy > left) copy = left;
if (copy == 0) goto inf_leave;
memcpy(put, next, copy);
have -= copy;
next += copy;
left -= copy;
put += copy;
state->length -= copy;
break;
}
state->mode = TYPE;
break;
case TABLE:
NEEDBITS(14);
state->nlen = BITS(5) + 257;
DROPBITS(5);
state->ndist = BITS(5) + 1;
DROPBITS(5);
state->ncode = BITS(4) + 4;
DROPBITS(4);
#ifndef PKZIP_BUG_WORKAROUND
if (state->nlen > 286 || state->ndist > 30) {
strm->msg = (char *)"too many length or distance symbols";
state->mode = BAD;
break;
}
#endif
state->have = 0;
state->mode = LENLENS;
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
state->lens[order[state->have++]] = (unsigned short)BITS(3);
DROPBITS(3);
}
while (state->have < 19)
state->lens[order[state->have++]] = 0;
state->next = state->codes;
state->lencode = (code const *)(state->next);
state->lenbits = 7;
ret = zlib_inflate_table(CODES, state->lens, 19, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid code lengths set";
state->mode = BAD;
break;
}
state->have = 0;
state->mode = CODELENS;
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
this = state->lencode[BITS(state->lenbits)];
if ((unsigned)(this.bits) <= bits) break;
PULLBYTE();
}
if (this.val < 16) {
NEEDBITS(this.bits);
DROPBITS(this.bits);
state->lens[state->have++] = this.val;
}
else {
if (this.val == 16) {
NEEDBITS(this.bits + 2);
DROPBITS(this.bits);
if (state->have == 0) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
len = state->lens[state->have - 1];
copy = 3 + BITS(2);
DROPBITS(2);
}
else if (this.val == 17) {
NEEDBITS(this.bits + 3);
DROPBITS(this.bits);
len = 0;
copy = 3 + BITS(3);
DROPBITS(3);
}
else {
NEEDBITS(this.bits + 7);
DROPBITS(this.bits);
len = 0;
copy = 11 + BITS(7);
DROPBITS(7);
}
if (state->have + copy > state->nlen + state->ndist) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
while (copy--)
state->lens[state->have++] = (unsigned short)len;
}
}
/* handle error breaks in while */
if (state->mode == BAD) break;
/* build code tables */
state->next = state->codes;
state->lencode = (code const *)(state->next);
state->lenbits = 9;
ret = zlib_inflate_table(LENS, state->lens, state->nlen, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid literal/lengths set";
state->mode = BAD;
break;
}
state->distcode = (code const *)(state->next);
state->distbits = 6;
ret = zlib_inflate_table(DISTS, state->lens + state->nlen, state->ndist,
&(state->next), &(state->distbits), state->work);
if (ret) {
strm->msg = (char *)"invalid distances set";
state->mode = BAD;
break;
}
state->mode = LEN;
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
inflate_fast(strm, out);
LOAD();
break;
}
for (;;) {
this = state->lencode[BITS(state->lenbits)];
if ((unsigned)(this.bits) <= bits) break;
PULLBYTE();
}
if (this.op && (this.op & 0xf0) == 0) {
last = this;
for (;;) {
this = state->lencode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + this.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
}
DROPBITS(this.bits);
state->length = (unsigned)this.val;
if ((int)(this.op) == 0) {
state->mode = LIT;
break;
}
if (this.op & 32) {
state->mode = TYPE;
break;
}
if (this.op & 64) {
strm->msg = (char *)"invalid literal/length code";
state->mode = BAD;
break;
}
state->extra = (unsigned)(this.op) & 15;
state->mode = LENEXT;
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->length += BITS(state->extra);
DROPBITS(state->extra);
}
state->mode = DIST;
case DIST:
for (;;) {
this = state->distcode[BITS(state->distbits)];
if ((unsigned)(this.bits) <= bits) break;
PULLBYTE();
}
if ((this.op & 0xf0) == 0) {
last = this;
for (;;) {
this = state->distcode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + this.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
}
DROPBITS(this.bits);
if (this.op & 64) {
strm->msg = (char *)"invalid distance code";
state->mode = BAD;
break;
}
state->offset = (unsigned)this.val;
state->extra = (unsigned)(this.op) & 15;
state->mode = DISTEXT;
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->offset += BITS(state->extra);
DROPBITS(state->extra);
}
#ifdef INFLATE_STRICT
if (state->offset > state->dmax) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#endif
if (state->offset > state->whave + out - left) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
state->mode = MATCH;
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
if (state->offset > copy) { /* copy from window */
copy = state->offset - copy;
if (copy > state->write) {
copy -= state->write;
from = state->window + (state->wsize - copy);
}
else
from = state->window + (state->write - copy);
if (copy > state->length) copy = state->length;
}
else { /* copy from output */
from = put - state->offset;
copy = state->length;
}
if (copy > left) copy = left;
left -= copy;
state->length -= copy;
do {
*put++ = *from++;
} while (--copy);
if (state->length == 0) state->mode = LEN;
break;
case LIT:
if (left == 0) goto inf_leave;
*put++ = (unsigned char)(state->length);
left--;
state->mode = LEN;
break;
case CHECK:
if (state->wrap) {
NEEDBITS(32);
out -= left;
strm->total_out += out;
state->total += out;
if (out)
strm->adler = state->check =
UPDATE(state->check, put - out, out);
out = left;
if ((
REVERSE(hold)) != state->check) {
strm->msg = (char *)"incorrect data check";
state->mode = BAD;
break;
}
INITBITS();
}
state->mode = DONE;
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
case BAD:
ret = Z_DATA_ERROR;
goto inf_leave;
case MEM:
return Z_MEM_ERROR;
case SYNC:
default:
return Z_STREAM_ERROR;
}
/*
Return from inflate(), updating the total counts and the check value.
If there was no progress during the inflate() call, return a buffer
error. Call zlib_updatewindow() to create and/or update the window state.
*/
inf_leave:
RESTORE();
if (state->wsize || (state->mode < CHECK && out != strm->avail_out))
zlib_updatewindow(strm, out);
in -= strm->avail_in;
out -= strm->avail_out;
strm->total_in += in;
strm->total_out += out;
state->total += out;
if (state->wrap && out)
strm->adler = state->check =
UPDATE(state->check, strm->next_out - out, out);
strm->data_type = state->bits + (state->last ? 64 : 0) +
(state->mode == TYPE ? 128 : 0);
if (flush == Z_PACKET_FLUSH && ret == Z_OK &&
strm->avail_out != 0 && strm->avail_in == 0)
return zlib_inflateSyncPacket(strm);
if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
ret = Z_BUF_ERROR;
return ret;
}
int zlib_inflateEnd(z_streamp strm)
{
if (strm == NULL || strm->state == NULL)
return Z_STREAM_ERROR;
return Z_OK;
}
#if 0
int zlib_inflateSetDictionary(z_streamp strm, const Byte *dictionary,
uInt dictLength)
{
struct inflate_state *state;
unsigned long id;
/* check state */
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (state->wrap != 0 && state->mode != DICT)
return Z_STREAM_ERROR;
/* check for correct dictionary id */
if (state->mode == DICT) {
id = zlib_adler32(0L, NULL, 0);
id = zlib_adler32(id, dictionary, dictLength);
if (id != state->check)
return Z_DATA_ERROR;
}
/* copy dictionary to window */
zlib_updatewindow(strm, strm->avail_out);
if (dictLength > state->wsize) {
memcpy(state->window, dictionary + dictLength - state->wsize,
state->wsize);
state->whave = state->wsize;
}
else {
memcpy(state->window + state->wsize - dictLength, dictionary,
dictLength);
state->whave = dictLength;
}
state->havedict = 1;
return Z_OK;
}
#endif
#if 0
/*
Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found
or when out of input. When called, *have is the number of pattern bytes
found in order so far, in 0..3. On return *have is updated to the new
state. If on return *have equals four, then the pattern was found and the
return value is how many bytes were read including the last byte of the
pattern. If *have is less than four, then the pattern has not been found
yet and the return value is len. In the latter case, zlib_syncsearch() can be
called again with more data and the *have state. *have is initialized to
zero for the first call.
*/
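/*
 * Worked example (illustrative): starting with *have == 0 and
 * buf = {0x12, 0x00, 0x00, 0xff}, the 0x12 resets the match, the two
 * zeros and the 0xff advance it, so the call returns 4 with *have == 3;
 * a following buffer beginning with 0xff completes the pattern.
 */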
static unsigned zlib_syncsearch(unsigned *have, unsigned char *buf,
unsigned len)
{
unsigned got;
unsigned next;
got = *have;
next = 0;
while (next < len && got < 4) {
if ((int)(buf[next]) == (got < 2 ? 0 : 0xff))
got++;
else if (buf[next])
got = 0;
else
got = 4 - got;
next++;
}
*have = got;
return next;
}
#endif
#if 0
int zlib_inflateSync(z_streamp strm)
{
unsigned len; /* number of bytes to look at or looked at */
unsigned long in, out; /* temporary to save total_in and total_out */
unsigned char buf[4]; /* to restore bit buffer to byte string */
struct inflate_state *state;
/* check parameters */
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR;
/* if first time, start search in bit buffer */
if (state->mode != SYNC) {
state->mode = SYNC;
state->hold <<= state->bits & 7;
state->bits -= state->bits & 7;
len = 0;
while (state->bits >= 8) {
buf[len++] = (unsigned char)(state->hold);
state->hold >>= 8;
state->bits -= 8;
}
state->have = 0;
zlib_syncsearch(&(state->have), buf, len);
}
/* search available input */
len = zlib_syncsearch(&(state->have), strm->next_in, strm->avail_in);
strm->avail_in -= len;
strm->next_in += len;
strm->total_in += len;
/* return no joy or set up to restart inflate() on a new block */
if (state->have != 4) return Z_DATA_ERROR;
in = strm->total_in; out = strm->total_out;
zlib_inflateReset(strm);
strm->total_in = in; strm->total_out = out;
state->mode = TYPE;
return Z_OK;
}
#endif
/*
* This subroutine adds the data at next_in/avail_in to the output history
* without performing any output. The output buffer must be "caught up";
* i.e. there must be no pending output (which should always be the case). The state must
* be waiting on the start of a block (i.e. mode == TYPE or HEAD). On exit,
* the output will also be caught up, and the checksum will have been updated
* if need be.
*/
int zlib_inflateIncomp(z_stream *z)
{
struct inflate_state *state = (struct inflate_state *)z->state;
Byte *saved_no = z->next_out;
uInt saved_ao = z->avail_out;
if (state->mode != TYPE && state->mode != HEAD)
return Z_DATA_ERROR;
/* Setup some variables to allow misuse of updateWindow */
z->avail_out = 0;
z->next_out = z->next_in + z->avail_in;
zlib_updatewindow(z, z->avail_in);
/* Restore saved variables */
z->avail_out = saved_ao;
z->next_out = saved_no;
z->adler = state->check =
UPDATE(state->check, z->next_in, z->avail_in);
z->total_out += z->avail_in;
z->total_in += z->avail_in;
z->next_in += z->avail_in;
state->total += z->avail_in;
z->avail_in = 0;
return Z_OK;
}

107
lib/zlib_inflate/inflate.h Normal file

@@ -0,0 +1,107 @@
/* inflate.h -- internal inflate state definition
* Copyright (C) 1995-2004 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
part of the implementation of the compression library and is
subject to change. Applications should only use zlib.h.
*/
/* Possible inflate modes between inflate() calls */
typedef enum {
HEAD, /* i: waiting for magic header */
FLAGS, /* i: waiting for method and flags (gzip) */
TIME, /* i: waiting for modification time (gzip) */
OS, /* i: waiting for extra flags and operating system (gzip) */
EXLEN, /* i: waiting for extra length (gzip) */
EXTRA, /* i: waiting for extra bytes (gzip) */
NAME, /* i: waiting for end of file name (gzip) */
COMMENT, /* i: waiting for end of comment (gzip) */
HCRC, /* i: waiting for header crc (gzip) */
DICTID, /* i: waiting for dictionary check value */
DICT, /* waiting for inflateSetDictionary() call */
TYPE, /* i: waiting for type bits, including last-flag bit */
TYPEDO, /* i: same, but skip check to exit inflate on new block */
STORED, /* i: waiting for stored size (length and complement) */
COPY, /* i/o: waiting for input or output to copy stored block */
TABLE, /* i: waiting for dynamic block table lengths */
LENLENS, /* i: waiting for code length code lengths */
CODELENS, /* i: waiting for length/lit and distance code lengths */
LEN, /* i: waiting for length/lit code */
LENEXT, /* i: waiting for length extra bits */
DIST, /* i: waiting for distance code */
DISTEXT, /* i: waiting for distance extra bits */
MATCH, /* o: waiting for output space to copy string */
LIT, /* o: waiting for output space to write literal */
CHECK, /* i: waiting for 32-bit check value */
LENGTH, /* i: waiting for 32-bit length (gzip) */
DONE, /* finished check, done -- remain here until reset */
BAD, /* got a data error -- remain here until reset */
MEM, /* got an inflate() memory error -- remain here until reset */
SYNC /* looking for synchronization bytes to restart inflate() */
} inflate_mode;
/*
State transitions between above modes -
(most modes can go to the BAD or MEM mode -- not shown for clarity)
Process header:
HEAD -> (gzip) or (zlib)
(gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME
NAME -> COMMENT -> HCRC -> TYPE
(zlib) -> DICTID or TYPE
DICTID -> DICT -> TYPE
Read deflate blocks:
TYPE -> STORED or TABLE or LEN or CHECK
STORED -> COPY -> TYPE
TABLE -> LENLENS -> CODELENS -> LEN
Read deflate codes:
LEN -> LENEXT or LIT or TYPE
LENEXT -> DIST -> DISTEXT -> MATCH -> LEN
LIT -> LEN
Process trailer:
CHECK -> LENGTH -> DONE
*/
/* state maintained between inflate() calls. Approximately 7K bytes. */
struct inflate_state {
inflate_mode mode; /* current inflate mode */
int last; /* true if processing last block */
int wrap; /* bit 0 true for zlib, bit 1 true for gzip */
int havedict; /* true if dictionary provided */
int flags; /* gzip header method and flags (0 if zlib) */
unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */
unsigned long check; /* protected copy of check value */
unsigned long total; /* protected copy of output count */
/* gz_headerp head; */ /* where to save gzip header information */
/* sliding window */
unsigned wbits; /* log base 2 of requested window size */
unsigned wsize; /* window size or zero if not using window */
unsigned whave; /* valid bytes in the window */
unsigned write; /* window write index */
unsigned char *window; /* allocated sliding window, if needed */
/* bit accumulator */
unsigned long hold; /* input bit accumulator */
unsigned bits; /* number of bits in "in" */
/* for string and stored block copying */
unsigned length; /* literal or length of data to copy */
unsigned offset; /* distance back to copy string from */
/* for table and code decoding */
unsigned extra; /* extra bits needed */
/* fixed and dynamic code tables */
code const *lencode; /* starting table for length/literal codes */
code const *distcode; /* starting table for distance codes */
unsigned lenbits; /* index bits for lencode */
unsigned distbits; /* index bits for distcode */
/* dynamic table building */
unsigned ncode; /* number of code length code lengths */
unsigned nlen; /* number of length code lengths */
unsigned ndist; /* number of distance code lengths */
unsigned have; /* number of code lengths in lens[] */
code *next; /* next available space in codes[] */
unsigned short lens[320]; /* temporary storage for code lengths */
unsigned short work[288]; /* work area for code table building */
code codes[ENOUGH]; /* space for code tables */
};

19
lib/zlib_inflate/inflate_syms.c Normal file

@@ -0,0 +1,19 @@
/*
* linux/lib/zlib_inflate/inflate_syms.c
*
* Exported symbols for the inflate functionality.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/zlib.h>
EXPORT_SYMBOL(zlib_inflate_workspacesize);
EXPORT_SYMBOL(zlib_inflate);
EXPORT_SYMBOL(zlib_inflateInit2);
EXPORT_SYMBOL(zlib_inflateEnd);
EXPORT_SYMBOL(zlib_inflateReset);
EXPORT_SYMBOL(zlib_inflateIncomp);
MODULE_LICENSE("GPL");

315
lib/zlib_inflate/inftrees.c Normal file

@@ -0,0 +1,315 @@
/* inftrees.c -- generate Huffman trees for efficient decoding
* Copyright (C) 1995-2005 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include <linux/zutil.h>
#include "inftrees.h"
#define MAXBITS 15
/*
Build a set of tables to decode the provided canonical Huffman code.
The code lengths are lens[0..codes-1]. The result starts at *table,
whose indices are 0..2^bits-1. work is a writable array of at least
lens shorts, which is used as a work area. type is the type of code
to be generated, CODES, LENS, or DISTS. On return, zero is success,
-1 is an invalid code, and +1 means that ENOUGH isn't enough. table
on return points to the next available entry's address. bits is the
requested root table index bits, and on return it is the actual root
table index bits. It will differ if the request is greater than the
longest code or if it is less than the shortest code.
*/
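/*
 * Call pattern for illustration, as used by the CODELENS handling in
 * inflate.c:
 *
 *	state->lencode = (code const *)(state->next);
 *	state->lenbits = 9;
 *	ret = zlib_inflate_table(LENS, state->lens, state->nlen,
 *				 &(state->next), &(state->lenbits),
 *				 state->work);
 */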
int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
code **table, unsigned *bits, unsigned short *work)
{
unsigned len; /* a code's length in bits */
unsigned sym; /* index of code symbols */
unsigned min, max; /* minimum and maximum code lengths */
unsigned root; /* number of index bits for root table */
unsigned curr; /* number of index bits for current table */
unsigned drop; /* code bits to drop for sub-table */
int left; /* number of prefix codes available */
unsigned used; /* code entries in table used */
unsigned huff; /* Huffman code */
unsigned incr; /* for incrementing code, index */
unsigned fill; /* index for replicating entries */
unsigned low; /* low bits for current root entry */
unsigned mask; /* mask for low root bits */
code this; /* table entry for duplication */
code *next; /* next available space in table */
const unsigned short *base; /* base value table to use */
const unsigned short *extra; /* extra bits table to use */
int end; /* use base and extra for symbol > end */
unsigned short count[MAXBITS+1]; /* number of codes of each length */
unsigned short offs[MAXBITS+1]; /* offsets in table for each length */
static const unsigned short lbase[31] = { /* Length codes 257..285 base */
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const unsigned short lext[31] = { /* Length codes 257..285 extra */
16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 201, 196};
static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
8193, 12289, 16385, 24577, 0, 0};
static const unsigned short dext[32] = { /* Distance codes 0..29 extra */
16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
28, 28, 29, 29, 64, 64};
/*
Process a set of code lengths to create a canonical Huffman code. The
code lengths are lens[0..codes-1]. Each length corresponds to the
symbols 0..codes-1. The Huffman code is generated by first sorting the
symbols by length from short to long, and retaining the symbol order
for codes with equal lengths. Then the code starts with all zero bits
for the first code of the shortest length, and the codes are integer
increments for the same length, and zeros are appended as the length
increases. For the deflate format, these bits are stored backwards
from their more natural integer increment ordering, and so when the
decoding tables are built in the large loop below, the integer codes
are incremented backwards.
This routine assumes, but does not check, that all of the entries in
lens[] are in the range 0..MAXBITS. The caller must assure this.
A length of 1..MAXBITS is interpreted as that code length; zero means
that the symbol does not occur in this code.
The codes are sorted by computing a count of codes for each length,
creating from that a table of starting indices for each length in the
sorted table, and then entering the symbols in order in the sorted
table. The sorted table is work[], with that space being provided by
the caller.
The length counts are used for other purposes as well, i.e. finding
the minimum and maximum length codes, determining if there are any
codes at all, checking for a valid set of lengths, and looking ahead
at length counts to determine sub-table sizes when building the
decoding tables.
*/
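/*
   Worked example (illustrative only): for eight symbols A..H with code
   lengths {3, 3, 3, 3, 3, 2, 4, 4} -- the example from RFC 1951 -- the
   counting and sorting steps described above yield

     count[2] = 1, count[3] = 5, count[4] = 2
     offs[2]  = 0, offs[3]  = 1, offs[4]  = 6
     work[]   = { F, A, B, C, D, E, G, H }

   giving the canonical codes F=00, A=010, B=011, C=100, D=101, E=110,
   G=1110, H=1111.
 */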
/* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
for (len = 0; len <= MAXBITS; len++)
count[len] = 0;
for (sym = 0; sym < codes; sym++)
count[lens[sym]]++;
/* bound code lengths, force root to be within code lengths */
root = *bits;
for (max = MAXBITS; max >= 1; max--)
if (count[max] != 0) break;
if (root > max) root = max;
if (max == 0) { /* no symbols to code at all */
this.op = (unsigned char)64; /* invalid code marker */
this.bits = (unsigned char)1;
this.val = (unsigned short)0;
*(*table)++ = this; /* make a table to force an error */
*(*table)++ = this;
*bits = 1;
return 0; /* no symbols, but wait for decoding to report error */
}
for (min = 1; min <= MAXBITS; min++)
if (count[min] != 0) break;
if (root < min) root = min;
/* check for an over-subscribed or incomplete set of lengths */
left = 1;
for (len = 1; len <= MAXBITS; len++) {
left <<= 1;
left -= count[len];
if (left < 0) return -1; /* over-subscribed */
}
if (left > 0 && (type == CODES || max != 1))
return -1; /* incomplete set */
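/* (the computation of left above is the Kraft inequality: the code
   space doubles at each length and count[len] codes consume it.  For
   the lengths {3,3,3,3,3,2,4,4} used as an example earlier, left runs
   2, 3, 1, 0 over len = 1..4 and stays 0 after that, i.e. the code
   is exactly complete) */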
/* generate offsets into symbol table for each length for sorting */
offs[1] = 0;
for (len = 1; len < MAXBITS; len++)
offs[len + 1] = offs[len] + count[len];
/* sort symbols by length, by symbol order within each length */
for (sym = 0; sym < codes; sym++)
if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym;
/*
Create and fill in decoding tables. In this loop, the table being
filled is at next and has curr index bits. The code being used is huff
with length len. That code is converted to an index by dropping drop
bits off of the bottom. For codes where len is less than drop + curr,
those top drop + curr - len bits are incremented through all values to
fill the table with replicated entries.
root is the number of index bits for the root table. When len exceeds
root, sub-tables are created pointed to by the root entry with an index
of the low root bits of huff. This is saved in low to check for when a
new sub-table should be started. drop is zero when the root table is
being filled, and drop is root when sub-tables are being filled.
When a new sub-table is needed, it is necessary to look ahead in the
code lengths to determine what size sub-table is needed. The length
counts are used for this, and so count[] is decremented as codes are
entered in the tables.
used keeps track of how many table entries have been allocated from the
provided *table space. It is checked when a LENS table is being made
against the space in *table, ENOUGH, minus the maximum space needed by
the worst case distance code, MAXD. This should never happen, but the
sufficiency of ENOUGH has not been proven exhaustively, hence the check.
This assumes that when type == LENS, bits == 9.
sym increments through all symbols, and the loop terminates when
all codes of length max, i.e. all codes, have been processed. This
routine permits incomplete codes, so another loop after this one fills
in the rest of the decoding tables with invalid code markers.
*/
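/*
   Sizing example (illustrative): with root == 9, a 5-bit code is
   replicated into 2^(9-5) == 16 root entries, one for each value of
   the four high index bits, while a 12-bit code lands in a sub-table
   of at least 2^(12-9) == 8 entries reached through the low 9 bits
   of the code.
 */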
/* set up for code type */
switch (type) {
case CODES:
base = extra = work; /* dummy value--not used */
end = 19;
break;
case LENS:
base = lbase;
base -= 257;
extra = lext;
extra -= 257;
end = 256;
break;
default: /* DISTS */
base = dbase;
extra = dext;
end = -1;
}
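/* (base and extra were biased by -257 in the LENS case above so that
   they can be indexed directly by work[sym]; length symbols occupy
   257..285) */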
/* initialize state for loop */
huff = 0; /* starting code */
sym = 0; /* starting code symbol */
len = min; /* starting code length */
next = *table; /* current table to fill in */
curr = root; /* current table index bits */
drop = 0; /* current bits to drop from code for index */
low = (unsigned)(-1); /* trigger new sub-table when len > root */
used = 1U << root; /* use root table entries */
mask = used - 1; /* mask for comparing low */
/* check available table space */
if (type == LENS && used >= ENOUGH - MAXD)
return 1;
/* process all codes and make table entries */
for (;;) {
/* create table entry */
this.bits = (unsigned char)(len - drop);
if ((int)(work[sym]) < end) {
this.op = (unsigned char)0;
this.val = work[sym];
}
else if ((int)(work[sym]) > end) {
this.op = (unsigned char)(extra[work[sym]]);
this.val = base[work[sym]];
}
else {
this.op = (unsigned char)(32 + 64); /* end of block */
this.val = 0;
}
/* replicate for those indices with low len bits equal to huff */
incr = 1U << (len - drop);
fill = 1U << curr;
min = fill; /* save offset to next table */
do {
fill -= incr;
next[(huff >> drop) + fill] = this;
} while (fill != 0);
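/* (e.g. with curr == 9 and len - drop == 5: incr == 32 and the loop
   above stores this entry at the 16 indices whose low five bits equal
   huff >> drop, as fill steps 480, 448, ..., 0) */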
/* backwards increment the len-bit code huff */
incr = 1U << (len - 1);
while (huff & incr)
incr >>= 1;
if (incr != 0) {
huff &= incr - 1;
huff += incr;
}
else
huff = 0;
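/* (for len == 3 this visits huff = 000, 100, 010, 110, 001, 101, 011,
   111 -- an ordinary increment applied to the reversed bits) */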
/* go to next symbol, update count, len */
sym++;
if (--(count[len]) == 0) {
if (len == max) break;
len = lens[work[sym]];
}
/* create new sub-table if needed */
if (len > root && (huff & mask) != low) {
/* if first time, transition to sub-tables */
if (drop == 0)
drop = root;
/* increment past last table */
next += min; /* here min is 1 << curr */
/* determine length of next table */
curr = len - drop;
left = (int)(1 << curr);
while (curr + drop < max) {
left -= count[curr + drop];
if (left <= 0) break;
curr++;
left <<= 1;
}
/* check for enough space */
used += 1U << curr;
if (type == LENS && used >= ENOUGH - MAXD)
return 1;
/* point entry in root table to sub-table */
low = huff & mask;
(*table)[low].op = (unsigned char)curr;
(*table)[low].bits = (unsigned char)root;
(*table)[low].val = (unsigned short)(next - *table);
}
}
/*
Fill in rest of table for incomplete codes. This loop is similar to the
loop above in incrementing huff for table indices. It is assumed that
len is equal to curr + drop, so there is no loop needed to increment
through high index bits. When the current sub-table is filled, the loop
drops back to the root table to fill in any remaining entries there.
*/
this.op = (unsigned char)64; /* invalid code marker */
this.bits = (unsigned char)(len - drop);
this.val = (unsigned short)0;
while (huff != 0) {
/* when done with sub-table, drop back to root table */
if (drop != 0 && (huff & mask) != low) {
drop = 0;
len = root;
next = *table;
this.bits = (unsigned char)len;
}
/* put invalid code marker in table */
next[huff >> drop] = this;
/* backwards increment the len-bit code huff */
incr = 1U << (len - 1);
while (huff & incr)
incr >>= 1;
if (incr != 0) {
huff &= incr - 1;
huff += incr;
}
else
huff = 0;
}
/* set return parameters */
*table += used;
*bits = root;
return 0;
}

@@ -0,0 +1,55 @@
/* inftrees.h -- header to use inftrees.c
* Copyright (C) 1995-2005 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
part of the implementation of the compression library and is
subject to change. Applications should only use zlib.h.
*/
/* Structure for decoding tables. Each entry provides either the
information needed to do the operation requested by the code that
indexed that table entry, or it provides a pointer to another
table that indexes more bits of the code. op indicates whether
the entry is a pointer to another table, a literal, a length or
distance, an end-of-block, or an invalid code. For a table
pointer, the low four bits of op is the number of index bits of
that table. For a length or distance, the low four bits of op
is the number of extra bits to get after the code. bits is
the number of bits in this code or part of the code to drop off
of the bit buffer. val is the actual byte to output in the case
of a literal, the base length or distance, or the offset from
the current table to the next table. Each entry is four bytes. */
typedef struct {
unsigned char op; /* operation, extra bits, table bits */
unsigned char bits; /* bits in this part of the code */
unsigned short val; /* offset in table or code value */
} code;
/* op values as set by inflate_table():
00000000 - literal
0000tttt - table link, tttt != 0 is the number of table index bits
0001eeee - length or distance, eeee is the number of extra bits
01100000 - end of block
01000000 - invalid code
*/
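/*
   Given the op values above, a decoder dispatches on an entry roughly
   as follows (a sketch of the branch structure, for orientation only):

     if (this.op == 0)              output this.val as a literal byte
     else if (this.op & 16)         length/distance base is this.val,
                                    with (this.op & 15) extra bits
     else if ((this.op & 64) == 0)  descend into the sub-table at
                                    offset this.val
     else if (this.op & 32)         end of block
     else                           invalid code
 */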
/* Maximum size of dynamic tree. The maximum found in a long but non-
exhaustive search was 1444 code structures (852 for length/literals
and 592 for distances, the latter actually the result of an
exhaustive search). The true maximum is not known, but the value
below is more than safe. */
#define ENOUGH 2048
#define MAXD 592
/* Type of code to build for inftable() */
typedef enum {
CODES,
LENS,
DISTS
} codetype;
extern int zlib_inflate_table (codetype type, unsigned short *lens,
unsigned codes, code **table,
unsigned *bits, unsigned short *work);

@@ -0,0 +1,25 @@
/* infutil.h -- types and macros common to blocks and codes
* Copyright (C) 1995-1998 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
part of the implementation of the compression library and is
subject to change. Applications should only use zlib.h.
*/
#ifndef _INFUTIL_H
#define _INFUTIL_H
#include <linux/zlib.h>
/* memory allocation for inflation */
struct inflate_workspace {
struct inflate_state inflate_state;
unsigned char working_window[1 << MAX_WBITS];
};
#define WS(z) ((struct inflate_workspace *)(z->workspace))
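/* The window alone is 1 << MAX_WBITS == 32768 bytes, so the workspace
   is allocated dynamically rather than on the stack; a typical caller
   does something like (sketch):

     strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);

   where zlib_inflate_workspacesize(), declared in linux/zlib.h,
   returns sizeof(struct inflate_workspace). */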
#endif