r640 - trunk/linux

jim at linuxfromscratch.org jim at linuxfromscratch.org
Mon Sep 13 19:26:55 PDT 2004


Author: jim
Date: 2004-09-13 20:26:53 -0600 (Mon, 13 Sep 2004)
New Revision: 640

Added:
   trunk/linux/linux-2.6.8.1-frandom-1.patch
Log:
Added: linux-2.6.8.1-frandom-1.patch

Added: trunk/linux/linux-2.6.8.1-frandom-1.patch
===================================================================
--- trunk/linux/linux-2.6.8.1-frandom-1.patch	2004-09-14 02:26:24 UTC (rev 639)
+++ trunk/linux/linux-2.6.8.1-frandom-1.patch	2004-09-14 02:26:53 UTC (rev 640)
@@ -0,0 +1,3871 @@
+Submitted By: Robert Connolly <robert at linuxfromscratch dot org> (ashes)
+Date: 2004-09-11
+Initial Package Version: 2.6.8.1
+Origin: http://frandom.sourceforge.net/ - frandom-0.8
+Description: This is useful for smashing stack protector, wiping
+discs, mktemp, and more.
+
+Thanks to Eli Billauer
+http://www.billauer.co.il/
+http://frandom.sourceforge.net/
+
+Also see:
+http://www.linuxfromscratch.org/hlfs/
+http://www.linuxfromscratch.org/hints/downloads/files/entropy.txt
+http://www.linuxfromscratch.org/~robert/hlfs/hints/attachments/entropy/\
+        frandom-0.8.tar.gz
+
+diff -Naur linux-2.6.8.1.orig/drivers/char/Kconfig linux-2.6.8.1.frandom/drivers/char/Kconfig
+--- linux-2.6.8.1.orig/drivers/char/Kconfig	2004-08-14 10:54:47.000000000 +0000
++++ linux-2.6.8.1.frandom/drivers/char/Kconfig	2004-09-11 05:02:11.000000000 +0000
+@@ -57,6 +57,19 @@
+ 
+ 	  If unsure, say Y.
+ 
++config FRANDOM
++	tristate "Fast random data generator suite (/dev/frandom and /dev/erandom)"
++	default y
++	---help---
++	Fast random data/number generator support in kernel. This random
++	generator is 10-50 times faster than /dev/urandom, and saves kernel
++	entropy.
++
++	If unsure, say Y unless you're tight on kernel size. This module is
++	small and harmless otherwise.
++
++	If you choose M, the sysctl interface will be disabled.
++
+ config HW_CONSOLE
+ 	bool
+ 	depends on VT && !S390 && !UM
+diff -Naur linux-2.6.8.1.orig/drivers/char/Makefile linux-2.6.8.1.frandom/drivers/char/Makefile
+--- linux-2.6.8.1.orig/drivers/char/Makefile	2004-08-14 10:56:22.000000000 +0000
++++ linux-2.6.8.1.frandom/drivers/char/Makefile	2004-09-11 05:02:11.000000000 +0000
+@@ -82,6 +82,8 @@
+ obj-$(CONFIG_PCMCIA) += pcmcia/
+ obj-$(CONFIG_IPMI_HANDLER) += ipmi/
+ 
++obj-$(CONFIG_FRANDOM) += frandom.o
++
+ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
+ 
+ # Files generated that shall be removed upon make clean
+diff -Naur linux-2.6.8.1.orig/drivers/char/frandom.c linux-2.6.8.1.frandom/drivers/char/frandom.c
+--- linux-2.6.8.1.orig/drivers/char/frandom.c	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.8.1.frandom/drivers/char/frandom.c	2004-09-11 05:02:11.000000000 +0000
+@@ -0,0 +1,362 @@
++/*
++** frandom.c
++**      Fast pseudo-random generator 
++**
++**      (c) Copyright 2003 Eli Billauer
++**      http://www.billauer.co.il
++**
++** This program is free software; you can redistribute it and/or modify
++** it under the terms of the GNU General Public License as published by
++** the Free Software Foundation; either version 2 of the License, or
++** (at your option) any later version.
++**
++** Usage: mknod /dev/frandom c 235 11
++**        mknod /dev/erandom c 235 12
++**        insmod frandom
++**
++** This code is highly based upon the examples given in the book "Linux
++** Device Drivers" by Alessandro Rubini and Jonathan Corbet, published
++** by O'Reilly & Associates.
++** O'Reilly's release of this book on the web for free is highly
++** appreciated.
++**
++*/
++
++#include <linux/version.h>
++#include <linux/config.h>
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/slab.h> 
++#include <linux/fs.h> 
++#include <linux/errno.h>
++#include <linux/types.h> 
++#include <linux/random.h>
++
++#include <asm/uaccess.h>
++
++#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,0))
++#include <linux/moduleparam.h>
++#endif
++
++#define INTERNAL_SEED 0
++#define EXTERNAL_SEED 1
++
++#define FRANDOM_MAJOR 235
++#define FRANDOM_MINOR 11 
++#define ERANDOM_MINOR 12 
++
++static struct file_operations frandom_fops; /* Values assigned below */
++
++static int erandom_seeded = 0; /* Internal flag */
++
++static int frandom_major = FRANDOM_MAJOR;
++static int frandom_minor = FRANDOM_MINOR;
++static int erandom_minor = ERANDOM_MINOR;
++static int frandom_bufsize = 256;
++static int frandom_chunklimit = 0; /* =0 means unlimited */
++
++MODULE_DESCRIPTION("Fast pseudo-random number generator");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eli Billauer");
++MODULE_PARM(frandom_major,"i");
++MODULE_PARM_DESC(frandom_major,"Major number of /dev/frandom and /dev/erandom");
++MODULE_PARM(frandom_minor,"i");
++MODULE_PARM_DESC(frandom_minor,"Minor number of /dev/frandom");
++MODULE_PARM(erandom_minor,"i");
++MODULE_PARM_DESC(erandom_minor,"Minor number of /dev/erandom");
++MODULE_PARM(frandom_bufsize,"i");
++MODULE_PARM_DESC(frandom_bufsize,"Internal buffer size in bytes. Default is 256. Must be >= 256");
++MODULE_PARM(frandom_chunklimit,"i");
++MODULE_PARM_DESC(frandom_chunklimit,"Limit for read() blocks size. 0 (default) is unlimited, otherwise must be >= 256");
++
++struct frandom_state
++{
++	struct semaphore sem; /* Semaphore on the state structure */
++
++	u8 S[256]; /* The state array */
++	u8 i;        
++	u8 j;
++
++	char *buf;
++};
++
++static struct frandom_state *erandom_state;
++
++static inline void swap_byte(u8 *a, u8 *b)
++{
++	u8 swapByte; 
++  
++	swapByte = *a; 
++	*a = *b;      
++	*b = swapByte;
++}
++
++static void init_rand_state(struct frandom_state *state, int seedflag);
++
++void erandom_get_random_bytes(char *buf, size_t count)
++{
++	struct frandom_state *state = erandom_state;
++	int k;
++
++	unsigned int i;
++	unsigned int j;
++	u8 *S;
++  
++	/* If we fail to get the semaphore, we revert to external random data.
++	   Since semaphore blocking is expected to be very rare, and interrupts
++	   during these rare and very short periods of time even less frequent,
++	   we take the better-safe-than-sorry approach, and fill the buffer
++	   with some expensive random data, in case the caller wasn't aware of this
++	   possibility, and expects random data anyhow.
++	*/
++
++	if (down_interruptible(&state->sem)) {
++		get_random_bytes(buf, count);
++		return;
++	}
++
++	/* We seed erandom as late as possible, hoping that the kernel's main
++	   RNG is already restored in the boot sequence (not critical, but
++	   better).
++	*/
++	
++	if (!erandom_seeded) {
++		erandom_seeded = 1;
++		init_rand_state(state, EXTERNAL_SEED);
++		printk(KERN_INFO "frandom: Seeded global generator now (used by erandom)\n");
++	}
++
++	i = state->i;     
++	j = state->j;
++	S = state->S;  
++
++	for (k=0; k<count; k++) {
++		i = (i + 1) & 0xff;
++		j = (j + S[i]) & 0xff;
++		swap_byte(&S[i], &S[j]);
++		*buf++ = S[(S[i] + S[j]) & 0xff];
++	}
++ 
++	state->i = i;     
++	state->j = j;
++
++	up(&state->sem);
++}
++
++static void init_rand_state(struct frandom_state *state, int seedflag)
++{
++	unsigned int i, j, k;
++	u8 *S;
++	u8 *seed = state->buf;
++
++	if (seedflag == INTERNAL_SEED)
++		erandom_get_random_bytes(seed, 256);
++	else
++		get_random_bytes(seed, 256);
++
++	S = state->S;
++	for (i=0; i<256; i++)
++		*S++=i;
++
++	j=0;
++	S = state->S;
++
++	for (i=0; i<256; i++) {
++		j = (j + S[i] + *seed++) & 0xff;
++		swap_byte(&S[i], &S[j]);
++	}
++
++	/* It's considered good practice to discard the first 256 bytes
++	   generated. So we do it:
++	*/
++
++	i=0; j=0;
++	for (k=0; k<256; k++) {
++		i = (i + 1) & 0xff;
++		j = (j + S[i]) & 0xff;
++		swap_byte(&S[i], &S[j]);
++	}
++
++	state->i = i; /* Save state */
++	state->j = j;
++}
++
++static int frandom_open(struct inode *inode, struct file *filp)
++{
++  
++	struct frandom_state *state;
++
++	int num =MINOR(inode->i_rdev);
++	if ((num != frandom_minor) && (num != erandom_minor)) return -ENODEV;
++  
++	state = kmalloc(sizeof(struct frandom_state), GFP_KERNEL);
++	if (!state)
++		return -ENOMEM;
++
++	state->buf = kmalloc(frandom_bufsize, GFP_KERNEL);
++	if (!state->buf) {
++		kfree(state);
++		return -ENOMEM;
++	}
++
++	sema_init(&state->sem, 1); /* Init semaphore as a mutex */
++
++	if (num == frandom_minor)
++		init_rand_state(state, EXTERNAL_SEED);
++	else
++		init_rand_state(state, INTERNAL_SEED);
++
++	filp->private_data = state;
++
++#if (LINUX_VERSION_CODE<KERNEL_VERSION(2,4,0))
++	MOD_INC_USE_COUNT; 
++#endif
++  
++	return 0; /* Success */
++}
++
++static int frandom_release(struct inode *inode, struct file *filp)
++{
++
++	struct frandom_state *state = filp->private_data;
++
++	kfree(state->buf);
++	kfree(state);
++  
++#if (LINUX_VERSION_CODE<KERNEL_VERSION(2,4,0))
++	MOD_DEC_USE_COUNT;
++#endif
++
++	return 0;
++}
++
++static ssize_t frandom_read(struct file *filp, char *buf, size_t count,
++			    loff_t *f_pos)
++{
++	struct frandom_state *state = filp->private_data;
++	ssize_t ret;
++	int dobytes, k;
++	char *localbuf;
++
++	unsigned int i;
++	unsigned int j;
++	u8 *S;
++  
++	if (down_interruptible(&state->sem))
++		return -ERESTARTSYS;
++  
++	if ((frandom_chunklimit > 0) && (count > frandom_chunklimit))
++		count = frandom_chunklimit;
++
++	ret = count; /* It's either everything or an error... */
++  
++	i = state->i;     
++	j = state->j;
++	S = state->S;  
++
++	while (count) {
++		if (count > frandom_bufsize)
++			dobytes = frandom_bufsize;
++		else
++			dobytes = count;
++
++		localbuf = state->buf;
++
++		for (k=0; k<dobytes; k++) {
++			i = (i + 1) & 0xff;
++			j = (j + S[i]) & 0xff;
++			swap_byte(&S[i], &S[j]);
++			*localbuf++ = S[(S[i] + S[j]) & 0xff];
++		}
++ 
++		if (copy_to_user(buf, state->buf, dobytes)) {
++			ret = -EFAULT;
++			goto out;
++		}
++
++		buf += dobytes;
++		count -= dobytes;
++	}
++
++ out:
++	state->i = i;     
++	state->j = j;
++
++	up(&state->sem);
++	return ret;
++}
++
++static struct file_operations frandom_fops = {
++	read:       frandom_read,
++	open:       frandom_open,
++	release:    frandom_release,
++};
++
++static void frandom_cleanup_module(void) {
++	kfree(erandom_state->buf);
++	kfree(erandom_state);
++ 
++	unregister_chrdev(frandom_major, "frandom");
++}
++
++
++static int frandom_init_module(void)
++{
++	int result;
++
++	/* The buffer size MUST be at least 256 bytes, because we assume that
++	   minimal length in init_rand_state().
++	*/       
++	if (frandom_bufsize < 256) {
++		printk(KERN_ERR "frandom: Refused to load because frandom_bufsize=%d < 256\n",frandom_bufsize);
++		return -EINVAL;
++	}
++	if ((frandom_chunklimit != 0) && (frandom_chunklimit < 256)) {
++		printk(KERN_ERR "frandom: Refused to load because frandom_chunklimit=%d < 256 and != 0\n",frandom_chunklimit);
++		return -EINVAL;
++	}
++
++	erandom_state = kmalloc(sizeof(struct frandom_state), GFP_KERNEL);
++	if (!erandom_state)
++		return -ENOMEM;
++
++	/* This specific buffer is only used for seeding, so we need
++	   256 bytes exactly */
++	erandom_state->buf = kmalloc(256, GFP_KERNEL);
++	if (!erandom_state->buf) {
++		kfree(erandom_state);
++		return -ENOMEM;
++	}
++
++	sema_init(&erandom_state->sem, 1); /* Init semaphore as a mutex */
++
++	erandom_seeded = 0;
++
++#ifdef SET_MODULE_OWNER
++	SET_MODULE_OWNER(&frandom_fops);
++#endif
++	/*
++	 * Register your major, and accept a dynamic number. This is the
++	 * first thing to do, in order to avoid releasing other module's
++	 * fops in frandom_cleanup_module()
++	 */
++	result = register_chrdev(frandom_major, "frandom", &frandom_fops);
++	if (result < 0) {
++		printk(KERN_WARNING "frandom: can't get major %d\n",frandom_major);
++
++		kfree(erandom_state->buf);
++		kfree(erandom_state);
++	
++		return result;
++	}
++	if (frandom_major == 0) frandom_major = result; /* dynamic */
++    
++	return 0; /* succeed */
++}
++
++module_init(frandom_init_module);
++module_exit(frandom_cleanup_module);
++
++EXPORT_SYMBOL(erandom_get_random_bytes);
+diff -Naur linux-2.6.8.1.orig/drivers/char/random.c linux-2.6.8.1.frandom/drivers/char/random.c
+--- linux-2.6.8.1.orig/drivers/char/random.c	2004-08-14 10:54:48.000000000 +0000
++++ linux-2.6.8.1.frandom/drivers/char/random.c	2004-09-11 05:02:56.000000000 +0000
+@@ -2005,6 +2005,59 @@
+ 	return 1;
+ }
+ 
++#ifdef CONFIG_FRANDOM
++/* We don't really want to create a header file for frandom
++   at this stage, so here's the prototype: */
++
++void erandom_get_random_bytes(char *buf, size_t count);
++
++static int proc_do_erandom(ctl_table *table, int write, struct file *filp,
++                       void *buffer, size_t *lenp, loff_t *ppos)
++{
++       ctl_table       fake_table;
++       unsigned char   buf[64], random[16], *p;
++       int i;
++
++       erandom_get_random_bytes(random, 16);
++
++       p=buf;
++
++       for (i=0; i<16; i++) {
++               sprintf(p, "%02x", random[i]);
++               p+=2;
++       }
++
++       fake_table.data = buf;
++       fake_table.maxlen = sizeof(buf);
++
++       return proc_dostring(&fake_table, write, filp, buffer, lenp, ppos);
++}
++
++static int erandom_strategy(ctl_table *table, int *name, int nlen,
++                        void *oldval, size_t *oldlenp,
++                        void *newval, size_t newlen, void **context)
++{
++       unsigned char   random[16];
++       unsigned int    len;
++
++       if (!oldval || !oldlenp)
++               return 1;
++
++       erandom_get_random_bytes(random, 16);
++
++       if (get_user(len, oldlenp))
++               return -EFAULT;
++       if (len) {
++               if (len > 16)
++                       len = 16;
++               if (copy_to_user(oldval, random, len) ||
++                   put_user(len, oldlenp))
++                       return -EFAULT;
++       }
++       return 1;
++}
++#endif
++
+ ctl_table random_table[] = {
+ 	{
+ 		.ctl_name	= RANDOM_POOLSIZE,
+@@ -2061,6 +2114,16 @@
+ 		.proc_handler	= &proc_do_uuid,
+ 		.strategy	= &uuid_strategy,
+ 	},
++#ifdef CONFIG_FRANDOM
++	{
++		.ctl_name	= RANDOM_ERANDOM,
++		.procname	= "erandom",
++		.maxlen		= 16,
++		.mode		= 0444,
++		.proc_handler	= &proc_do_erandom,
++		.strategy	= &erandom_strategy,
++	},
++#endif
+ 	{ .ctl_name = 0 }
+ };
+ 
+diff -Naur linux-2.6.8.1.orig/drivers/char/random.c~ linux-2.6.8.1.frandom/drivers/char/random.c~
+--- linux-2.6.8.1.orig/drivers/char/random.c~	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.8.1.frandom/drivers/char/random.c~	2004-09-11 05:00:17.000000000 +0000
+@@ -0,0 +1,2485 @@
++/*
++ * random.c -- A strong random number generator
++ *
++ * Version 1.89, last modified 19-Sep-99
++ * 
++ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
++ * rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, and the entire permission notice in its entirety,
++ *    including the disclaimer of warranties.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote
++ *    products derived from this software without specific prior
++ *    written permission.
++ * 
++ * ALTERNATIVELY, this product may be distributed under the terms of
++ * the GNU General Public License, in which case the provisions of the GPL are
++ * required INSTEAD OF the above restrictions.  (This clause is
++ * necessary due to a potential bad interaction between the GPL and
++ * the restrictions contained in a BSD-style copyright.)
++ * 
++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
++ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
++ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
++ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
++ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
++ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ */
++
++/*
++ * (now, with legal B.S. out of the way.....) 
++ * 
++ * This routine gathers environmental noise from device drivers, etc.,
++ * and returns good random numbers, suitable for cryptographic use.
++ * Besides the obvious cryptographic uses, these numbers are also good
++ * for seeding TCP sequence numbers, and other places where it is
++ * desirable to have numbers which are not only random, but hard to
++ * predict by an attacker.
++ *
++ * Theory of operation
++ * ===================
++ * 
++ * Computers are very predictable devices.  Hence it is extremely hard
++ * to produce truly random numbers on a computer --- as opposed to
++ * pseudo-random numbers, which can easily generated by using a
++ * algorithm.  Unfortunately, it is very easy for attackers to guess
++ * the sequence of pseudo-random number generators, and for some
++ * applications this is not acceptable.  So instead, we must try to
++ * gather "environmental noise" from the computer's environment, which
++ * must be hard for outside attackers to observe, and use that to
++ * generate random numbers.  In a Unix environment, this is best done
++ * from inside the kernel.
++ * 
++ * Sources of randomness from the environment include inter-keyboard
++ * timings, inter-interrupt timings from some interrupts, and other
++ * events which are both (a) non-deterministic and (b) hard for an
++ * outside observer to measure.  Randomness from these sources are
++ * added to an "entropy pool", which is mixed using a CRC-like function.
++ * This is not cryptographically strong, but it is adequate assuming
++ * the randomness is not chosen maliciously, and it is fast enough that
++ * the overhead of doing it on every interrupt is very reasonable.
++ * As random bytes are mixed into the entropy pool, the routines keep
++ * an *estimate* of how many bits of randomness have been stored into
++ * the random number generator's internal state.
++ * 
++ * When random bytes are desired, they are obtained by taking the SHA
++ * hash of the contents of the "entropy pool".  The SHA hash avoids
++ * exposing the internal state of the entropy pool.  It is believed to
++ * be computationally infeasible to derive any useful information
++ * about the input of SHA from its output.  Even if it is possible to
++ * analyze SHA in some clever way, as long as the amount of data
++ * returned from the generator is less than the inherent entropy in
++ * the pool, the output data is totally unpredictable.  For this
++ * reason, the routine decreases its internal estimate of how many
++ * bits of "true randomness" are contained in the entropy pool as it
++ * outputs random numbers.
++ * 
++ * If this estimate goes to zero, the routine can still generate
++ * random numbers; however, an attacker may (at least in theory) be
++ * able to infer the future output of the generator from prior
++ * outputs.  This requires successful cryptanalysis of SHA, which is
++ * not believed to be feasible, but there is a remote possibility.
++ * Nonetheless, these numbers should be useful for the vast majority
++ * of purposes.
++ * 
++ * Exported interfaces ---- output
++ * ===============================
++ * 
++ * There are three exported interfaces; the first is one designed to
++ * be used from within the kernel:
++ *
++ * 	void get_random_bytes(void *buf, int nbytes);
++ *
++ * This interface will return the requested number of random bytes,
++ * and place it in the requested buffer.
++ * 
++ * The two other interfaces are two character devices /dev/random and
++ * /dev/urandom.  /dev/random is suitable for use when very high
++ * quality randomness is desired (for example, for key generation or
++ * one-time pads), as it will only return a maximum of the number of
++ * bits of randomness (as estimated by the random number generator)
++ * contained in the entropy pool.
++ * 
++ * The /dev/urandom device does not have this limit, and will return
++ * as many bytes as are requested.  As more and more random bytes are
++ * requested without giving time for the entropy pool to recharge,
++ * this will result in random numbers that are merely cryptographically
++ * strong.  For many applications, however, this is acceptable.
++ *
++ * Exported interfaces ---- input
++ * ==============================
++ * 
++ * The current exported interfaces for gathering environmental noise
++ * from the devices are:
++ * 
++ * 	void add_keyboard_randomness(unsigned char scancode);
++ * 	void add_mouse_randomness(__u32 mouse_data);
++ * 	void add_interrupt_randomness(int irq);
++ * 
++ * add_keyboard_randomness() uses the inter-keypress timing, as well as the
++ * scancode as random inputs into the "entropy pool".
++ * 
++ * add_mouse_randomness() uses the mouse interrupt timing, as well as
++ * the reported position of the mouse from the hardware.
++ *
++ * add_interrupt_randomness() uses the inter-interrupt timing as random
++ * inputs to the entropy pool.  Note that not all interrupts are good
++ * sources of randomness!  For example, the timer interrupts is not a
++ * good choice, because the periodicity of the interrupts is too
++ * regular, and hence predictable to an attacker.  Disk interrupts are
++ * a better measure, since the timing of the disk interrupts are more
++ * unpredictable.
++ * 
++ * All of these routines try to estimate how many bits of randomness a
++ * particular randomness source.  They do this by keeping track of the
++ * first and second order deltas of the event timings.
++ *
++ * Ensuring unpredictability at system startup
++ * ============================================
++ * 
++ * When any operating system starts up, it will go through a sequence
++ * of actions that are fairly predictable by an adversary, especially
++ * if the start-up does not involve interaction with a human operator.
++ * This reduces the actual number of bits of unpredictability in the
++ * entropy pool below the value in entropy_count.  In order to
++ * counteract this effect, it helps to carry information in the
++ * entropy pool across shut-downs and start-ups.  To do this, put the
++ * following lines an appropriate script which is run during the boot
++ * sequence: 
++ *
++ *	echo "Initializing random number generator..."
++ *	random_seed=/var/run/random-seed
++ *	# Carry a random seed from start-up to start-up
++ *	# Load and then save the whole entropy pool
++ *	if [ -f $random_seed ]; then
++ *		cat $random_seed >/dev/urandom
++ *	else
++ *		touch $random_seed
++ *	fi
++ *	chmod 600 $random_seed
++ *	poolfile=/proc/sys/kernel/random/poolsize
++ *	[ -r $poolfile ] && bytes=`cat $poolfile` || bytes=512
++ *	dd if=/dev/urandom of=$random_seed count=1 bs=$bytes
++ *
++ * and the following lines in an appropriate script which is run as
++ * the system is shutdown:
++ *
++ *	# Carry a random seed from shut-down to start-up
++ *	# Save the whole entropy pool
++ *	echo "Saving random seed..."
++ *	random_seed=/var/run/random-seed
++ *	touch $random_seed
++ *	chmod 600 $random_seed
++ *	poolfile=/proc/sys/kernel/random/poolsize
++ *	[ -r $poolfile ] && bytes=`cat $poolfile` || bytes=512
++ *	dd if=/dev/urandom of=$random_seed count=1 bs=$bytes
++ *
++ * For example, on most modern systems using the System V init
++ * scripts, such code fragments would be found in
++ * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
++ * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
++ * 
++ * Effectively, these commands cause the contents of the entropy pool
++ * to be saved at shut-down time and reloaded into the entropy pool at
++ * start-up.  (The 'dd' in the addition to the bootup script is to
++ * make sure that /etc/random-seed is different for every start-up,
++ * even if the system crashes without executing rc.0.)  Even with
++ * complete knowledge of the start-up activities, predicting the state
++ * of the entropy pool requires knowledge of the previous history of
++ * the system.
++ *
++ * Configuring the /dev/random driver under Linux
++ * ==============================================
++ *
++ * The /dev/random driver under Linux uses minor numbers 8 and 9 of
++ * the /dev/mem major number (#1).  So if your system does not have
++ * /dev/random and /dev/urandom created already, they can be created
++ * by using the commands:
++ *
++ * 	mknod /dev/random c 1 8
++ * 	mknod /dev/urandom c 1 9
++ * 
++ * Acknowledgements:
++ * =================
++ *
++ * Ideas for constructing this random number generator were derived
++ * from Pretty Good Privacy's random number generator, and from private
++ * discussions with Phil Karn.  Colin Plumb provided a faster random
++ * number generator, which speed up the mixing function of the entropy
++ * pool, taken from PGPfone.  Dale Worley has also contributed many
++ * useful ideas and suggestions to improve this driver.
++ * 
++ * Any flaws in the design are solely my responsibility, and should
++ * not be attributed to the Phil, Colin, or any of authors of PGP.
++ * 
++ * The code for SHA transform was taken from Peter Gutmann's
++ * implementation, which has been placed in the public domain.
++ * The code for MD5 transform was taken from Colin Plumb's
++ * implementation, which has been placed in the public domain.
++ * The MD5 cryptographic checksum was devised by Ronald Rivest, and is
++ * documented in RFC 1321, "The MD5 Message Digest Algorithm".
++ * 
++ * Further background information on this topic may be obtained from
++ * RFC 1750, "Randomness Recommendations for Security", by Donald
++ * Eastlake, Steve Crocker, and Jeff Schiller.
++ */
++
++#include <linux/utsname.h>
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/major.h>
++#include <linux/string.h>
++#include <linux/fcntl.h>
++#include <linux/slab.h>
++#include <linux/random.h>
++#include <linux/poll.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/workqueue.h>
++#include <linux/genhd.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/percpu.h>
++
++#include <asm/processor.h>
++#include <asm/uaccess.h>
++#include <asm/irq.h>
++#include <asm/io.h>
++
++/*
++ * Configuration information
++ */
++#define DEFAULT_POOL_SIZE 512
++#define SECONDARY_POOL_SIZE 128
++#define BATCH_ENTROPY_SIZE 256
++#define USE_SHA
++
++/*
++ * The minimum number of bits of entropy before we wake up a read on
++ * /dev/random.  Should be enough to do a significant reseed.
++ */
++static int random_read_wakeup_thresh = 64;
++
++/*
++ * If the entropy count falls under this number of bits, then we
++ * should wake up processes which are selecting or polling on write
++ * access to /dev/random.
++ */
++static int random_write_wakeup_thresh = 128;
++
++/*
++ * When the input pool goes over trickle_thresh, start dropping most
++ * samples to avoid wasting CPU time and reduce lock contention.
++ */
++
++static int trickle_thresh = DEFAULT_POOL_SIZE * 7;
++
++static DEFINE_PER_CPU(int, trickle_count) = 0;
++
++/*
++ * A pool of size .poolwords is stirred with a primitive polynomial
++ * of degree .poolwords over GF(2).  The taps for various sizes are
++ * defined below.  They are chosen to be evenly spaced (minimum RMS
++ * distance from evenly spaced; the numbers in the comments are a
++ * scaled squared error sum) except for the last tap, which is 1 to
++ * get the twisting happening as fast as possible.
++ */
++static struct poolinfo {
++	int	poolwords;
++	int	tap1, tap2, tap3, tap4, tap5;
++} poolinfo_table[] = {
++	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
++	{ 2048,	1638,	1231,	819,	411,	1 },
++
++	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
++	{ 1024,	817,	615,	412,	204,	1 },
++#if 0				/* Alternate polynomial */
++	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
++	{ 1024,	819,	616,	410,	207,	2 },
++#endif
++
++	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
++	{ 512,	411,	308,	208,	104,	1 },
++#if 0				/* Alternates */
++	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
++	{ 512,	409,	307,	206,	102,	2 },
++	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
++	{ 512,	409,	309,	205,	103,	2 },
++#endif
++
++	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
++	{ 256,	205,	155,	101,	52,	1 },
++
++	/* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
++	{ 128,	103,	76,	51,	25,	1 },
++#if 0	/* Alternate polynomial */
++	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
++	{ 128,	103,	78,	51,	27,	2 },
++#endif
++
++	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
++	{ 64,	52,	39,	26,	14,	1 },
++
++	/* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
++	{ 32,	26,	20,	14,	7,	1 },
++
++	{ 0,	0,	0,	0,	0,	0 },
++};
++
++#define POOLBITS	poolwords*32
++#define POOLBYTES	poolwords*4
++
++/*
++ * For the purposes of better mixing, we use the CRC-32 polynomial as
++ * well to make a twisted Generalized Feedback Shift Register
++ *
++ * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
++ * Transactions on Modeling and Computer Simulation 2(3):179-194.
++ * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
++ * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
++ *
++ * Thanks to Colin Plumb for suggesting this.
++ * 
++ * We have not analyzed the resultant polynomial to prove it primitive;
++ * in fact it almost certainly isn't.  Nonetheless, the irreducible factors
++ * of a random large-degree polynomial over GF(2) are more than large enough
++ * that periodicity is not a concern.
++ * 
++ * The input hash is much less sensitive than the output hash.  All
++ * that we want of it is that it be a good non-cryptographic hash;
++ * i.e. it not produce collisions when fed "random" data of the sort
++ * we expect to see.  As long as the pool state differs for different
++ * inputs, we have preserved the input entropy and done a good job.
++ * The fact that an intelligent attacker can construct inputs that
++ * will produce controlled alterations to the pool's state is not
++ * important because we don't consider such inputs to contribute any
++ * randomness.  The only property we need with respect to them is that
++ * the attacker can't increase his/her knowledge of the pool's state.
++ * Since all additions are reversible (knowing the final state and the
++ * input, you can reconstruct the initial state), if an attacker has
++ * any uncertainty about the initial state, he/she can only shuffle
++ * that uncertainty about, but never cause any collisions (which would
++ * decrease the uncertainty).
++ *
++ * The chosen system lets the state of the pool be (essentially) the input
++ * modulo the generator polynomial.  Now, for random primitive polynomials,
++ * this is a universal class of hash functions, meaning that the chance
++ * of a collision is limited by the attacker's knowledge of the generator
++ * polynomial, so if it is chosen at random, an attacker can never force
++ * a collision.  Here, we use a fixed polynomial, but we *can* assume that
++ * ###--> it is unknown to the processes generating the input entropy. <-###
++ * Because of this important property, this is a good, collision-resistant
++ * hash; hash collisions will occur no more often than chance.
++ */
++
++/*
++ * Linux 2.2 compatibility
++ */
++#ifndef DECLARE_WAITQUEUE
++#define DECLARE_WAITQUEUE(WAIT, PTR)	struct wait_queue WAIT = { PTR, NULL }
++#endif
++#ifndef DECLARE_WAIT_QUEUE_HEAD
++#define DECLARE_WAIT_QUEUE_HEAD(WAIT) struct wait_queue *WAIT
++#endif
++
++/*
++ * Static global variables
++ */
++static struct entropy_store *random_state; /* The default global store */
++static struct entropy_store *sec_random_state; /* secondary store */
++static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
++static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
++
++/*
++ * Forward procedure declarations
++ */
++#ifdef CONFIG_SYSCTL
++static void sysctl_init_random(struct entropy_store *random_state);
++#endif
++
++/*****************************************************************
++ *
++ * Utility functions, with some ASM defined functions for speed
++ * purposes
++ * 
++ *****************************************************************/
++
++/*
++ * Unfortunately, while the GCC optimizer for the i386 understands how
++ * to optimize a static rotate left of x bits, it doesn't know how to
++ * deal with a variable rotate of x bits.  So we use a bit of asm magic.
++ */
++#if (!defined (__i386__))
++static inline __u32 rotate_left(int i, __u32 word)
++{
++	return (word << i) | (word >> (32 - i));
++	
++}
++#else
++static inline __u32 rotate_left(int i, __u32 word)
++{
++	__asm__("roll %%cl,%0"
++		:"=r" (word)
++		:"0" (word),"c" (i));
++	return word;
++}
++#endif
++
++/*
++ * More asm magic....
++ * 
++ * For entropy estimation, we need to do an integral base 2
++ * logarithm.  
++ *
++ * Note the "12bits" suffix - this is used for numbers between
++ * 0 and 4095 only.  This allows a few shortcuts.
++ */
++#if 0	/* Slow but clear version */
++static inline __u32 int_ln_12bits(__u32 word)
++{
++	__u32 nbits = 0;
++	
++	while (word >>= 1)
++		nbits++;
++	return nbits;
++}
++#else	/* Faster (more clever) version, courtesy Colin Plumb */
++static inline __u32 int_ln_12bits(__u32 word)
++{
++	/* Smear msbit right to make an n-bit mask */
++	word |= word >> 8;
++	word |= word >> 4;
++	word |= word >> 2;
++	word |= word >> 1;
++	/* Remove one bit to make this a logarithm */
++	word >>= 1;
++	/* Count the bits set in the word */
++	word -= (word >> 1) & 0x555;
++	word = (word & 0x333) + ((word >> 2) & 0x333);
++	word += (word >> 4);
++	word += (word >> 8);
++	return word & 15;
++}
++#endif
++
++#if 0
++#define DEBUG_ENT(fmt, arg...) printk(KERN_DEBUG "random: " fmt, ## arg)
++#else
++#define DEBUG_ENT(fmt, arg...) do {} while (0)
++#endif
++
++/**********************************************************************
++ *
++ * OS independent entropy store.   Here are the functions which handle
++ * storing entropy in an entropy pool.
++ * 
++ **********************************************************************/
++
++struct entropy_store {
++	/* mostly-read data: */
++	struct poolinfo poolinfo;
++	__u32		*pool;
++
++	/* read-write data: */
++	spinlock_t lock ____cacheline_aligned_in_smp;
++	unsigned	add_ptr;
++	int		entropy_count;
++	int		input_rotate;
++};
++
++/*
++ * Initialize the entropy store.  The input argument is the size of
++ * the random pool.
++ *
++ * Returns an negative error if there is a problem.
++ */
++static int create_entropy_store(int size, struct entropy_store **ret_bucket)
++{
++	struct	entropy_store	*r;
++	struct	poolinfo	*p;
++	int	poolwords;
++
++	poolwords = (size + 3) / 4; /* Convert bytes->words */
++	/* The pool size must be a multiple of 16 32-bit words */
++	poolwords = ((poolwords + 15) / 16) * 16;
++
++	for (p = poolinfo_table; p->poolwords; p++) {
++		if (poolwords == p->poolwords)
++			break;
++	}
++	if (p->poolwords == 0)
++		return -EINVAL;
++
++	r = kmalloc(sizeof(struct entropy_store), GFP_KERNEL);
++	if (!r)
++		return -ENOMEM;
++
++	memset (r, 0, sizeof(struct entropy_store));
++	r->poolinfo = *p;
++
++	r->pool = kmalloc(POOLBYTES, GFP_KERNEL);
++	if (!r->pool) {
++		kfree(r);
++		return -ENOMEM;
++	}
++	memset(r->pool, 0, POOLBYTES);
++	r->lock = SPIN_LOCK_UNLOCKED;
++	*ret_bucket = r;
++	return 0;
++}
++
++/* Clear the entropy pool and associated counters. */
++static void clear_entropy_store(struct entropy_store *r)
++{
++	r->add_ptr = 0;
++	r->entropy_count = 0;
++	r->input_rotate = 0;
++	memset(r->pool, 0, r->poolinfo.POOLBYTES);
++}
++#ifdef CONFIG_SYSCTL
++static void free_entropy_store(struct entropy_store *r)
++{
++	if (r->pool)
++		kfree(r->pool);
++	kfree(r);
++}
++#endif
++/*
++ * This function adds a byte into the entropy "pool".  It does not
++ * update the entropy estimate.  The caller should call
++ * credit_entropy_store if this is appropriate.
++ * 
++ * The pool is stirred with a primitive polynomial of the appropriate
++ * degree, and then twisted.  We twist by three bits at a time because
++ * it's cheap to do so and helps slightly in the expected case where
++ * the entropy is concentrated in the low-order bits.
++ */
++static void add_entropy_words(struct entropy_store *r, const __u32 *in,
++			      int nwords)
++{
++	static __u32 const twist_table[8] = {
++		         0, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
++		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
++	unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
++	int new_rotate, input_rotate;
++	int wordmask = r->poolinfo.poolwords - 1;
++	__u32 w, next_w;
++	unsigned long flags;
++
++	/* Taps are constant, so we can load them without holding r->lock.  */
++	tap1 = r->poolinfo.tap1;
++	tap2 = r->poolinfo.tap2;
++	tap3 = r->poolinfo.tap3;
++	tap4 = r->poolinfo.tap4;
++	tap5 = r->poolinfo.tap5;
++	next_w = *in++;
++
++	spin_lock_irqsave(&r->lock, flags);
++	prefetch_range(r->pool, wordmask);
++	input_rotate = r->input_rotate;
++	add_ptr = r->add_ptr;
++
++	while (nwords--) {
++		w = rotate_left(input_rotate, next_w);
++		if (nwords > 0)
++			next_w = *in++;
++		i = add_ptr = (add_ptr - 1) & wordmask;
++		/*
++		 * Normally, we add 7 bits of rotation to the pool.
++		 * At the beginning of the pool, add an extra 7 bits
++		 * rotation, so that successive passes spread the
++		 * input bits across the pool evenly.
++		 */
++		new_rotate = input_rotate + 14;
++		if (i)
++			new_rotate = input_rotate + 7;
++		input_rotate = new_rotate & 31;
++
++		/* XOR in the various taps */
++		w ^= r->pool[(i + tap1) & wordmask];
++		w ^= r->pool[(i + tap2) & wordmask];
++		w ^= r->pool[(i + tap3) & wordmask];
++		w ^= r->pool[(i + tap4) & wordmask];
++		w ^= r->pool[(i + tap5) & wordmask];
++		w ^= r->pool[i];
++		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
++	}
++
++	r->input_rotate = input_rotate;
++	r->add_ptr = add_ptr;
++
++	spin_unlock_irqrestore(&r->lock, flags);
++}
++
++/*
++ * Credit (or debit) the entropy store with n bits of entropy
++ */
++static void credit_entropy_store(struct entropy_store *r, int nbits)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&r->lock, flags);
++
++	if (r->entropy_count + nbits < 0) {
++		DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
++			  r->entropy_count, nbits);
++		r->entropy_count = 0;
++	} else if (r->entropy_count + nbits > r->poolinfo.POOLBITS) {
++		r->entropy_count = r->poolinfo.POOLBITS;
++	} else {
++		r->entropy_count += nbits;
++		if (nbits)
++			DEBUG_ENT("%04d %04d : added %d bits to %s\n",
++				  random_state->entropy_count,
++				  sec_random_state->entropy_count,
++				  nbits,
++				  r == sec_random_state ? "secondary" :
++				  r == random_state ? "primary" : "unknown");
++	}
++
++	spin_unlock_irqrestore(&r->lock, flags);
++}
++
++/**********************************************************************
++ *
++ * Entropy batch input management
++ *
++ * We batch entropy to be added to avoid increasing interrupt latency
++ *
++ **********************************************************************/
++
++struct sample {
++	__u32 data[2];
++	int credit;
++};
++
++static struct sample *batch_entropy_pool, *batch_entropy_copy;
++static int	batch_head, batch_tail;
++static spinlock_t batch_lock = SPIN_LOCK_UNLOCKED;
++
++static int	batch_max;
++static void batch_entropy_process(void *private_);
++static DECLARE_WORK(batch_work, batch_entropy_process, NULL);
++
++/* note: the size must be a power of 2 */
++static int __init batch_entropy_init(int size, struct entropy_store *r)
++{
++	batch_entropy_pool = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
++	if (!batch_entropy_pool)
++		return -1;
++	batch_entropy_copy = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
++	if (!batch_entropy_copy) {
++		kfree(batch_entropy_pool);
++		return -1;
++	}
++	batch_head = batch_tail = 0;
++	batch_work.data = r;
++	batch_max = size;
++	return 0;
++}
++
++/*
++ * Changes to the entropy data is put into a queue rather than being added to
++ * the entropy counts directly.  This is presumably to avoid doing heavy
++ * hashing calculations during an interrupt in add_timer_randomness().
++ * Instead, the entropy is only added to the pool by keventd.
++ */
++void batch_entropy_store(u32 a, u32 b, int num)
++{
++	int new;
++	unsigned long flags;
++
++	if (!batch_max)
++		return;
++
++	spin_lock_irqsave(&batch_lock, flags);
++
++	batch_entropy_pool[batch_head].data[0] = a;
++	batch_entropy_pool[batch_head].data[1] = b;
++	batch_entropy_pool[batch_head].credit = num;
++
++	if (((batch_head - batch_tail) & (batch_max-1)) >= (batch_max / 2)) {
++		/*
++		 * Schedule it for the next timer tick:
++		 */
++		schedule_delayed_work(&batch_work, 1);
++	}
++
++	new = (batch_head+1) & (batch_max-1);
++	if (new == batch_tail) {
++		DEBUG_ENT("batch entropy buffer full\n");
++	} else {
++		batch_head = new;
++	}
++
++	spin_unlock_irqrestore(&batch_lock, flags);
++}
++
++EXPORT_SYMBOL(batch_entropy_store);
++
++/*
++ * Flush out the accumulated entropy operations, adding entropy to the passed
++ * store (normally random_state).  If that store has enough entropy, alternate
++ * between randomizing the data of the primary and secondary stores.
++ */
++static void batch_entropy_process(void *private_)
++{
++	struct entropy_store *r	= (struct entropy_store *) private_, *p;
++	int max_entropy = r->poolinfo.POOLBITS;
++	unsigned head, tail;
++
++	/* Mixing into the pool is expensive, so copy over the batch
++	 * data and release the batch lock. The pool is at least half
++	 * full, so don't worry too much about copying only the used
++	 * part.
++	 */
++	spin_lock_irq(&batch_lock);
++
++	memcpy(batch_entropy_copy, batch_entropy_pool,
++	       batch_max*sizeof(struct sample));
++
++	head = batch_head;
++	tail = batch_tail;
++	batch_tail = batch_head;
++
++	spin_unlock_irq(&batch_lock);
++
++	p = r;
++	while (head != tail) {
++		if (r->entropy_count >= max_entropy) {
++			r = (r == sec_random_state) ?	random_state :
++							sec_random_state;
++			max_entropy = r->poolinfo.POOLBITS;
++		}
++		add_entropy_words(r, batch_entropy_copy[tail].data, 2);
++		credit_entropy_store(r, batch_entropy_copy[tail].credit);
++		tail = (tail+1) & (batch_max-1);
++	}
++	if (p->entropy_count >= random_read_wakeup_thresh)
++		wake_up_interruptible(&random_read_wait);
++}
++
++/*********************************************************************
++ *
++ * Entropy input management
++ *
++ *********************************************************************/
++
++/* There is one of these per entropy source */
++struct timer_rand_state {
++	__u32		last_time;
++	__s32		last_delta,last_delta2;
++	int		dont_count_entropy:1;
++};
++
++static struct timer_rand_state keyboard_timer_state;
++static struct timer_rand_state mouse_timer_state;
++static struct timer_rand_state extract_timer_state;
++static struct timer_rand_state *irq_timer_state[NR_IRQS];
++
++/*
++ * This function adds entropy to the entropy "pool" by using timing
++ * delays.  It uses the timer_rand_state structure to make an estimate
++ * of how many bits of entropy this call has added to the pool.
++ *
++ * The number "num" is also added to the pool - it should somehow describe
++ * the type of event which just happened.  This is currently 0-255 for
++ * keyboard scan codes, and 256 upwards for interrupts.
++ * On the i386, this is assumed to be at most 16 bits, and the high bits
++ * are used for a high-resolution timer.
++ *
++ */
++static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
++{
++	__u32		time;
++	__s32		delta, delta2, delta3;
++	int		entropy = 0;
++
++	/* if over the trickle threshold, use only 1 in 4096 samples */
++	if ( random_state->entropy_count > trickle_thresh &&
++	     (__get_cpu_var(trickle_count)++ & 0xfff))
++		return;
++
++#if defined (__i386__) || defined (__x86_64__)
++	if (cpu_has_tsc) {
++		__u32 high;
++		rdtsc(time, high);
++		num ^= high;
++	} else {
++		time = jiffies;
++	}
++#elif defined (__sparc_v9__)
++	unsigned long tick = tick_ops->get_tick();
++
++	time = (unsigned int) tick;
++	num ^= (tick >> 32UL);
++#else
++	time = jiffies;
++#endif
++
++	/*
++	 * Calculate number of bits of randomness we probably added.
++	 * We take into account the first, second and third-order deltas
++	 * in order to make our estimate.
++	 */
++	if (!state->dont_count_entropy) {
++		delta = time - state->last_time;
++		state->last_time = time;
++
++		delta2 = delta - state->last_delta;
++		state->last_delta = delta;
++
++		delta3 = delta2 - state->last_delta2;
++		state->last_delta2 = delta2;
++
++		if (delta < 0)
++			delta = -delta;
++		if (delta2 < 0)
++			delta2 = -delta2;
++		if (delta3 < 0)
++			delta3 = -delta3;
++		if (delta > delta2)
++			delta = delta2;
++		if (delta > delta3)
++			delta = delta3;
++
++		/*
++		 * delta is now minimum absolute delta.
++		 * Round down by 1 bit on general principles,
++		 * and limit entropy estimate to 12 bits.
++		 */
++		delta >>= 1;
++		delta &= (1 << 12) - 1;
++
++		entropy = int_ln_12bits(delta);
++	}
++	batch_entropy_store(num, time, entropy);
++}
++
++void add_keyboard_randomness(unsigned char scancode)
++{
++	static unsigned char last_scancode;
++	/* ignore autorepeat (multiple key down w/o key up) */
++	if (scancode != last_scancode) {
++		last_scancode = scancode;
++		add_timer_randomness(&keyboard_timer_state, scancode);
++	}
++}
++
++EXPORT_SYMBOL(add_keyboard_randomness);
++
++void add_mouse_randomness(__u32 mouse_data)
++{
++	add_timer_randomness(&mouse_timer_state, mouse_data);
++}
++
++EXPORT_SYMBOL(add_mouse_randomness);
++
++void add_interrupt_randomness(int irq)
++{
++	if (irq >= NR_IRQS || irq_timer_state[irq] == 0)
++		return;
++
++	add_timer_randomness(irq_timer_state[irq], 0x100+irq);
++}
++
++EXPORT_SYMBOL(add_interrupt_randomness);
++
++void add_disk_randomness(struct gendisk *disk)
++{
++	if (!disk || !disk->random)
++		return;
++	/* first major is 1, so we get >= 0x200 here */
++	add_timer_randomness(disk->random, 0x100+MKDEV(disk->major, disk->first_minor));
++}
++
++EXPORT_SYMBOL(add_disk_randomness);
++
++/******************************************************************
++ *
++ * Hash function definition
++ *
++ *******************************************************************/
++
++/*
++ * This chunk of code defines a function
++ * void HASH_TRANSFORM(__u32 digest[HASH_BUFFER_SIZE + HASH_EXTRA_SIZE],
++ * 		__u32 const data[16])
++ * 
++ * The function hashes the input data to produce a digest in the first
++ * HASH_BUFFER_SIZE words of the digest[] array, and uses HASH_EXTRA_SIZE
++ * more words for internal purposes.  (This buffer is exported so the
++ * caller can wipe it once rather than this code doing it each call,
++ * and tacking it onto the end of the digest[] array is the quick and
++ * dirty way of doing it.)
++ *
++ * It so happens that MD5 and SHA share most of the initial vector
++ * used to initialize the digest[] array before the first call:
++ * 1) 0x67452301
++ * 2) 0xefcdab89
++ * 3) 0x98badcfe
++ * 4) 0x10325476
++ * 5) 0xc3d2e1f0 (SHA only)
++ * 
++ * For /dev/random purposes, the length of the data being hashed is
++ * fixed in length, so appending a bit count in the usual way is not
++ * cryptographically necessary.
++ */
++
++#ifdef USE_SHA
++
++#define HASH_BUFFER_SIZE 5
++#define HASH_EXTRA_SIZE 80
++#define HASH_TRANSFORM SHATransform
++
++/* Various size/speed tradeoffs are available.  Choose 0..3. */
++#define SHA_CODE_SIZE 0
++
++/*
++ * SHA transform algorithm, taken from code written by Peter Gutmann,
++ * and placed in the public domain.
++ */
++
++/* The SHA f()-functions.  */
++
++#define f1(x,y,z)   ( z ^ (x & (y^z)) )		/* Rounds  0-19: x ? y : z */
++#define f2(x,y,z)   (x ^ y ^ z)			/* Rounds 20-39: XOR */
++#define f3(x,y,z)   ( (x & y) + (z & (x ^ y)) )	/* Rounds 40-59: majority */
++#define f4(x,y,z)   (x ^ y ^ z)			/* Rounds 60-79: XOR */
++
++/* The SHA Mysterious Constants */
++
++#define K1  0x5A827999L			/* Rounds  0-19: sqrt(2) * 2^30 */
++#define K2  0x6ED9EBA1L			/* Rounds 20-39: sqrt(3) * 2^30 */
++#define K3  0x8F1BBCDCL			/* Rounds 40-59: sqrt(5) * 2^30 */
++#define K4  0xCA62C1D6L			/* Rounds 60-79: sqrt(10) * 2^30 */
++
++#define ROTL(n,X)  ( ( ( X ) << n ) | ( ( X ) >> ( 32 - n ) ) )
++
++#define subRound(a, b, c, d, e, f, k, data) \
++    ( e += ROTL( 5, a ) + f( b, c, d ) + k + data, b = ROTL( 30, b ) )
++
++
++static void SHATransform(__u32 digest[85], __u32 const data[16])
++{
++    __u32 A, B, C, D, E;     /* Local vars */
++    __u32 TEMP;
++    int	i;
++#define W (digest + HASH_BUFFER_SIZE)	/* Expanded data array */
++
++    /*
++     * Do the preliminary expansion of 16 to 80 words.  Doing it
++     * out-of-line like this is faster than doing it in-line on
++     * register-starved machines like the x86, and not really any
++     * slower on real processors.
++     */
++    memcpy(W, data, 16*sizeof(__u32));
++    for (i = 0; i < 64; i++) {
++	    TEMP = W[i] ^ W[i+2] ^ W[i+8] ^ W[i+13];
++	    W[i+16] = ROTL(1, TEMP);
++    }
++
++    /* Set up first buffer and local data buffer */
++    A = digest[ 0 ];
++    B = digest[ 1 ];
++    C = digest[ 2 ];
++    D = digest[ 3 ];
++    E = digest[ 4 ];
++
++    /* Heavy mangling, in 4 sub-rounds of 20 iterations each. */
++#if SHA_CODE_SIZE == 0
++    /*
++     * Approximately 50% of the speed of the largest version, but
++     * takes up 1/16 the space.  Saves about 6k on an i386 kernel.
++     */
++    for (i = 0; i < 80; i++) {
++	if (i < 40) {
++	    if (i < 20)
++		TEMP = f1(B, C, D) + K1;
++	    else
++		TEMP = f2(B, C, D) + K2;
++	} else {
++	    if (i < 60)
++		TEMP = f3(B, C, D) + K3;
++	    else
++		TEMP = f4(B, C, D) + K4;
++	}
++	TEMP += ROTL(5, A) + E + W[i];
++	E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
++    }
++#elif SHA_CODE_SIZE == 1
++    for (i = 0; i < 20; i++) {
++	TEMP = f1(B, C, D) + K1 + ROTL(5, A) + E + W[i];
++	E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
++    }
++    for (; i < 40; i++) {
++	TEMP = f2(B, C, D) + K2 + ROTL(5, A) + E + W[i];
++	E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
++    }
++    for (; i < 60; i++) {
++	TEMP = f3(B, C, D) + K3 + ROTL(5, A) + E + W[i];
++	E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
++    }
++    for (; i < 80; i++) {
++	TEMP = f4(B, C, D) + K4 + ROTL(5, A) + E + W[i];
++	E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
++    }
++#elif SHA_CODE_SIZE == 2
++    for (i = 0; i < 20; i += 5) {
++	subRound( A, B, C, D, E, f1, K1, W[ i   ] );
++	subRound( E, A, B, C, D, f1, K1, W[ i+1 ] );
++	subRound( D, E, A, B, C, f1, K1, W[ i+2 ] );
++	subRound( C, D, E, A, B, f1, K1, W[ i+3 ] );
++	subRound( B, C, D, E, A, f1, K1, W[ i+4 ] );
++    }
++    for (; i < 40; i += 5) {
++	subRound( A, B, C, D, E, f2, K2, W[ i   ] );
++	subRound( E, A, B, C, D, f2, K2, W[ i+1 ] );
++	subRound( D, E, A, B, C, f2, K2, W[ i+2 ] );
++	subRound( C, D, E, A, B, f2, K2, W[ i+3 ] );
++	subRound( B, C, D, E, A, f2, K2, W[ i+4 ] );
++    }
++    for (; i < 60; i += 5) {
++	subRound( A, B, C, D, E, f3, K3, W[ i   ] );
++	subRound( E, A, B, C, D, f3, K3, W[ i+1 ] );
++	subRound( D, E, A, B, C, f3, K3, W[ i+2 ] );
++	subRound( C, D, E, A, B, f3, K3, W[ i+3 ] );
++	subRound( B, C, D, E, A, f3, K3, W[ i+4 ] );
++    }
++    for (; i < 80; i += 5) {
++	subRound( A, B, C, D, E, f4, K4, W[ i   ] );
++	subRound( E, A, B, C, D, f4, K4, W[ i+1 ] );
++	subRound( D, E, A, B, C, f4, K4, W[ i+2 ] );
++	subRound( C, D, E, A, B, f4, K4, W[ i+3 ] );
++	subRound( B, C, D, E, A, f4, K4, W[ i+4 ] );
++    }
++#elif SHA_CODE_SIZE == 3 /* Really large version */
++    subRound( A, B, C, D, E, f1, K1, W[  0 ] );
++    subRound( E, A, B, C, D, f1, K1, W[  1 ] );
++    subRound( D, E, A, B, C, f1, K1, W[  2 ] );
++    subRound( C, D, E, A, B, f1, K1, W[  3 ] );
++    subRound( B, C, D, E, A, f1, K1, W[  4 ] );
++    subRound( A, B, C, D, E, f1, K1, W[  5 ] );
++    subRound( E, A, B, C, D, f1, K1, W[  6 ] );
++    subRound( D, E, A, B, C, f1, K1, W[  7 ] );
++    subRound( C, D, E, A, B, f1, K1, W[  8 ] );
++    subRound( B, C, D, E, A, f1, K1, W[  9 ] );
++    subRound( A, B, C, D, E, f1, K1, W[ 10 ] );
++    subRound( E, A, B, C, D, f1, K1, W[ 11 ] );
++    subRound( D, E, A, B, C, f1, K1, W[ 12 ] );
++    subRound( C, D, E, A, B, f1, K1, W[ 13 ] );
++    subRound( B, C, D, E, A, f1, K1, W[ 14 ] );
++    subRound( A, B, C, D, E, f1, K1, W[ 15 ] );
++    subRound( E, A, B, C, D, f1, K1, W[ 16 ] );
++    subRound( D, E, A, B, C, f1, K1, W[ 17 ] );
++    subRound( C, D, E, A, B, f1, K1, W[ 18 ] );
++    subRound( B, C, D, E, A, f1, K1, W[ 19 ] );
++
++    subRound( A, B, C, D, E, f2, K2, W[ 20 ] );
++    subRound( E, A, B, C, D, f2, K2, W[ 21 ] );
++    subRound( D, E, A, B, C, f2, K2, W[ 22 ] );
++    subRound( C, D, E, A, B, f2, K2, W[ 23 ] );
++    subRound( B, C, D, E, A, f2, K2, W[ 24 ] );
++    subRound( A, B, C, D, E, f2, K2, W[ 25 ] );
++    subRound( E, A, B, C, D, f2, K2, W[ 26 ] );
++    subRound( D, E, A, B, C, f2, K2, W[ 27 ] );
++    subRound( C, D, E, A, B, f2, K2, W[ 28 ] );
++    subRound( B, C, D, E, A, f2, K2, W[ 29 ] );
++    subRound( A, B, C, D, E, f2, K2, W[ 30 ] );
++    subRound( E, A, B, C, D, f2, K2, W[ 31 ] );
++    subRound( D, E, A, B, C, f2, K2, W[ 32 ] );
++    subRound( C, D, E, A, B, f2, K2, W[ 33 ] );
++    subRound( B, C, D, E, A, f2, K2, W[ 34 ] );
++    subRound( A, B, C, D, E, f2, K2, W[ 35 ] );
++    subRound( E, A, B, C, D, f2, K2, W[ 36 ] );
++    subRound( D, E, A, B, C, f2, K2, W[ 37 ] );
++    subRound( C, D, E, A, B, f2, K2, W[ 38 ] );
++    subRound( B, C, D, E, A, f2, K2, W[ 39 ] );
++    
++    subRound( A, B, C, D, E, f3, K3, W[ 40 ] );
++    subRound( E, A, B, C, D, f3, K3, W[ 41 ] );
++    subRound( D, E, A, B, C, f3, K3, W[ 42 ] );
++    subRound( C, D, E, A, B, f3, K3, W[ 43 ] );
++    subRound( B, C, D, E, A, f3, K3, W[ 44 ] );
++    subRound( A, B, C, D, E, f3, K3, W[ 45 ] );
++    subRound( E, A, B, C, D, f3, K3, W[ 46 ] );
++    subRound( D, E, A, B, C, f3, K3, W[ 47 ] );
++    subRound( C, D, E, A, B, f3, K3, W[ 48 ] );
++    subRound( B, C, D, E, A, f3, K3, W[ 49 ] );
++    subRound( A, B, C, D, E, f3, K3, W[ 50 ] );
++    subRound( E, A, B, C, D, f3, K3, W[ 51 ] );
++    subRound( D, E, A, B, C, f3, K3, W[ 52 ] );
++    subRound( C, D, E, A, B, f3, K3, W[ 53 ] );
++    subRound( B, C, D, E, A, f3, K3, W[ 54 ] );
++    subRound( A, B, C, D, E, f3, K3, W[ 55 ] );
++    subRound( E, A, B, C, D, f3, K3, W[ 56 ] );
++    subRound( D, E, A, B, C, f3, K3, W[ 57 ] );
++    subRound( C, D, E, A, B, f3, K3, W[ 58 ] );
++    subRound( B, C, D, E, A, f3, K3, W[ 59 ] );
++
++    subRound( A, B, C, D, E, f4, K4, W[ 60 ] );
++    subRound( E, A, B, C, D, f4, K4, W[ 61 ] );
++    subRound( D, E, A, B, C, f4, K4, W[ 62 ] );
++    subRound( C, D, E, A, B, f4, K4, W[ 63 ] );
++    subRound( B, C, D, E, A, f4, K4, W[ 64 ] );
++    subRound( A, B, C, D, E, f4, K4, W[ 65 ] );
++    subRound( E, A, B, C, D, f4, K4, W[ 66 ] );
++    subRound( D, E, A, B, C, f4, K4, W[ 67 ] );
++    subRound( C, D, E, A, B, f4, K4, W[ 68 ] );
++    subRound( B, C, D, E, A, f4, K4, W[ 69 ] );
++    subRound( A, B, C, D, E, f4, K4, W[ 70 ] );
++    subRound( E, A, B, C, D, f4, K4, W[ 71 ] );
++    subRound( D, E, A, B, C, f4, K4, W[ 72 ] );
++    subRound( C, D, E, A, B, f4, K4, W[ 73 ] );
++    subRound( B, C, D, E, A, f4, K4, W[ 74 ] );
++    subRound( A, B, C, D, E, f4, K4, W[ 75 ] );
++    subRound( E, A, B, C, D, f4, K4, W[ 76 ] );
++    subRound( D, E, A, B, C, f4, K4, W[ 77 ] );
++    subRound( C, D, E, A, B, f4, K4, W[ 78 ] );
++    subRound( B, C, D, E, A, f4, K4, W[ 79 ] );
++#else
++#error Illegal SHA_CODE_SIZE
++#endif
++
++    /* Build message digest */
++    digest[ 0 ] += A;
++    digest[ 1 ] += B;
++    digest[ 2 ] += C;
++    digest[ 3 ] += D;
++    digest[ 4 ] += E;
++
++	/* W is wiped by the caller */
++#undef W
++}
++
++#undef ROTL
++#undef f1
++#undef f2
++#undef f3
++#undef f4
++#undef K1	
++#undef K2
++#undef K3	
++#undef K4	
++#undef subRound
++	
++#else /* !USE_SHA - Use MD5 */
++
++#define HASH_BUFFER_SIZE 4
++#define HASH_EXTRA_SIZE 0
++#define HASH_TRANSFORM MD5Transform
++	
++/*
++ * MD5 transform algorithm, taken from code written by Colin Plumb,
++ * and put into the public domain
++ */
++
++/* The four core functions - F1 is optimized somewhat */
++
++/* #define F1(x, y, z) (x & y | ~x & z) */
++#define F1(x, y, z) (z ^ (x & (y ^ z)))
++#define F2(x, y, z) F1(z, x, y)
++#define F3(x, y, z) (x ^ y ^ z)
++#define F4(x, y, z) (y ^ (x | ~z))
++
++/* This is the central step in the MD5 algorithm. */
++#define MD5STEP(f, w, x, y, z, data, s) \
++	( w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x )
++
++/*
++ * The core of the MD5 algorithm, this alters an existing MD5 hash to
++ * reflect the addition of 16 longwords of new data.  MD5Update blocks
++ * the data and converts bytes into longwords for this routine.
++ */
++static void MD5Transform(__u32 buf[HASH_BUFFER_SIZE], __u32 const in[16])
++{
++	__u32 a, b, c, d;
++
++	a = buf[0];
++	b = buf[1];
++	c = buf[2];
++	d = buf[3];
++
++	MD5STEP(F1, a, b, c, d, in[ 0]+0xd76aa478,  7);
++	MD5STEP(F1, d, a, b, c, in[ 1]+0xe8c7b756, 12);
++	MD5STEP(F1, c, d, a, b, in[ 2]+0x242070db, 17);
++	MD5STEP(F1, b, c, d, a, in[ 3]+0xc1bdceee, 22);
++	MD5STEP(F1, a, b, c, d, in[ 4]+0xf57c0faf,  7);
++	MD5STEP(F1, d, a, b, c, in[ 5]+0x4787c62a, 12);
++	MD5STEP(F1, c, d, a, b, in[ 6]+0xa8304613, 17);
++	MD5STEP(F1, b, c, d, a, in[ 7]+0xfd469501, 22);
++	MD5STEP(F1, a, b, c, d, in[ 8]+0x698098d8,  7);
++	MD5STEP(F1, d, a, b, c, in[ 9]+0x8b44f7af, 12);
++	MD5STEP(F1, c, d, a, b, in[10]+0xffff5bb1, 17);
++	MD5STEP(F1, b, c, d, a, in[11]+0x895cd7be, 22);
++	MD5STEP(F1, a, b, c, d, in[12]+0x6b901122,  7);
++	MD5STEP(F1, d, a, b, c, in[13]+0xfd987193, 12);
++	MD5STEP(F1, c, d, a, b, in[14]+0xa679438e, 17);
++	MD5STEP(F1, b, c, d, a, in[15]+0x49b40821, 22);
++
++	MD5STEP(F2, a, b, c, d, in[ 1]+0xf61e2562,  5);
++	MD5STEP(F2, d, a, b, c, in[ 6]+0xc040b340,  9);
++	MD5STEP(F2, c, d, a, b, in[11]+0x265e5a51, 14);
++	MD5STEP(F2, b, c, d, a, in[ 0]+0xe9b6c7aa, 20);
++	MD5STEP(F2, a, b, c, d, in[ 5]+0xd62f105d,  5);
++	MD5STEP(F2, d, a, b, c, in[10]+0x02441453,  9);
++	MD5STEP(F2, c, d, a, b, in[15]+0xd8a1e681, 14);
++	MD5STEP(F2, b, c, d, a, in[ 4]+0xe7d3fbc8, 20);
++	MD5STEP(F2, a, b, c, d, in[ 9]+0x21e1cde6,  5);
++	MD5STEP(F2, d, a, b, c, in[14]+0xc33707d6,  9);
++	MD5STEP(F2, c, d, a, b, in[ 3]+0xf4d50d87, 14);
++	MD5STEP(F2, b, c, d, a, in[ 8]+0x455a14ed, 20);
++	MD5STEP(F2, a, b, c, d, in[13]+0xa9e3e905,  5);
++	MD5STEP(F2, d, a, b, c, in[ 2]+0xfcefa3f8,  9);
++	MD5STEP(F2, c, d, a, b, in[ 7]+0x676f02d9, 14);
++	MD5STEP(F2, b, c, d, a, in[12]+0x8d2a4c8a, 20);
++
++	MD5STEP(F3, a, b, c, d, in[ 5]+0xfffa3942,  4);
++	MD5STEP(F3, d, a, b, c, in[ 8]+0x8771f681, 11);
++	MD5STEP(F3, c, d, a, b, in[11]+0x6d9d6122, 16);
++	MD5STEP(F3, b, c, d, a, in[14]+0xfde5380c, 23);
++	MD5STEP(F3, a, b, c, d, in[ 1]+0xa4beea44,  4);
++	MD5STEP(F3, d, a, b, c, in[ 4]+0x4bdecfa9, 11);
++	MD5STEP(F3, c, d, a, b, in[ 7]+0xf6bb4b60, 16);
++	MD5STEP(F3, b, c, d, a, in[10]+0xbebfbc70, 23);
++	MD5STEP(F3, a, b, c, d, in[13]+0x289b7ec6,  4);
++	MD5STEP(F3, d, a, b, c, in[ 0]+0xeaa127fa, 11);
++	MD5STEP(F3, c, d, a, b, in[ 3]+0xd4ef3085, 16);
++	MD5STEP(F3, b, c, d, a, in[ 6]+0x04881d05, 23);
++	MD5STEP(F3, a, b, c, d, in[ 9]+0xd9d4d039,  4);
++	MD5STEP(F3, d, a, b, c, in[12]+0xe6db99e5, 11);
++	MD5STEP(F3, c, d, a, b, in[15]+0x1fa27cf8, 16);
++	MD5STEP(F3, b, c, d, a, in[ 2]+0xc4ac5665, 23);
++
++	MD5STEP(F4, a, b, c, d, in[ 0]+0xf4292244,  6);
++	MD5STEP(F4, d, a, b, c, in[ 7]+0x432aff97, 10);
++	MD5STEP(F4, c, d, a, b, in[14]+0xab9423a7, 15);
++	MD5STEP(F4, b, c, d, a, in[ 5]+0xfc93a039, 21);
++	MD5STEP(F4, a, b, c, d, in[12]+0x655b59c3,  6);
++	MD5STEP(F4, d, a, b, c, in[ 3]+0x8f0ccc92, 10);
++	MD5STEP(F4, c, d, a, b, in[10]+0xffeff47d, 15);
++	MD5STEP(F4, b, c, d, a, in[ 1]+0x85845dd1, 21);
++	MD5STEP(F4, a, b, c, d, in[ 8]+0x6fa87e4f,  6);
++	MD5STEP(F4, d, a, b, c, in[15]+0xfe2ce6e0, 10);
++	MD5STEP(F4, c, d, a, b, in[ 6]+0xa3014314, 15);
++	MD5STEP(F4, b, c, d, a, in[13]+0x4e0811a1, 21);
++	MD5STEP(F4, a, b, c, d, in[ 4]+0xf7537e82,  6);
++	MD5STEP(F4, d, a, b, c, in[11]+0xbd3af235, 10);
++	MD5STEP(F4, c, d, a, b, in[ 2]+0x2ad7d2bb, 15);
++	MD5STEP(F4, b, c, d, a, in[ 9]+0xeb86d391, 21);
++
++	buf[0] += a;
++	buf[1] += b;
++	buf[2] += c;
++	buf[3] += d;
++}
++
++#undef F1
++#undef F2
++#undef F3
++#undef F4
++#undef MD5STEP
++
++#endif /* !USE_SHA */
++
++/*********************************************************************
++ *
++ * Entropy extraction routines
++ *
++ *********************************************************************/
++
++#define EXTRACT_ENTROPY_USER		1
++#define EXTRACT_ENTROPY_SECONDARY	2
++#define EXTRACT_ENTROPY_LIMIT		4
++#define TMP_BUF_SIZE			(HASH_BUFFER_SIZE + HASH_EXTRA_SIZE)
++#define SEC_XFER_SIZE			(TMP_BUF_SIZE*4)
++
++static ssize_t extract_entropy(struct entropy_store *r, void * buf,
++			       size_t nbytes, int flags);
++
++/*
++ * This utility inline function is responsible for transferring entropy
++ * from the primary pool to the secondary extraction pool. We make
++ * sure we pull enough for a 'catastrophic reseed'.
++ */
++static inline void xfer_secondary_pool(struct entropy_store *r,
++				       size_t nbytes, __u32 *tmp)
++{
++	if (r->entropy_count < nbytes * 8 &&
++	    r->entropy_count < r->poolinfo.POOLBITS) {
++		int bytes = max_t(int, random_read_wakeup_thresh / 8,
++				min_t(int, nbytes, TMP_BUF_SIZE));
++
++		DEBUG_ENT("%04d %04d : going to reseed %s with %d bits "
++			  "(%d of %d requested)\n",
++			  random_state->entropy_count,
++			  sec_random_state->entropy_count,
++			  r == sec_random_state ? "secondary" : "unknown",
++			  bytes * 8, nbytes * 8, r->entropy_count);
++
++		bytes=extract_entropy(random_state, tmp, bytes,
++				      EXTRACT_ENTROPY_LIMIT);
++		add_entropy_words(r, tmp, bytes);
++		credit_entropy_store(r, bytes*8);
++	}
++}
++
++/*
++ * This function extracts randomness from the "entropy pool", and
++ * returns it in a buffer.  This function computes how many remaining
++ * bits of entropy are left in the pool, but it does not restrict the
++ * number of bytes that are actually obtained.  If the EXTRACT_ENTROPY_USER
++ * flag is given, then the buf pointer is assumed to be in user space.
++ *
++ * If the EXTRACT_ENTROPY_SECONDARY flag is given, then we are actually
++ * extracting entropy from the secondary pool, and can refill from the
++ * primary pool if needed.
++ *
++ * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
++ */
++static ssize_t extract_entropy(struct entropy_store *r, void * buf,
++			       size_t nbytes, int flags)
++{
++	ssize_t ret, i;
++	__u32 tmp[TMP_BUF_SIZE];
++	__u32 x;
++	unsigned long cpuflags;
++
++
++	/* Redundant, but just in case... */
++	if (r->entropy_count > r->poolinfo.POOLBITS)
++		r->entropy_count = r->poolinfo.POOLBITS;
++
++	if (flags & EXTRACT_ENTROPY_SECONDARY)
++		xfer_secondary_pool(r, nbytes, tmp);
++
++	/* Hold lock while accounting */
++	spin_lock_irqsave(&r->lock, cpuflags);
++
++	DEBUG_ENT("%04d %04d : trying to extract %d bits from %s\n",
++		  random_state->entropy_count,
++		  sec_random_state->entropy_count,
++		  nbytes * 8,
++		  r == sec_random_state ? "secondary" :
++		  r == random_state ? "primary" : "unknown");
++
++	if (flags & EXTRACT_ENTROPY_LIMIT && nbytes >= r->entropy_count / 8)
++		nbytes = r->entropy_count / 8;
++
++	if (r->entropy_count / 8 >= nbytes)
++		r->entropy_count -= nbytes*8;
++	else
++		r->entropy_count = 0;
++
++	if (r->entropy_count < random_write_wakeup_thresh)
++		wake_up_interruptible(&random_write_wait);
++
++	DEBUG_ENT("%04d %04d : debiting %d bits from %s%s\n",
++		  random_state->entropy_count,
++		  sec_random_state->entropy_count,
++		  nbytes * 8,
++		  r == sec_random_state ? "secondary" :
++		  r == random_state ? "primary" : "unknown",
++		  flags & EXTRACT_ENTROPY_LIMIT ? "" : " (unlimited)");
++
++	spin_unlock_irqrestore(&r->lock, cpuflags);
++
++	ret = 0;
++	while (nbytes) {
++		/*
++		 * Check if we need to break out or reschedule....
++		 */
++		if ((flags & EXTRACT_ENTROPY_USER) && need_resched()) {
++			if (signal_pending(current)) {
++				if (ret == 0)
++					ret = -ERESTARTSYS;
++				break;
++			}
++
++			DEBUG_ENT("%04d %04d : extract feeling sleepy (%d bytes left)\n",
++				  random_state->entropy_count,
++				  sec_random_state->entropy_count, nbytes);
++
++			schedule();
++
++			DEBUG_ENT("%04d %04d : extract woke up\n",
++				  random_state->entropy_count,
++				  sec_random_state->entropy_count);
++		}
++
++		/* Hash the pool to get the output */
++		tmp[0] = 0x67452301;
++		tmp[1] = 0xefcdab89;
++		tmp[2] = 0x98badcfe;
++		tmp[3] = 0x10325476;
++#ifdef USE_SHA
++		tmp[4] = 0xc3d2e1f0;
++#endif
++		/*
++		 * As we hash the pool, we mix intermediate values of
++		 * the hash back into the pool.  This eliminates
++		 * backtracking attacks (where the attacker knows
++		 * the state of the pool plus the current outputs, and
++		 * attempts to find previous outputs), unless the hash
++		 * function can be inverted.
++		 */
++		for (i = 0, x = 0; i < r->poolinfo.poolwords; i += 16, x+=2) {
++			HASH_TRANSFORM(tmp, r->pool+i);
++			add_entropy_words(r, &tmp[x%HASH_BUFFER_SIZE], 1);
++		}
++		
++		/*
++		 * In case the hash function has some recognizable
++		 * output pattern, we fold it in half.
++		 */
++		for (i = 0; i <  HASH_BUFFER_SIZE/2; i++)
++			tmp[i] ^= tmp[i + (HASH_BUFFER_SIZE+1)/2];
++#if HASH_BUFFER_SIZE & 1	/* There's a middle word to deal with */
++		x = tmp[HASH_BUFFER_SIZE/2];
++		x ^= (x >> 16);		/* Fold it in half */
++		((__u16 *)tmp)[HASH_BUFFER_SIZE-1] = (__u16)x;
++#endif
++		
++		/* Copy data to destination buffer */
++		i = min(nbytes, HASH_BUFFER_SIZE*sizeof(__u32)/2);
++		if (flags & EXTRACT_ENTROPY_USER) {
++			i -= copy_to_user(buf, (__u8 const *)tmp, i);
++			if (!i) {
++				ret = -EFAULT;
++				break;
++			}
++		} else
++			memcpy(buf, (__u8 const *)tmp, i);
++		nbytes -= i;
++		buf += i;
++		ret += i;
++	}
++
++	/* Wipe data just returned from memory */
++	memset(tmp, 0, sizeof(tmp));
++	
++	return ret;
++}
++
++/*
++ * This function is the exported kernel interface.  It returns some
++ * number of good random numbers, suitable for seeding TCP sequence
++ * numbers, etc.
++ */
++void get_random_bytes(void *buf, int nbytes)
++{
++	if (sec_random_state)  
++		extract_entropy(sec_random_state, (char *) buf, nbytes, 
++				EXTRACT_ENTROPY_SECONDARY);
++	else if (random_state)
++		extract_entropy(random_state, (char *) buf, nbytes, 0);
++	else
++		printk(KERN_NOTICE "get_random_bytes called before "
++				   "random driver initialization\n");
++}
++
++EXPORT_SYMBOL(get_random_bytes);
++
++/*********************************************************************
++ *
++ * Functions to interface with Linux
++ *
++ *********************************************************************/
++
++/*
++ * Initialize the random pool with standard stuff.
++ *
++ * NOTE: This is an OS-dependent function.
++ */
++static void init_std_data(struct entropy_store *r)
++{
++	struct timeval 	tv;
++	__u32		words[2];
++	char 		*p;
++	int		i;
++
++	do_gettimeofday(&tv);
++	words[0] = tv.tv_sec;
++	words[1] = tv.tv_usec;
++	add_entropy_words(r, words, 2);
++
++	/*
++	 *	This doesn't lock system.utsname. However, we are generating
++	 *	entropy so a race with a name set here is fine.
++	 */
++	p = (char *) &system_utsname;
++	for (i = sizeof(system_utsname) / sizeof(words); i; i--) {
++		memcpy(words, p, sizeof(words));
++		add_entropy_words(r, words, sizeof(words)/4);
++		p += sizeof(words);
++	}
++}
++
++static int __init rand_initialize(void)
++{
++	int i;
++
++	if (create_entropy_store(DEFAULT_POOL_SIZE, &random_state))
++		goto err;
++	if (batch_entropy_init(BATCH_ENTROPY_SIZE, random_state))
++		goto err;
++	if (create_entropy_store(SECONDARY_POOL_SIZE, &sec_random_state))
++		goto err;
++	clear_entropy_store(random_state);
++	clear_entropy_store(sec_random_state);
++	init_std_data(random_state);
++#ifdef CONFIG_SYSCTL
++	sysctl_init_random(random_state);
++#endif
++	for (i = 0; i < NR_IRQS; i++)
++		irq_timer_state[i] = NULL;
++	memset(&keyboard_timer_state, 0, sizeof(struct timer_rand_state));
++	memset(&mouse_timer_state, 0, sizeof(struct timer_rand_state));
++	memset(&extract_timer_state, 0, sizeof(struct timer_rand_state));
++	extract_timer_state.dont_count_entropy = 1;
++	return 0;
++err:
++	return -1;
++}
++module_init(rand_initialize);
++
++void rand_initialize_irq(int irq)
++{
++	struct timer_rand_state *state;
++	
++	if (irq >= NR_IRQS || irq_timer_state[irq])
++		return;
++
++	/*
++	 * If kmalloc returns null, we just won't use that entropy
++	 * source.
++	 */
++	state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
++	if (state) {
++		memset(state, 0, sizeof(struct timer_rand_state));
++		irq_timer_state[irq] = state;
++	}
++}
++ 
++void rand_initialize_disk(struct gendisk *disk)
++{
++	struct timer_rand_state *state;
++	
++	/*
++	 * If kmalloc returns null, we just won't use that entropy
++	 * source.
++	 */
++	state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
++	if (state) {
++		memset(state, 0, sizeof(struct timer_rand_state));
++		disk->random = state;
++	}
++}
++
++static ssize_t
++random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
++{
++	DECLARE_WAITQUEUE(wait, current);
++	ssize_t			n, retval = 0, count = 0;
++	
++	if (nbytes == 0)
++		return 0;
++
++	while (nbytes > 0) {
++		n = nbytes;
++		if (n > SEC_XFER_SIZE)
++			n = SEC_XFER_SIZE;
++
++		DEBUG_ENT("%04d %04d : reading %d bits, p: %d s: %d\n",
++			  random_state->entropy_count,
++			  sec_random_state->entropy_count,
++			  n*8, random_state->entropy_count,
++			  sec_random_state->entropy_count);
++
++		n = extract_entropy(sec_random_state, buf, n,
++				    EXTRACT_ENTROPY_USER |
++				    EXTRACT_ENTROPY_LIMIT |
++				    EXTRACT_ENTROPY_SECONDARY);
++
++		DEBUG_ENT("%04d %04d : read got %d bits (%d still needed)\n",
++			  random_state->entropy_count,
++			  sec_random_state->entropy_count,
++			  n*8, (nbytes-n)*8);
++
++		if (n == 0) {
++			if (file->f_flags & O_NONBLOCK) {
++				retval = -EAGAIN;
++				break;
++			}
++			if (signal_pending(current)) {
++				retval = -ERESTARTSYS;
++				break;
++			}
++
++			DEBUG_ENT("%04d %04d : sleeping?\n",
++				  random_state->entropy_count,
++				  sec_random_state->entropy_count);
++
++			set_current_state(TASK_INTERRUPTIBLE);
++			add_wait_queue(&random_read_wait, &wait);
++
++			if (sec_random_state->entropy_count / 8 == 0)
++				schedule();
++
++			set_current_state(TASK_RUNNING);
++			remove_wait_queue(&random_read_wait, &wait);
++
++			DEBUG_ENT("%04d %04d : waking up\n",
++				  random_state->entropy_count,
++				  sec_random_state->entropy_count);
++
++			continue;
++		}
++
++		if (n < 0) {
++			retval = n;
++			break;
++		}
++		count += n;
++		buf += n;
++		nbytes -= n;
++		break;		/* This break makes the device work */
++				/* like a named pipe */
++	}
++
++	/*
++	 * If we gave the user some bytes, update the access time.
++	 */
++	if (count)
++		file_accessed(file);
++	
++	return (count ? count : retval);
++}
++
++static ssize_t
++urandom_read(struct file * file, char __user * buf,
++		      size_t nbytes, loff_t *ppos)
++{
++	return extract_entropy(sec_random_state, buf, nbytes,
++			       EXTRACT_ENTROPY_USER |
++			       EXTRACT_ENTROPY_SECONDARY);
++}
++
++static unsigned int
++random_poll(struct file *file, poll_table * wait)
++{
++	unsigned int mask;
++
++	poll_wait(file, &random_read_wait, wait);
++	poll_wait(file, &random_write_wait, wait);
++	mask = 0;
++	if (random_state->entropy_count >= random_read_wakeup_thresh)
++		mask |= POLLIN | POLLRDNORM;
++	if (random_state->entropy_count < random_write_wakeup_thresh)
++		mask |= POLLOUT | POLLWRNORM;
++	return mask;
++}
++
++static ssize_t
++random_write(struct file * file, const char __user * buffer,
++	     size_t count, loff_t *ppos)
++{
++	int		ret = 0;
++	size_t		bytes;
++	__u32 		buf[16];
++	const char 	__user *p = buffer;
++	size_t		c = count;
++
++	while (c > 0) {
++		bytes = min(c, sizeof(buf));
++
++		bytes -= copy_from_user(&buf, p, bytes);
++		if (!bytes) {
++			ret = -EFAULT;
++			break;
++		}
++		c -= bytes;
++		p += bytes;
++
++		add_entropy_words(random_state, buf, (bytes + 3) / 4);
++	}
++	if (p == buffer) {
++		return (ssize_t)ret;
++	} else {
++		file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
++		mark_inode_dirty(file->f_dentry->d_inode);
++		return (ssize_t)(p - buffer);
++	}
++}
++
++static int
++random_ioctl(struct inode * inode, struct file * file,
++	     unsigned int cmd, unsigned long arg)
++{
++	int *tmp, size, ent_count;
++	int __user *p = (int __user *)arg;
++	int retval;
++	unsigned long flags;
++	
++	switch (cmd) {
++	case RNDGETENTCNT:
++		ent_count = random_state->entropy_count;
++		if (put_user(ent_count, p))
++			return -EFAULT;
++		return 0;
++	case RNDADDTOENTCNT:
++		if (!capable(CAP_SYS_ADMIN))
++			return -EPERM;
++		if (get_user(ent_count, p))
++			return -EFAULT;
++		credit_entropy_store(random_state, ent_count);
++		/*
++		 * Wake up waiting processes if we have enough
++		 * entropy.
++		 */
++		if (random_state->entropy_count >= random_read_wakeup_thresh)
++			wake_up_interruptible(&random_read_wait);
++		return 0;
++	case RNDGETPOOL:
++		if (!capable(CAP_SYS_ADMIN))
++			return -EPERM;
++		if (get_user(size, p) ||
++		    put_user(random_state->poolinfo.poolwords, p++))
++			return -EFAULT;
++		if (size < 0)
++			return -EFAULT;
++		if (size > random_state->poolinfo.poolwords)
++			size = random_state->poolinfo.poolwords;
++
++		/* prepare to atomically snapshot pool */
++
++		tmp = kmalloc(size * sizeof(__u32), GFP_KERNEL);
++
++		if (!tmp)
++			return -ENOMEM;
++
++		spin_lock_irqsave(&random_state->lock, flags);
++		ent_count = random_state->entropy_count;
++		memcpy(tmp, random_state->pool, size * sizeof(__u32));
++		spin_unlock_irqrestore(&random_state->lock, flags);
++
++		if (!copy_to_user(p, tmp, size * sizeof(__u32))) {
++			kfree(tmp);
++			return -EFAULT;
++		}
++
++		kfree(tmp);
++
++		if(put_user(ent_count, p++))
++			return -EFAULT;
++
++		return 0;
++	case RNDADDENTROPY:
++		if (!capable(CAP_SYS_ADMIN))
++			return -EPERM;
++		if (get_user(ent_count, p++))
++			return -EFAULT;
++		if (ent_count < 0)
++			return -EINVAL;
++		if (get_user(size, p++))
++			return -EFAULT;
++		retval = random_write(file, (const char __user *) p,
++				      size, &file->f_pos);
++		if (retval < 0)
++			return retval;
++		credit_entropy_store(random_state, ent_count);
++		/*
++		 * Wake up waiting processes if we have enough
++		 * entropy.
++		 */
++		if (random_state->entropy_count >= random_read_wakeup_thresh)
++			wake_up_interruptible(&random_read_wait);
++		return 0;
++	case RNDZAPENTCNT:
++		if (!capable(CAP_SYS_ADMIN))
++			return -EPERM;
++		random_state->entropy_count = 0;
++		return 0;
++	case RNDCLEARPOOL:
++		/* Clear the entropy pool and associated counters. */
++		if (!capable(CAP_SYS_ADMIN))
++			return -EPERM;
++		clear_entropy_store(random_state);
++		init_std_data(random_state);
++		return 0;
++	default:
++		return -EINVAL;
++	}
++}
++
++struct file_operations random_fops = {
++	.read		= random_read,
++	.write		= random_write,
++	.poll		= random_poll,
++	.ioctl		= random_ioctl,
++};
++
++struct file_operations urandom_fops = {
++	.read		= urandom_read,
++	.write		= random_write,
++	.ioctl		= random_ioctl,
++};
++
++/***************************************************************
++ * Random UUID interface
++ * 
++ * Used here for a Boot ID, but can be useful for other kernel 
++ * drivers.
++ ***************************************************************/
++
++/*
++ * Generate random UUID
++ */
++void generate_random_uuid(unsigned char uuid_out[16])
++{
++	get_random_bytes(uuid_out, 16);
++	/* Set UUID version to 4 --- truly random generation */
++	uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
++	/* Set the UUID variant to DCE */
++	uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
++}
++
++EXPORT_SYMBOL(generate_random_uuid);
++
++/********************************************************************
++ *
++ * Sysctl interface
++ *
++ ********************************************************************/
++
++#ifdef CONFIG_SYSCTL
++
++#include <linux/sysctl.h>
++
++static int sysctl_poolsize;
++static int min_read_thresh, max_read_thresh;
++static int min_write_thresh, max_write_thresh;
++static char sysctl_bootid[16];
++
++/*
++ * This function handles a request from the user to change the pool size 
++ * of the primary entropy store.
++ */
++static int change_poolsize(int poolsize)
++{
++	struct entropy_store	*new_store, *old_store;
++	int			ret;
++	
++	if ((ret = create_entropy_store(poolsize, &new_store)))
++		return ret;
++
++	add_entropy_words(new_store, random_state->pool,
++			  random_state->poolinfo.poolwords);
++	credit_entropy_store(new_store, random_state->entropy_count);
++
++	sysctl_init_random(new_store);
++	old_store = random_state;
++	random_state = batch_work.data = new_store;
++	free_entropy_store(old_store);
++	return 0;
++}
++
++static int proc_do_poolsize(ctl_table *table, int write, struct file *filp,
++			    void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++	int	ret;
++
++	sysctl_poolsize = random_state->poolinfo.POOLBYTES;
++
++	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
++	if (ret || !write ||
++	    (sysctl_poolsize == random_state->poolinfo.POOLBYTES))
++		return ret;
++
++	return change_poolsize(sysctl_poolsize);
++}
++
++static int poolsize_strategy(ctl_table *table, int __user *name, int nlen,
++			     void __user *oldval, size_t __user *oldlenp,
++			     void __user *newval, size_t newlen, void **context)
++{
++	int	len;
++	
++	sysctl_poolsize = random_state->poolinfo.POOLBYTES;
++
++	/*
++	 * We only handle the write case, since the read case gets
++	 * handled by the default handler (and we don't care if the
++	 * write case happens twice; it's harmless).
++	 */
++	if (newval && newlen) {
++		len = newlen;
++		if (len > table->maxlen)
++			len = table->maxlen;
++		if (copy_from_user(table->data, newval, len))
++			return -EFAULT;
++	}
++
++	if (sysctl_poolsize != random_state->poolinfo.POOLBYTES)
++		return change_poolsize(sysctl_poolsize);
++
++	return 0;
++}
++
++/*
++ * These functions are used to return both the bootid UUID, and random
++ * UUID.  The difference is in whether table->data is NULL; if it is,
++ * then a new UUID is generated and returned to the user.
++ * 
++ * If the user accesses this via the proc interface, it will be returned
++ * as an ASCII string in the standard UUID format.  If accesses via the 
++ * sysctl system call, it is returned as 16 bytes of binary data.
++ */
++static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
++			void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++	ctl_table	fake_table;
++	unsigned char	buf[64], tmp_uuid[16], *uuid;
++
++	uuid = table->data;
++	if (!uuid) {
++		uuid = tmp_uuid;
++		uuid[8] = 0;
++	}
++	if (uuid[8] == 0)
++		generate_random_uuid(uuid);
++
++	sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
++		"%02x%02x%02x%02x%02x%02x",
++		uuid[0],  uuid[1],  uuid[2],  uuid[3],
++		uuid[4],  uuid[5],  uuid[6],  uuid[7],
++		uuid[8],  uuid[9],  uuid[10], uuid[11],
++		uuid[12], uuid[13], uuid[14], uuid[15]);
++	fake_table.data = buf;
++	fake_table.maxlen = sizeof(buf);
++
++	return proc_dostring(&fake_table, write, filp, buffer, lenp, ppos);
++}
++
++static int uuid_strategy(ctl_table *table, int __user *name, int nlen,
++			 void __user *oldval, size_t __user *oldlenp,
++			 void __user *newval, size_t newlen, void **context)
++{
++	unsigned char	tmp_uuid[16], *uuid;
++	unsigned int	len;
++
++	if (!oldval || !oldlenp)
++		return 1;
++
++	uuid = table->data;
++	if (!uuid) {
++		uuid = tmp_uuid;
++		uuid[8] = 0;
++	}
++	if (uuid[8] == 0)
++		generate_random_uuid(uuid);
++
++	if (get_user(len, oldlenp))
++		return -EFAULT;
++	if (len) {
++		if (len > 16)
++			len = 16;
++		if (copy_to_user(oldval, uuid, len) ||
++		    put_user(len, oldlenp))
++			return -EFAULT;
++	}
++	return 1;
++}
++
++ctl_table random_table[] = {
++	{
++		.ctl_name	= RANDOM_POOLSIZE,
++		.procname	= "poolsize",
++		.data		= &sysctl_poolsize,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= &proc_do_poolsize,
++		.strategy	= &poolsize_strategy,
++	},
++	{
++		.ctl_name	= RANDOM_ENTROPY_COUNT,
++		.procname	= "entropy_avail",
++		.maxlen		= sizeof(int),
++		.mode		= 0444,
++		.proc_handler	= &proc_dointvec,
++	},
++	{
++		.ctl_name	= RANDOM_READ_THRESH,
++		.procname	= "read_wakeup_threshold",
++		.data		= &random_read_wakeup_thresh,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.strategy	= &sysctl_intvec,
++		.extra1		= &min_read_thresh,
++		.extra2		= &max_read_thresh,
++	},
++	{
++		.ctl_name	= RANDOM_WRITE_THRESH,
++		.procname	= "write_wakeup_threshold",
++		.data		= &random_write_wakeup_thresh,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.strategy	= &sysctl_intvec,
++		.extra1		= &min_write_thresh,
++		.extra2		= &max_write_thresh,
++	},
++	{
++		.ctl_name	= RANDOM_BOOT_ID,
++		.procname	= "boot_id",
++		.data		= &sysctl_bootid,
++		.maxlen		= 16,
++		.mode		= 0444,
++		.proc_handler	= &proc_do_uuid,
++		.strategy	= &uuid_strategy,
++	},
++	{
++		.ctl_name	= RANDOM_UUID,
++		.procname	= "uuid",
++		.maxlen		= 16,
++		.mode		= 0444,
++		.proc_handler	= &proc_do_uuid,
++		.strategy	= &uuid_strategy,
++	},
++	{ .ctl_name = 0 }
++};
++
++static void sysctl_init_random(struct entropy_store *random_state)
++{
++	min_read_thresh = 8;
++	min_write_thresh = 0;
++	max_read_thresh = max_write_thresh = random_state->poolinfo.POOLBITS;
++	random_table[1].data = &random_state->entropy_count;
++}
++#endif 	/* CONFIG_SYSCTL */
++
++/********************************************************************
++ *
++ * Random functions for networking
++ *
++ ********************************************************************/
++
++/*
++ * TCP initial sequence number picking.  This uses the random number
++ * generator to pick an initial secret value.  This value is hashed
++ * along with the TCP endpoint information to provide a unique
++ * starting point for each pair of TCP endpoints.  This defeats
++ * attacks which rely on guessing the initial TCP sequence number.
++ * This algorithm was suggested by Steve Bellovin.
++ *
++ * Using a very strong hash was taking an appreciable amount of the total
++ * TCP connection establishment time, so this is a weaker hash,
++ * compensated for by changing the secret periodically.
++ */
++
++/* F, G and H are basic MD4 functions: selection, majority, parity */
++#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
++#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
++#define H(x, y, z) ((x) ^ (y) ^ (z))
++
++/*
++ * The generic round function.  The application is so specific that
++ * we don't bother protecting all the arguments with parens, as is generally
++ * good macro practice, in favor of extra legibility.
++ * Rotation is separate from addition to prevent recomputation
++ */
++#define ROUND(f, a, b, c, d, x, s)	\
++	(a += f(b, c, d) + x, a = (a << s) | (a >> (32-s)))
++#define K1 0
++#define K2 013240474631UL
++#define K3 015666365641UL
++
++/*
++ * Basic cut-down MD4 transform.  Returns only 32 bits of result.
++ */
++static __u32 halfMD4Transform (__u32 const buf[4], __u32 const in[8])
++{
++	__u32	a = buf[0], b = buf[1], c = buf[2], d = buf[3];
++
++	/* Round 1 */
++	ROUND(F, a, b, c, d, in[0] + K1,  3);
++	ROUND(F, d, a, b, c, in[1] + K1,  7);
++	ROUND(F, c, d, a, b, in[2] + K1, 11);
++	ROUND(F, b, c, d, a, in[3] + K1, 19);
++	ROUND(F, a, b, c, d, in[4] + K1,  3);
++	ROUND(F, d, a, b, c, in[5] + K1,  7);
++	ROUND(F, c, d, a, b, in[6] + K1, 11);
++	ROUND(F, b, c, d, a, in[7] + K1, 19);
++
++	/* Round 2 */
++	ROUND(G, a, b, c, d, in[1] + K2,  3);
++	ROUND(G, d, a, b, c, in[3] + K2,  5);
++	ROUND(G, c, d, a, b, in[5] + K2,  9);
++	ROUND(G, b, c, d, a, in[7] + K2, 13);
++	ROUND(G, a, b, c, d, in[0] + K2,  3);
++	ROUND(G, d, a, b, c, in[2] + K2,  5);
++	ROUND(G, c, d, a, b, in[4] + K2,  9);
++	ROUND(G, b, c, d, a, in[6] + K2, 13);
++
++	/* Round 3 */
++	ROUND(H, a, b, c, d, in[3] + K3,  3);
++	ROUND(H, d, a, b, c, in[7] + K3,  9);
++	ROUND(H, c, d, a, b, in[2] + K3, 11);
++	ROUND(H, b, c, d, a, in[6] + K3, 15);
++	ROUND(H, a, b, c, d, in[1] + K3,  3);
++	ROUND(H, d, a, b, c, in[5] + K3,  9);
++	ROUND(H, c, d, a, b, in[0] + K3, 11);
++	ROUND(H, b, c, d, a, in[4] + K3, 15);
++
++	return buf[1] + b;	/* "most hashed" word */
++	/* Alternative: return sum of all words? */
++}
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++
++static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12])
++{
++	__u32	a = buf[0], b = buf[1], c = buf[2], d = buf[3];
++
++	/* Round 1 */
++	ROUND(F, a, b, c, d, in[ 0] + K1,  3);
++	ROUND(F, d, a, b, c, in[ 1] + K1,  7);
++	ROUND(F, c, d, a, b, in[ 2] + K1, 11);
++	ROUND(F, b, c, d, a, in[ 3] + K1, 19);
++	ROUND(F, a, b, c, d, in[ 4] + K1,  3);
++	ROUND(F, d, a, b, c, in[ 5] + K1,  7);
++	ROUND(F, c, d, a, b, in[ 6] + K1, 11);
++	ROUND(F, b, c, d, a, in[ 7] + K1, 19);
++	ROUND(F, a, b, c, d, in[ 8] + K1,  3);
++	ROUND(F, d, a, b, c, in[ 9] + K1,  7);
++	ROUND(F, c, d, a, b, in[10] + K1, 11);
++	ROUND(F, b, c, d, a, in[11] + K1, 19);
++
++	/* Round 2 */
++	ROUND(G, a, b, c, d, in[ 1] + K2,  3);
++	ROUND(G, d, a, b, c, in[ 3] + K2,  5);
++	ROUND(G, c, d, a, b, in[ 5] + K2,  9);
++	ROUND(G, b, c, d, a, in[ 7] + K2, 13);
++	ROUND(G, a, b, c, d, in[ 9] + K2,  3);
++	ROUND(G, d, a, b, c, in[11] + K2,  5);
++	ROUND(G, c, d, a, b, in[ 0] + K2,  9);
++	ROUND(G, b, c, d, a, in[ 2] + K2, 13);
++	ROUND(G, a, b, c, d, in[ 4] + K2,  3);
++	ROUND(G, d, a, b, c, in[ 6] + K2,  5);
++	ROUND(G, c, d, a, b, in[ 8] + K2,  9);
++	ROUND(G, b, c, d, a, in[10] + K2, 13);
++
++	/* Round 3 */
++	ROUND(H, a, b, c, d, in[ 3] + K3,  3);
++	ROUND(H, d, a, b, c, in[ 7] + K3,  9);
++	ROUND(H, c, d, a, b, in[11] + K3, 11);
++	ROUND(H, b, c, d, a, in[ 2] + K3, 15);
++	ROUND(H, a, b, c, d, in[ 6] + K3,  3);
++	ROUND(H, d, a, b, c, in[10] + K3,  9);
++	ROUND(H, c, d, a, b, in[ 1] + K3, 11);
++	ROUND(H, b, c, d, a, in[ 5] + K3, 15);
++	ROUND(H, a, b, c, d, in[ 9] + K3,  3);
++	ROUND(H, d, a, b, c, in[ 0] + K3,  9);
++	ROUND(H, c, d, a, b, in[ 4] + K3, 11);
++	ROUND(H, b, c, d, a, in[ 8] + K3, 15);
++
++	return buf[1] + b;	/* "most hashed" word */
++	/* Alternative: return sum of all words? */
++}
++#endif
++
++#undef ROUND
++#undef F
++#undef G
++#undef H
++#undef K1
++#undef K2
++#undef K3
++
++/* This should not be decreased so low that ISNs wrap too fast. */
++#define REKEY_INTERVAL	300
++/*
++ * Bit layout of the tcp sequence numbers (before adding current time):
++ * bit 24-31: increased after every key exchange
++ * bit 0-23: hash(source,dest)
++ *
++ * The implementation is similar to the algorithm described
++ * in the Appendix of RFC 1185, except that
++ * - it uses a 1 MHz clock instead of a 250 kHz clock
++ * - it performs a rekey every 5 minutes, which is equivalent
++ * 	to a (source,dest) tuple dependent forward jump of the
++ * 	clock by 0..2^(HASH_BITS+1)
++ *
++ * Thus the average ISN wraparound time is 68 minutes instead of
++ * 4.55 hours.
++ *
++ * SMP cleanup and lock avoidance with poor man's RCU.
++ * 			Manfred Spraul <manfred at colorfullife.com>
++ * 		
++ */
++#define COUNT_BITS	8
++#define COUNT_MASK	( (1<<COUNT_BITS)-1)
++#define HASH_BITS	24
++#define HASH_MASK	( (1<<HASH_BITS)-1 )
++
++static struct keydata {
++	time_t rekey_time;
++	__u32	count;		// already shifted to the final position
++	__u32	secret[12];
++} ____cacheline_aligned ip_keydata[2];
++
++static spinlock_t ip_lock = SPIN_LOCK_UNLOCKED;
++static unsigned int ip_cnt;
++
++static struct keydata *__check_and_rekey(time_t time)
++{
++	struct keydata *keyptr;
++	spin_lock_bh(&ip_lock);
++	keyptr = &ip_keydata[ip_cnt&1];
++	if (!keyptr->rekey_time || (time - keyptr->rekey_time) > REKEY_INTERVAL) {
++		keyptr = &ip_keydata[1^(ip_cnt&1)];
++		keyptr->rekey_time = time;
++		get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
++		keyptr->count = (ip_cnt&COUNT_MASK)<<HASH_BITS;
++		mb();
++		ip_cnt++;
++	}
++	spin_unlock_bh(&ip_lock);
++	return keyptr;
++}
++
++static inline struct keydata *check_and_rekey(time_t time)
++{
++	struct keydata *keyptr = &ip_keydata[ip_cnt&1];
++
++	rmb();
++	if (!keyptr->rekey_time || (time - keyptr->rekey_time) > REKEY_INTERVAL) {
++		keyptr = __check_and_rekey(time);
++	}
++
++	return keyptr;
++}
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++__u32 secure_tcpv6_sequence_number(__u32 *saddr, __u32 *daddr,
++				   __u16 sport, __u16 dport)
++{
++	struct timeval 	tv;
++	__u32		seq;
++	__u32		hash[12];
++	struct keydata *keyptr;
++
++	/* The procedure is the same as for IPv4, but addresses are longer.
++	 * Thus we must use twothirdsMD4Transform.
++	 */
++
++	do_gettimeofday(&tv);	/* We need the usecs below... */
++	keyptr = check_and_rekey(tv.tv_sec);
++
++	memcpy(hash, saddr, 16);
++	hash[4]=(sport << 16) + dport;
++	memcpy(&hash[5],keyptr->secret,sizeof(__u32)*7);
++
++	seq = twothirdsMD4Transform(daddr, hash) & HASH_MASK;
++	seq += keyptr->count;
++	seq += tv.tv_usec + tv.tv_sec*1000000;
++
++	return seq;
++}
++EXPORT_SYMBOL(secure_tcpv6_sequence_number);
++
++__u32 secure_ipv6_id(__u32 *daddr)
++{
++	struct keydata *keyptr;
++
++	keyptr = check_and_rekey(get_seconds());
++
++	return halfMD4Transform(daddr, keyptr->secret);
++}
++
++EXPORT_SYMBOL(secure_ipv6_id);
++#endif
++
++
++__u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr,
++				 __u16 sport, __u16 dport)
++{
++	struct timeval 	tv;
++	__u32		seq;
++	__u32	hash[4];
++	struct keydata *keyptr;
++
++	/*
++	 * Pick a random secret every REKEY_INTERVAL seconds.
++	 */
++	do_gettimeofday(&tv);	/* We need the usecs below... */
++	keyptr = check_and_rekey(tv.tv_sec);
++
++	/*
++	 *  Pick a unique starting offset for each TCP connection endpoints
++	 *  (saddr, daddr, sport, dport).
++	 *  Note that the words are placed into the starting vector, which is 
++	 *  then mixed with a partial MD4 over random data.
++	 */
++	hash[0]=saddr;
++	hash[1]=daddr;
++	hash[2]=(sport << 16) + dport;
++	hash[3]=keyptr->secret[11];
++
++	seq = halfMD4Transform(hash, keyptr->secret) & HASH_MASK;
++	seq += keyptr->count;
++	/*
++	 *	As close as possible to RFC 793, which
++	 *	suggests using a 250 kHz clock.
++	 *	Further reading shows this assumes 2 Mb/s networks.
++	 *	For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
++	 *	That's funny, Linux has one built in!  Use it!
++	 *	(Networks are faster now - should this be increased?)
++	 */
++	seq += tv.tv_usec + tv.tv_sec*1000000;
++#if 0
++	printk("init_seq(%lx, %lx, %d, %d) = %d\n",
++	       saddr, daddr, sport, dport, seq);
++#endif
++	return seq;
++}
++
++EXPORT_SYMBOL(secure_tcp_sequence_number);
++
++/*  The code below is shamelessly stolen from secure_tcp_sequence_number().
++ *  All blames to Andrey V. Savochkin <saw at msu.ru>.
++ */
++__u32 secure_ip_id(__u32 daddr)
++{
++	struct keydata *keyptr;
++	__u32 hash[4];
++
++	keyptr = check_and_rekey(get_seconds());
++
++	/*
++	 *  Pick a unique starting offset for each IP destination.
++	 *  The dest ip address is placed in the starting vector,
++	 *  which is then hashed with random data.
++	 */
++	hash[0] = daddr;
++	hash[1] = keyptr->secret[9];
++	hash[2] = keyptr->secret[10];
++	hash[3] = keyptr->secret[11];
++
++	return halfMD4Transform(hash, keyptr->secret);
++}
++
++#ifdef CONFIG_SYN_COOKIES
++/*
++ * Secure SYN cookie computation. This is the algorithm worked out by
++ * Dan Bernstein and Eric Schenk.
++ *
++ * For linux I implement the 1 minute counter by looking at the jiffies clock.
++ * The count is passed in as a parameter, so this code doesn't much care.
++ */
++
++#define COOKIEBITS 24	/* Upper bits store count */
++#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
++
++static int	syncookie_init;
++static __u32	syncookie_secret[2][16-3+HASH_BUFFER_SIZE];
++
++__u32 secure_tcp_syn_cookie(__u32 saddr, __u32 daddr, __u16 sport,
++		__u16 dport, __u32 sseq, __u32 count, __u32 data)
++{
++	__u32 	tmp[16 + HASH_BUFFER_SIZE + HASH_EXTRA_SIZE];
++	__u32	seq;
++
++	/*
++	 * Pick two random secrets the first time we need a cookie.
++	 */
++	if (syncookie_init == 0) {
++		get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
++		syncookie_init = 1;
++	}
++
++	/*
++	 * Compute the secure sequence number.
++	 * The output should be:
++   	 *   HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24)
++	 *      + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24).
++	 * Where sseq is their sequence number and count increases every
++	 * minute by 1.
++	 * As an extra hack, we add a small "data" value that encodes the
++	 * MSS into the second hash value.
++	 */
++
++	memcpy(tmp+3, syncookie_secret[0], sizeof(syncookie_secret[0]));
++	tmp[0]=saddr;
++	tmp[1]=daddr;
++	tmp[2]=(sport << 16) + dport;
++	HASH_TRANSFORM(tmp+16, tmp);
++	seq = tmp[17] + sseq + (count << COOKIEBITS);
++
++	memcpy(tmp+3, syncookie_secret[1], sizeof(syncookie_secret[1]));
++	tmp[0]=saddr;
++	tmp[1]=daddr;
++	tmp[2]=(sport << 16) + dport;
++	tmp[3] = count;	/* minute counter */
++	HASH_TRANSFORM(tmp+16, tmp);
++
++	/* Add in the second hash and the data */
++	return seq + ((tmp[17] + data) & COOKIEMASK);
++}
++
++/*
++ * This retrieves the small "data" value from the syncookie.
++ * If the syncookie is bad, the data returned will be out of
++ * range.  This must be checked by the caller.
++ *
++ * The count value used to generate the cookie must be within
++ * "maxdiff" of the current (passed-in) "count".  The return value
++ * is (__u32)-1 if this test fails.
++ */
++__u32 check_tcp_syn_cookie(__u32 cookie, __u32 saddr, __u32 daddr, __u16 sport,
++		__u16 dport, __u32 sseq, __u32 count, __u32 maxdiff)
++{
++	__u32 	tmp[16 + HASH_BUFFER_SIZE + HASH_EXTRA_SIZE];
++	__u32	diff;
++
++	if (syncookie_init == 0)
++		return (__u32)-1;	/* Well, duh! */
++
++	/* Strip away the layers from the cookie */
++	memcpy(tmp+3, syncookie_secret[0], sizeof(syncookie_secret[0]));
++	tmp[0]=saddr;
++	tmp[1]=daddr;
++	tmp[2]=(sport << 16) + dport;
++	HASH_TRANSFORM(tmp+16, tmp);
++	cookie -= tmp[17] + sseq;
++	/* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
++
++	diff = (count - (cookie >> COOKIEBITS)) & ((__u32)-1 >> COOKIEBITS);
++	if (diff >= maxdiff)
++		return (__u32)-1;
++
++	memcpy(tmp+3, syncookie_secret[1], sizeof(syncookie_secret[1]));
++	tmp[0] = saddr;
++	tmp[1] = daddr;
++	tmp[2] = (sport << 16) + dport;
++	tmp[3] = count - diff;	/* minute counter */
++	HASH_TRANSFORM(tmp+16, tmp);
++
++	return (cookie - tmp[17]) & COOKIEMASK;	/* Leaving the data behind */
++}
++#endif
+diff -Naur linux-2.6.8.1.orig/include/linux/sysctl.h linux-2.6.8.1.frandom/include/linux/sysctl.h
+--- linux-2.6.8.1.orig/include/linux/sysctl.h	2004-08-14 10:55:33.000000000 +0000
++++ linux-2.6.8.1.frandom/include/linux/sysctl.h	2004-09-11 05:02:11.000000000 +0000
+@@ -198,7 +198,8 @@
+ 	RANDOM_READ_THRESH=3,
+ 	RANDOM_WRITE_THRESH=4,
+ 	RANDOM_BOOT_ID=5,
+-	RANDOM_UUID=6
++	RANDOM_UUID=6,
++	RANDOM_ERANDOM=7
+ };
+ 
+ /* /proc/sys/kernel/pty */
+diff -Naur linux-2.6.8.1.orig/include/linux/sysctl.h~ linux-2.6.8.1.frandom/include/linux/sysctl.h~
+--- linux-2.6.8.1.orig/include/linux/sysctl.h~	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.8.1.frandom/include/linux/sysctl.h~	2004-09-11 05:00:42.000000000 +0000
+@@ -0,0 +1,867 @@
++/*
++ * sysctl.h: General linux system control interface
++ *
++ * Begun 24 March 1995, Stephen Tweedie
++ *
++ ****************************************************************
++ ****************************************************************
++ **
++ **  The values in this file are exported to user space via 
++ **  the sysctl() binary interface.  However this interface
++ **  is unstable and deprecated and will be removed in the future. 
++ **  For a stable interface use /proc/sys.
++ **
++ ****************************************************************
++ ****************************************************************
++ */
++
++#ifndef _LINUX_SYSCTL_H
++#define _LINUX_SYSCTL_H
++
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/compiler.h>
++
++struct file;
++
++#define CTL_MAXNAME 10		/* how many path components do we allow in a
++				   call to sysctl?   In other words, what is
++				   the largest acceptable value for the nlen
++				   member of a struct __sysctl_args to have? */
++
++struct __sysctl_args {
++	int __user *name;
++	int nlen;
++	void __user *oldval;
++	size_t __user *oldlenp;
++	void __user *newval;
++	size_t newlen;
++	unsigned long __unused[4];
++};
++
++/* Define sysctl names first */
++
++/* Top-level names: */
++
++/* For internal pattern-matching use only: */
++#ifdef __KERNEL__
++#define CTL_ANY		-1	/* Matches any name */
++#define CTL_NONE	0
++#endif
++
++enum
++{
++	CTL_KERN=1,		/* General kernel info and control */
++	CTL_VM=2,		/* VM management */
++	CTL_NET=3,		/* Networking */
++	CTL_PROC=4,		/* Process info */
++	CTL_FS=5,		/* Filesystems */
++	CTL_DEBUG=6,		/* Debugging */
++	CTL_DEV=7,		/* Devices */
++	CTL_BUS=8,		/* Busses */
++	CTL_ABI=9,		/* Binary emulation */
++	CTL_CPU=10		/* CPU stuff (speed scaling, etc) */
++};
++
++/* CTL_BUS names: */
++enum
++{
++	CTL_BUS_ISA=1		/* ISA */
++};
++
++/* CTL_KERN names: */
++enum
++{
++	KERN_OSTYPE=1,		/* string: system version */
++	KERN_OSRELEASE=2,	/* string: system release */
++	KERN_OSREV=3,		/* int: system revision */
++	KERN_VERSION=4,		/* string: compile time info */
++	KERN_SECUREMASK=5,	/* struct: maximum rights mask */
++	KERN_PROF=6,		/* table: profiling information */
++	KERN_NODENAME=7,
++	KERN_DOMAINNAME=8,
++
++	KERN_CAP_BSET=14,	/* int: capability bounding set */
++	KERN_PANIC=15,		/* int: panic timeout */
++	KERN_REALROOTDEV=16,	/* real root device to mount after initrd */
++
++	KERN_SPARC_REBOOT=21,	/* reboot command on Sparc */
++	KERN_CTLALTDEL=22,	/* int: allow ctl-alt-del to reboot */
++	KERN_PRINTK=23,		/* struct: control printk logging parameters */
++	KERN_NAMETRANS=24,	/* Name translation */
++	KERN_PPC_HTABRECLAIM=25, /* turn htab reclaimation on/off on PPC */
++	KERN_PPC_ZEROPAGED=26,	/* turn idle page zeroing on/off on PPC */
++	KERN_PPC_POWERSAVE_NAP=27, /* use nap mode for power saving */
++	KERN_MODPROBE=28,
++	KERN_SG_BIG_BUFF=29,
++	KERN_ACCT=30,		/* BSD process accounting parameters */
++	KERN_PPC_L2CR=31,	/* l2cr register on PPC */
++
++	KERN_RTSIGNR=32,	/* Number of rt sigs queued */
++	KERN_RTSIGMAX=33,	/* Max queuable */
++	
++	KERN_SHMMAX=34,         /* long: Maximum shared memory segment */
++	KERN_MSGMAX=35,         /* int: Maximum size of a messege */
++	KERN_MSGMNB=36,         /* int: Maximum message queue size */
++	KERN_MSGPOOL=37,        /* int: Maximum system message pool size */
++	KERN_SYSRQ=38,		/* int: Sysreq enable */
++	KERN_MAX_THREADS=39,	/* int: Maximum nr of threads in the system */
++ 	KERN_RANDOM=40,		/* Random driver */
++ 	KERN_SHMALL=41,		/* int: Maximum size of shared memory */
++ 	KERN_MSGMNI=42,		/* int: msg queue identifiers */
++ 	KERN_SEM=43,		/* struct: sysv semaphore limits */
++ 	KERN_SPARC_STOP_A=44,	/* int: Sparc Stop-A enable */
++ 	KERN_SHMMNI=45,		/* int: shm array identifiers */
++	KERN_OVERFLOWUID=46,	/* int: overflow UID */
++	KERN_OVERFLOWGID=47,	/* int: overflow GID */
++	KERN_SHMPATH=48,	/* string: path to shm fs */
++	KERN_HOTPLUG=49,	/* string: path to hotplug policy agent */
++	KERN_IEEE_EMULATION_WARNINGS=50, /* int: unimplemented ieee instructions */
++	KERN_S390_USER_DEBUG_LOGGING=51,  /* int: dumps of user faults */
++	KERN_CORE_USES_PID=52,		/* int: use core or core.%pid */
++	KERN_TAINTED=53,	/* int: various kernel tainted flags */
++	KERN_CADPID=54,		/* int: PID of the process to notify on CAD */
++	KERN_PIDMAX=55,		/* int: PID # limit */
++  	KERN_CORE_PATTERN=56,	/* string: pattern for core-file names */
++	KERN_PANIC_ON_OOPS=57,  /* int: whether we will panic on an oops */
++	KERN_HPPA_PWRSW=58,	/* int: hppa soft-power enable */
++	KERN_HPPA_UNALIGNED=59,	/* int: hppa unaligned-trap enable */
++	KERN_PRINTK_RATELIMIT=60, /* int: tune printk ratelimiting */
++	KERN_PRINTK_RATELIMIT_BURST=61,	/* int: tune printk ratelimiting */
++	KERN_PTY=62,		/* dir: pty driver */
++	KERN_NGROUPS_MAX=63,	/* int: NGROUPS_MAX */
++	KERN_SPARC_SCONS_PWROFF=64, /* int: serial console power-off halt */
++	KERN_HZ_TIMER=65,	/* int: hz timer on or off */
++};
++
++
++/* CTL_VM names: */
++enum
++{
++	VM_UNUSED1=1,		/* was: struct: Set vm swapping control */
++	VM_UNUSED2=2,		/* was; int: Linear or sqrt() swapout for hogs */
++	VM_UNUSED3=3,		/* was: struct: Set free page thresholds */
++	VM_UNUSED4=4,		/* Spare */
++	VM_OVERCOMMIT_MEMORY=5,	/* Turn off the virtual memory safety limit */
++	VM_UNUSED5=6,		/* was: struct: Set buffer memory thresholds */
++	VM_UNUSED7=7,		/* was: struct: Set cache memory thresholds */
++	VM_UNUSED8=8,		/* was: struct: Control kswapd behaviour */
++	VM_UNUSED9=9,		/* was: struct: Set page table cache parameters */
++	VM_PAGE_CLUSTER=10,	/* int: set number of pages to swap together */
++	VM_DIRTY_BACKGROUND=11,	/* dirty_background_ratio */
++	VM_DIRTY_RATIO=12,	/* dirty_ratio */
++	VM_DIRTY_WB_CS=13,	/* dirty_writeback_centisecs */
++	VM_DIRTY_EXPIRE_CS=14,	/* dirty_expire_centisecs */
++	VM_NR_PDFLUSH_THREADS=15, /* nr_pdflush_threads */
++	VM_OVERCOMMIT_RATIO=16, /* percent of RAM to allow overcommit in */
++	VM_PAGEBUF=17,		/* struct: Control pagebuf parameters */
++	VM_HUGETLB_PAGES=18,	/* int: Number of available Huge Pages */
++	VM_SWAPPINESS=19,	/* Tendency to steal mapped memory */
++	VM_LOWER_ZONE_PROTECTION=20,/* Amount of protection of lower zones */
++	VM_MIN_FREE_KBYTES=21,	/* Minimum free kilobytes to maintain */
++	VM_MAX_MAP_COUNT=22,	/* int: Maximum number of mmaps/address-space */
++	VM_LAPTOP_MODE=23,	/* vm laptop mode */
++	VM_BLOCK_DUMP=24,	/* block dump mode */
++	VM_HUGETLB_GROUP=25,	/* permitted hugetlb group */
++	VM_VFS_CACHE_PRESSURE=26, /* dcache/icache reclaim pressure */
++};
++
++
++/* CTL_NET names: */
++enum
++{
++	NET_CORE=1,
++	NET_ETHER=2,
++	NET_802=3,
++	NET_UNIX=4,
++	NET_IPV4=5,
++	NET_IPX=6,
++	NET_ATALK=7,
++	NET_NETROM=8,
++	NET_AX25=9,
++	NET_BRIDGE=10,
++	NET_ROSE=11,
++	NET_IPV6=12,
++	NET_X25=13,
++	NET_TR=14,
++	NET_DECNET=15,
++	NET_ECONET=16,
++	NET_SCTP=17, 
++};
++
++/* /proc/sys/kernel/random */
++enum
++{
++	RANDOM_POOLSIZE=1,
++	RANDOM_ENTROPY_COUNT=2,
++	RANDOM_READ_THRESH=3,
++	RANDOM_WRITE_THRESH=4,
++	RANDOM_BOOT_ID=5,
++	RANDOM_UUID=6
++};
++
++/* /proc/sys/kernel/pty */
++enum
++{
++	PTY_MAX=1,
++	PTY_NR=2
++};
++
++/* /proc/sys/bus/isa */
++enum
++{
++	BUS_ISA_MEM_BASE=1,
++	BUS_ISA_PORT_BASE=2,
++	BUS_ISA_PORT_SHIFT=3
++};
++
++/* /proc/sys/net/core */
++enum
++{
++	NET_CORE_WMEM_MAX=1,
++	NET_CORE_RMEM_MAX=2,
++	NET_CORE_WMEM_DEFAULT=3,
++	NET_CORE_RMEM_DEFAULT=4,
++/* was	NET_CORE_DESTROY_DELAY */
++	NET_CORE_MAX_BACKLOG=6,
++	NET_CORE_FASTROUTE=7,
++	NET_CORE_MSG_COST=8,
++	NET_CORE_MSG_BURST=9,
++	NET_CORE_OPTMEM_MAX=10,
++	NET_CORE_HOT_LIST_LENGTH=11,
++	NET_CORE_DIVERT_VERSION=12,
++	NET_CORE_NO_CONG_THRESH=13,
++	NET_CORE_NO_CONG=14,
++	NET_CORE_LO_CONG=15,
++	NET_CORE_MOD_CONG=16,
++	NET_CORE_DEV_WEIGHT=17,
++	NET_CORE_SOMAXCONN=18,
++};
++
++/* /proc/sys/net/ethernet */
++
++/* /proc/sys/net/802 */
++
++/* /proc/sys/net/unix */
++
++enum
++{
++	NET_UNIX_DESTROY_DELAY=1,
++	NET_UNIX_DELETE_DELAY=2,
++	NET_UNIX_MAX_DGRAM_QLEN=3,
++};
++
++/* /proc/sys/net/ipv4 */
++enum
++{
++	/* v2.0 compatibile variables */
++	NET_IPV4_FORWARD=8,
++	NET_IPV4_DYNADDR=9,
++
++	NET_IPV4_CONF=16,
++	NET_IPV4_NEIGH=17,
++	NET_IPV4_ROUTE=18,
++	NET_IPV4_FIB_HASH=19,
++	NET_IPV4_NETFILTER=20,
++
++	NET_IPV4_TCP_TIMESTAMPS=33,
++	NET_IPV4_TCP_WINDOW_SCALING=34,
++	NET_IPV4_TCP_SACK=35,
++	NET_IPV4_TCP_RETRANS_COLLAPSE=36,
++	NET_IPV4_DEFAULT_TTL=37,
++	NET_IPV4_AUTOCONFIG=38,
++	NET_IPV4_NO_PMTU_DISC=39,
++	NET_IPV4_TCP_SYN_RETRIES=40,
++	NET_IPV4_IPFRAG_HIGH_THRESH=41,
++	NET_IPV4_IPFRAG_LOW_THRESH=42,
++	NET_IPV4_IPFRAG_TIME=43,
++	NET_IPV4_TCP_MAX_KA_PROBES=44,
++	NET_IPV4_TCP_KEEPALIVE_TIME=45,
++	NET_IPV4_TCP_KEEPALIVE_PROBES=46,
++	NET_IPV4_TCP_RETRIES1=47,
++	NET_IPV4_TCP_RETRIES2=48,
++	NET_IPV4_TCP_FIN_TIMEOUT=49,
++	NET_IPV4_IP_MASQ_DEBUG=50,
++	NET_TCP_SYNCOOKIES=51,
++	NET_TCP_STDURG=52,
++	NET_TCP_RFC1337=53,
++	NET_TCP_SYN_TAILDROP=54,
++	NET_TCP_MAX_SYN_BACKLOG=55,
++	NET_IPV4_LOCAL_PORT_RANGE=56,
++	NET_IPV4_ICMP_ECHO_IGNORE_ALL=57,
++	NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58,
++	NET_IPV4_ICMP_SOURCEQUENCH_RATE=59,
++	NET_IPV4_ICMP_DESTUNREACH_RATE=60,
++	NET_IPV4_ICMP_TIMEEXCEED_RATE=61,
++	NET_IPV4_ICMP_PARAMPROB_RATE=62,
++	NET_IPV4_ICMP_ECHOREPLY_RATE=63,
++	NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64,
++	NET_IPV4_IGMP_MAX_MEMBERSHIPS=65,
++	NET_TCP_TW_RECYCLE=66,
++	NET_IPV4_ALWAYS_DEFRAG=67,
++	NET_IPV4_TCP_KEEPALIVE_INTVL=68,
++	NET_IPV4_INET_PEER_THRESHOLD=69,
++	NET_IPV4_INET_PEER_MINTTL=70,
++	NET_IPV4_INET_PEER_MAXTTL=71,
++	NET_IPV4_INET_PEER_GC_MINTIME=72,
++	NET_IPV4_INET_PEER_GC_MAXTIME=73,
++	NET_TCP_ORPHAN_RETRIES=74,
++	NET_TCP_ABORT_ON_OVERFLOW=75,
++	NET_TCP_SYNACK_RETRIES=76,
++	NET_TCP_MAX_ORPHANS=77,
++	NET_TCP_MAX_TW_BUCKETS=78,
++	NET_TCP_FACK=79,
++	NET_TCP_REORDERING=80,
++	NET_TCP_ECN=81,
++	NET_TCP_DSACK=82,
++	NET_TCP_MEM=83,
++	NET_TCP_WMEM=84,
++	NET_TCP_RMEM=85,
++	NET_TCP_APP_WIN=86,
++	NET_TCP_ADV_WIN_SCALE=87,
++	NET_IPV4_NONLOCAL_BIND=88,
++	NET_IPV4_ICMP_RATELIMIT=89,
++	NET_IPV4_ICMP_RATEMASK=90,
++	NET_TCP_TW_REUSE=91,
++	NET_TCP_FRTO=92,
++	NET_TCP_LOW_LATENCY=93,
++	NET_IPV4_IPFRAG_SECRET_INTERVAL=94,
++	NET_TCP_WESTWOOD=95,
++	NET_IPV4_IGMP_MAX_MSF=96,
++	NET_TCP_NO_METRICS_SAVE=97,
++	NET_TCP_VEGAS=98,
++	NET_TCP_VEGAS_ALPHA=99,
++	NET_TCP_VEGAS_BETA=100,
++	NET_TCP_VEGAS_GAMMA=101,
++ 	NET_TCP_BIC=102,
++ 	NET_TCP_BIC_FAST_CONVERGENCE=103,
++	NET_TCP_BIC_LOW_WINDOW=104,
++	NET_TCP_DEFAULT_WIN_SCALE=105,
++	NET_TCP_MODERATE_RCVBUF=106,
++};
++
++enum {
++	NET_IPV4_ROUTE_FLUSH=1,
++	NET_IPV4_ROUTE_MIN_DELAY=2,
++	NET_IPV4_ROUTE_MAX_DELAY=3,
++	NET_IPV4_ROUTE_GC_THRESH=4,
++	NET_IPV4_ROUTE_MAX_SIZE=5,
++	NET_IPV4_ROUTE_GC_MIN_INTERVAL=6,
++	NET_IPV4_ROUTE_GC_TIMEOUT=7,
++	NET_IPV4_ROUTE_GC_INTERVAL=8,
++	NET_IPV4_ROUTE_REDIRECT_LOAD=9,
++	NET_IPV4_ROUTE_REDIRECT_NUMBER=10,
++	NET_IPV4_ROUTE_REDIRECT_SILENCE=11,
++	NET_IPV4_ROUTE_ERROR_COST=12,
++	NET_IPV4_ROUTE_ERROR_BURST=13,
++	NET_IPV4_ROUTE_GC_ELASTICITY=14,
++	NET_IPV4_ROUTE_MTU_EXPIRES=15,
++	NET_IPV4_ROUTE_MIN_PMTU=16,
++	NET_IPV4_ROUTE_MIN_ADVMSS=17,
++	NET_IPV4_ROUTE_SECRET_INTERVAL=18,
++};
++
++enum
++{
++	NET_PROTO_CONF_ALL=-2,
++	NET_PROTO_CONF_DEFAULT=-3
++
++	/* And device ifindices ... */
++};
++
++enum
++{
++	NET_IPV4_CONF_FORWARDING=1,
++	NET_IPV4_CONF_MC_FORWARDING=2,
++	NET_IPV4_CONF_PROXY_ARP=3,
++	NET_IPV4_CONF_ACCEPT_REDIRECTS=4,
++	NET_IPV4_CONF_SECURE_REDIRECTS=5,
++	NET_IPV4_CONF_SEND_REDIRECTS=6,
++	NET_IPV4_CONF_SHARED_MEDIA=7,
++	NET_IPV4_CONF_RP_FILTER=8,
++	NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9,
++	NET_IPV4_CONF_BOOTP_RELAY=10,
++	NET_IPV4_CONF_LOG_MARTIANS=11,
++	NET_IPV4_CONF_TAG=12,
++	NET_IPV4_CONF_ARPFILTER=13,
++	NET_IPV4_CONF_MEDIUM_ID=14,
++	NET_IPV4_CONF_NOXFRM=15,
++	NET_IPV4_CONF_NOPOLICY=16,
++	NET_IPV4_CONF_FORCE_IGMP_VERSION=17,
++	NET_IPV4_CONF_ARP_ANNOUNCE=18,
++	NET_IPV4_CONF_ARP_IGNORE=19,
++};
++
++/* /proc/sys/net/ipv4/netfilter */
++enum
++{
++	NET_IPV4_NF_CONNTRACK_MAX=1,
++	NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
++	NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
++	NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
++	NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
++	NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
++	NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
++	NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
++	NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
++	NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10,
++	NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
++	NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12,
++	NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13,
++	NET_IPV4_NF_CONNTRACK_BUCKETS=14,
++};
++ 
++/* /proc/sys/net/ipv6 */
++enum {
++	NET_IPV6_CONF=16,
++	NET_IPV6_NEIGH=17,
++	NET_IPV6_ROUTE=18,
++	NET_IPV6_ICMP=19,
++	NET_IPV6_BINDV6ONLY=20,
++	NET_IPV6_IP6FRAG_HIGH_THRESH=21,
++	NET_IPV6_IP6FRAG_LOW_THRESH=22,
++	NET_IPV6_IP6FRAG_TIME=23,
++	NET_IPV6_IP6FRAG_SECRET_INTERVAL=24,
++	NET_IPV6_MLD_MAX_MSF=25,
++};
++
++enum {
++	NET_IPV6_ROUTE_FLUSH=1,
++	NET_IPV6_ROUTE_GC_THRESH=2,
++	NET_IPV6_ROUTE_MAX_SIZE=3,
++	NET_IPV6_ROUTE_GC_MIN_INTERVAL=4,
++	NET_IPV6_ROUTE_GC_TIMEOUT=5,
++	NET_IPV6_ROUTE_GC_INTERVAL=6,
++	NET_IPV6_ROUTE_GC_ELASTICITY=7,
++	NET_IPV6_ROUTE_MTU_EXPIRES=8,
++	NET_IPV6_ROUTE_MIN_ADVMSS=9
++};
++
++enum {
++	NET_IPV6_FORWARDING=1,
++	NET_IPV6_HOP_LIMIT=2,
++	NET_IPV6_MTU=3,
++	NET_IPV6_ACCEPT_RA=4,
++	NET_IPV6_ACCEPT_REDIRECTS=5,
++	NET_IPV6_AUTOCONF=6,
++	NET_IPV6_DAD_TRANSMITS=7,
++	NET_IPV6_RTR_SOLICITS=8,
++	NET_IPV6_RTR_SOLICIT_INTERVAL=9,
++	NET_IPV6_RTR_SOLICIT_DELAY=10,
++	NET_IPV6_USE_TEMPADDR=11,
++	NET_IPV6_TEMP_VALID_LFT=12,
++	NET_IPV6_TEMP_PREFERED_LFT=13,
++	NET_IPV6_REGEN_MAX_RETRY=14,
++	NET_IPV6_MAX_DESYNC_FACTOR=15,
++	NET_IPV6_MAX_ADDRESSES=16,
++	NET_IPV6_FORCE_MLD_VERSION=17
++};
++
++/* /proc/sys/net/ipv6/icmp */
++enum {
++	NET_IPV6_ICMP_RATELIMIT=1
++};
++
++/* /proc/sys/net/<protocol>/neigh/<dev> */
++enum {
++	NET_NEIGH_MCAST_SOLICIT=1,
++	NET_NEIGH_UCAST_SOLICIT=2,
++	NET_NEIGH_APP_SOLICIT=3,
++	NET_NEIGH_RETRANS_TIME=4,
++	NET_NEIGH_REACHABLE_TIME=5,
++	NET_NEIGH_DELAY_PROBE_TIME=6,
++	NET_NEIGH_GC_STALE_TIME=7,
++	NET_NEIGH_UNRES_QLEN=8,
++	NET_NEIGH_PROXY_QLEN=9,
++	NET_NEIGH_ANYCAST_DELAY=10,
++	NET_NEIGH_PROXY_DELAY=11,
++	NET_NEIGH_LOCKTIME=12,
++	NET_NEIGH_GC_INTERVAL=13,
++	NET_NEIGH_GC_THRESH1=14,
++	NET_NEIGH_GC_THRESH2=15,
++	NET_NEIGH_GC_THRESH3=16
++};
++
++/* /proc/sys/net/ipx */
++enum {
++	NET_IPX_PPROP_BROADCASTING=1,
++	NET_IPX_FORWARDING=2
++};
++
++
++/* /proc/sys/net/appletalk */
++enum {
++	NET_ATALK_AARP_EXPIRY_TIME=1,
++	NET_ATALK_AARP_TICK_TIME=2,
++	NET_ATALK_AARP_RETRANSMIT_LIMIT=3,
++	NET_ATALK_AARP_RESOLVE_TIME=4
++};
++
++
++/* /proc/sys/net/netrom */
++enum {
++	NET_NETROM_DEFAULT_PATH_QUALITY=1,
++	NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2,
++	NET_NETROM_NETWORK_TTL_INITIALISER=3,
++	NET_NETROM_TRANSPORT_TIMEOUT=4,
++	NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5,
++	NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6,
++	NET_NETROM_TRANSPORT_BUSY_DELAY=7,
++	NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8,
++	NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9,
++	NET_NETROM_ROUTING_CONTROL=10,
++	NET_NETROM_LINK_FAILS_COUNT=11
++};
++
++/* /proc/sys/net/ax25 */
++enum {
++	NET_AX25_IP_DEFAULT_MODE=1,
++	NET_AX25_DEFAULT_MODE=2,
++	NET_AX25_BACKOFF_TYPE=3,
++	NET_AX25_CONNECT_MODE=4,
++	NET_AX25_STANDARD_WINDOW=5,
++	NET_AX25_EXTENDED_WINDOW=6,
++	NET_AX25_T1_TIMEOUT=7,
++	NET_AX25_T2_TIMEOUT=8,
++	NET_AX25_T3_TIMEOUT=9,
++	NET_AX25_IDLE_TIMEOUT=10,
++	NET_AX25_N2=11,
++	NET_AX25_PACLEN=12,
++	NET_AX25_PROTOCOL=13,
++	NET_AX25_DAMA_SLAVE_TIMEOUT=14
++};
++
++/* /proc/sys/net/rose */
++enum {
++	NET_ROSE_RESTART_REQUEST_TIMEOUT=1,
++	NET_ROSE_CALL_REQUEST_TIMEOUT=2,
++	NET_ROSE_RESET_REQUEST_TIMEOUT=3,
++	NET_ROSE_CLEAR_REQUEST_TIMEOUT=4,
++	NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5,
++	NET_ROSE_ROUTING_CONTROL=6,
++	NET_ROSE_LINK_FAIL_TIMEOUT=7,
++	NET_ROSE_MAX_VCS=8,
++	NET_ROSE_WINDOW_SIZE=9,
++	NET_ROSE_NO_ACTIVITY_TIMEOUT=10
++};
++
++/* /proc/sys/net/x25 */
++enum {
++	NET_X25_RESTART_REQUEST_TIMEOUT=1,
++	NET_X25_CALL_REQUEST_TIMEOUT=2,
++	NET_X25_RESET_REQUEST_TIMEOUT=3,
++	NET_X25_CLEAR_REQUEST_TIMEOUT=4,
++	NET_X25_ACK_HOLD_BACK_TIMEOUT=5
++};
++
++/* /proc/sys/net/token-ring */
++enum
++{
++	NET_TR_RIF_TIMEOUT=1
++};
++
++/* /proc/sys/net/decnet/ */
++enum {
++	NET_DECNET_NODE_TYPE = 1,
++	NET_DECNET_NODE_ADDRESS = 2,
++	NET_DECNET_NODE_NAME = 3,
++	NET_DECNET_DEFAULT_DEVICE = 4,
++	NET_DECNET_TIME_WAIT = 5,
++	NET_DECNET_DN_COUNT = 6,
++	NET_DECNET_DI_COUNT = 7,
++	NET_DECNET_DR_COUNT = 8,
++	NET_DECNET_DST_GC_INTERVAL = 9,
++	NET_DECNET_CONF = 10,
++	NET_DECNET_NO_FC_MAX_CWND = 11,
++	NET_DECNET_DEBUG_LEVEL = 255
++};
++
++/* /proc/sys/net/decnet/conf/<dev> */
++enum {
++	NET_DECNET_CONF_LOOPBACK = -2,
++	NET_DECNET_CONF_DDCMP = -3,
++	NET_DECNET_CONF_PPP = -4,
++	NET_DECNET_CONF_X25 = -5,
++	NET_DECNET_CONF_GRE = -6,
++	NET_DECNET_CONF_ETHER = -7
++
++	/* ... and ifindex of devices */
++};
++
++/* /proc/sys/net/decnet/conf/<dev>/ */
++enum {
++	NET_DECNET_CONF_DEV_PRIORITY = 1,
++	NET_DECNET_CONF_DEV_T1 = 2,
++	NET_DECNET_CONF_DEV_T2 = 3,
++	NET_DECNET_CONF_DEV_T3 = 4,
++	NET_DECNET_CONF_DEV_FORWARDING = 5,
++	NET_DECNET_CONF_DEV_BLKSIZE = 6,
++	NET_DECNET_CONF_DEV_STATE = 7
++};
++
++/* /proc/sys/net/sctp */
++enum {
++	NET_SCTP_RTO_INITIAL = 1,
++	NET_SCTP_RTO_MIN     = 2,
++	NET_SCTP_RTO_MAX     = 3,
++	NET_SCTP_RTO_ALPHA   = 4,
++	NET_SCTP_RTO_BETA    = 5,
++	NET_SCTP_VALID_COOKIE_LIFE       =  6,
++	NET_SCTP_ASSOCIATION_MAX_RETRANS =  7,
++	NET_SCTP_PATH_MAX_RETRANS        =  8,
++	NET_SCTP_MAX_INIT_RETRANSMITS    =  9,
++	NET_SCTP_HB_INTERVAL             = 10,
++	NET_SCTP_PRESERVE_ENABLE         = 11,
++	NET_SCTP_MAX_BURST               = 12,
++	NET_SCTP_ADDIP_ENABLE		 = 13,
++	NET_SCTP_PRSCTP_ENABLE		 = 14,
++};
++
++/* /proc/sys/net/bridge */
++enum {
++	NET_BRIDGE_NF_CALL_ARPTABLES = 1,
++	NET_BRIDGE_NF_CALL_IPTABLES = 2,
++	NET_BRIDGE_NF_CALL_IP6TABLES = 3,
++	NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
++};
++
++/* CTL_PROC names: */
++
++/* CTL_FS names: */
++enum
++{
++	FS_NRINODE=1,	/* int:current number of allocated inodes */
++	FS_STATINODE=2,
++	FS_MAXINODE=3,	/* int:maximum number of inodes that can be allocated */
++	FS_NRDQUOT=4,	/* int:current number of allocated dquots */
++	FS_MAXDQUOT=5,	/* int:maximum number of dquots that can be allocated */
++	FS_NRFILE=6,	/* int:current number of allocated filedescriptors */
++	FS_MAXFILE=7,	/* int:maximum number of filedescriptors that can be allocated */
++	FS_DENTRY=8,
++	FS_NRSUPER=9,	/* int:current number of allocated super_blocks */
++	FS_MAXSUPER=10,	/* int:maximum number of super_blocks that can be allocated */
++	FS_OVERFLOWUID=11,	/* int: overflow UID */
++	FS_OVERFLOWGID=12,	/* int: overflow GID */
++	FS_LEASES=13,	/* int: leases enabled */
++	FS_DIR_NOTIFY=14,	/* int: directory notification enabled */
++	FS_LEASE_TIME=15,	/* int: maximum time to wait for a lease break */
++	FS_DQSTATS=16,	/* disc quota usage statistics */
++	FS_XFS=17,	/* struct: control xfs parameters */
++	FS_AIO_NR=18,	/* current system-wide number of aio requests */
++	FS_AIO_MAX_NR=19,	/* system-wide maximum number of aio requests */
++};
++
++/* /proc/sys/fs/quota/ */
++enum {
++	FS_DQ_LOOKUPS = 1,
++	FS_DQ_DROPS = 2,
++	FS_DQ_READS = 3,
++	FS_DQ_WRITES = 4,
++	FS_DQ_CACHE_HITS = 5,
++	FS_DQ_ALLOCATED = 6,
++	FS_DQ_FREE = 7,
++	FS_DQ_SYNCS = 8,
++};
++
++/* CTL_DEBUG names: */
++
++/* CTL_DEV names: */
++enum {
++	DEV_CDROM=1,
++	DEV_HWMON=2,
++	DEV_PARPORT=3,
++	DEV_RAID=4,
++	DEV_MAC_HID=5,
++	DEV_SCSI=6,
++};
++
++/* /proc/sys/dev/cdrom */
++enum {
++	DEV_CDROM_INFO=1,
++	DEV_CDROM_AUTOCLOSE=2,
++	DEV_CDROM_AUTOEJECT=3,
++	DEV_CDROM_DEBUG=4,
++	DEV_CDROM_LOCK=5,
++	DEV_CDROM_CHECK_MEDIA=6
++};
++
++/* /proc/sys/dev/parport */
++enum {
++	DEV_PARPORT_DEFAULT=-3
++};
++
++/* /proc/sys/dev/raid */
++enum {
++	DEV_RAID_SPEED_LIMIT_MIN=1,
++	DEV_RAID_SPEED_LIMIT_MAX=2
++};
++
++/* /proc/sys/dev/parport/default */
++enum {
++	DEV_PARPORT_DEFAULT_TIMESLICE=1,
++	DEV_PARPORT_DEFAULT_SPINTIME=2
++};
++
++/* /proc/sys/dev/parport/parport n */
++enum {
++	DEV_PARPORT_SPINTIME=1,
++	DEV_PARPORT_BASE_ADDR=2,
++	DEV_PARPORT_IRQ=3,
++	DEV_PARPORT_DMA=4,
++	DEV_PARPORT_MODES=5,
++	DEV_PARPORT_DEVICES=6,
++	DEV_PARPORT_AUTOPROBE=16
++};
++
++/* /proc/sys/dev/parport/parport n/devices/ */
++enum {
++	DEV_PARPORT_DEVICES_ACTIVE=-3,
++};
++
++/* /proc/sys/dev/parport/parport n/devices/device n */
++enum {
++	DEV_PARPORT_DEVICE_TIMESLICE=1,
++};
++
++/* /proc/sys/dev/mac_hid */
++enum {
++	DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1,
++	DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2,
++	DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3,
++	DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4,
++	DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5,
++	DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6
++};
++
++/* /proc/sys/dev/scsi */
++enum {
++	DEV_SCSI_LOGGING_LEVEL=1,
++};
++
++/* /proc/sys/abi */
++enum
++{
++	ABI_DEFHANDLER_COFF=1,	/* default handler for coff binaries */
++	ABI_DEFHANDLER_ELF=2, 	/* default handler for ELF binaries */
++	ABI_DEFHANDLER_LCALL7=3,/* default handler for procs using lcall7 */
++	ABI_DEFHANDLER_LIBCSO=4,/* default handler for an libc.so ELF interp */
++	ABI_TRACE=5,		/* tracing flags */
++	ABI_FAKE_UTSNAME=6,	/* fake target utsname information */
++};
++
++#ifdef __KERNEL__
++
++extern void sysctl_init(void);
++
++typedef struct ctl_table ctl_table;
++
++typedef int ctl_handler (ctl_table *table, int __user *name, int nlen,
++			 void __user *oldval, size_t __user *oldlenp,
++			 void __user *newval, size_t newlen, 
++			 void **context);
++
++typedef int proc_handler (ctl_table *ctl, int write, struct file * filp,
++			  void __user *buffer, size_t *lenp, loff_t *ppos);
++
++extern int proc_dostring(ctl_table *, int, struct file *,
++			 void __user *, size_t *, loff_t *);
++extern int proc_dointvec(ctl_table *, int, struct file *,
++			 void __user *, size_t *, loff_t *);
++extern int proc_dointvec_bset(ctl_table *, int, struct file *,
++			      void __user *, size_t *, loff_t *);
++extern int proc_dointvec_minmax(ctl_table *, int, struct file *,
++				void __user *, size_t *, loff_t *);
++extern int proc_dointvec_jiffies(ctl_table *, int, struct file *,
++				 void __user *, size_t *, loff_t *);
++extern int proc_dointvec_userhz_jiffies(ctl_table *, int, struct file *,
++					void __user *, size_t *, loff_t *);
++extern int proc_doulongvec_minmax(ctl_table *, int, struct file *,
++				  void __user *, size_t *, loff_t *);
++extern int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int,
++				      struct file *, void __user *, size_t *, loff_t *);
++
++extern int do_sysctl (int __user *name, int nlen,
++		      void __user *oldval, size_t __user *oldlenp,
++		      void __user *newval, size_t newlen);
++
++extern int do_sysctl_strategy (ctl_table *table, 
++			       int __user *name, int nlen,
++			       void __user *oldval, size_t __user *oldlenp,
++			       void __user *newval, size_t newlen, void ** context);
++
++extern ctl_handler sysctl_string;
++extern ctl_handler sysctl_intvec;
++extern ctl_handler sysctl_jiffies;
++
++
++/*
++ * Register a set of sysctl names by calling register_sysctl_table
++ * with an initialised array of ctl_table's.  An entry with zero
++ * ctl_name terminates the table.  table->de will be set up by the
++ * registration and need not be initialised in advance.
++ *
++ * sysctl names can be mirrored automatically under /proc/sys.  The
++ * procname supplied controls /proc naming.
++ *
++ * The table's mode will be honoured both for sys_sysctl(2) and
++ * proc-fs access.
++ *
++ * Leaf nodes in the sysctl tree will be represented by a single file
++ * under /proc; non-leaf nodes will be represented by directories.  A
++ * null procname disables /proc mirroring at this node.
++ * 
++ * sysctl(2) can automatically manage read and write requests through
++ * the sysctl table.  The data and maxlen fields of the ctl_table
++ * struct enable minimal validation of the values being written to be
++ * performed, and the mode field allows minimal authentication.
++ * 
++ * More sophisticated management can be enabled by the provision of a
++ * strategy routine with the table entry.  This will be called before
++ * any automatic read or write of the data is performed.
++ * 
++ * The strategy routine may return:
++ * <0: Error occurred (error is passed to user process)
++ * 0:  OK - proceed with automatic read or write.
++ * >0: OK - read or write has been done by the strategy routine, so 
++ *     return immediately.
++ * 
++ * There must be a proc_handler routine for any terminal nodes
++ * mirrored under /proc/sys (non-terminals are handled by a built-in
++ * directory handler).  Several default handlers are available to
++ * cover common cases.
++ */
++
++/* A sysctl table is an array of struct ctl_table: */
++struct ctl_table 
++{
++	int ctl_name;			/* Binary ID */
++	const char *procname;		/* Text ID for /proc/sys, or zero */
++	void *data;
++	int maxlen;
++	mode_t mode;
++	ctl_table *child;
++	proc_handler *proc_handler;	/* Callback for text formatting */
++	ctl_handler *strategy;		/* Callback function for all r/w */
++	struct proc_dir_entry *de;	/* /proc control block */
++	void *extra1;
++	void *extra2;
++};
++
++/* struct ctl_table_header is used to maintain dynamic lists of
++   ctl_table trees. */
++struct ctl_table_header
++{
++	ctl_table *ctl_table;
++	struct list_head ctl_entry;
++};
++
++struct ctl_table_header * register_sysctl_table(ctl_table * table, 
++						int insert_at_head);
++void unregister_sysctl_table(struct ctl_table_header * table);
++
++#else /* __KERNEL__ */
++
++#endif /* __KERNEL__ */
++
++#endif /* _LINUX_SYSCTL_H */




More information about the patches mailing list