diff -ruN linux-2.6.2.orig/crypto/Kconfig linux-2.6.2/crypto/Kconfig
--- linux-2.6.2.orig/crypto/Kconfig	2004-02-17 02:57:32.968568768 +0100
+++ linux-2.6.2/crypto/Kconfig	2004-02-17 03:05:00.464539040 +0100
@@ -151,6 +151,12 @@
 	  You will most probably want this if using IPSec.
 
+config CRYPTO_UCL
+	tristate "UCL nrv2e compression algorithm"
+	depends on CRYPTO
+	help
+	  UCL nrv2e compression/decompression module, used mainly by gcloop.
+
 config CRYPTO_TEST
 	tristate "Testing module"
 	depends on CRYPTO
diff -ruN linux-2.6.2.orig/crypto/Makefile linux-2.6.2/crypto/Makefile
--- linux-2.6.2.orig/crypto/Makefile	2004-02-17 02:57:32.968568768 +0100
+++ linux-2.6.2/crypto/Makefile	2004-02-17 03:05:00.511531896 +0100
@@ -1,6 +1,7 @@
 #
 # Cryptographic API
 #
+CFLAGS_ucl_compress.o = -I /usr/include
 
 proc-crypto-$(CONFIG_PROC_FS) = proc.o
 
@@ -22,5 +23,10 @@
 obj-$(CONFIG_CRYPTO_CAST5) += cast5.o
 obj-$(CONFIG_CRYPTO_CAST6) += cast6.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
+ucl-objs := ucl_compress.o libucl.a
+obj-$(CONFIG_CRYPTO_UCL) += ucl.o
+# dirty: copy the prebuilt userspace libucl.a into the build tree
+$(obj)/libucl.a:
+	cp /usr/lib/libucl.a $(obj)/
 
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
diff -ruN linux-2.6.2.orig/crypto/ucl_compress.c linux-2.6.2/crypto/ucl_compress.c
--- linux-2.6.2.orig/crypto/ucl_compress.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.2/crypto/ucl_compress.c	2004-02-17 03:05:00.512531744 +0100
@@ -0,0 +1,122 @@
+/*
+ * Cryptographic API.
+ *
+ * ucl/nrv2e, implemented here primarily for use
+ * by gcloop
+ *
+ * Copyright (c) 2003 James Morris
+ * Copyright (c) 2003 Luca Barbato
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Just a hack to use UCL instead of zlib.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <ucl/ucl.h>
+
+/*#define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION
+#define DEFLATE_DEF_WINBITS 11
+#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
+*/
+
+/*
+ * It would be better to build the compressor without malloc(), but
+ * this is good enough for a first try.
+ */
+void *malloc(unsigned long size)
+{
+	return vmalloc(size);
+}
+
+void free(void *data)
+{
+	vfree(data);
+}
+
+struct uclcomp_ctx {
+	struct ucl_compress_config_t config;
+	int level;
+};
+
+static inline int uclcomp_gfp(void)
+{
+	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+static int uclcomp_init(void *ctx)
+{
+	return ucl_init();
+}
+
+static void uclcomp_exit(void *ctx)
+{
+/*	struct deflate_ctx *dctx = ctx;
+
+	if (dctx->comp_initialized)
+		vfree(dctx->comp_stream.workspace);
+	if (dctx->decomp_initialized)
+		kfree(dctx->decomp_stream.workspace); */
+}
+
+static int uclcomp_compress(void *ctx, const u8 *src, unsigned int slen,
+			    u8 *dst, unsigned int *dlen)
+{
+	/* FIXME: ctx should store the configuration data for the
+	 * compressor; for now the defaults are used.
+	 */
+	int ret = 0;
+
+	ret = ucl_nrv2e_99_compress(src, slen, dst, dlen,
+				    NULL, 10, NULL, NULL);
+	return (ret == UCL_E_OK) ? 0 : -EINVAL;
+}
+
+static int uclcomp_decompress(void *ctx, const u8 *src, unsigned int slen,
+			      u8 *dst, unsigned int *dlen)
+{
+	int ret = 0;
+
+	ret = ucl_nrv2e_decompress_8(src, slen, dst,
+				     (ucl_uintp) dlen, NULL);
+	return (ret == UCL_E_OK) ? 0 : -EINVAL;
+}
+
+static struct crypto_alg alg = {
+	.cra_name		= "ucl",
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct uclcomp_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(alg.cra_list),
+	.cra_u			= { .compress = {
+	.coa_init		= uclcomp_init,
+	.coa_exit		= uclcomp_exit,
+	.coa_compress		= uclcomp_compress,
+	.coa_decompress		= uclcomp_decompress } }
+};
+
+static int __init init(void)
+{
+	return crypto_register_alg(&alg);
+}
+
+static void __exit fini(void)
+{
+	crypto_unregister_alg(&alg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("UCL compression/decompression");
+MODULE_AUTHOR("Luca Barbato");
diff -ruN linux-2.6.2.orig/drivers/block/Kconfig linux-2.6.2/drivers/block/Kconfig
--- linux-2.6.2.orig/drivers/block/Kconfig	2004-02-17 02:58:15.106162880 +0100
+++ linux-2.6.2/drivers/block/Kconfig	2004-02-17 03:05:00.555525208 +0100
@@ -257,6 +257,14 @@
 	  provided by the CryptoAPI as loop transformation. This might be
 	  used as hard disk encryption.
 
+config BLK_DEV_COMPRESSLOOP
+	tristate "Compressloop Support (EXPERIMENTAL)"
+	select CRYPTO
+	depends on BLK_DEV_CRYPTOLOOP && EXPERIMENTAL
+	---help---
+	  A cryptoloop workalike that supports the compressors provided
+	  by the CryptoAPI, used to mount compressed (gcloop) images.
+
 config BLK_DEV_NBD
 	tristate "Network block device support"
 	depends on NET
diff -ruN linux-2.6.2.orig/drivers/block/Makefile linux-2.6.2/drivers/block/Makefile
--- linux-2.6.2.orig/drivers/block/Makefile	2004-02-17 02:58:15.109162424 +0100
+++ linux-2.6.2/drivers/block/Makefile	2004-02-17 03:05:00.644511680 +0100
@@ -12,6 +12,7 @@
 # NOTE that ll_rw_blk.c must come early in linkage order - it starts the
 # kblockd threads
 #
+#CFLAGS_compressloop.o = -I /usr/include
 
 obj-y	:= elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
 
@@ -38,3 +39,7 @@
 obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
 obj-$(CONFIG_BLK_DEV_NBD)	+= nbd.o
 obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
+
+#compressloop1-objs := compressloop.o libucl.a
+
+obj-$(CONFIG_BLK_DEV_COMPRESSLOOP) += compressloop.o
diff -ruN linux-2.6.2.orig/drivers/block/compressloop.c linux-2.6.2/drivers/block/compressloop.c
--- linux-2.6.2.orig/drivers/block/compressloop.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.2/drivers/block/compressloop.c	2004-02-17 03:05:28.689248232 +0100
@@ -0,0 +1,397 @@
+/*
+   Linux loop decompression enabling module, based on cryptoloop.c
+
+   Copyright (C) 2002 Herbert Valerio Riedel
+   Copyright (C) 2003 Fruhwirth Clemens
+   Copyright (C) 2004 Luca Barbato
+
+   This module is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This module is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
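
As a side note on the CryptoAPI half of the patch: once ucl_compress.c is loaded, any kernel code can drive the compressor through the generic compression interface. The fragment below is only a sketch of that usage, not part of the patch, with error handling kept to a minimum; "ucl" is the algorithm name registered above, and crypto_alloc_tfm(), crypto_comp_decompress() and crypto_free_tfm() are the stock 2.6 CryptoAPI calls that compressloop.c also uses.

#include <linux/crypto.h>
#include <linux/errno.h>

/* Sketch only: decompress one block through the "ucl" algorithm
 * registered by ucl_compress.c.  Buffers are caller-supplied; *dlen
 * holds the size of dst on entry and the produced length on return. */
static int example_ucl_decompress(const u8 *src, unsigned int slen,
                                  u8 *dst, unsigned int *dlen)
{
        struct crypto_tfm *tfm;
        int err;

        tfm = crypto_alloc_tfm("ucl", 0);
        if (tfm == NULL)
                return -EINVAL;

        err = crypto_comp_decompress(tfm, src, slen, dst, dlen);

        crypto_free_tfm(tfm);
        return err;
}

compressloop.c below keeps one tfm allocated for the lifetime of the loop binding instead of allocating a fresh one per request, which is why the allocation lives in compressloop_init() and the release in compressloop_release().
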
+ + You should have received a copy of the GNU General Public License + along with this module; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("loop blockdevice transfer function adaptor" + "/ CryptoAPI (compressors)"); +MODULE_AUTHOR("Luca Barbato "); + +#define CLOOP_NAME "gcloop" + +#define LOOP_IV_SECTOR_BITS 9 +#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS) + +/* + * The loop-AES way to access file is working, the former I used isn't + * anymore - lu*/ + +static int cloop_file_io(struct file *file, char *buf, int size, loff_t *ppos) +{ + mm_segment_t fs; + int x, y, z; + + y = 0; + do { + z = size - y; + fs = get_fs(); + set_fs(get_ds()); + x = file->f_op->read(file, buf + y, z, ppos); + set_fs(fs); + if (!x) + return 1; + + if (x < 0) { + if ((x == -EAGAIN) || (x == -ENOMEM) || (x == -ERESTART) || (x == -EINTR)) { + blk_run_queues(); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ / 2); + continue; + } + return 1; + } + y += x; + } while (y < size); + return 0; +} + + + +static int +load_compressed_head(struct loop_device *lo, struct compressloop_data *data) +{ + struct file *file = lo->lo_backing_file; + char *buf = NULL; + int total_offsets = 1, offsets_read ,i; + loff_t pos=0; + ssize_t bsize = lo->lo_blocksize; + int size = sizeof(struct cloop_head); + char *fbuf = (char *) &data->head; + + + + printk(KERN_INFO "%s: loading compressed headers \n", + CLOOP_NAME); + + buf = kmalloc(bsize,GFP_KERNEL); + if(buf == NULL) { + printk(KERN_ERR "%s: can't alloc %i bytes \n", + CLOOP_NAME,bsize); + return -EINVAL; + } + data->buffered_blocknum=-1; + + /* first load the head */ + + if (cloop_file_io(file, fbuf, size, &pos)) { + printk(KERN_ERR "%s: I/O Error\n", + CLOOP_NAME); + } + + if (ntohl(data->head.block_size) % 512 != 0) { + printk(KERN_ERR "%s: bsize %u not multiple of 512\n", + CLOOP_NAME, ntohl(data->head.block_size)); + goto error_release; + } + + total_offsets=ntohl(data->head.num_blocks)+1; + data->offsets = kmalloc(sizeof(u_int32_t) * total_offsets, GFP_KERNEL); + if (!data->offsets) { + printk(KERN_ERR "%s: out of kernel mem for offsets\n", + CLOOP_NAME); + goto error_release; + } + + data->buffer = kmalloc(ntohl(data->head.block_size), GFP_KERNEL); + if (!data->buffer) { + printk(KERN_ERR "%s: out of kernel mem for buffer\n", + CLOOP_NAME); + goto error_release; + } + + + data->compressed_buffer = kmalloc(ntohl(data->head.block_size), GFP_KERNEL); + if (!data->compressed_buffer) { + printk(KERN_ERR "%s: out of kernel mem for compressed_buffer\n", + CLOOP_NAME); + goto error_release; + } + + + /* then load the offset */ + + for (i = 0, offsets_read = 0; offsets_read < total_offsets; i++) { + int toread=min(bsize,(total_offsets-offsets_read)*sizeof(uint32_t)); + if(cloop_file_io(file, buf, bsize, &pos)) { + printk(KERN_ERR "%s: can't read the image\n", + CLOOP_NAME); + return -EINVAL; + } + + memcpy(&data->offsets[offsets_read], buf, toread); + offsets_read += toread/sizeof(uint32_t); + } +#ifdef CLOOP_DEBUG + for (i=0; i< ntohl(data->head.num_blocks); i++) { + printk(KERN_ERR "Block %u pos %u length %u \n", i,ntohl(data->offsets[i]),ntohl(data->offsets[i+1])-ntohl(data->offsets[i])); + } +#endif + /*FIXME it needs some more checks*/ + lo->lo_sizelimit=ntohl(data->head.num_blocks) + * ntohl(data->head.block_size)/512; + + 
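
To make the arithmetic in load_compressed_head() explicit: the image stores num_blocks + 1 offsets in network byte order, the compressed data of block i occupies the range [offsets[i], offsets[i+1]) in the backing file, and the size exported through lo_sizelimit is counted in 512-byte sectors. A small illustrative helper, not part of the patch, would look like this:

#include <stdint.h>
#include <arpa/inet.h>   /* ntohl() */

/* Mirrors the index math used by the driver; offsets[] holds the raw
 * (network byte order) table of num_blocks + 1 entries read from the
 * image, so i + 1 is valid for every block index i. */
static uint32_t compressed_len(const uint32_t *offsets, uint32_t i)
{
        return ntohl(offsets[i + 1]) - ntohl(offsets[i]);
}

/* What the driver stores in lo->lo_sizelimit: device size in sectors. */
static uint64_t device_sectors(uint32_t raw_num_blocks,
                               uint32_t raw_block_size)
{
        return (uint64_t)ntohl(raw_num_blocks) *
               ntohl(raw_block_size) / 512;
}

A compressed length of zero is treated later in load_buffer() as a marker for an all-zero block, which is simply memset() instead of being read and decompressed.
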
printk(KERN_ERR "loading complete: total size %llu, %u blocks \n", + lo->lo_sizelimit, ntohl(data->head.num_blocks) ); + + init_MUTEX(&data->mutex); + kfree(buf); + return 0; + +error_release: + if(buf) kfree(buf); + if(data->offsets) kfree(data->offsets); + if(data->compressed_buffer) kfree(data->compressed_buffer); + if(data->buffer) kfree(data->buffer); + printk(KERN_ERR "%s: loading failed\n", + CLOOP_NAME); + return -EINVAL; +} + +static int +compressloop_init(struct loop_device *lo, const struct loop_info64 *info) +{ + int err = -EINVAL; + char cms[LO_NAME_SIZE]; /* cipher-mode string */ + char *cipher; + char *mode; + char *cmsp = cms; /* c-m string pointer */ + struct compressloop_data *data = NULL; + + strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE); + cms[LO_NAME_SIZE - 1] = 0; + cipher = strsep(&cmsp, "-"); + mode = strsep(&cmsp, "-"); + + printk(KERN_INFO "%s: loading %s cipher, %s mode\n",CLOOP_NAME, + cipher,(mode)?mode:"standard"); + + data = kmalloc(sizeof(struct compressloop_data), GFP_KERNEL); + if (data == NULL) + return -EINVAL; + + memset (data,0,sizeof(struct compressloop_data)); + + if (mode == NULL || strcmp(mode, "comp") == 0) + data->tfm = crypto_alloc_tfm(cipher,0); + if (data->tfm == NULL) + goto out_free_data; + + err = load_compressed_head(lo,data); + + if (err !=0) + goto out_free_tfm; + + lo->key_data = data; + lo->lo_flags |= LO_FLAGS_READ_ONLY; + return 0; + + out_free_tfm: + crypto_free_tfm(data->tfm); + + out_free_data: + kfree(data); + return err; +} + +static int +compressloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg) +{ + return -EINVAL; +} + +static int +compressloop_release(struct loop_device *lo) +{ + struct compressloop_data *data= (struct compressloop_data *) lo->key_data; + + if (data != NULL) { + if(data->tfm) crypto_free_tfm(data->tfm); + if(data->offsets) kfree(data->offsets); + if(data->compressed_buffer) kfree(data->compressed_buffer); + if(data->buffer) kfree(data->buffer); + kfree(data); + lo->key_data = NULL; + return 0; + } + printk(KERN_ERR "compressloop_release(): data == NULL?\n"); + return -EINVAL; +} + + + +static inline int +load_buffer(loff_t blocknum, struct loop_device *lo) +{ + unsigned int buf_done = 0; + unsigned long buflen; + unsigned int buf_length; + loff_t pos; + int ret = 0; + struct file *file = lo->lo_backing_file; + struct compressloop_data *data = (struct compressloop_data *) lo->key_data; + + if( blocknum > ntohl(data->head.num_blocks) || blocknum < 0) { + printk(KERN_WARNING "%s: Invalid block number %llu requested.\n", + CLOOP_NAME, blocknum); + data->buffered_blocknum = -1; + return 0; + } + + if (blocknum == data->buffered_blocknum ) return 1; + + /* Get the compressed blocksize*/ + buf_length = ntohl(data->offsets[blocknum+1]) + - ntohl(data->offsets[blocknum]); + /* Get the uncompressed blocksize*/ + buflen = ntohl(data->head.block_size); + + pos = ntohl(data->offsets[blocknum]); +#ifdef GCLOOP_DEBUG + printk (KERN_INFO "load_buffer : block %llu offset %u buf_length %u, %u - %u \n",blocknum, ntohl(data->offsets[blocknum]), buf_length, ntohl(data->offsets[blocknum+1]), ntohl(data->offsets[blocknum]) ); +#endif + /*if the block is uncompressible we just memcpy() the block*/ + if (buf_length==0) { + memset(data->buffer,0,buflen); + data->buffered_blocknum = blocknum; + return 1; + } + + if (cloop_file_io(file,(char *)data->compressed_buffer,buf_length, &pos)) { + printk(KERN_ERR "%s: I/O Error\n", + CLOOP_NAME); + } + + if(buf_length>buflen) { /*Not a blocksize we expect*/ + printk(KERN_ERR "%s; 
error, corrupted index or old cloop" + "format, please update the image", CLOOP_NAME); + return 0; + } + if(buf_length==buflen) /*uncompressed*/ + memcpy(data->buffer,data->compressed_buffer, + buflen); + else + ret = crypto_comp_decompress(data->tfm, data->compressed_buffer, + buf_length, data->buffer, (unsigned int *)&buflen); + + if (ret != 0) { + printk(KERN_ERR "%s: error %i uncompressing block %llu %u/%lu/%u/%u " + "%u-%u\n", CLOOP_NAME, ret, blocknum, + ntohl(data->head.block_size), buflen, + buf_length, buf_done, ntohl(data->offsets[blocknum]), + ntohl(data->offsets[blocknum+1])); + data->buffered_blocknum = -1; + return 0; + } + + data->buffered_blocknum = blocknum; + return 1; +} + + +static int +do_clo_receive(struct loop_device *lo, + struct bio_vec *bvec, int bsize, loff_t pos) +{ + int retval=0; + char *dest=kmap(bvec->bv_page) + bvec->bv_offset; + struct compressloop_data *data = + (struct compressloop_data *) lo->key_data; + uint32_t block_size=ntohl(data->head.block_size), + len=bvec->bv_len; + down_interruptible(& data->mutex); +#ifdef GCLOOP_DEBUG + printk (KERN_INFO "do_clo_receive : bsize %i blocksize %u len %u pos %llu \n",bsize, block_size, len, pos ); +#endif + while (len > 0) { + unsigned int offset_in_buffer, length_in_buffer; + loff_t index = pos; + + /* using div64.h do_div macro*/ + offset_in_buffer = do_div(index,block_size); + + if (!load_buffer(index,lo)) { + retval=-EIO; + break; + } + /* Now, at least part of what we want will be in the buffer. */ + length_in_buffer = block_size - offset_in_buffer; + + if (length_in_buffer > len) + length_in_buffer = len; + + memcpy(dest, data->buffer + offset_in_buffer,length_in_buffer); + + dest += length_in_buffer; + len -= length_in_buffer; + pos += length_in_buffer; + } + + kunmap(bvec->bv_page); + up(&data->mutex); + return (retval < 0)? 
retval: 0; +} + + + + +static struct loop_func_table compressloop_funcs = { + .number = LO_CRYPT_COMPRESS, + .init = compressloop_init, + .ioctl = compressloop_ioctl, + .transfer = NULL, + .release = compressloop_release, + .owner = THIS_MODULE, + .do_receive = do_clo_receive +}; + +static int __init +init_compressloop(void) +{ + int rc = loop_register_transfer(&compressloop_funcs); + + if (rc) + printk(KERN_ERR "compressloop: loop_register_transfer failed\n"); + return rc; +} + +static void __exit +cleanup_compressloop(void) +{ + if (loop_unregister_transfer(LO_CRYPT_COMPRESS)) + printk(KERN_ERR + "compressloop: loop_unregister_transfer failed\n"); +} + +module_init(init_compressloop); +module_exit(cleanup_compressloop); diff -ruN linux-2.6.2.orig/drivers/block/loop.c linux-2.6.2/drivers/block/loop.c --- linux-2.6.2.orig/drivers/block/loop.c 2004-02-17 02:58:14.669229304 +0100 +++ linux-2.6.2/drivers/block/loop.c 2004-02-17 03:05:33.193563472 +0100 @@ -324,7 +324,7 @@ for (vecnr = 0; vecnr < bio->bi_vcnt; vecnr++) { struct bio_vec *bvec = &bio->bi_io_vec[vecnr]; - ret = do_lo_receive(lo, bvec, bsize, pos); + ret = lo->do_receive(lo, bvec, bsize, pos); if (ret < 0) break; pos += bvec->bv_len; @@ -882,7 +882,15 @@ err = loop_init_xfer(lo, xfer, info); if (err) return err; - + /*If you are about to use a compressed loop*/ + if (!xfer) + xfer = &none_funcs; + + if (xfer->number == LO_CRYPT_COMPRESS) + /*believe what is written in the image*/ + set_capacity(disks[lo->lo_number], lo->lo_sizelimit); + else + /*else check for physical limits*/ if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { lo->lo_offset = info->lo_offset; @@ -891,13 +899,14 @@ return -EFBIG; } + memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); lo->lo_file_name[LO_NAME_SIZE-1] = 0; lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; - if (!xfer) - xfer = &none_funcs; +/* if (!xfer) + xfer = &none_funcs;*/ lo->transfer = xfer->transfer; lo->ioctl = xfer->ioctl; @@ -908,8 +917,12 @@ memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, info->lo_encrypt_key_size); lo->lo_key_owner = current->uid; - } - + } + + if (xfer->do_receive) + lo->do_receive = xfer->do_receive; + else lo->do_receive=do_lo_receive; + return 0; } @@ -960,7 +973,8 @@ info64->lo_flags = info->lo_flags; info64->lo_init[0] = info->lo_init[0]; info64->lo_init[1] = info->lo_init[1]; - if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) + if ((info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) || + (info->lo_encrypt_type == LO_CRYPT_COMPRESS )) memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE); else memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE); @@ -981,7 +995,8 @@ info->lo_flags = info64->lo_flags; info->lo_init[0] = info64->lo_init[0]; info->lo_init[1] = info64->lo_init[1]; - if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) + if ((info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) || + (info->lo_encrypt_type == LO_CRYPT_COMPRESS )) memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE); else memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE); diff -ruN linux-2.6.2.orig/drivers/block/loop.c.orig linux-2.6.2/drivers/block/loop.c.orig --- linux-2.6.2.orig/drivers/block/loop.c.orig 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.6.2/drivers/block/loop.c.orig 2004-02-04 04:43:56.000000000 +0100 @@ -0,0 +1,1261 @@ +/* + * linux/drivers/block/loop.c + * + * Written by Theodore Ts'o, 3/29/93 + * + * Copyright 1993 by Theodore Ts'o. 
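
With the transfer module registered as LO_CRYPT_COMPRESS and loop.c routing reads through lo->do_receive, attaching an image from userspace follows the cryptoloop convention: bind the file with LOOP_SET_FD, then pass the transfer number and the compressor name (here "ucl", which compressloop_init() picks up via lo_crypt_name) with LOOP_SET_STATUS. The following is a hedged sketch of a losetup-style helper, not a tool shipped with the patch; attach_gcloop() and its arguments are made up for illustration, and the error paths leak the descriptors that a real tool would clean up.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

#ifndef LO_CRYPT_COMPRESS
#define LO_CRYPT_COMPRESS 19    /* added by this patch */
#endif

/* Sketch: attach a loop device to a gcloop image and select the
 * "ucl" decompressor.  Returns 0 on success, -1 on failure. */
int attach_gcloop(const char *loopdev, const char *image)
{
        struct loop_info info;
        int lfd = open(loopdev, O_RDONLY);
        int ffd = open(image, O_RDONLY);

        if (lfd < 0 || ffd < 0)
                return -1;
        if (ioctl(lfd, LOOP_SET_FD, ffd) < 0)
                return -1;

        memset(&info, 0, sizeof(info));
        info.lo_encrypt_type = LO_CRYPT_COMPRESS;
        strncpy(info.lo_name, "ucl", LO_NAME_SIZE - 1);
        if (ioctl(lfd, LOOP_SET_STATUS, &info) < 0)
                return -1;

        close(ffd);
        return 0;
}

Since compressloop forces LO_FLAGS_READ_ONLY, the resulting loop device can only be mounted read-only, e.g. with mount -o ro.
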
Redistribution of this file is + * permitted under the GNU General Public License. + * + * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993 + * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996 + * + * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994 + * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996 + * + * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997 + * + * Added devfs support - Richard Gooch 16-Jan-1998 + * + * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998 + * + * Loadable modules and other fixes by AK, 1998 + * + * Make real block number available to downstream transfer functions, enables + * CBC (and relatives) mode encryption requiring unique IVs per data block. + * Reed H. Petty, rhp@draper.net + * + * Maximum number of loop devices now dynamic via max_loop module parameter. + * Russell Kroll 19990701 + * + * Maximum number of loop devices when compiled-in now selectable by passing + * max_loop=<1-255> to the kernel on boot. + * Erik I. Bolsų, , Oct 31, 1999 + * + * Completely rewrite request handling to be make_request_fn style and + * non blocking, pushing work to a helper thread. Lots of fixes from + * Al Viro too. + * Jens Axboe , Nov 2000 + * + * Support up to 256 loop devices + * Heinz Mauelshagen , Feb 2002 + * + * Still To Fix: + * - Advisory locking is ignored here. + * - Should use an own CAP_* category instead of CAP_SYS_ADMIN + * + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for invalidate_bdev() */ + +#include + +static int max_loop = 8; +static struct loop_device *loop_dev; +static struct gendisk **disks; + +/* + * Transfer functions + */ +static int transfer_none(struct loop_device *lo, int cmd, char *raw_buf, + char *loop_buf, int size, sector_t real_block) +{ + if (raw_buf != loop_buf) { + if (cmd == READ) + memcpy(loop_buf, raw_buf, size); + else + memcpy(raw_buf, loop_buf, size); + } + + return 0; +} + +static int transfer_xor(struct loop_device *lo, int cmd, char *raw_buf, + char *loop_buf, int size, sector_t real_block) +{ + char *in, *out, *key; + int i, keysize; + + if (cmd == READ) { + in = raw_buf; + out = loop_buf; + } else { + in = loop_buf; + out = raw_buf; + } + + key = lo->lo_encrypt_key; + keysize = lo->lo_encrypt_key_size; + for (i = 0; i < size; i++) + *out++ = *in++ ^ key[(i & 511) % keysize]; + return 0; +} + +static int xor_init(struct loop_device *lo, const struct loop_info64 *info) +{ + if (info->lo_encrypt_key_size <= 0) + return -EINVAL; + return 0; +} + +static struct loop_func_table none_funcs = { + .number = LO_CRYPT_NONE, + .transfer = transfer_none, +}; + +static struct loop_func_table xor_funcs = { + .number = LO_CRYPT_XOR, + .transfer = transfer_xor, + .init = xor_init +}; + +/* xfer_funcs[0] is special - its release function is never called */ +static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = { + &none_funcs, + &xor_funcs +}; + +static int +figure_loop_size(struct loop_device *lo) +{ + loff_t size, offset, loopsize; + sector_t x; + + /* Compute loopsize in bytes */ + size = i_size_read(lo->lo_backing_file->f_mapping->host); + offset = lo->lo_offset; + loopsize = size - offset; + if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) + loopsize = lo->lo_sizelimit; + + /* + * Unfortunately, if we want to do I/O on the device, + * 
the number of 512-byte sectors has to fit into a sector_t. + */ + size = loopsize >> 9; + x = (sector_t)size; + + if ((loff_t)x != size) + return -EFBIG; + + set_capacity(disks[lo->lo_number], x); + return 0; +} + +static inline int +lo_do_transfer(struct loop_device *lo, int cmd, char *rbuf, + char *lbuf, int size, sector_t rblock) +{ + if (!lo->transfer) + return 0; + + return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock); +} + +static int +do_lo_send(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos) +{ + struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */ + struct address_space *mapping = file->f_mapping; + struct address_space_operations *aops = mapping->a_ops; + struct page *page; + char *kaddr, *data; + pgoff_t index; + unsigned size, offset; + int len; + int ret = 0; + + down(&mapping->host->i_sem); + index = pos >> PAGE_CACHE_SHIFT; + offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1); + data = kmap(bvec->bv_page) + bvec->bv_offset; + len = bvec->bv_len; + while (len > 0) { + sector_t IV; + int transfer_result; + + IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9); + + size = PAGE_CACHE_SIZE - offset; + if (size > len) + size = len; + + page = grab_cache_page(mapping, index); + if (!page) + goto fail; + if (aops->prepare_write(file, page, offset, offset+size)) + goto unlock; + kaddr = kmap(page); + transfer_result = lo_do_transfer(lo, WRITE, kaddr + offset, + data, size, IV); + if (transfer_result) { + /* + * The transfer failed, but we still write the data to + * keep prepare/commit calls balanced. + */ + printk(KERN_ERR "loop: transfer error block %llu\n", + (unsigned long long)index); + memset(kaddr + offset, 0, size); + } + flush_dcache_page(page); + kunmap(page); + if (aops->commit_write(file, page, offset, offset+size)) + goto unlock; + if (transfer_result) + goto unlock; + data += size; + len -= size; + offset = 0; + index++; + pos += size; + unlock_page(page); + page_cache_release(page); + } + up(&mapping->host->i_sem); +out: + kunmap(bvec->bv_page); + return ret; + +unlock: + unlock_page(page); + page_cache_release(page); +fail: + up(&mapping->host->i_sem); + ret = -1; + goto out; +} + +static int +lo_send(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) +{ + unsigned vecnr; + int ret = 0; + + for (vecnr = 0; vecnr < bio->bi_vcnt; vecnr++) { + struct bio_vec *bvec = &bio->bi_io_vec[vecnr]; + + ret = do_lo_send(lo, bvec, bsize, pos); + if (ret < 0) + break; + pos += bvec->bv_len; + } + return ret; +} + +struct lo_read_data { + struct loop_device *lo; + char *data; + int bsize; +}; + +static int +lo_read_actor(read_descriptor_t *desc, struct page *page, + unsigned long offset, unsigned long size) +{ + char *kaddr; + unsigned long count = desc->count; + struct lo_read_data *p = (struct lo_read_data*)desc->buf; + struct loop_device *lo = p->lo; + sector_t IV; + + IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9); + + if (size > count) + size = count; + + kaddr = kmap(page); + if (lo_do_transfer(lo, READ, kaddr + offset, p->data, size, IV)) { + size = 0; + printk(KERN_ERR "loop: transfer error block %ld\n", + page->index); + desc->error = -EINVAL; + } + kunmap(page); + + desc->count = count - size; + desc->written += size; + p->data += size; + return size; +} + +static int +do_lo_receive(struct loop_device *lo, + struct bio_vec *bvec, int bsize, loff_t pos) +{ + struct lo_read_data cookie; + struct file *file; + int retval; + + cookie.lo = lo; + cookie.data = kmap(bvec->bv_page) + bvec->bv_offset; + 
cookie.bsize = bsize; + file = lo->lo_backing_file; + retval = file->f_op->sendfile(file, &pos, bvec->bv_len, + lo_read_actor, &cookie); + kunmap(bvec->bv_page); + return (retval < 0)? retval: 0; +} + +static int +lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) +{ + unsigned vecnr; + int ret = 0; + + for (vecnr = 0; vecnr < bio->bi_vcnt; vecnr++) { + struct bio_vec *bvec = &bio->bi_io_vec[vecnr]; + + ret = do_lo_receive(lo, bvec, bsize, pos); + if (ret < 0) + break; + pos += bvec->bv_len; + } + return ret; +} + +static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) +{ + loff_t pos; + int ret; + + pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; + if (bio_rw(bio) == WRITE) + ret = lo_send(lo, bio, lo->lo_blocksize, pos); + else + ret = lo_receive(lo, bio, lo->lo_blocksize, pos); + return ret; +} + +static int loop_end_io_transfer(struct bio *, unsigned int, int); + +static void loop_put_buffer(struct bio *bio) +{ + /* + * check bi_end_io, may just be a remapped bio + */ + if (bio && bio->bi_end_io == loop_end_io_transfer) { + int i; + + for (i = 0; i < bio->bi_vcnt; i++) + __free_page(bio->bi_io_vec[i].bv_page); + + bio_put(bio); + } +} + +/* + * Add bio to back of pending list + */ +static void loop_add_bio(struct loop_device *lo, struct bio *bio) +{ + unsigned long flags; + + spin_lock_irqsave(&lo->lo_lock, flags); + if (lo->lo_biotail) { + lo->lo_biotail->bi_next = bio; + lo->lo_biotail = bio; + } else + lo->lo_bio = lo->lo_biotail = bio; + spin_unlock_irqrestore(&lo->lo_lock, flags); + + up(&lo->lo_bh_mutex); +} + +/* + * Grab first pending buffer + */ +static struct bio *loop_get_bio(struct loop_device *lo) +{ + struct bio *bio; + + spin_lock_irq(&lo->lo_lock); + if ((bio = lo->lo_bio)) { + if (bio == lo->lo_biotail) + lo->lo_biotail = NULL; + lo->lo_bio = bio->bi_next; + bio->bi_next = NULL; + } + spin_unlock_irq(&lo->lo_lock); + + return bio; +} + +/* + * if this was a WRITE lo->transfer stuff has already been done. for READs, + * queue it for the loop thread and let it do the transfer out of + * bi_end_io context (we don't want to do decrypt of a page with irqs + * disabled) + */ +static int loop_end_io_transfer(struct bio *bio, unsigned int bytes_done, int err) +{ + struct bio *rbh = bio->bi_private; + struct loop_device *lo = rbh->bi_bdev->bd_disk->private_data; + + if (bio->bi_size) + return 1; + + if (err || bio_rw(bio) == WRITE) { + bio_endio(rbh, rbh->bi_size, err); + if (atomic_dec_and_test(&lo->lo_pending)) + up(&lo->lo_bh_mutex); + loop_put_buffer(bio); + } else + loop_add_bio(lo, bio); + + return 0; +} + +static struct bio *loop_copy_bio(struct bio *rbh) +{ + struct bio *bio; + struct bio_vec *bv; + int i; + + bio = bio_alloc(__GFP_NOWARN, rbh->bi_vcnt); + if (!bio) + return NULL; + + /* + * iterate iovec list and alloc pages + */ + __bio_for_each_segment(bv, rbh, i, 0) { + struct bio_vec *bbv = &bio->bi_io_vec[i]; + + bbv->bv_page = alloc_page(__GFP_NOWARN|__GFP_HIGHMEM); + if (bbv->bv_page == NULL) + goto oom; + + bbv->bv_len = bv->bv_len; + bbv->bv_offset = bv->bv_offset; + } + + bio->bi_vcnt = rbh->bi_vcnt; + bio->bi_size = rbh->bi_size; + + return bio; + +oom: + while (--i >= 0) + __free_page(bio->bi_io_vec[i].bv_page); + + bio_put(bio); + return NULL; +} + +static struct bio *loop_get_buffer(struct loop_device *lo, struct bio *rbh) +{ + struct bio *bio; + + /* + * When called on the page reclaim -> writepage path, this code can + * trivially consume all memory. 
So we drop PF_MEMALLOC to avoid + * stealing all the page reserves and throttle to the writeout rate. + * pdflush will have been woken by page reclaim. Let it do its work. + */ + do { + int flags = current->flags; + + current->flags &= ~PF_MEMALLOC; + bio = loop_copy_bio(rbh); + if (flags & PF_MEMALLOC) + current->flags |= PF_MEMALLOC; + + if (bio == NULL) + blk_congestion_wait(WRITE, HZ/10); + } while (bio == NULL); + + bio->bi_end_io = loop_end_io_transfer; + bio->bi_private = rbh; + bio->bi_sector = rbh->bi_sector + (lo->lo_offset >> 9); + bio->bi_rw = rbh->bi_rw; + bio->bi_bdev = lo->lo_device; + + return bio; +} + +static int loop_transfer_bio(struct loop_device *lo, + struct bio *to_bio, struct bio *from_bio) +{ + sector_t IV; + struct bio_vec *from_bvec, *to_bvec; + char *vto, *vfrom; + int ret = 0, i; + + IV = from_bio->bi_sector + (lo->lo_offset >> 9); + + __bio_for_each_segment(from_bvec, from_bio, i, 0) { + to_bvec = &to_bio->bi_io_vec[i]; + + kmap(from_bvec->bv_page); + kmap(to_bvec->bv_page); + vfrom = page_address(from_bvec->bv_page) + from_bvec->bv_offset; + vto = page_address(to_bvec->bv_page) + to_bvec->bv_offset; + ret |= lo_do_transfer(lo, bio_data_dir(to_bio), vto, vfrom, + from_bvec->bv_len, IV); + kunmap(from_bvec->bv_page); + kunmap(to_bvec->bv_page); + IV += from_bvec->bv_len >> 9; + } + + return ret; +} + +static int loop_make_request(request_queue_t *q, struct bio *old_bio) +{ + struct bio *new_bio = NULL; + struct loop_device *lo = q->queuedata; + int rw = bio_rw(old_bio); + + if (!lo) + goto out; + + spin_lock_irq(&lo->lo_lock); + if (lo->lo_state != Lo_bound) + goto inactive; + atomic_inc(&lo->lo_pending); + spin_unlock_irq(&lo->lo_lock); + + if (rw == WRITE) { + if (lo->lo_flags & LO_FLAGS_READ_ONLY) + goto err; + } else if (rw == READA) { + rw = READ; + } else if (rw != READ) { + printk(KERN_ERR "loop: unknown command (%x)\n", rw); + goto err; + } + + /* + * file backed, queue for loop_thread to handle + */ + if (lo->lo_flags & LO_FLAGS_DO_BMAP) { + loop_add_bio(lo, old_bio); + return 0; + } + + /* + * piggy old buffer on original, and submit for I/O + */ + new_bio = loop_get_buffer(lo, old_bio); + if (rw == WRITE) { + if (loop_transfer_bio(lo, new_bio, old_bio)) + goto err; + } + + generic_make_request(new_bio); + return 0; + +err: + if (atomic_dec_and_test(&lo->lo_pending)) + up(&lo->lo_bh_mutex); + loop_put_buffer(new_bio); +out: + bio_io_error(old_bio, old_bio->bi_size); + return 0; +inactive: + spin_unlock_irq(&lo->lo_lock); + goto out; +} + +static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio) +{ + int ret; + + /* + * For block backed loop, we know this is a READ + */ + if (lo->lo_flags & LO_FLAGS_DO_BMAP) { + ret = do_bio_filebacked(lo, bio); + bio_endio(bio, bio->bi_size, ret); + } else { + struct bio *rbh = bio->bi_private; + + ret = loop_transfer_bio(lo, bio, rbh); + + bio_endio(rbh, rbh->bi_size, ret); + loop_put_buffer(bio); + } +} + +/* + * worker thread that handles reads/writes to file backed loop devices, + * to avoid blocking in our make_request_fn. it also does loop decrypting + * on reads for block backed loop, as that is too heavy to do from + * b_end_io context where irqs may be disabled. 
+ */ +static int loop_thread(void *data) +{ + struct loop_device *lo = data; + struct bio *bio; + + daemonize("loop%d", lo->lo_number); + + /* + * loop can be used in an encrypted device, + * hence, it mustn't be stopped at all + * because it could be indirectly used during suspension + */ + current->flags |= PF_IOTHREAD; + + set_user_nice(current, -20); + + lo->lo_state = Lo_bound; + atomic_inc(&lo->lo_pending); + + /* + * up sem, we are running + */ + up(&lo->lo_sem); + + for (;;) { + down_interruptible(&lo->lo_bh_mutex); + /* + * could be upped because of tear-down, not because of + * pending work + */ + if (!atomic_read(&lo->lo_pending)) + break; + + bio = loop_get_bio(lo); + if (!bio) { + printk("loop: missing bio\n"); + continue; + } + loop_handle_bio(lo, bio); + + /* + * upped both for pending work and tear-down, lo_pending + * will hit zero then + */ + if (atomic_dec_and_test(&lo->lo_pending)) + break; + } + + up(&lo->lo_sem); + return 0; +} + +static int loop_set_fd(struct loop_device *lo, struct file *lo_file, + struct block_device *bdev, unsigned int arg) +{ + struct file *file; + struct inode *inode; + struct block_device *lo_device = NULL; + struct address_space *mapping; + unsigned lo_blocksize; + int lo_flags = 0; + int error; + + /* This is safe, since we have a reference from open(). */ + __module_get(THIS_MODULE); + + error = -EBUSY; + if (lo->lo_state != Lo_unbound) + goto out; + + error = -EBADF; + file = fget(arg); + if (!file) + goto out; + + mapping = file->f_mapping; + inode = mapping->host; + + if (!(file->f_mode & FMODE_WRITE)) + lo_flags |= LO_FLAGS_READ_ONLY; + + error = -EINVAL; + if (S_ISBLK(inode->i_mode)) { + lo_device = I_BDEV(inode); + if (lo_device == bdev) { + error = -EBUSY; + goto out_putf; + } + lo_blocksize = block_size(lo_device); + if (bdev_read_only(lo_device)) + lo_flags |= LO_FLAGS_READ_ONLY; + } else if (S_ISREG(inode->i_mode)) { + struct address_space_operations *aops = mapping->a_ops; + /* + * If we can't read - sorry. If we only can't write - well, + * it's going to be read-only. 
+ */ + if (!inode->i_fop->sendfile) + goto out_putf; + + if (!aops->prepare_write || !aops->commit_write) + lo_flags |= LO_FLAGS_READ_ONLY; + + lo_blocksize = inode->i_blksize; + lo_flags |= LO_FLAGS_DO_BMAP; + } else + goto out_putf; + + if (!(lo_file->f_mode & FMODE_WRITE)) + lo_flags |= LO_FLAGS_READ_ONLY; + + set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); + + lo->lo_blocksize = lo_blocksize; + lo->lo_device = lo_device; + lo->lo_flags = lo_flags; + lo->lo_backing_file = file; + lo->transfer = NULL; + lo->ioctl = NULL; + lo->lo_sizelimit = 0; + if (figure_loop_size(lo)) { + error = -EFBIG; + goto out_putf; + } + lo->old_gfp_mask = mapping_gfp_mask(mapping); + mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); + + lo->lo_bio = lo->lo_biotail = NULL; + + /* + * set queue make_request_fn, and add limits based on lower level + * device + */ + blk_queue_make_request(lo->lo_queue, loop_make_request); + lo->lo_queue->queuedata = lo; + + /* + * we remap to a block device, make sure we correctly stack limits + */ + if (S_ISBLK(inode->i_mode)) { + request_queue_t *q = bdev_get_queue(lo_device); + + blk_queue_max_sectors(lo->lo_queue, q->max_sectors); + blk_queue_max_phys_segments(lo->lo_queue,q->max_phys_segments); + blk_queue_max_hw_segments(lo->lo_queue, q->max_hw_segments); + blk_queue_hardsect_size(lo->lo_queue, queue_hardsect_size(q)); + blk_queue_max_segment_size(lo->lo_queue, q->max_segment_size); + blk_queue_segment_boundary(lo->lo_queue, q->seg_boundary_mask); + blk_queue_merge_bvec(lo->lo_queue, q->merge_bvec_fn); + } + + set_blocksize(bdev, lo_blocksize); + + kernel_thread(loop_thread, lo, CLONE_KERNEL); + down(&lo->lo_sem); + return 0; + + out_putf: + fput(file); + out: + /* This is safe: open() is still holding a reference. 
*/ + module_put(THIS_MODULE); + return error; +} + +static int +loop_release_xfer(struct loop_device *lo) +{ + int err = 0; + struct loop_func_table *xfer = lo->lo_encryption; + + if (xfer) { + if (xfer->release) + err = xfer->release(lo); + lo->transfer = NULL; + lo->lo_encryption = NULL; + module_put(xfer->owner); + } + return err; +} + +static int +loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, + const struct loop_info64 *i) +{ + int err = 0; + + if (xfer) { + struct module *owner = xfer->owner; + + if (!try_module_get(owner)) + return -EINVAL; + if (xfer->init) + err = xfer->init(lo, i); + if (err) + module_put(owner); + else + lo->lo_encryption = xfer; + } + return err; +} + +static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) +{ + struct file *filp = lo->lo_backing_file; + int gfp = lo->old_gfp_mask; + + if (lo->lo_state != Lo_bound) + return -ENXIO; + + if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */ + return -EBUSY; + + if (filp == NULL) + return -EINVAL; + + spin_lock_irq(&lo->lo_lock); + lo->lo_state = Lo_rundown; + if (atomic_dec_and_test(&lo->lo_pending)) + up(&lo->lo_bh_mutex); + spin_unlock_irq(&lo->lo_lock); + + down(&lo->lo_sem); + + lo->lo_backing_file = NULL; + + loop_release_xfer(lo); + lo->transfer = NULL; + lo->ioctl = NULL; + lo->lo_device = NULL; + lo->lo_encryption = NULL; + lo->lo_offset = 0; + lo->lo_sizelimit = 0; + lo->lo_encrypt_key_size = 0; + lo->lo_flags = 0; + memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); + memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); + memset(lo->lo_file_name, 0, LO_NAME_SIZE); + invalidate_bdev(bdev, 0); + set_capacity(disks[lo->lo_number], 0); + mapping_set_gfp_mask(filp->f_mapping, gfp); + lo->lo_state = Lo_unbound; + fput(filp); + /* This is safe: open() is still holding a reference. 
*/ + module_put(THIS_MODULE); + return 0; +} + +static int +loop_set_status(struct loop_device *lo, const struct loop_info64 *info) +{ + int err; + struct loop_func_table *xfer; + + if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + if (lo->lo_state != Lo_bound) + return -ENXIO; + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) + return -EINVAL; + + err = loop_release_xfer(lo); + if (err) + return err; + + if (info->lo_encrypt_type) { + unsigned int type = info->lo_encrypt_type; + + if (type >= MAX_LO_CRYPT) + return -EINVAL; + xfer = xfer_funcs[type]; + if (xfer == NULL) + return -EINVAL; + } else + xfer = NULL; + + err = loop_init_xfer(lo, xfer, info); + if (err) + return err; + + if (lo->lo_offset != info->lo_offset || + lo->lo_sizelimit != info->lo_sizelimit) { + lo->lo_offset = info->lo_offset; + lo->lo_sizelimit = info->lo_sizelimit; + if (figure_loop_size(lo)) + return -EFBIG; + } + + memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); + memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); + lo->lo_file_name[LO_NAME_SIZE-1] = 0; + lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; + + if (!xfer) + xfer = &none_funcs; + lo->transfer = xfer->transfer; + lo->ioctl = xfer->ioctl; + + lo->lo_encrypt_key_size = info->lo_encrypt_key_size; + lo->lo_init[0] = info->lo_init[0]; + lo->lo_init[1] = info->lo_init[1]; + if (info->lo_encrypt_key_size) { + memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, + info->lo_encrypt_key_size); + lo->lo_key_owner = current->uid; + } + + return 0; +} + +static int +loop_get_status(struct loop_device *lo, struct loop_info64 *info) +{ + struct file *file = lo->lo_backing_file; + struct kstat stat; + int error; + + if (lo->lo_state != Lo_bound) + return -ENXIO; + error = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat); + if (error) + return error; + memset(info, 0, sizeof(*info)); + info->lo_number = lo->lo_number; + info->lo_device = huge_encode_dev(stat.dev); + info->lo_inode = stat.ino; + info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev); + info->lo_offset = lo->lo_offset; + info->lo_sizelimit = lo->lo_sizelimit; + info->lo_flags = lo->lo_flags; + memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); + memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); + info->lo_encrypt_type = + lo->lo_encryption ? 
lo->lo_encryption->number : 0; + if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { + info->lo_encrypt_key_size = lo->lo_encrypt_key_size; + memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, + lo->lo_encrypt_key_size); + } + return 0; +} + +static void +loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64) +{ + memset(info64, 0, sizeof(*info64)); + info64->lo_number = info->lo_number; + info64->lo_device = info->lo_device; + info64->lo_inode = info->lo_inode; + info64->lo_rdevice = info->lo_rdevice; + info64->lo_offset = info->lo_offset; + info64->lo_sizelimit = 0; + info64->lo_encrypt_type = info->lo_encrypt_type; + info64->lo_encrypt_key_size = info->lo_encrypt_key_size; + info64->lo_flags = info->lo_flags; + info64->lo_init[0] = info->lo_init[0]; + info64->lo_init[1] = info->lo_init[1]; + if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) + memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE); + else + memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE); + memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE); +} + +static int +loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info) +{ + memset(info, 0, sizeof(*info)); + info->lo_number = info64->lo_number; + info->lo_device = info64->lo_device; + info->lo_inode = info64->lo_inode; + info->lo_rdevice = info64->lo_rdevice; + info->lo_offset = info64->lo_offset; + info->lo_encrypt_type = info64->lo_encrypt_type; + info->lo_encrypt_key_size = info64->lo_encrypt_key_size; + info->lo_flags = info64->lo_flags; + info->lo_init[0] = info64->lo_init[0]; + info->lo_init[1] = info64->lo_init[1]; + if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) + memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE); + else + memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE); + memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); + + /* error in case values were truncated */ + if (info->lo_device != info64->lo_device || + info->lo_rdevice != info64->lo_rdevice || + info->lo_inode != info64->lo_inode || + info->lo_offset != info64->lo_offset) + return -EOVERFLOW; + + return 0; +} + +static int +loop_set_status_old(struct loop_device *lo, const struct loop_info *arg) +{ + struct loop_info info; + struct loop_info64 info64; + + if (copy_from_user(&info, arg, sizeof (struct loop_info))) + return -EFAULT; + loop_info64_from_old(&info, &info64); + return loop_set_status(lo, &info64); +} + +static int +loop_set_status64(struct loop_device *lo, const struct loop_info64 *arg) +{ + struct loop_info64 info64; + + if (copy_from_user(&info64, arg, sizeof (struct loop_info64))) + return -EFAULT; + return loop_set_status(lo, &info64); +} + +static int +loop_get_status_old(struct loop_device *lo, struct loop_info *arg) { + struct loop_info info; + struct loop_info64 info64; + int err = 0; + + if (!arg) + err = -EINVAL; + if (!err) + err = loop_get_status(lo, &info64); + if (!err) + err = loop_info64_to_old(&info64, &info); + if (!err && copy_to_user(arg, &info, sizeof(info))) + err = -EFAULT; + + return err; +} + +static int +loop_get_status64(struct loop_device *lo, struct loop_info64 *arg) { + struct loop_info64 info64; + int err = 0; + + if (!arg) + err = -EINVAL; + if (!err) + err = loop_get_status(lo, &info64); + if (!err && copy_to_user(arg, &info64, sizeof(info64))) + err = -EFAULT; + + return err; +} + +static int lo_ioctl(struct inode * inode, struct file * file, + unsigned int cmd, unsigned long arg) +{ + struct loop_device *lo = inode->i_bdev->bd_disk->private_data; + 
int err; + + down(&lo->lo_ctl_mutex); + switch (cmd) { + case LOOP_SET_FD: + err = loop_set_fd(lo, file, inode->i_bdev, arg); + break; + case LOOP_CLR_FD: + err = loop_clr_fd(lo, inode->i_bdev); + break; + case LOOP_SET_STATUS: + err = loop_set_status_old(lo, (struct loop_info *) arg); + break; + case LOOP_GET_STATUS: + err = loop_get_status_old(lo, (struct loop_info *) arg); + break; + case LOOP_SET_STATUS64: + err = loop_set_status64(lo, (struct loop_info64 *) arg); + break; + case LOOP_GET_STATUS64: + err = loop_get_status64(lo, (struct loop_info64 *) arg); + break; + default: + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; + } + up(&lo->lo_ctl_mutex); + return err; +} + +static int lo_open(struct inode *inode, struct file *file) +{ + struct loop_device *lo = inode->i_bdev->bd_disk->private_data; + + down(&lo->lo_ctl_mutex); + lo->lo_refcnt++; + up(&lo->lo_ctl_mutex); + + return 0; +} + +static int lo_release(struct inode *inode, struct file *file) +{ + struct loop_device *lo = inode->i_bdev->bd_disk->private_data; + + down(&lo->lo_ctl_mutex); + --lo->lo_refcnt; + up(&lo->lo_ctl_mutex); + + return 0; +} + +static struct block_device_operations lo_fops = { + .owner = THIS_MODULE, + .open = lo_open, + .release = lo_release, + .ioctl = lo_ioctl, +}; + +/* + * And now the modules code and kernel interface. + */ +MODULE_PARM(max_loop, "i"); +MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); + +int loop_register_transfer(struct loop_func_table *funcs) +{ + unsigned int n = funcs->number; + + if (n >= MAX_LO_CRYPT || xfer_funcs[n]) + return -EINVAL; + xfer_funcs[n] = funcs; + return 0; +} + +int loop_unregister_transfer(int number) +{ + unsigned int n = number; + struct loop_device *lo; + struct loop_func_table *xfer; + + if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) + return -EINVAL; + + xfer_funcs[n] = NULL; + + for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) { + down(&lo->lo_ctl_mutex); + + if (lo->lo_encryption == xfer) + loop_release_xfer(lo); + + up(&lo->lo_ctl_mutex); + } + + return 0; +} + +EXPORT_SYMBOL(loop_register_transfer); +EXPORT_SYMBOL(loop_unregister_transfer); + +int __init loop_init(void) +{ + int i; + + if (max_loop < 1 || max_loop > 256) { + printk(KERN_WARNING "loop: invalid max_loop (must be between" + " 1 and 256), using default (8)\n"); + max_loop = 8; + } + + if (register_blkdev(LOOP_MAJOR, "loop")) + return -EIO; + + loop_dev = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL); + if (!loop_dev) + goto out_mem1; + memset(loop_dev, 0, max_loop * sizeof(struct loop_device)); + + disks = kmalloc(max_loop * sizeof(struct gendisk *), GFP_KERNEL); + if (!disks) + goto out_mem2; + + for (i = 0; i < max_loop; i++) { + disks[i] = alloc_disk(1); + if (!disks[i]) + goto out_mem3; + } + + devfs_mk_dir("loop"); + + for (i = 0; i < max_loop; i++) { + struct loop_device *lo = &loop_dev[i]; + struct gendisk *disk = disks[i]; + + memset(lo, 0, sizeof(*lo)); + lo->lo_queue = blk_alloc_queue(GFP_KERNEL); + if (!lo->lo_queue) + goto out_mem4; + disks[i]->queue = lo->lo_queue; + init_MUTEX(&lo->lo_ctl_mutex); + init_MUTEX_LOCKED(&lo->lo_sem); + init_MUTEX_LOCKED(&lo->lo_bh_mutex); + lo->lo_number = i; + spin_lock_init(&lo->lo_lock); + disk->major = LOOP_MAJOR; + disk->first_minor = i; + disk->fops = &lo_fops; + sprintf(disk->disk_name, "loop%d", i); + sprintf(disk->devfs_name, "loop/%d", i); + disk->private_data = lo; + disk->queue = lo->lo_queue; + 
add_disk(disk); + } + printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop); + return 0; + +out_mem4: + while (i--) + blk_put_queue(loop_dev[i].lo_queue); + i = max_loop; +out_mem3: + while (i--) + put_disk(disks[i]); + kfree(disks); +out_mem2: + kfree(loop_dev); +out_mem1: + unregister_blkdev(LOOP_MAJOR, "loop"); + printk(KERN_ERR "loop: ran out of memory\n"); + return -ENOMEM; +} + +void loop_exit(void) +{ + int i; + + for (i = 0; i < max_loop; i++) { + del_gendisk(disks[i]); + blk_put_queue(loop_dev[i].lo_queue); + put_disk(disks[i]); + } + devfs_remove("loop"); + if (unregister_blkdev(LOOP_MAJOR, "loop")) + printk(KERN_WARNING "loop: cannot unregister blkdev\n"); + + kfree(disks); + kfree(loop_dev); +} + +module_init(loop_init); +module_exit(loop_exit); + +#ifndef MODULE +static int __init max_loop_setup(char *str) +{ + max_loop = simple_strtol(str, NULL, 0); + return 1; +} + +__setup("max_loop=", max_loop_setup); +#endif diff -ruN linux-2.6.2.orig/include/linux/cloop.h linux-2.6.2/include/linux/cloop.h --- linux-2.6.2.orig/include/linux/cloop.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.6.2/include/linux/cloop.h 2004-02-17 03:05:00.878476112 +0100 @@ -0,0 +1,28 @@ +#ifndef _CLOOP_H +#define _CLOOP_H + + +#define CLOOP_HEADROOM 128 + +struct cloop_head +{ + char preamble[CLOOP_HEADROOM]; + u_int32_t block_size; + u_int32_t num_blocks; +}; + +struct compressloop_data +{ + /* data we need to know which block uncompress) */ + struct cloop_head head; + u_int32_t *offsets; + /* will hold the uncompressor */ + struct crypto_tfm *tfm; + /*workspace/cache*/ + char *buffer; + char *compressed_buffer; + int buffered_blocknum; + struct semaphore mutex; +}; + +#endif /*_CLOOP_H*/ diff -ruN linux-2.6.2.orig/include/linux/loop.h linux-2.6.2/include/linux/loop.h --- linux-2.6.2.orig/include/linux/loop.h 2004-02-17 02:55:55.000000000 +0100 +++ linux-2.6.2/include/linux/loop.h 2004-02-17 03:05:54.031395640 +0100 @@ -63,6 +63,8 @@ atomic_t lo_pending; request_queue_t *lo_queue; + int (*do_receive)(struct loop_device *lo, + struct bio_vec *bvec, int bsize, loff_t pos); }; #endif /* __KERNEL__ */ @@ -122,6 +124,7 @@ #define LO_CRYPT_DUMMY 9 #define LO_CRYPT_SKIPJACK 10 #define LO_CRYPT_CRYPTOAPI 18 +#define LO_CRYPT_COMPRESS 19 #define MAX_LO_CRYPT 20 #ifdef __KERNEL__ @@ -135,6 +138,9 @@ int (*release)(struct loop_device *); int (*ioctl)(struct loop_device *, int cmd, unsigned long arg); struct module *owner; + int (*do_receive)(struct loop_device *lo, struct bio_vec *bvec, + int bsize, loff_t pos); + }; int loop_register_transfer(struct loop_func_table *funcs); diff -ruN linux-2.6.2.orig/include/ucl/ucl.h linux-2.6.2/include/ucl/ucl.h --- linux-2.6.2.orig/include/ucl/ucl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.6.2/include/ucl/ucl.h 2004-02-17 03:05:00.961463496 +0100 @@ -0,0 +1,235 @@ +/* ucl.h -- prototypes for the UCL real-time data compression library + + This file is part of the UCL data compression library. + + Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. 
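
cloop.h above fixes the on-disk format that load_compressed_head() parses: a 128-byte preamble that the driver never interprets, block_size and num_blocks in network byte order, a table of num_blocks + 1 absolute file offsets, then the concatenated compressed blocks. Below is a minimal sketch of the header-writing step of an image builder; it is illustrative only (a real create_compressed_fs-style tool also compresses the blocks and computes the offsets), and write_gcloop_header() is a made-up name.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define CLOOP_HEADROOM 128

/* Same layout as struct cloop_head in include/linux/cloop.h above. */
struct cloop_head {
        char preamble[CLOOP_HEADROOM];
        uint32_t block_size;
        uint32_t num_blocks;
};

/* Illustrative: write the fixed header and the offset table.
 * offsets[] has num_blocks + 1 host-order entries giving the absolute
 * file position of each compressed block, the last one pointing just
 * past the final block. */
static int write_gcloop_header(FILE *out, uint32_t block_size,
                               uint32_t num_blocks,
                               const uint32_t *offsets)
{
        struct cloop_head head;
        uint32_t i, be;

        memset(&head, 0, sizeof(head));  /* preamble content is ignored */
        head.block_size = htonl(block_size);
        head.num_blocks = htonl(num_blocks);

        if (fwrite(&head, sizeof(head), 1, out) != 1)
                return -1;
        for (i = 0; i <= num_blocks; i++) {
                be = htonl(offsets[i]);
                if (fwrite(&be, sizeof(be), 1, out) != 1)
                        return -1;
        }
        return 0;
}
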
+ + The UCL library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The UCL library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the UCL library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + Markus F.X.J. Oberhumer + + http://www.oberhumer.com/opensource/ucl/ + */ + + +#ifndef __UCL_H +#define __UCL_H + +#ifndef __UCLCONF_H +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + + +/*********************************************************************** +// +************************************************************************/ + +/* note: to use default values pass -1, i.e. initialize + * this struct by a memset(x,0xff,sizeof(x)) */ +struct ucl_compress_config_t +{ + int bb_endian; + int bb_size; + ucl_uint max_offset; + ucl_uint max_match; + int s_level; + int h_level; + int p_level; + int c_flags; + ucl_uint m_size; +}; + +#define ucl_compress_config_p ucl_compress_config_t __UCL_MMODEL * + + +/*********************************************************************** +// compressors +************************************************************************/ + +UCL_EXTERN(int) +ucl_nrv2b_99_compress ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_progress_callback_p cb, + int level, + const struct ucl_compress_config_p conf, + ucl_uintp result ); + +UCL_EXTERN(int) +ucl_nrv2d_99_compress ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_progress_callback_p cb, + int level, + const struct ucl_compress_config_p conf, + ucl_uintp result ); + +UCL_EXTERN(int) +ucl_nrv2e_99_compress ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_progress_callback_p cb, + int level, + const struct ucl_compress_config_p conf, + ucl_uintp result ); + + +/*********************************************************************** +// decompressors +************************************************************************/ + +UCL_EXTERN(int) +ucl_nrv2b_decompress_8 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2b_decompress_le16 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2b_decompress_le32 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2b_decompress_safe_8 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2b_decompress_safe_le16 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2b_decompress_safe_le32 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); + +UCL_EXTERN(int) +ucl_nrv2d_decompress_8 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2d_decompress_le16 ( const 
ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2d_decompress_le32 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2d_decompress_safe_8 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2d_decompress_safe_le16 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2d_decompress_safe_le32 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); + +UCL_EXTERN(int) +ucl_nrv2e_decompress_8 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2e_decompress_le16 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2e_decompress_le32 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2e_decompress_safe_8 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2e_decompress_safe_le16 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2e_decompress_safe_le32 ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); + + +/*********************************************************************** +// assembler decompressors [TO BE ADDED] +************************************************************************/ + + +/*********************************************************************** +// test an overlapping in-place decompression within a buffer: +// - try a virtual decompression from &buf[src_off] -> &buf[0] +// - no data is actually written +// - only the bytes at buf[src_off .. 
src_off+src_len] will get accessed +************************************************************************/ + +UCL_EXTERN(int) +ucl_nrv2b_test_overlap_8 ( const ucl_bytep buf, ucl_uint src_off, + ucl_uint src_len, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2b_test_overlap_le16 ( const ucl_bytep buf, ucl_uint src_off, + ucl_uint src_len, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2b_test_overlap_le32 ( const ucl_bytep buf, ucl_uint src_off, + ucl_uint src_len, ucl_uintp dst_len, + ucl_voidp wrkmem ); + +UCL_EXTERN(int) +ucl_nrv2d_test_overlap_8 ( const ucl_bytep buf, ucl_uint src_off, + ucl_uint src_len, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2d_test_overlap_le16 ( const ucl_bytep buf, ucl_uint src_off, + ucl_uint src_len, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2d_test_overlap_le32 ( const ucl_bytep buf, ucl_uint src_off, + ucl_uint src_len, ucl_uintp dst_len, + ucl_voidp wrkmem ); + +UCL_EXTERN(int) +ucl_nrv2e_test_overlap_8 ( const ucl_bytep buf, ucl_uint src_off, + ucl_uint src_len, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2e_test_overlap_le16 ( const ucl_bytep buf, ucl_uint src_off, + ucl_uint src_len, ucl_uintp dst_len, + ucl_voidp wrkmem ); +UCL_EXTERN(int) +ucl_nrv2e_test_overlap_le32 ( const ucl_bytep buf, ucl_uint src_off, + ucl_uint src_len, ucl_uintp dst_len, + ucl_voidp wrkmem ); + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* already included */ + diff -ruN linux-2.6.2.orig/include/ucl/uclconf.h linux-2.6.2/include/ucl/uclconf.h --- linux-2.6.2.orig/include/ucl/uclconf.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.6.2/include/ucl/uclconf.h 2004-02-17 03:05:01.009456200 +0100 @@ -0,0 +1,407 @@ +/* uclconf.h -- configuration for the UCL real-time data compression library + + This file is part of the UCL data compression library. + + Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + The UCL library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The UCL library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the UCL library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + Markus F.X.J. 
Oberhumer + + http://www.oberhumer.com/opensource/ucl/ + */ + + +#ifndef __UCLCONF_H +#define __UCLCONF_H + +#define UCL_VERSION 0x010100L +#define UCL_VERSION_STRING "1.01" +#define UCL_VERSION_DATE "Jan 02 2002" + +/* internal Autoconf configuration file - only used when building UCL */ +#if defined(UCL_HAVE_CONFIG_H) +# include +#endif +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +/*********************************************************************** +// UCL requires a conforming +************************************************************************/ + +#if !defined(CHAR_BIT) || (CHAR_BIT != 8) +# error "invalid CHAR_BIT" +#endif +#if !defined(UCHAR_MAX) || !defined(UINT_MAX) || !defined(ULONG_MAX) +# error "check your compiler installation" +#endif +#if (USHRT_MAX < 1) || (UINT_MAX < 1) || (ULONG_MAX < 1) +# error "your limits.h macros are broken" +#endif + +/* workaround a compiler bug under hpux 10.20 */ +#define UCL_0xffffffffL 4294967295ul + +#if !defined(UCL_UINT32_C) +# if (UINT_MAX < UCL_0xffffffffL) +# define UCL_UINT32_C(c) c ## UL +# else +# define UCL_UINT32_C(c) c ## U +# endif +#endif + + +/*********************************************************************** +// architecture defines +************************************************************************/ + +#if !defined(__UCL_WIN) && !defined(__UCL_DOS) && !defined(__UCL_OS2) +# if defined(__WINDOWS__) || defined(_WINDOWS) || defined(_Windows) +# define __UCL_WIN +# elif defined(__WIN32__) || defined(_WIN32) || defined(WIN32) +# define __UCL_WIN +# elif defined(__NT__) || defined(__NT_DLL__) || defined(__WINDOWS_386__) +# define __UCL_WIN +# elif defined(__DOS__) || defined(__MSDOS__) || defined(MSDOS) +# define __UCL_DOS +# elif defined(__OS2__) || defined(__OS2V2__) || defined(OS2) +# define __UCL_OS2 +# elif defined(__palmos__) +# define __UCL_PALMOS +# elif defined(__TOS__) || defined(__atarist__) +# define __UCL_TOS +# endif +#endif + +#if (UINT_MAX < UCL_0xffffffffL) +# if defined(__UCL_WIN) +# define __UCL_WIN16 +# elif defined(__UCL_DOS) +# define __UCL_DOS16 +# elif defined(__UCL_PALMOS) +# define __UCL_PALMOS16 +# elif defined(__UCL_TOS) +# define __UCL_TOS16 +# elif defined(__C166__) +# else +# error "16-bit target not supported - contact me for porting hints" +# endif +#endif + +#if !defined(__UCL_i386) +# if defined(__UCL_DOS) || defined(__UCL_WIN16) +# define __UCL_i386 +# elif defined(__i386__) || defined(__386__) || defined(_M_IX86) +# define __UCL_i386 +# endif +#endif + +#if defined(__UCL_STRICT_16BIT) +# if (UINT_MAX < UCL_0xffffffffL) +# include +# endif +#endif + +/* memory checkers */ +#if !defined(__UCL_CHECKER) +# if defined(__BOUNDS_CHECKING_ON) +# define __UCL_CHECKER +# elif defined(__CHECKER__) +# define __UCL_CHECKER +# elif defined(__INSURE__) +# define __UCL_CHECKER +# elif defined(__PURIFY__) +# define __UCL_CHECKER +# endif +#endif + + +/*********************************************************************** +// integral and pointer types +************************************************************************/ + +/* Integral types with 32 bits or more */ +#if !defined(UCL_UINT32_MAX) +# if (UINT_MAX >= UCL_0xffffffffL) + typedef unsigned int ucl_uint32; + typedef int ucl_int32; +# define UCL_UINT32_MAX UINT_MAX +# define UCL_INT32_MAX INT_MAX +# define UCL_INT32_MIN INT_MIN +# elif (ULONG_MAX >= UCL_0xffffffffL) + typedef unsigned long ucl_uint32; + typedef long ucl_int32; +# define UCL_UINT32_MAX ULONG_MAX +# define UCL_INT32_MAX LONG_MAX +# define UCL_INT32_MIN 
LONG_MIN +# else +# error "ucl_uint32" +# endif +#endif + +/* ucl_uint is used like size_t */ +#if !defined(UCL_UINT_MAX) +# if (UINT_MAX >= UCL_0xffffffffL) + typedef unsigned int ucl_uint; + typedef int ucl_int; +# define UCL_UINT_MAX UINT_MAX +# define UCL_INT_MAX INT_MAX +# define UCL_INT_MIN INT_MIN +# elif (ULONG_MAX >= UCL_0xffffffffL) + typedef unsigned long ucl_uint; + typedef long ucl_int; +# define UCL_UINT_MAX ULONG_MAX +# define UCL_INT_MAX LONG_MAX +# define UCL_INT_MIN LONG_MIN +# else +# error "ucl_uint" +# endif +#endif + +/* Memory model that allows to access memory at offsets of ucl_uint. */ +#if !defined(__UCL_MMODEL) +# if (UCL_UINT_MAX <= UINT_MAX) +# define __UCL_MMODEL +# elif defined(__UCL_DOS16) || defined(__UCL_WIN16) +# define __UCL_MMODEL __huge +# define UCL_999_UNSUPPORTED +# elif defined(__UCL_PALMOS16) || defined(__UCL_TOS16) +# define __UCL_MMODEL +# else +# error "__UCL_MMODEL" +# endif +#endif + +/* no typedef here because of const-pointer issues */ +#define ucl_byte unsigned char __UCL_MMODEL +#define ucl_bytep unsigned char __UCL_MMODEL * +#define ucl_charp char __UCL_MMODEL * +#define ucl_voidp void __UCL_MMODEL * +#define ucl_shortp short __UCL_MMODEL * +#define ucl_ushortp unsigned short __UCL_MMODEL * +#define ucl_uint32p ucl_uint32 __UCL_MMODEL * +#define ucl_int32p ucl_int32 __UCL_MMODEL * +#define ucl_uintp ucl_uint __UCL_MMODEL * +#define ucl_intp ucl_int __UCL_MMODEL * +#define ucl_voidpp ucl_voidp __UCL_MMODEL * +#define ucl_bytepp ucl_bytep __UCL_MMODEL * + +typedef int ucl_bool; + + +/*********************************************************************** +// function types +************************************************************************/ + +/* linkage */ +#if !defined(__UCL_EXTERN_C) +# ifdef __cplusplus +# define __UCL_EXTERN_C extern "C" +# else +# define __UCL_EXTERN_C extern +# endif +#endif + +/* calling conventions */ +#if !defined(__UCL_CDECL) +# if defined(__UCL_DOS16) || defined(__UCL_WIN16) +# define __UCL_CDECL __far __cdecl +# elif defined(__UCL_i386) && defined(_MSC_VER) +# define __UCL_CDECL __cdecl +# elif defined(__UCL_i386) && defined(__WATCOMC__) +# define __UCL_CDECL __near __cdecl +# else +# define __UCL_CDECL +# endif +#endif +#if !defined(__UCL_ENTRY) +# define __UCL_ENTRY __UCL_CDECL +#endif + +/* DLL export information */ +#if !defined(__UCL_EXPORT1) +# define __UCL_EXPORT1 +#endif +#if !defined(__UCL_EXPORT2) +# define __UCL_EXPORT2 +#endif + +/* calling convention for C functions */ +#if !defined(UCL_PUBLIC) +# define UCL_PUBLIC(_rettype) __UCL_EXPORT1 _rettype __UCL_EXPORT2 __UCL_ENTRY +#endif +#if !defined(UCL_EXTERN) +# define UCL_EXTERN(_rettype) __UCL_EXTERN_C UCL_PUBLIC(_rettype) +#endif +#if !defined(UCL_PRIVATE) +# define UCL_PRIVATE(_rettype) static _rettype __UCL_ENTRY +#endif + +/* cdecl calling convention for assembler functions */ +#if !defined(UCL_PUBLIC_CDECL) +# define UCL_PUBLIC_CDECL(_rettype) \ + __UCL_EXPORT1 _rettype __UCL_EXPORT2 __UCL_CDECL +#endif +#if !defined(UCL_EXTERN_CDECL) +# define UCL_EXTERN_CDECL(_rettype) __UCL_EXTERN_C UCL_PUBLIC_CDECL(_rettype) +#endif + +/* C++ exception specification for extern "C" function types */ +#if !defined(__cplusplus) +# undef UCL_NOTHROW +# define UCL_NOTHROW +#elif !defined(UCL_NOTHROW) +# define UCL_NOTHROW +#endif + + +typedef int +(__UCL_ENTRY *ucl_compress_t) ( const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); + +typedef int +(__UCL_ENTRY *ucl_decompress_t) ( const ucl_bytep src, ucl_uint 
src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); + +typedef int +(__UCL_ENTRY *ucl_optimize_t) ( ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem ); + +typedef int +(__UCL_ENTRY *ucl_compress_dict_t)(const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem, + const ucl_bytep dict, ucl_uint dict_len ); + +typedef int +(__UCL_ENTRY *ucl_decompress_dict_t)(const ucl_bytep src, ucl_uint src_len, + ucl_bytep dst, ucl_uintp dst_len, + ucl_voidp wrkmem, + const ucl_bytep dict, ucl_uint dict_len ); + + +/* a progress indicator callback function */ +typedef struct +{ + void (__UCL_ENTRY *callback) (ucl_uint, ucl_uint, int, ucl_voidp user); + ucl_voidp user; +} +ucl_progress_callback_t; +#define ucl_progress_callback_p ucl_progress_callback_t __UCL_MMODEL * + + +/*********************************************************************** +// error codes and prototypes +************************************************************************/ + +/* Error codes for the compression/decompression functions. Negative + * values are errors, positive values will be used for special but + * normal events. + */ +#define UCL_E_OK 0 +#define UCL_E_ERROR (-1) +#define UCL_E_INVALID_ARGUMENT (-2) +#define UCL_E_OUT_OF_MEMORY (-3) +/* compression errors */ +#define UCL_E_NOT_COMPRESSIBLE (-101) +/* decompression errors */ +#define UCL_E_INPUT_OVERRUN (-201) +#define UCL_E_OUTPUT_OVERRUN (-202) +#define UCL_E_LOOKBEHIND_OVERRUN (-203) +#define UCL_E_EOF_NOT_FOUND (-204) +#define UCL_E_INPUT_NOT_CONSUMED (-205) +#define UCL_E_OVERLAP_OVERRUN (-206) + + +/* ucl_init() should be the first function you call. + * Check the return code ! + * + * ucl_init() is a macro to allow checking that the library and the + * compiler's view of various types are consistent. + */ +#define ucl_init() __ucl_init2(UCL_VERSION,(int)sizeof(short),(int)sizeof(int),\ + (int)sizeof(long),(int)sizeof(ucl_uint32),(int)sizeof(ucl_uint),\ + (int)-1,(int)sizeof(char *),(int)sizeof(ucl_voidp),\ + (int)sizeof(ucl_compress_t)) +UCL_EXTERN(int) __ucl_init2(ucl_uint32,int,int,int,int,int,int,int,int,int); + +/* version functions (useful for shared libraries) */ +UCL_EXTERN(ucl_uint32) ucl_version(void); +UCL_EXTERN(const char *) ucl_version_string(void); +UCL_EXTERN(const char *) ucl_version_date(void); +UCL_EXTERN(const ucl_charp) _ucl_version_string(void); +UCL_EXTERN(const ucl_charp) _ucl_version_date(void); + +/* string functions */ +UCL_EXTERN(int) +ucl_memcmp(const ucl_voidp _s1, const ucl_voidp _s2, ucl_uint _len); +UCL_EXTERN(ucl_voidp) +ucl_memcpy(ucl_voidp _dest, const ucl_voidp _src, ucl_uint _len); +UCL_EXTERN(ucl_voidp) +ucl_memmove(ucl_voidp _dest, const ucl_voidp _src, ucl_uint _len); +UCL_EXTERN(ucl_voidp) +ucl_memset(ucl_voidp _s, int _c, ucl_uint _len); + +/* checksum functions */ +UCL_EXTERN(ucl_uint32) +ucl_adler32(ucl_uint32 _adler, const ucl_bytep _buf, ucl_uint _len); +UCL_EXTERN(ucl_uint32) +ucl_crc32(ucl_uint32 _c, const ucl_bytep _buf, ucl_uint _len); + +/* memory allocation functions */ +UCL_EXTERN(ucl_voidp) ucl_alloc(ucl_uint _nelems, ucl_uint _size); +UCL_EXTERN(ucl_voidp) ucl_malloc(ucl_uint _size); +UCL_EXTERN(void) ucl_free(ucl_voidp _ptr); + +typedef ucl_voidp (__UCL_ENTRY *ucl_alloc_hook_t) (ucl_uint, ucl_uint); +typedef void (__UCL_ENTRY *ucl_free_hook_t) (ucl_voidp); + +extern ucl_alloc_hook_t ucl_alloc_hook; +extern ucl_free_hook_t ucl_free_hook; + +/* misc. 
*/ +UCL_EXTERN(ucl_bool) ucl_assert(int _expr); +UCL_EXTERN(int) _ucl_config_check(void); +typedef union { ucl_bytep p; ucl_uint u; } __ucl_pu_u; +typedef union { ucl_bytep p; ucl_uint32 u32; } __ucl_pu32_u; + +/* align a char pointer on a boundary that is a multiple of `size' */ +UCL_EXTERN(unsigned) __ucl_align_gap(const ucl_voidp _ptr, ucl_uint _size); +#define UCL_PTR_ALIGN_UP(_ptr,_size) \ + ((_ptr) + (ucl_uint) __ucl_align_gap((const ucl_voidp)(_ptr),(ucl_uint)(_size))) + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* already included */ + diff -ruN linux-2.6.2.orig/include/ucl/uclutil.h linux-2.6.2/include/ucl/uclutil.h --- linux-2.6.2.orig/include/ucl/uclutil.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.6.2/include/ucl/uclutil.h 2004-02-17 03:05:01.010456048 +0100 @@ -0,0 +1,71 @@ +/* uclutil.h -- utilities for the UCL real-time data compression library + + This file is part of the UCL data compression library. + + Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer + Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer + All Rights Reserved. + + The UCL library is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + The UCL library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with the UCL library; see the file COPYING. + If not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + Markus F.X.J. Oberhumer + + http://www.oberhumer.com/opensource/ucl/ + */ + + +#ifndef __UCLUTIL_H +#define __UCLUTIL_H + +#ifndef __UCLCONF_H +#include +#endif + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +/*********************************************************************** +// +************************************************************************/ + +UCL_EXTERN(ucl_uint) +ucl_fread(FILE *f, ucl_voidp buf, ucl_uint size); +UCL_EXTERN(ucl_uint) +ucl_fwrite(FILE *f, const ucl_voidp buf, ucl_uint size); + + +#if (UCL_UINT_MAX <= UINT_MAX) + /* avoid problems with Win32 DLLs */ +# define ucl_fread(f,b,s) (fread(b,1,s,f)) +# define ucl_fwrite(f,b,s) (fwrite(b,1,s,f)) +#endif + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* already included */ +
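
Editor's note, not part of the patch: the nrv2e entry points declared in ucl.h above all share one calling convention -- compressed source buffer and length in, destination buffer plus a pointer to its length, an optional work-memory pointer, and a UCL_E_* code as the result. The sketch below only illustrates how a kernel-side caller might drive a safe decompression and the ucl_init() check that uclconf.h asks for. The helper names example_ucl_decompress and example_ucl_setup are made up; the <ucl/ucl.h> include path assumes the include/ucl/ layout added by this patch; passing NULL work memory and treating *dst_len as in/out (capacity on entry, bytes produced on return) are assumptions, not something the headers spell out.

#include <linux/errno.h>
#include <linux/types.h>
#include <ucl/ucl.h>

static int example_ucl_decompress(const u8 *src, unsigned int slen,
                                  u8 *dst, unsigned int *dlen)
{
	ucl_uint out_len = *dlen;	/* dst capacity on entry (assumed in/out) */
	int err;

	err = ucl_nrv2e_decompress_safe_8(src, slen, dst, &out_len, NULL);
	if (err != UCL_E_OK)
		return -EINVAL;		/* collapse all UCL_E_* errors into one errno */

	*dlen = out_len;		/* bytes actually produced */
	return 0;
}

static int example_ucl_setup(void)
{
	/* ucl_init() cross-checks the library's and the caller's type sizes */
	return (ucl_init() == UCL_E_OK) ? 0 : -ENODEV;
}

The _safe_ variant is used here because its name, together with the UCL_E_OUTPUT_OVERRUN error code declared above, suggests it is the variant that enforces the destination length; the plain ucl_nrv2e_decompress_8() presumably does not.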
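
The test_overlap helpers declared in ucl.h perform a virtual decompression to decide whether compressed data parked at &buf[src_off] could be unpacked in place to &buf[0] without being overwritten before it is read; no data is written to the buffer. A hypothetical wrapper (again not part of the patch, reusing the includes from the previous sketch) might look as follows; the name example_overlap_ok, the expected_out seed value for dst_len and the -ENOSPC mapping are illustrative assumptions.

static int example_overlap_ok(const unsigned char *buf, ucl_uint src_off,
                              ucl_uint src_len, ucl_uint expected_out)
{
	ucl_uint dst_len = expected_out;	/* harmless if the call only writes it */
	int err;

	err = ucl_nrv2e_test_overlap_8(buf, src_off, src_len, &dst_len, NULL);
	return (err == UCL_E_OK) ? 0 : -ENOSPC;
}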
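
uclconf.h also exports the ucl_alloc_hook and ucl_free_hook function pointers next to ucl_alloc()/ucl_malloc()/ucl_free(). If -- and this is an assumption based only on the declarations above -- the library routes its internal allocations through these hooks, a kernel user could point them at vmalloc()/vfree() roughly like this; example_ucl_alloc, example_ucl_free and example_install_hooks are made-up names.

#include <linux/vmalloc.h>

static ucl_voidp example_ucl_alloc(ucl_uint nelems, ucl_uint size)
{
	return vmalloc((unsigned long)nelems * size);
}

static void example_ucl_free(ucl_voidp ptr)
{
	vfree(ptr);
}

static void example_install_hooks(void)
{
	ucl_alloc_hook = example_ucl_alloc;
	ucl_free_hook  = example_ucl_free;
}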