Index: sys/amd64/conf/NOTES =================================================================== --- sys/amd64/conf/NOTES +++ sys/amd64/conf/NOTES @@ -127,7 +127,6 @@ device speaker #Play IBM BASIC-style noises out your speaker hint.speaker.0.at="isa" hint.speaker.0.port="0x61" -device gzip #Exec gzipped a.out's. REQUIRES COMPAT_AOUT! ##################################################################### Index: sys/arm/arm/elf_trampoline.c =================================================================== --- sys/arm/arm/elf_trampoline.c +++ /dev/null @@ -1,557 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2005 Olivier Houchard. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Since we are compiled outside of the normal kernel build process, we - * need to include opt_global.h manually. 
- */ -#include "opt_global.h" -#include "opt_kernname.h" - -#include -__FBSDID("$FreeBSD$"); -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* For KERNVIRTADDR */ - -#if __ARM_ARCH >= 6 -#error "elf_trampline is not supported on ARMv6/v7 platforms" -#endif -extern char kernel_start[]; -extern char kernel_end[]; - -extern void *_end; - -void _start(void); -void __start(void); -void __startC(unsigned r0, unsigned r1, unsigned r2, unsigned r3); - -extern void do_call(void *, void *, void *, int); - -#define GZ_HEAD 0xa - -#if defined(CPU_ARM9E) -#define cpu_idcache_wbinv_all armv5_ec_idcache_wbinv_all -extern void armv5_ec_idcache_wbinv_all(void); -#endif -#if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) -#define cpu_l2cache_wbinv_all sheeva_l2cache_wbinv_all -extern void sheeva_l2cache_wbinv_all(void); -#else -#define cpu_l2cache_wbinv_all() -#endif - -/* - * Boot parameters - */ -static struct arm_boot_params s_boot_params; - -static __inline void * -memcpy(void *dst, const void *src, int len) -{ - const char *s = src; - char *d = dst; - - while (len) { - if (0 && len >= 4 && !((vm_offset_t)d & 3) && - !((vm_offset_t)s & 3)) { - *(uint32_t *)d = *(uint32_t *)s; - s += 4; - d += 4; - len -= 4; - } else { - *d++ = *s++; - len--; - } - } - return (dst); -} - -static __inline void -bzero(void *addr, int count) -{ - char *tmp = (char *)addr; - - while (count > 0) { - if (count >= 4 && !((vm_offset_t)tmp & 3)) { - *(uint32_t *)tmp = 0; - tmp += 4; - count -= 4; - } else { - *tmp = 0; - tmp++; - count--; - } - } -} - -void -_startC(unsigned r0, unsigned r1, unsigned r2, unsigned r3) -{ - int tmp1; - unsigned int sp = ((unsigned int)&_end & ~3) + 4; - unsigned int pc, kernphysaddr; - - s_boot_params.abp_r0 = r0; - s_boot_params.abp_r1 = r1; - s_boot_params.abp_r2 = r2; - s_boot_params.abp_r3 = r3; - - /* - * Figure out the physical address the kernel was loaded at. This - * assumes the entry point (this code right here) is in the first page, - * which will always be the case for this trampoline code. - */ - __asm __volatile("mov %0, pc\n" - : "=r" (pc)); - kernphysaddr = pc & ~PAGE_MASK; - -#if defined(FLASHADDR) && defined(PHYSADDR) && defined(LOADERRAMADDR) - if ((FLASHADDR > LOADERRAMADDR && pc >= FLASHADDR) || - (FLASHADDR < LOADERRAMADDR && pc < LOADERRAMADDR)) { - /* - * We're running from flash, so just copy the whole thing - * from flash to memory. - * This is far from optimal, we could do the relocation or - * the unzipping directly from flash to memory to avoid this - * needless copy, but it would require to know the flash - * physical address. - */ - unsigned int target_addr; - unsigned int tmp_sp; - uint32_t src_addr = (uint32_t)&_start - PHYSADDR + FLASHADDR - + (pc - FLASHADDR - ((uint32_t)&_startC - PHYSADDR)) & 0xfffff000; - - target_addr = (unsigned int)&_start - PHYSADDR + LOADERRAMADDR; - tmp_sp = target_addr + 0x100000 + - (unsigned int)&_end - (unsigned int)&_start; - memcpy((char *)target_addr, (char *)src_addr, - (unsigned int)&_end - (unsigned int)&_start); - /* Temporary set the sp and jump to the new location. 
*/ - __asm __volatile( - "mov sp, %1\n" - "mov r0, %2\n" - "mov r1, %3\n" - "mov r2, %4\n" - "mov r3, %5\n" - "mov pc, %0\n" - : : "r" (target_addr), "r" (tmp_sp), - "r" (s_boot_params.abp_r0), "r" (s_boot_params.abp_r1), - "r" (s_boot_params.abp_r2), "r" (s_boot_params.abp_r3) - : "r0", "r1", "r2", "r3"); - - } -#endif -#ifdef KZIP - sp += KERNSIZE + 0x100; - sp &= ~(L1_TABLE_SIZE - 1); - sp += 2 * L1_TABLE_SIZE; -#endif - sp += 1024 * 1024; /* Should be enough for a stack */ - - __asm __volatile("adr %0, 2f\n" - "bic %0, %0, #0xff000000\n" - "and %1, %1, #0xff000000\n" - "orr %0, %0, %1\n" - "mrc p15, 0, %1, c1, c0, 0\n" /* CP15_SCTLR(%1)*/ - "bic %1, %1, #1\n" /* Disable MMU */ - "orr %1, %1, #(4 | 8)\n" /* Add DC enable, - WBUF enable */ - "orr %1, %1, #0x1000\n" /* Add IC enable */ - "orr %1, %1, #(0x800)\n" /* BPRD enable */ - - "mcr p15, 0, %1, c1, c0, 0\n" /* CP15_SCTLR(%1)*/ - "nop\n" - "nop\n" - "nop\n" - "mov pc, %0\n" - "2: nop\n" - "mov sp, %2\n" - : "=r" (tmp1), "+r" (kernphysaddr), "+r" (sp)); - __start(); -} - -#ifdef KZIP -static unsigned char *orig_input, *i_input, *i_output; - - -static u_int memcnt; /* Memory allocated: blocks */ -static size_t memtot; /* Memory allocated: bytes */ -/* - * Library functions required by inflate(). - */ - -#define MEMSIZ 0x8000 - -/* - * Allocate memory block. - */ -unsigned char * -kzipmalloc(int size) -{ - void *ptr; - static u_char mem[MEMSIZ]; - - if (memtot + size > MEMSIZ) - return NULL; - ptr = mem + memtot; - memtot += size; - memcnt++; - return ptr; -} - -/* - * Free allocated memory block. - */ -void -kzipfree(void *ptr) -{ - memcnt--; - if (!memcnt) - memtot = 0; -} - -void -putstr(char *dummy) -{ -} - -static int -input(void *dummy) -{ - if ((size_t)(i_input - orig_input) >= KERNCOMPSIZE) { - return (GZ_EOF); - } - return *i_input++; -} - -static int -output(void *dummy, unsigned char *ptr, unsigned long len) -{ - - - memcpy(i_output, ptr, len); - i_output += len; - return (0); -} - -static void * -inflate_kernel(void *kernel, void *startaddr) -{ - struct inflate infl; - unsigned char slide[GZ_WSIZE]; - - orig_input = kernel; - memcnt = memtot = 0; - i_input = (unsigned char *)kernel + GZ_HEAD; - if (((char *)kernel)[3] & 0x18) { - while (*i_input) - i_input++; - i_input++; - } - i_output = startaddr; - bzero(&infl, sizeof(infl)); - infl.gz_input = input; - infl.gz_output = output; - infl.gz_slide = slide; - inflate(&infl); - return ((char *)(((vm_offset_t)i_output & ~3) + 4)); -} - -#endif - -void * -load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end, - int d) -{ - Elf32_Ehdr *eh; - Elf32_Phdr phdr[64] /* XXX */, *php; - Elf32_Shdr shdr[64] /* XXX */; - int i,j; - void *entry_point; - int symtabindex = -1; - int symstrindex = -1; - vm_offset_t lastaddr = 0; - Elf_Addr ssym = 0; - Elf_Dyn *dp; - struct arm_boot_params local_boot_params; - - eh = (Elf32_Ehdr *)kstart; - ssym = 0; - entry_point = (void*)eh->e_entry; - memcpy(phdr, (void *)(kstart + eh->e_phoff ), - eh->e_phnum * sizeof(phdr[0])); - - /* Determine lastaddr. */ - for (i = 0; i < eh->e_phnum; i++) { - if (lastaddr < (phdr[i].p_vaddr - KERNVIRTADDR + curaddr - + phdr[i].p_memsz)) - lastaddr = phdr[i].p_vaddr - KERNVIRTADDR + - curaddr + phdr[i].p_memsz; - } - - /* Save the symbol tables, as there're about to be scratched. 
*/ - memcpy(shdr, (void *)(kstart + eh->e_shoff), - sizeof(*shdr) * eh->e_shnum); - if (eh->e_shnum * eh->e_shentsize != 0 && - eh->e_shoff != 0) { - for (i = 0; i < eh->e_shnum; i++) { - if (shdr[i].sh_type == SHT_SYMTAB) { - for (j = 0; j < eh->e_phnum; j++) { - if (phdr[j].p_type == PT_LOAD && - shdr[i].sh_offset >= - phdr[j].p_offset && - (shdr[i].sh_offset + - shdr[i].sh_size <= - phdr[j].p_offset + - phdr[j].p_filesz)) { - shdr[i].sh_offset = 0; - shdr[i].sh_size = 0; - j = eh->e_phnum; - } - } - if (shdr[i].sh_offset != 0 && - shdr[i].sh_size != 0) { - symtabindex = i; - symstrindex = shdr[i].sh_link; - } - } - } - func_end = roundup(func_end, sizeof(long)); - if (symtabindex >= 0 && symstrindex >= 0) { - ssym = lastaddr; - if (d) { - memcpy((void *)func_end, (void *)( - shdr[symtabindex].sh_offset + kstart), - shdr[symtabindex].sh_size); - memcpy((void *)(func_end + - shdr[symtabindex].sh_size), - (void *)(shdr[symstrindex].sh_offset + - kstart), shdr[symstrindex].sh_size); - } else { - lastaddr += shdr[symtabindex].sh_size; - lastaddr = roundup(lastaddr, - sizeof(shdr[symtabindex].sh_size)); - lastaddr += sizeof(shdr[symstrindex].sh_size); - lastaddr += shdr[symstrindex].sh_size; - lastaddr = roundup(lastaddr, - sizeof(shdr[symstrindex].sh_size)); - } - - } - } - if (!d) - return ((void *)lastaddr); - - /* - * Now the stack is fixed, copy boot params - * before it's overrided - */ - memcpy(&local_boot_params, &s_boot_params, sizeof(local_boot_params)); - - j = eh->e_phnum; - for (i = 0; i < j; i++) { - volatile char c; - - if (phdr[i].p_type != PT_LOAD) - continue; - memcpy((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr), - (void*)(kstart + phdr[i].p_offset), phdr[i].p_filesz); - /* Clean space from oversized segments, eg: bss. */ - if (phdr[i].p_filesz < phdr[i].p_memsz) - bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR + - curaddr + phdr[i].p_filesz), phdr[i].p_memsz - - phdr[i].p_filesz); - } - /* Now grab the symbol tables. */ - if (symtabindex >= 0 && symstrindex >= 0) { - *(Elf_Size *)lastaddr = - shdr[symtabindex].sh_size; - lastaddr += sizeof(shdr[symtabindex].sh_size); - memcpy((void*)lastaddr, - (void *)func_end, - shdr[symtabindex].sh_size); - lastaddr += shdr[symtabindex].sh_size; - lastaddr = roundup(lastaddr, - sizeof(shdr[symtabindex].sh_size)); - *(Elf_Size *)lastaddr = - shdr[symstrindex].sh_size; - lastaddr += sizeof(shdr[symstrindex].sh_size); - memcpy((void*)lastaddr, - (void*)(func_end + - shdr[symtabindex].sh_size), - shdr[symstrindex].sh_size); - lastaddr += shdr[symstrindex].sh_size; - lastaddr = roundup(lastaddr, - sizeof(shdr[symstrindex].sh_size)); - *(Elf_Addr *)curaddr = MAGIC_TRAMP_NUMBER; - *((Elf_Addr *)curaddr + 1) = ssym - curaddr + KERNVIRTADDR; - *((Elf_Addr *)curaddr + 2) = lastaddr - curaddr + KERNVIRTADDR; - } else - *(Elf_Addr *)curaddr = 0; - /* Invalidate the instruction cache. */ - __asm __volatile("mcr p15, 0, %0, c7, c5, 0\n" - "mcr p15, 0, %0, c7, c10, 4\n" - : : "r" (curaddr)); - __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ - "bic %0, %0, #1\n" /* MMU_ENABLE */ - "mcr p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ - : "=r" (ssym)); - /* Jump to the entry point. 
*/ - ((void(*)(unsigned, unsigned, unsigned, unsigned)) - (entry_point - KERNVIRTADDR + curaddr)) - (local_boot_params.abp_r0, local_boot_params.abp_r1, - local_boot_params.abp_r2, local_boot_params.abp_r3); - __asm __volatile(".globl func_end\n" - "func_end:"); - - /* NOTREACHED */ - return NULL; -} - -extern char func_end[]; - - -#define PMAP_DOMAIN_KERNEL 0 /* - * Just define it instead of including the - * whole VM headers set. - */ -int __hack; -static __inline void -setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend, - int write_back) -{ - unsigned int *pd = (unsigned int *)pt_addr; - vm_paddr_t addr; - int domain = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT; - int tmp; - - bzero(pd, L1_TABLE_SIZE); - for (addr = physstart; addr < physend; addr += L1_S_SIZE) { - pd[addr >> L1_S_SHIFT] = L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)| - L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr; - if (write_back && 0) - pd[addr >> L1_S_SHIFT] |= L1_S_B; - } - /* XXX: See below */ - if (0xfff00000 < physstart || 0xfff00000 > physend) - pd[0xfff00000 >> L1_S_SHIFT] = L1_TYPE_S|L1_S_AP(AP_KRW)| - L1_S_DOM(PMAP_DOMAIN_KERNEL)|physstart; - __asm __volatile("mcr p15, 0, %1, c2, c0, 0\n" /* set TTB */ - "mcr p15, 0, %1, c8, c7, 0\n" /* Flush TTB */ - "mcr p15, 0, %2, c3, c0, 0\n" /* Set DAR */ - "mrc p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ - "orr %0, %0, #1\n" /* MMU_ENABLE */ - "mcr p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ - "mrc p15, 0, %0, c2, c0, 0\n" /* CPWAIT */ - "mov r0, r0\n" - "sub pc, pc, #4\n" : - "=r" (tmp) : "r" (pd), "r" (domain)); - - /* - * XXX: This is the most stupid workaround I've ever wrote. - * For some reason, the KB9202 won't boot the kernel unless - * we access an address which is not in the - * 0x20000000 - 0x20ffffff range. I hope I'll understand - * what's going on later. - */ - __hack = *(volatile int *)0xfffff21c; -} - -void -__start(void) -{ - void *curaddr; - void *dst, *altdst; - char *kernel = (char *)&kernel_start; - int sp; - int pt_addr; - - __asm __volatile("mov %0, pc" : - "=r" (curaddr)); - curaddr = (void*)((unsigned int)curaddr & 0xfff00000); -#ifdef KZIP - if (*kernel == 0x1f && kernel[1] == 0x8b) { - pt_addr = L1_TABLE_SIZE + - rounddown2((int)&_end + KERNSIZE + 0x100, L1_TABLE_SIZE); - - setup_pagetables(pt_addr, (vm_paddr_t)curaddr, - (vm_paddr_t)curaddr + 0x10000000, 1); - /* Gzipped kernel */ - dst = inflate_kernel(kernel, &_end); - kernel = (char *)&_end; - altdst = 4 + load_kernel((unsigned int)kernel, - (unsigned int)curaddr, - (unsigned int)&func_end + 800 , 0); - if (altdst > dst) - dst = altdst; - - /* - * Disable MMU. Otherwise, setup_pagetables call below - * might overwrite the L1 table we are currently using. 
- */ - cpu_idcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ - "bic %0, %0, #1\n" /* MMU_DISABLE */ - "mcr p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ - :"=r" (pt_addr)); - } else -#endif - dst = 4 + load_kernel((unsigned int)&kernel_start, - (unsigned int)curaddr, - (unsigned int)&func_end, 0); - dst = (void *)(((vm_offset_t)dst & ~3)); - pt_addr = L1_TABLE_SIZE + rounddown2((unsigned int)dst, L1_TABLE_SIZE); - setup_pagetables(pt_addr, (vm_paddr_t)curaddr, - (vm_paddr_t)curaddr + 0x10000000, 0); - sp = pt_addr + L1_TABLE_SIZE + 8192; - sp = sp &~3; - dst = (void *)(sp + 4); - memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end - - (unsigned int)&load_kernel + 800); - do_call(dst, kernel, dst + (unsigned int)(&func_end) - - (unsigned int)(&load_kernel) + 800, sp); -} - -/* We need to provide these functions but never call them */ -void __aeabi_unwind_cpp_pr0(void); -void __aeabi_unwind_cpp_pr1(void); -void __aeabi_unwind_cpp_pr2(void); - -__strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr1); -__strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr2); -void -__aeabi_unwind_cpp_pr0(void) -{ -} Index: sys/conf/files =================================================================== --- sys/conf/files +++ sys/conf/files @@ -3814,7 +3814,6 @@ kern/subr_gtaskqueue.c standard kern/subr_hash.c standard kern/subr_hints.c standard -kern/subr_inflate.c optional gzip kern/subr_kdb.c standard kern/subr_kobj.c standard kern/subr_lock.c standard Index: sys/conf/files.amd64 =================================================================== --- sys/conf/files.amd64 +++ sys/conf/files.amd64 @@ -633,7 +633,6 @@ isa/vga_isa.c optional vga kern/kern_clocksource.c standard kern/imgact_aout.c optional compat_aout -kern/imgact_gzip.c optional gzip kern/link_elf_obj.c standard libkern/x86/crc32_sse42.c standard # Index: sys/conf/files.i386 =================================================================== --- sys/conf/files.i386 +++ sys/conf/files.i386 @@ -529,7 +529,6 @@ isa/vga_isa.c optional vga kern/kern_clocksource.c standard kern/imgact_aout.c optional compat_aout -kern/imgact_gzip.c optional gzip kern/subr_sfbuf.c standard libkern/divdi3.c standard libkern/ffsll.c standard Index: sys/i386/conf/NOTES =================================================================== --- sys/i386/conf/NOTES +++ sys/i386/conf/NOTES @@ -275,7 +275,6 @@ device speaker #Play IBM BASIC-style noises out your speaker hint.speaker.0.at="isa" hint.speaker.0.port="0x61" -device gzip #Exec gzipped a.out's. REQUIRES COMPAT_AOUT! device apm_saver # Requires APM Index: sys/kern/imgact_gzip.c =================================================================== --- sys/kern/imgact_gzip.c +++ /dev/null @@ -1,394 +0,0 @@ -/*- - * SPDX-License-Identifier: Beerware - * - * ---------------------------------------------------------------------------- - * "THE BEER-WARE LICENSE" (Revision 42): - * wrote this file. As long as you retain this notice you - * can do whatever you want with this stuff. If we meet some day, and you think - * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp - * ---------------------------------------------------------------------------- - */ - -/* - * This module handles execution of a.out files which have been run through - * "gzip". This saves diskspace, but wastes cpu-cycles and VM. - * - * TODO: - * text-segments should be made R/O after being filled - * is the vm-stuff safe ? 
- * should handle the entire header of gzip'ed stuff. - * inflate isn't quite reentrant yet... - * error-handling is a mess... - * so is the rest... - * tidy up unnecessary includes - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -struct imgact_gzip { - struct image_params *ip; - struct exec a_out; - int error; - int gotheader; - int where; - u_char *inbuf; - u_long offset; - u_long output; - u_long len; - int idx; - u_long virtual_offset, file_offset, file_end, bss_size; -}; - -static int exec_gzip_imgact(struct image_params *imgp); -static int NextByte(void *vp); -static int do_aout_hdr(struct imgact_gzip *); -static int Flush(void *vp, u_char *, u_long siz); - -static int -exec_gzip_imgact(struct image_params *imgp) -{ - int error; - const u_char *p = (const u_char *) imgp->image_header; - struct imgact_gzip igz; - struct inflate infl; - struct vmspace *vmspace; - - /* If these four are not OK, it isn't a gzip file */ - if (p[0] != 0x1f) - return -1; /* 0 Simply magic */ - if (p[1] != 0x8b) - return -1; /* 1 Simply magic */ - if (p[2] != 0x08) - return -1; /* 2 Compression method */ - if (p[9] != 0x03) - return -1; /* 9 OS compressed on */ - - /* - * If this one contains anything but a comment or a filename marker, - * we don't want to chew on it - */ - if (p[3] & ~(0x18)) - return ENOEXEC; /* 3 Flags */ - - /* These are of no use to us */ - /* 4-7 Timestamp */ - /* 8 Extra flags */ - - bzero(&igz, sizeof igz); - bzero(&infl, sizeof infl); - infl.gz_private = (void *) &igz; - infl.gz_input = NextByte; - infl.gz_output = Flush; - - igz.ip = imgp; - igz.idx = 10; - - if (p[3] & 0x08) { /* skip a filename */ - while (p[igz.idx++]) - if (igz.idx >= PAGE_SIZE) - return ENOEXEC; - } - if (p[3] & 0x10) { /* skip a comment */ - while (p[igz.idx++]) - if (igz.idx >= PAGE_SIZE) - return ENOEXEC; - } - igz.len = imgp->attr->va_size; - - error = inflate(&infl); - - /* - * The unzipped file may not even have been long enough to contain - * a header giving Flush() a chance to return error. Check for this. - */ - if ( !igz.gotheader ) - return ENOEXEC; - - if ( !error ) { - vmspace = imgp->proc->p_vmspace; - error = vm_map_protect(&vmspace->vm_map, - (vm_offset_t) vmspace->vm_taddr, - (vm_offset_t) (vmspace->vm_taddr + - (vmspace->vm_tsize << PAGE_SHIFT)) , - VM_PROT_READ|VM_PROT_EXECUTE,0); - } - - if (igz.inbuf) - kmap_free_wakeup(exec_map, (vm_offset_t)igz.inbuf, PAGE_SIZE); - if (igz.error || error) { - printf("Output=%lu ", igz.output); - printf("Inflate_error=%d igz.error=%d where=%d\n", - error, igz.error, igz.where); - } - if (igz.error) - return igz.error; - if (error) - return ENOEXEC; - return 0; -} - -static int -do_aout_hdr(struct imgact_gzip * gz) -{ - int error; - struct vmspace *vmspace; - vm_offset_t vmaddr; - - /* - * Set file/virtual offset based on a.out variant. 
We do two cases: - * host byte order and network byte order (for NetBSD compatibility) - */ - switch ((int) (gz->a_out.a_midmag & 0xffff)) { - case ZMAGIC: - gz->virtual_offset = 0; - if (gz->a_out.a_text) { - gz->file_offset = PAGE_SIZE; - } else { - /* Bill's "screwball mode" */ - gz->file_offset = 0; - } - break; - case QMAGIC: - gz->virtual_offset = PAGE_SIZE; - gz->file_offset = 0; - break; - default: - /* NetBSD compatibility */ - switch ((int) (ntohl(gz->a_out.a_midmag) & 0xffff)) { - case ZMAGIC: - case QMAGIC: - gz->virtual_offset = PAGE_SIZE; - gz->file_offset = 0; - break; - default: - gz->where = __LINE__; - return (-1); - } - } - - gz->bss_size = roundup(gz->a_out.a_bss, PAGE_SIZE); - - /* - * Check various fields in header for validity/bounds. - */ - if ( /* entry point must lay with text region */ - gz->a_out.a_entry < gz->virtual_offset || - gz->a_out.a_entry >= gz->virtual_offset + gz->a_out.a_text || - - /* text and data size must each be page rounded */ - gz->a_out.a_text & PAGE_MASK || gz->a_out.a_data & PAGE_MASK) { - gz->where = __LINE__; - return (-1); - } - /* - * text/data/bss must not exceed limits - */ - PROC_LOCK(gz->ip->proc); - if ( /* text can't exceed maximum text size */ - gz->a_out.a_text > maxtsiz || - - /* data + bss can't exceed rlimit */ - gz->a_out.a_data + gz->bss_size > - lim_cur_proc(gz->ip->proc, RLIMIT_DATA) || - racct_set(gz->ip->proc, RACCT_DATA, - gz->a_out.a_data + gz->bss_size) != 0) { - PROC_UNLOCK(gz->ip->proc); - gz->where = __LINE__; - return (ENOMEM); - } - PROC_UNLOCK(gz->ip->proc); - /* Find out how far we should go */ - gz->file_end = gz->file_offset + gz->a_out.a_text + gz->a_out.a_data; - - /* - * Avoid a possible deadlock if the current address space is destroyed - * and that address space maps the locked vnode. In the common case, - * the locked vnode's v_usecount is decremented but remains greater - * than zero. Consequently, the vnode lock is not needed by vrele(). - * However, in cases where the vnode lock is external, such as nullfs, - * v_usecount may become zero. - */ - VOP_UNLOCK(gz->ip->vp, 0); - - /* - * Destroy old process VM and create a new one (with a new stack) - */ - error = exec_new_vmspace(gz->ip, &aout_sysvec); - - vn_lock(gz->ip->vp, LK_EXCLUSIVE | LK_RETRY); - if (error) { - gz->where = __LINE__; - return (error); - } - - vmspace = gz->ip->proc->p_vmspace; - - vmaddr = gz->virtual_offset; - - error = vm_mmap(&vmspace->vm_map, - &vmaddr, - gz->a_out.a_text + gz->a_out.a_data, - VM_PROT_ALL, VM_PROT_ALL, MAP_ANON | MAP_FIXED, - OBJT_DEFAULT, - NULL, - 0); - - if (error) { - gz->where = __LINE__; - return (error); - } - - if (gz->bss_size != 0) { - /* - * Allocate demand-zeroed area for uninitialized data. - * "bss" = 'block started by symbol' - named after the - * IBM 7090 instruction of the same name. 
- */ - vmaddr = gz->virtual_offset + gz->a_out.a_text + - gz->a_out.a_data; - error = vm_map_find(&vmspace->vm_map, NULL, 0, &vmaddr, - gz->bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, - 0); - if (error) { - gz->where = __LINE__; - return (error); - } - } - /* Fill in process VM information */ - vmspace->vm_tsize = gz->a_out.a_text >> PAGE_SHIFT; - vmspace->vm_dsize = (gz->a_out.a_data + gz->bss_size) >> PAGE_SHIFT; - vmspace->vm_taddr = (caddr_t) (uintptr_t) gz->virtual_offset; - vmspace->vm_daddr = (caddr_t) (uintptr_t) - (gz->virtual_offset + gz->a_out.a_text); - - /* Fill in image_params */ - gz->ip->interpreted = 0; - gz->ip->entry_addr = gz->a_out.a_entry; - - gz->ip->proc->p_sysent = &aout_sysvec; - - return 0; -} - -static int -NextByte(void *vp) -{ - int error; - struct imgact_gzip *igz = (struct imgact_gzip *) vp; - - if (igz->idx >= igz->len) { - igz->where = __LINE__; - return GZ_EOF; - } - if (igz->inbuf && igz->idx < (igz->offset + PAGE_SIZE)) { - return igz->inbuf[(igz->idx++) - igz->offset]; - } - if (igz->inbuf) - kmap_free_wakeup(exec_map, (vm_offset_t)igz->inbuf, PAGE_SIZE); - igz->offset = igz->idx & ~PAGE_MASK; - - error = vm_mmap(exec_map, /* map */ - (vm_offset_t *) & igz->inbuf, /* address */ - PAGE_SIZE, /* size */ - VM_PROT_READ, /* protection */ - VM_PROT_READ, /* max protection */ - 0, /* flags */ - OBJT_VNODE, /* handle type */ - igz->ip->vp, /* vnode */ - igz->offset); /* offset */ - if (error) { - igz->where = __LINE__; - igz->error = error; - return GZ_EOF; - } - return igz->inbuf[(igz->idx++) - igz->offset]; -} - -static int -Flush(void *vp, u_char * ptr, u_long siz) -{ - struct imgact_gzip *gz = (struct imgact_gzip *) vp; - u_char *p = ptr, *q; - int i; - - /* First, find an a.out-header. */ - if (gz->output < sizeof gz->a_out) { - q = (u_char *) & gz->a_out; - i = min(siz, sizeof gz->a_out - gz->output); - bcopy(p, q + gz->output, i); - gz->output += i; - p += i; - siz -= i; - if (gz->output == sizeof gz->a_out) { - gz->gotheader = 1; - i = do_aout_hdr(gz); - if (i == -1) { - if (!gz->where) - gz->where = __LINE__; - gz->error = ENOEXEC; - return ENOEXEC; - } else if (i) { - gz->where = __LINE__; - gz->error = i; - return ENOEXEC; - } - if (gz->file_offset == 0) { - q = (u_char *) (uintptr_t) gz->virtual_offset; - copyout(&gz->a_out, q, sizeof gz->a_out); - } - } - } - /* Skip over zero-padded first PAGE if needed */ - if (gz->output < gz->file_offset && - gz->output + siz > gz->file_offset) { - i = min(siz, gz->file_offset - gz->output); - gz->output += i; - p += i; - siz -= i; - } - if (gz->output >= gz->file_offset && gz->output < gz->file_end) { - i = min(siz, gz->file_end - gz->output); - q = (u_char *) (uintptr_t) - (gz->virtual_offset + gz->output - gz->file_offset); - copyout(p, q, i); - gz->output += i; - p += i; - siz -= i; - } - gz->output += siz; - return 0; -} - - -/* - * Tell kern_execve.c about it, with a little help from the linker. - */ -static struct execsw gzip_execsw = { - .ex_imgact = exec_gzip_imgact, - .ex_name = "gzip" -}; -EXEC_SET(execgzip, gzip_execsw); Index: sys/kern/subr_inflate.c =================================================================== --- sys/kern/subr_inflate.c +++ /dev/null @@ -1,1084 +0,0 @@ -/*- - * Most parts of this file are not covered by: - * - * SPDX-License-Identifier: Beerware - * ---------------------------------------------------------------------------- - * "THE BEER-WARE LICENSE" (Revision 42): - * wrote this file. 
As long as you retain this notice you - * can do whatever you want with this stuff. If we meet some day, and you think - * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp - * ---------------------------------------------------------------------------- - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#ifdef _KERNEL -#include -#include -#endif -#include - -#ifdef _KERNEL -static MALLOC_DEFINE(M_GZIP, "gzip_trees", "Gzip trees"); -#endif - -/* needed to make inflate() work */ -#define uch u_char -#define ush u_short -#define ulg u_long - -/* Stuff to make inflate() work */ -#ifdef _KERNEL -#define memzero(dest,len) bzero(dest,len) -#endif -#define NOMEMCPY -#ifdef _KERNEL -#define FPRINTF printf -#else -extern void putstr (char *); -#define FPRINTF putstr -#endif - -#define FLUSH(x,y) { \ - int foo = (*x->gz_output)(x->gz_private,x->gz_slide,y); \ - if (foo) \ - return foo; \ - } - -static const int qflag = 0; - -#ifndef _KERNEL /* want to use this file in kzip also */ -extern unsigned char *kzipmalloc (int); -extern void kzipfree (void*); -#define malloc(x, y, z) kzipmalloc((x)) -#define free(x, y) kzipfree((x)) -#endif - -/* - * This came from unzip-5.12. I have changed it the flow to pass - * a structure pointer around, thus hopefully making it re-entrant. - * Poul-Henning - */ - -/* inflate.c -- put in the public domain by Mark Adler - version c14o, 23 August 1994 */ - -/* You can do whatever you like with this source file, though I would - prefer that if you modify it and redistribute it that you include - comments to that effect with your name and the date. Thank you. - - History: - vers date who what - ---- --------- -------------- ------------------------------------ - a ~~ Feb 92 M. Adler used full (large, one-step) lookup table - b1 21 Mar 92 M. Adler first version with partial lookup tables - b2 21 Mar 92 M. Adler fixed bug in fixed-code blocks - b3 22 Mar 92 M. Adler sped up match copies, cleaned up some - b4 25 Mar 92 M. Adler added prototypes; removed window[] (now - is the responsibility of unzip.h--also - changed name to slide[]), so needs diffs - for unzip.c and unzip.h (this allows - compiling in the small model on MSDOS); - fixed cast of q in huft_build(); - b5 26 Mar 92 M. Adler got rid of unintended macro recursion. - b6 27 Mar 92 M. Adler got rid of nextbyte() routine. fixed - bug in inflate_fixed(). - c1 30 Mar 92 M. Adler removed lbits, dbits environment variables. - changed BMAX to 16 for explode. Removed - OUTB usage, and replaced it with flush()-- - this was a 20% speed improvement! Added - an explode.c (to replace unimplod.c) that - uses the huft routines here. Removed - register union. - c2 4 Apr 92 M. Adler fixed bug for file sizes a multiple of 32k. - c3 10 Apr 92 M. Adler reduced memory of code tables made by - huft_build significantly (factor of two to - three). - c4 15 Apr 92 M. Adler added NOMEMCPY do kill use of memcpy(). - worked around a Turbo C optimization bug. - c5 21 Apr 92 M. Adler added the GZ_WSIZE #define to allow reducing - the 32K window size for specialized - applications. - c6 31 May 92 M. Adler added some typecasts to eliminate warnings - c7 27 Jun 92 G. Roelofs added some more typecasts (444: MSC bug). - c8 5 Oct 92 J-l. Gailly added ifdef'd code to deal with PKZIP bug. - c9 9 Oct 92 M. Adler removed a memory error message (~line 416). - c10 17 Oct 92 G. Roelofs changed ULONG/UWORD/byte to ulg/ush/uch, - removed old inflate, renamed inflate_entry - to inflate, added Mark's fix to a comment. 
- c10.5 14 Dec 92 M. Adler fix up error messages for incomplete trees. - c11 2 Jan 93 M. Adler fixed bug in detection of incomplete - tables, and removed assumption that EOB is - the longest code (bad assumption). - c12 3 Jan 93 M. Adler make tables for fixed blocks only once. - c13 5 Jan 93 M. Adler allow all zero length codes (pkzip 2.04c - outputs one zero length code for an empty - distance tree). - c14 12 Mar 93 M. Adler made inflate.c standalone with the - introduction of inflate.h. - c14b 16 Jul 93 G. Roelofs added (unsigned) typecast to w at 470. - c14c 19 Jul 93 J. Bush changed v[N_MAX], l[288], ll[28x+3x] arrays - to static for Amiga. - c14d 13 Aug 93 J-l. Gailly de-complicatified Mark's c[*p++]++ thing. - c14e 8 Oct 93 G. Roelofs changed memset() to memzero(). - c14f 22 Oct 93 G. Roelofs renamed quietflg to qflag; made Trace() - conditional; added inflate_free(). - c14g 28 Oct 93 G. Roelofs changed l/(lx+1) macro to pointer (Cray bug) - c14h 7 Dec 93 C. Ghisler huft_build() optimizations. - c14i 9 Jan 94 A. Verheijen set fixed_t{d,l} to NULL after freeing; - G. Roelofs check NEXTBYTE macro for GZ_EOF. - c14j 23 Jan 94 G. Roelofs removed Ghisler "optimizations"; ifdef'd - GZ_EOF check. - c14k 27 Feb 94 G. Roelofs added some typecasts to avoid warnings. - c14l 9 Apr 94 G. Roelofs fixed split comments on preprocessor lines - to avoid bug in Encore compiler. - c14m 7 Jul 94 P. Kienitz modified to allow assembler version of - inflate_codes() (define ASM_INFLATECODES) - c14n 22 Jul 94 G. Roelofs changed fprintf to FPRINTF for DLL versions - c14o 23 Aug 94 C. Spieler added a newline to a debug statement; - G. Roelofs added another typecast to avoid MSC warning - */ - - -/* - Inflate deflated (PKZIP's method 8 compressed) data. The compression - method searches for as much of the current string of bytes (up to a - length of 258) in the previous 32K bytes. If it doesn't find any - matches (of at least length 3), it codes the next byte. Otherwise, it - codes the length of the matched string and its distance backwards from - the current position. There is a single Huffman code that codes both - single bytes (called "literals") and match lengths. A second Huffman - code codes the distance information, which follows a length code. Each - length or distance code actually represents a base value and a number - of "extra" (sometimes zero) bits to get to add to the base value. At - the end of each deflated block is a special end-of-block (EOB) literal/ - length code. The decoding process is basically: get a literal/length - code; if EOB then done; if a literal, emit the decoded byte; if a - length then get the distance and emit the referred-to bytes from the - sliding window of previously emitted data. - - There are (currently) three kinds of inflate blocks: stored, fixed, and - dynamic. The compressor outputs a chunk of data at a time and decides - which method to use on a chunk-by-chunk basis. A chunk might typically - be 32K to 64K, uncompressed. If the chunk is uncompressible, then the - "stored" method is used. In this case, the bytes are simply stored as - is, eight bits per byte, with none of the above coding. The bytes are - preceded by a count, since there is no longer an EOB code. - - If the data is compressible, then either the fixed or dynamic methods - are used. In the dynamic method, the compressed data is preceded by - an encoding of the literal/length and distance Huffman codes that are - to be used to decode this block. 
The representation is itself Huffman - coded, and so is preceded by a description of that code. These code - descriptions take up a little space, and so for small blocks, there is - a predefined set of codes, called the fixed codes. The fixed method is - used if the block ends up smaller that way (usually for quite small - chunks); otherwise the dynamic method is used. In the latter case, the - codes are customized to the probabilities in the current block and so - can code it much better than the pre-determined fixed codes can. - - The Huffman codes themselves are decoded using a mutli-level table - lookup, in order to maximize the speed of decoding plus the speed of - building the decoding tables. See the comments below that precede the - lbits and dbits tuning parameters. - */ - - -/* - Notes beyond the 1.93a appnote.txt: - - 1. Distance pointers never point before the beginning of the output - stream. - 2. Distance pointers can point back across blocks, up to 32k away. - 3. There is an implied maximum of 7 bits for the bit length table and - 15 bits for the actual data. - 4. If only one code exists, then it is encoded using one bit. (Zero - would be more efficient, but perhaps a little confusing.) If two - codes exist, they are coded using one bit each (0 and 1). - 5. There is no way of sending zero distance codes--a dummy must be - sent if there are none. (History: a pre 2.0 version of PKZIP would - store blocks with no distance codes, but this was discovered to be - too harsh a criterion.) Valid only for 1.93a. 2.04c does allow - zero distance codes, which is sent as one code of zero bits in - length. - 6. There are up to 286 literal/length codes. Code 256 represents the - end-of-block. Note however that the static length tree defines - 288 codes just to fill out the Huffman codes. Codes 286 and 287 - cannot be used though, since there is no length base or extra bits - defined for them. Similarly, there are up to 30 distance codes. - However, static trees define 32 codes (all 5 bits) to fill out the - Huffman codes, but the last two had better not show up in the data. - 7. Unzip can check dynamic Huffman blocks for complete code sets. - The exception is that a single code would not be complete (see #4). - 8. The five bits following the block type is really the number of - literal codes sent minus 257. - 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits - (1+6+6). Therefore, to output three times the length, you output - three codes (1+1+1), whereas to output four times the same length, - you only need two codes (1+3). Hmm. - 10. In the tree reconstruction algorithm, Code = Code + Increment - only if BitLength(i) is not zero. (Pretty obvious.) - 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) - 12. Note: length code 284 can represent 227-258, but length code 285 - really is 258. The last length deserves its own, short code - since it gets used a lot in very redundant files. The length - 258 is special since 258 - 3 (the min match length) is 255. - 13. The literal/length and distance code bit lengths are read as a - single stream of lengths. It is possible (and advantageous) for - a repeat code (16, 17, or 18) to go across the boundary between - the two sets of lengths. - */ - - -#define PKZIP_BUG_WORKAROUND /* PKZIP 1.93a problem--live with it */ - -/* - inflate.h must supply the uch slide[GZ_WSIZE] array and the NEXTBYTE, - FLUSH() and memzero macros. If the window size is not 32K, it - should also define GZ_WSIZE. 
If INFMOD is defined, it can include - compiled functions to support the NEXTBYTE and/or FLUSH() macros. - There are defaults for NEXTBYTE and FLUSH() below for use as - examples of what those functions need to do. Normally, you would - also want FLUSH() to compute a crc on the data. inflate.h also - needs to provide these typedefs: - - typedef unsigned char uch; - typedef unsigned short ush; - typedef unsigned long ulg; - - This module uses the external functions malloc() and free() (and - probably memset() or bzero() in the memzero() macro). Their - prototypes are normally found in and . - */ -#define INFMOD /* tell inflate.h to include code to be - * compiled */ - -/* Huffman code lookup table entry--this entry is four bytes for machines - that have 16-bit pointers (e.g. PC's in the small or medium model). - Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16 - means that v is a literal, 16 < e < 32 means that v is a pointer to - the next table, which codes e - 16 bits, and lastly e == 99 indicates - an unused code. If a code with e == 99 is looked up, this implies an - error in the data. */ -struct huft { - uch e; /* number of extra bits or operation */ - uch b; /* number of bits in this code or subcode */ - union { - ush n; /* literal, length base, or distance - * base */ - struct huft *t; /* pointer to next level of table */ - } v; -}; - - -/* Function prototypes */ -static int huft_build(struct inflate *, unsigned *, unsigned, unsigned, const ush *, const ush *, struct huft **, int *); -static int huft_free(struct inflate *, struct huft *); -static int inflate_codes(struct inflate *, struct huft *, struct huft *, int, int); -static int inflate_stored(struct inflate *); -static int xinflate(struct inflate *); -static int inflate_fixed(struct inflate *); -static int inflate_dynamic(struct inflate *); -static int inflate_block(struct inflate *, int *); - -/* The inflate algorithm uses a sliding 32K byte window on the uncompressed - stream to find repeated byte strings. This is implemented here as a - circular buffer. The index is updated simply by incrementing and then - and'ing with 0x7fff (32K-1). */ -/* It is left to other modules to supply the 32K area. It is assumed - to be usable as if it were declared "uch slide[32768];" or as just - "uch *slide;" and then malloc'ed in the latter case. The definition - must be in unzip.h, included above. */ - - -/* Tables for deflate from PKZIP's appnote.txt. */ - -/* Order of the bit length code lengths */ -static const unsigned border[] = { - 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; - -static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */ - 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, - 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; - /* note: see note #13 above about the 258 in this list. 
*/ - -static const ush cplext[] = { /* Extra bits for literal codes 257..285 */ - 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, - 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ - -static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */ - 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, - 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, - 8193, 12289, 16385, 24577}; - -static const ush cpdext[] = { /* Extra bits for distance codes */ - 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, - 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, - 12, 12, 13, 13}; - -/* And'ing with mask[n] masks the lower n bits */ -static const ush mask[] = { - 0x0000, - 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, - 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff -}; - - -/* Macros for inflate() bit peeking and grabbing. - The usage is: - - NEEDBITS(glbl,j) - x = b & mask[j]; - DUMPBITS(j) - - where NEEDBITS makes sure that b has at least j bits in it, and - DUMPBITS removes the bits from b. The macros use the variable k - for the number of bits in b. Normally, b and k are register - variables for speed, and are initialized at the beginning of a - routine that uses these macros from a global bit buffer and count. - - In order to not ask for more bits than there are in the compressed - stream, the Huffman tables are constructed to only ask for just - enough bits to make up the end-of-block code (value 256). Then no - bytes need to be "returned" to the buffer at the end of the last - block. See the huft_build() routine. - */ - -/* - * The following 2 were global variables. - * They are now fields of the inflate structure. - */ - -#define NEEDBITS(glbl,n) { \ - while(k<(n)) { \ - int c=(*glbl->gz_input)(glbl->gz_private); \ - if(c==GZ_EOF) \ - return 1; \ - b|=((ulg)c)<<k; \ - k+=8; \ - } \ - } -#define DUMPBITS(n) {b>>=(n);k-=(n);} - -/* - Huffman code decoding is performed using a multi-level table lookup. - The fastest way to decode is to simply build a lookup table whose - size is determined by the longest code. However, the time it takes - to build this table can also be a factor if the data being decoded - is not very long. The most common codes are necessarily the - shortest codes, so those codes dominate the decoding time, and hence - the speed. The idea is you can have a shorter table that decodes the - shorter, more probable codes, and then point to subsidiary tables for - the longer codes. The time it costs to decode the longer codes is - then traded against the time it takes to make longer tables. - - This results of this trade are in the variables lbits and dbits - below. lbits is the number of bits the first level table for literal/ - length codes can decode in one step, and dbits is the same thing for - the distance codes. Subsequent tables are also less than or equal to - those sizes. These values may be adjusted either when all of the - codes are shorter than that, in which case the longest code length in - bits is used, or when the shortest code is *longer* than the requested - table size, in which case the length of the shortest code in bits is - used. - - There are two different values for the two tables, since they code a - different number of possibilities each. The literal/length table - codes 286 possible values, or in a flat code, a little over eight - bits. The distance table codes 30 possible values, or a little less - than five bits, flat. The optimum values for speed end up being - about one bit more than those, so lbits is 8+1 and dbits is 5+1. 
- The optimum values may differ though from machine to machine, and - possibly even between compilers. Your mileage may vary. - */ - -static const int lbits = 9; /* bits in base literal/length lookup table */ -static const int dbits = 6; /* bits in base distance lookup table */ - - -/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */ -#define BMAX 16 /* maximum bit length of any code (16 for - * explode) */ -#define N_MAX 288 /* maximum number of codes in any set */ - -/* Given a list of code lengths and a maximum table size, make a set of - tables to decode that set of codes. Return zero on success, one if - the given code set is incomplete (the tables are still built in this - case), two if the input is invalid (all zero length codes or an - oversubscribed set of lengths), and three if not enough memory. - The code with value 256 is special, and the tables are constructed - so that no bits beyond that code are fetched when that code is - decoded. */ -/* - * Arguments: - * b code lengths in bits (all assumed <= BMAX) - * n number of codes (assumed <= N_MAX) - * s number of simple-valued codes (0..s-1) - * d list of base values for non-simple codes - * e list of extra bits for non-simple codes - * t result: starting table - * m maximum lookup bits, returns actual - */ -static int -huft_build(struct inflate *glbl, unsigned *b, unsigned n, unsigned s, - const ush *d, const ush *e, struct huft **t, int *m) -{ - unsigned a; /* counter for codes of length k */ - unsigned c[BMAX + 1]; /* bit length count table */ - unsigned el; /* length of EOB code (value 256) */ - unsigned f; /* i repeats in table every f entries */ - int g; /* maximum code length */ - int h; /* table level */ - unsigned i; /* counter, current code */ - unsigned j; /* counter */ - int k; /* number of bits in current code */ - int lx[BMAX + 1]; /* memory for l[-1..BMAX-1] */ - int *l = lx + 1; /* stack of bits per table */ - unsigned *p; /* pointer into c[], b[], or v[] */ - struct huft *q; /* points to current table */ - struct huft r; /* table entry for structure assignment */ - struct huft *u[BMAX];/* table stack */ - unsigned v[N_MAX]; /* values in order of bit length */ - int w; /* bits before this table == (l * h) */ - unsigned x[BMAX + 1]; /* bit offsets, then code stack */ - unsigned *xp; /* pointer into x */ - int y; /* number of dummy codes added */ - unsigned z; /* number of entries in current table */ - - /* Generate counts for each bit length */ - el = n > 256 ? 
b[256] : BMAX; /* set length of EOB code, if any */ -#ifdef _KERNEL - memzero((char *) c, sizeof(c)); -#else - for (i = 0; i < BMAX+1; i++) - c [i] = 0; -#endif - p = b; - i = n; - do { - c[*p]++; - p++; /* assume all entries <= BMAX */ - } while (--i); - if (c[0] == n) { /* null input--all zero length codes */ - *t = (struct huft *) NULL; - *m = 0; - return 0; - } - /* Find minimum and maximum length, bound *m by those */ - for (j = 1; j <= BMAX; j++) - if (c[j]) - break; - k = j; /* minimum code length */ - if ((unsigned) *m < j) - *m = j; - for (i = BMAX; i; i--) - if (c[i]) - break; - g = i; /* maximum code length */ - if ((unsigned) *m > i) - *m = i; - - /* Adjust last length count to fill out codes, if needed */ - for (y = 1 << j; j < i; j++, y <<= 1) - if ((y -= c[j]) < 0) - return 2; /* bad input: more codes than bits */ - if ((y -= c[i]) < 0) - return 2; - c[i] += y; - - /* Generate starting offsets into the value table for each length */ - x[1] = j = 0; - p = c + 1; - xp = x + 2; - while (--i) { /* note that i == g from above */ - *xp++ = (j += *p++); - } - - /* Make a table of values in order of bit lengths */ - p = b; - i = 0; - do { - if ((j = *p++) != 0) - v[x[j]++] = i; - } while (++i < n); - - /* Generate the Huffman codes and for each, make the table entries */ - x[0] = i = 0; /* first Huffman code is zero */ - p = v; /* grab values in bit order */ - h = -1; /* no tables yet--level -1 */ - w = l[-1] = 0; /* no bits decoded yet */ - u[0] = (struct huft *) NULL; /* just to keep compilers happy */ - q = (struct huft *) NULL; /* ditto */ - z = 0; /* ditto */ - - /* go through the bit lengths (k already is bits in shortest code) */ - for (; k <= g; k++) { - a = c[k]; - while (a--) { - /* - * here i is the Huffman code of length k bits for - * value *p - */ - /* make tables up to required level */ - while (k > w + l[h]) { - w += l[h++]; /* add bits already decoded */ - - /* - * compute minimum size table less than or - * equal to *m bits - */ - z = (z = g - w) > (unsigned) *m ? 
*m : z; /* upper limit */ - if ((f = 1 << (j = k - w)) > a + 1) { /* try a k-w bit table *//* t - * oo few codes for k-w - * bit table */ - f -= a + 1; /* deduct codes from - * patterns left */ - xp = c + k; - while (++j < z) { /* try smaller tables up - * to z bits */ - if ((f <<= 1) <= *++xp) - break; /* enough codes to use - * up j bits */ - f -= *xp; /* else deduct codes - * from patterns */ - } - } - if ((unsigned) w + j > el && (unsigned) w < el) - j = el - w; /* make EOB code end at - * table */ - z = 1 << j; /* table entries for j-bit - * table */ - l[h] = j; /* set table size in stack */ - - /* allocate and link in new table */ - if ((q = (struct huft *) malloc((z + 1) * sizeof(struct huft), M_GZIP, M_WAITOK)) == - (struct huft *) NULL) { - if (h) - huft_free(glbl, u[0]); - return 3; /* not enough memory */ - } - glbl->gz_hufts += z + 1; /* track memory usage */ - *t = q + 1; /* link to list for - * huft_free() */ - *(t = &(q->v.t)) = (struct huft *) NULL; - u[h] = ++q; /* table starts after link */ - - /* connect to last table, if there is one */ - if (h) { - x[h] = i; /* save pattern for - * backing up */ - r.b = (uch) l[h - 1]; /* bits to dump before - * this table */ - r.e = (uch) (16 + j); /* bits in this table */ - r.v.t = q; /* pointer to this table */ - j = (i & ((1 << w) - 1)) >> (w - l[h - 1]); - u[h - 1][j] = r; /* connect to last table */ - } - } - - /* set up table entry in r */ - r.b = (uch) (k - w); - if (p >= v + n) - r.e = 99; /* out of values--invalid - * code */ - else if (*p < s) { - r.e = (uch) (*p < 256 ? 16 : 15); /* 256 is end-of-block - * code */ - r.v.n = *p++; /* simple code is just the - * value */ - } else { - r.e = (uch) e[*p - s]; /* non-simple--look up - * in lists */ - r.v.n = d[*p++ - s]; - } - - /* fill code-like entries with r */ - f = 1 << (k - w); - for (j = i >> w; j < z; j += f) - q[j] = r; - - /* backwards increment the k-bit code i */ - for (j = 1 << (k - 1); i & j; j >>= 1) - i ^= j; - i ^= j; - - /* backup over finished tables */ - while ((i & ((1 << w) - 1)) != x[h]) - w -= l[--h]; /* don't need to update q */ - } - } - - /* return actual size of base table */ - *m = l[0]; - - /* Return true (1) if we were given an incomplete table */ - return y != 0 && g != 1; -} - -/* - * Arguments: - * t table to free - */ -static int -huft_free(struct inflate *glbl, struct huft *t) -/* Free the malloc'ed tables built by huft_build(), which makes a linked - list of the tables it made, with the links in a dummy first entry of - each table. */ -{ - struct huft *p, *q; - - /* Go through linked list, freeing from the malloced (t[-1]) address. */ - p = t; - while (p != (struct huft *) NULL) { - q = (--p)->v.t; - free(p, M_GZIP); - p = q; - } - return 0; -} - -/* inflate (decompress) the codes in a deflated (compressed) block. - Return an error code or zero if it all goes ok. 
*/ -/* - * Arguments: - * tl, td literal/length and distance decoder tables - * bl, bd number of bits decoded by tl[] and td[] - */ -static int -inflate_codes(struct inflate *glbl, struct huft *tl, struct huft*td, int bl, - int bd) -{ - unsigned e; /* table entry flag/number of extra bits */ - unsigned n, d; /* length and index for copy */ - unsigned w; /* current window position */ - struct huft *t; /* pointer to table entry */ - unsigned ml, md; /* masks for bl and bd bits */ - ulg b; /* bit buffer */ - unsigned k; /* number of bits in bit buffer */ - - /* make local copies of globals */ - b = glbl->gz_bb; /* initialize bit buffer */ - k = glbl->gz_bk; - w = glbl->gz_wp; /* initialize window position */ - - /* inflate the coded data */ - ml = mask[bl]; /* precompute masks for speed */ - md = mask[bd]; - while (1) { /* do until end of block */ - NEEDBITS(glbl, (unsigned) bl) - if ((e = (t = tl + ((unsigned) b & ml))->e) > 16) - do { - if (e == 99) - return 1; - DUMPBITS(t->b) - e -= 16; - NEEDBITS(glbl, e) - } while ((e = (t = t->v.t + ((unsigned) b & mask[e]))->e) > 16); - DUMPBITS(t->b) - if (e == 16) { /* then it's a literal */ - glbl->gz_slide[w++] = (uch) t->v.n; - if (w == GZ_WSIZE) { - FLUSH(glbl, w); - w = 0; - } - } else { /* it's an EOB or a length */ - /* exit if end of block */ - if (e == 15) - break; - - /* get length of block to copy */ - NEEDBITS(glbl, e) - n = t->v.n + ((unsigned) b & mask[e]); - DUMPBITS(e); - - /* decode distance of block to copy */ - NEEDBITS(glbl, (unsigned) bd) - if ((e = (t = td + ((unsigned) b & md))->e) > 16) - do { - if (e == 99) - return 1; - DUMPBITS(t->b) - e -= 16; - NEEDBITS(glbl, e) - } while ((e = (t = t->v.t + ((unsigned) b & mask[e]))->e) > 16); - DUMPBITS(t->b) - NEEDBITS(glbl, e) - d = w - t->v.n - ((unsigned) b & mask[e]); - DUMPBITS(e) - /* do the copy */ - do { - n -= (e = (e = GZ_WSIZE - ((d &= GZ_WSIZE - 1) > w ? d : w)) > n ? n : e); -#ifndef NOMEMCPY - if (w - d >= e) { /* (this test assumes - * unsigned comparison) */ - memcpy(glbl->gz_slide + w, glbl->gz_slide + d, e); - w += e; - d += e; - } else /* do it slow to avoid memcpy() - * overlap */ -#endif /* !NOMEMCPY */ - do { - glbl->gz_slide[w++] = glbl->gz_slide[d++]; - } while (--e); - if (w == GZ_WSIZE) { - FLUSH(glbl, w); - w = 0; - } - } while (n); - } - } - - /* restore the globals from the locals */ - glbl->gz_wp = w; /* restore global window pointer */ - glbl->gz_bb = b; /* restore global bit buffer */ - glbl->gz_bk = k; - - /* done */ - return 0; -} - -/* "decompress" an inflated type 0 (stored) block. 
*/ -static int -inflate_stored(struct inflate *glbl) -{ - unsigned n; /* number of bytes in block */ - unsigned w; /* current window position */ - ulg b; /* bit buffer */ - unsigned k; /* number of bits in bit buffer */ - - /* make local copies of globals */ - b = glbl->gz_bb; /* initialize bit buffer */ - k = glbl->gz_bk; - w = glbl->gz_wp; /* initialize window position */ - - /* go to byte boundary */ - n = k & 7; - DUMPBITS(n); - - /* get the length and its complement */ - NEEDBITS(glbl, 16) - n = ((unsigned) b & 0xffff); - DUMPBITS(16) - NEEDBITS(glbl, 16) - if (n != (unsigned) ((~b) & 0xffff)) - return 1; /* error in compressed data */ - DUMPBITS(16) - /* read and output the compressed data */ - while (n--) { - NEEDBITS(glbl, 8) - glbl->gz_slide[w++] = (uch) b; - if (w == GZ_WSIZE) { - FLUSH(glbl, w); - w = 0; - } - DUMPBITS(8) - } - - /* restore the globals from the locals */ - glbl->gz_wp = w; /* restore global window pointer */ - glbl->gz_bb = b; /* restore global bit buffer */ - glbl->gz_bk = k; - return 0; -} - -/* decompress an inflated type 1 (fixed Huffman codes) block. We should - either replace this with a custom decoder, or at least precompute the - Huffman tables. */ -static int -inflate_fixed(struct inflate *glbl) -{ - /* if first time, set up tables for fixed blocks */ - if (glbl->gz_fixed_tl == (struct huft *) NULL) { - int i; /* temporary variable */ - static unsigned l[288]; /* length list for huft_build */ - - /* literal table */ - for (i = 0; i < 144; i++) - l[i] = 8; - for (; i < 256; i++) - l[i] = 9; - for (; i < 280; i++) - l[i] = 7; - for (; i < 288; i++) /* make a complete, but wrong code - * set */ - l[i] = 8; - glbl->gz_fixed_bl = 7; - if ((i = huft_build(glbl, l, 288, 257, cplens, cplext, - &glbl->gz_fixed_tl, &glbl->gz_fixed_bl)) != 0) { - glbl->gz_fixed_tl = (struct huft *) NULL; - return i; - } - /* distance table */ - for (i = 0; i < 30; i++) /* make an incomplete code - * set */ - l[i] = 5; - glbl->gz_fixed_bd = 5; - if ((i = huft_build(glbl, l, 30, 0, cpdist, cpdext, - &glbl->gz_fixed_td, &glbl->gz_fixed_bd)) > 1) { - huft_free(glbl, glbl->gz_fixed_tl); - glbl->gz_fixed_tl = (struct huft *) NULL; - return i; - } - } - /* decompress until an end-of-block code */ - return inflate_codes(glbl, glbl->gz_fixed_tl, glbl->gz_fixed_td, glbl->gz_fixed_bl, glbl->gz_fixed_bd) != 0; -} - -/* decompress an inflated type 2 (dynamic Huffman codes) block. 
*/ -static int -inflate_dynamic(struct inflate *glbl) -{ - int i; /* temporary variables */ - unsigned j; - unsigned l; /* last length */ - unsigned m; /* mask for bit lengths table */ - unsigned n; /* number of lengths to get */ - struct huft *tl; /* literal/length code table */ - struct huft *td; /* distance code table */ - int bl; /* lookup bits for tl */ - int bd; /* lookup bits for td */ - unsigned nb; /* number of bit length codes */ - unsigned nl; /* number of literal/length codes */ - unsigned nd; /* number of distance codes */ -#ifdef PKZIP_BUG_WORKAROUND - unsigned ll[288 + 32]; /* literal/length and distance code - * lengths */ -#else - unsigned ll[286 + 30]; /* literal/length and distance code - * lengths */ -#endif - ulg b; /* bit buffer */ - unsigned k; /* number of bits in bit buffer */ - - /* make local bit buffer */ - b = glbl->gz_bb; - k = glbl->gz_bk; - - /* read in table lengths */ - NEEDBITS(glbl, 5) - nl = 257 + ((unsigned) b & 0x1f); /* number of - * literal/length codes */ - DUMPBITS(5) - NEEDBITS(glbl, 5) - nd = 1 + ((unsigned) b & 0x1f); /* number of distance codes */ - DUMPBITS(5) - NEEDBITS(glbl, 4) - nb = 4 + ((unsigned) b & 0xf); /* number of bit length codes */ - DUMPBITS(4) -#ifdef PKZIP_BUG_WORKAROUND - if (nl > 288 || nd > 32) -#else - if (nl > 286 || nd > 30) -#endif - return 1; /* bad lengths */ - /* read in bit-length-code lengths */ - for (j = 0; j < nb; j++) { - NEEDBITS(glbl, 3) - ll[border[j]] = (unsigned) b & 7; - DUMPBITS(3) - } - for (; j < 19; j++) - ll[border[j]] = 0; - - /* build decoding table for trees--single level, 7 bit lookup */ - bl = 7; - if ((i = huft_build(glbl, ll, 19, 19, NULL, NULL, &tl, &bl)) != 0) { - if (i == 1) - huft_free(glbl, tl); - return i; /* incomplete code set */ - } - /* read in literal and distance code lengths */ - n = nl + nd; - m = mask[bl]; - i = l = 0; - while ((unsigned) i < n) { - NEEDBITS(glbl, (unsigned) bl) - j = (td = tl + ((unsigned) b & m))->b; - DUMPBITS(j) - j = td->v.n; - if (j < 16) /* length of code in bits (0..15) */ - ll[i++] = l = j; /* save last length in l */ - else if (j == 16) { /* repeat last length 3 to 6 times */ - NEEDBITS(glbl, 2) - j = 3 + ((unsigned) b & 3); - DUMPBITS(2) - if ((unsigned) i + j > n) - return 1; - while (j--) - ll[i++] = l; - } else if (j == 17) { /* 3 to 10 zero length codes */ - NEEDBITS(glbl, 3) - j = 3 + ((unsigned) b & 7); - DUMPBITS(3) - if ((unsigned) i + j > n) - return 1; - while (j--) - ll[i++] = 0; - l = 0; - } else { /* j == 18: 11 to 138 zero length codes */ - NEEDBITS(glbl, 7) - j = 11 + ((unsigned) b & 0x7f); - DUMPBITS(7) - if ((unsigned) i + j > n) - return 1; - while (j--) - ll[i++] = 0; - l = 0; - } - } - - /* free decoding table for trees */ - huft_free(glbl, tl); - - /* restore the global bit buffer */ - glbl->gz_bb = b; - glbl->gz_bk = k; - - /* build the decoding tables for literal/length and distance codes */ - bl = lbits; - i = huft_build(glbl, ll, nl, 257, cplens, cplext, &tl, &bl); - if (i != 0) { - if (i == 1 && !qflag) { - FPRINTF("(incomplete l-tree) "); - huft_free(glbl, tl); - } - return i; /* incomplete code set */ - } - bd = dbits; - i = huft_build(glbl, ll + nl, nd, 0, cpdist, cpdext, &td, &bd); - if (i != 0) { - if (i == 1 && !qflag) { - FPRINTF("(incomplete d-tree) "); -#ifdef PKZIP_BUG_WORKAROUND - i = 0; - } -#else - huft_free(glbl, td); - } - huft_free(glbl, tl); - return i; /* incomplete code set */ -#endif - } - /* decompress until an end-of-block code */ - if (inflate_codes(glbl, tl, td, bl, bd)) - return 1; - - /* free the 
-
-/* decompress an inflated block */
-/*
- * Arguments:
- *      e       last block flag
- */
-static int
-inflate_block(struct inflate *glbl, int *e)
-{
-        unsigned t;     /* block type */
-        ulg b;          /* bit buffer */
-        unsigned k;     /* number of bits in bit buffer */
-
-        /* make local bit buffer */
-        b = glbl->gz_bb;
-        k = glbl->gz_bk;
-
-        /* read in last block bit */
-        NEEDBITS(glbl, 1)
-        *e = (int) b & 1;
-        DUMPBITS(1)
-        /* read in block type */
-        NEEDBITS(glbl, 2)
-        t = (unsigned) b & 3;
-        DUMPBITS(2)
-        /* restore the global bit buffer */
-        glbl->gz_bb = b;
-        glbl->gz_bk = k;
-
-        /* inflate that block type */
-        if (t == 2)
-                return inflate_dynamic(glbl);
-        if (t == 0)
-                return inflate_stored(glbl);
-        if (t == 1)
-                return inflate_fixed(glbl);
-        /* bad block type */
-        return 2;
-}
-
-
-
-/* decompress an inflated entry */
-static int
-xinflate(struct inflate *glbl)
-{
-        int e;          /* last block flag */
-        int r;          /* result code */
-        unsigned h;     /* maximum struct huft's malloc'ed */
-
-        glbl->gz_fixed_tl = (struct huft *) NULL;
-
-        /* initialize window, bit buffer */
-        glbl->gz_wp = 0;
-        glbl->gz_bk = 0;
-        glbl->gz_bb = 0;
-
-        /* decompress until the last block */
-        h = 0;
-        do {
-                glbl->gz_hufts = 0;
-                if ((r = inflate_block(glbl, &e)) != 0)
-                        return r;
-                if (glbl->gz_hufts > h)
-                        h = glbl->gz_hufts;
-        } while (!e);
-
-        /* flush out slide */
-        FLUSH(glbl, glbl->gz_wp);
-
-        /* return success */
-        return 0;
-}
-
-/* Nobody uses this - why not? */
-int
-inflate(struct inflate *glbl)
-{
-        int i;
-#ifdef _KERNEL
-        u_char *p = NULL;
-
-        if (!glbl->gz_slide)
-                p = glbl->gz_slide = malloc(GZ_WSIZE, M_GZIP, M_WAITOK);
-#endif
-        if (!glbl->gz_slide)
-#ifdef _KERNEL
-                return(ENOMEM);
-#else
-                return 3;       /* kzip expects 3 */
-#endif
-        i = xinflate(glbl);
-
-        if (glbl->gz_fixed_td != (struct huft *) NULL) {
-                huft_free(glbl, glbl->gz_fixed_td);
-                glbl->gz_fixed_td = (struct huft *) NULL;
-        }
-        if (glbl->gz_fixed_tl != (struct huft *) NULL) {
-                huft_free(glbl, glbl->gz_fixed_tl);
-                glbl->gz_fixed_tl = (struct huft *) NULL;
-        }
-#ifdef _KERNEL
-        if (p == glbl->gz_slide) {
-                free(glbl->gz_slide, M_GZIP);
-                glbl->gz_slide = NULL;
-        }
-#endif
-        return i;
-}
-/* ----------------------- END INFLATE.C */
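The dispatch in inflate_block() follows the three-bit block header defined by RFC 1951: one "last block" bit, then a two-bit type field, which xinflate() keeps consuming until a block with the last-block flag set has been decoded. The enum below is only a reference for those type values, matching the t == 0/1/2 tests above; the names are illustrative.

/* Illustrative only; values follow RFC 1951 and the t == 0/1/2 tests above. */
enum deflate_block_type {
        BLOCK_STORED  = 0,      /* handled by inflate_stored()  */
        BLOCK_FIXED   = 1,      /* handled by inflate_fixed()   */
        BLOCK_DYNAMIC = 2,      /* handled by inflate_dynamic() */
        BLOCK_INVALID = 3       /* reserved; inflate_block() returns 2 */
};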
Index: sys/sys/inflate.h
===================================================================
--- sys/sys/inflate.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*-
- * ----------------------------------------------------------------------------
- * "THE BEER-WARE LICENSE" (Revision 42):
- * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
- * can do whatever you want with this stuff. If we meet some day, and you think
- * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
- * ----------------------------------------------------------------------------
- *
- * $FreeBSD$
- *
- */
-#ifndef _SYS_INFLATE_H_
-#define _SYS_INFLATE_H_
-
-#if defined(_KERNEL) || defined(KZIP)
-
-#define GZ_EOF -1
-
-#define GZ_WSIZE 0x8000
-
-/*
- * Global variables used by inflate and friends.
- * This structure is used in order to make inflate() reentrant.
- */
-struct inflate {
-        /* Public part */
-
-        /* This pointer is passed along to the two functions below */
-        void *gz_private;
-
-        /* Fetch next character to be uncompressed */
-        int (*gz_input)(void *);
-
-        /* Dispose of uncompressed characters */
-        int (*gz_output)(void *, u_char *, u_long);
-
-        /* Private part */
-        u_long gz_bb;                   /* bit buffer */
-        unsigned gz_bk;                 /* bits in bit buffer */
-        unsigned gz_hufts;              /* track memory usage */
-        struct huft *gz_fixed_tl;       /* must init to NULL !! */
-        struct huft *gz_fixed_td;
-        int gz_fixed_bl;
-        int gz_fixed_bd;
-        u_char *gz_slide;
-        unsigned gz_wp;
-};
-
-int inflate(struct inflate *);
-
-#endif /* _KERNEL || KZIP */
-
-#endif /* ! _SYS_INFLATE_H_ */
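For reference, the struct being removed above was the whole public interface: a caller supplied gz_private, a gz_input callback returning the next compressed byte (or GZ_EOF), a gz_output callback that disposes of decompressed data, and, outside the kernel, a GZ_WSIZE window in gz_slide (inflate() only allocates it under _KERNEL, and returns 3 to userland callers that forgot it). The sketch below is a hedged illustration of how a kzip-style userland consumer could have driven it, assuming the declarations from this header are in scope and the file is built with KZIP defined; the function and variable names are illustrative.

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
/* Assumes struct inflate, GZ_EOF, GZ_WSIZE and inflate() from the header above. */

static int
file_input(void *priv)
{
        int c = fgetc((FILE *)priv);            /* next compressed byte */

        return (c == EOF ? GZ_EOF : c);
}

static int
file_output(void *priv, u_char *buf, u_long len)
{
        /* dispose of len decompressed bytes flushed from the sliding window */
        (void)priv;
        return (fwrite(buf, 1, len, stdout) == len ? 0 : 1);
}

static int
decompress_stream(FILE *in)
{
        struct inflate infl = { 0 };            /* zeroing also NULLs gz_fixed_tl */
        int error;

        infl.gz_private = in;                   /* handed back to the callbacks */
        infl.gz_input = file_input;
        infl.gz_output = file_output;
        infl.gz_slide = malloc(GZ_WSIZE);       /* caller provides the window outside _KERNEL */
        if (infl.gz_slide == NULL)
                return (1);
        error = inflate(&infl);                 /* 0 on success */
        free(infl.gz_slide);
        return (error);
}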