Commit b1b4e89a authored by Stefan Roese, committed by Wolfgang Denk

Add LZO decompressor support


This patch adds LZO decompression support to U-Boot. It is needed for
the upcoming UBIFS support, since UBIFS uses LZO as its default
compressor/decompressor. Because the UBIFS support is read-only, only
the decompressor is needed.

All this is copied with minor changes from the current Linux kernel
version (2.6.28-rc8).

This patch implements the LZO decompressor support only for PPC.
Other platforms using UBIFS will have to add the required
"include/asm/unaligned.h" as well. It should be fairly easy to copy it
from the Linux source tree, as I have done for PPC in this patch.

Signed-off-by: Stefan Roese <sr@denx.de>
parent 68d7d651
@@ -210,6 +210,7 @@ OBJS := $(addprefix $(obj),$(OBJS))
 LIBS = lib_generic/libgeneric.a
 LIBS += lib_generic/lzma/liblzma.a
+LIBS += lib_generic/lzo/liblzo.a
 LIBS += $(shell if [ -f board/$(VENDOR)/common/Makefile ]; then echo \
 	"board/$(VENDOR)/common/lib$(VENDOR).a"; fi)
 LIBS += cpu/$(CPU)/lib$(CPU).a
#ifndef _ASM_POWERPC_UNALIGNED_H
#define _ASM_POWERPC_UNALIGNED_H
#ifdef __KERNEL__
/*
* The PowerPC can do unaligned accesses itself in big endian mode.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UNALIGNED_H */
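
The commit message above notes that other platforms wanting UBIFS will need their own <asm/unaligned.h>. As a rough, hypothetical sketch (architecture name and guard are placeholders, not part of this patch), a little-endian platform that, like PowerPC, tolerates unaligned accesses in hardware could provide an equivalent header simply by selecting the little-endian accessors:

#ifndef _ASM_EXAMPLEARCH_UNALIGNED_H
#define _ASM_EXAMPLEARCH_UNALIGNED_H

/*
 * Hypothetical example for a little-endian CPU that handles unaligned
 * accesses in hardware; real ports should copy the matching Linux header.
 */
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>

#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le

#endif /* _ASM_EXAMPLEARCH_UNALIGNED_H */
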
#ifndef __LZO_H__
#define __LZO_H__
/*
* LZO Public Kernel Interface
* A mini subset of the LZO real-time data compression library
*
* Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
*
* The full LZO package can be found at:
* http://www.oberhumer.com/opensource/lzo/
*
* Changed for kernel use by:
* Nitin Gupta <nitingupta910@gmail.com>
* Richard Purdie <rpurdie@openedhand.com>
*/
#define LZO1X_MEM_COMPRESS (16384 * sizeof(unsigned char *))
#define LZO1X_1_MEM_COMPRESS LZO1X_MEM_COMPRESS
#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
/* This requires 'workmem' of size LZO1X_1_MEM_COMPRESS */
int lzo1x_1_compress(const unsigned char *src, size_t src_len,
			unsigned char *dst, size_t *dst_len, void *wrkmem);

/* safe decompression with overrun testing */
int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
			unsigned char *dst, size_t *dst_len);
/*
* Return values (< 0 = Error)
*/
#define LZO_E_OK 0
#define LZO_E_ERROR (-1)
#define LZO_E_OUT_OF_MEMORY (-2)
#define LZO_E_NOT_COMPRESSIBLE (-3)
#define LZO_E_INPUT_OVERRUN (-4)
#define LZO_E_OUTPUT_OVERRUN (-5)
#define LZO_E_LOOKBEHIND_OVERRUN (-6)
#define LZO_E_EOF_NOT_FOUND (-7)
#define LZO_E_INPUT_NOT_CONSUMED (-8)
#define LZO_E_NOT_YET_IMPLEMENTED (-9)
#endif
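
For reference, a caller passes the output buffer capacity in through *dst_len and reads back the number of bytes actually produced; any return value other than LZO_E_OK indicates a failure. A minimal usage sketch (function and buffer names are illustrative only, not part of this patch):

#include <common.h>
#include <linux/lzo.h>

/* Illustrative helper: decompress one LZO1X block into outbuf */
static int example_unpack(const unsigned char *comp, size_t comp_len,
			unsigned char *outbuf, size_t outbuf_size)
{
	size_t out_len = outbuf_size;	/* in: capacity, out: bytes written */
	int ret;

	ret = lzo1x_decompress_safe(comp, comp_len, outbuf, &out_len);
	if (ret != LZO_E_OK)
		return ret;		/* negative LZO_E_* error code */

	/* out_len now holds the decompressed size */
	return 0;
}
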
#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
#define _LINUX_UNALIGNED_ACCESS_OK_H
#include <asm/byteorder.h>
static inline u16 get_unaligned_le16(const void *p)
{
	return le16_to_cpup((__le16 *)p);
}

static inline u32 get_unaligned_le32(const void *p)
{
	return le32_to_cpup((__le32 *)p);
}

static inline u64 get_unaligned_le64(const void *p)
{
	return le64_to_cpup((__le64 *)p);
}

static inline u16 get_unaligned_be16(const void *p)
{
	return be16_to_cpup((__be16 *)p);
}

static inline u32 get_unaligned_be32(const void *p)
{
	return be32_to_cpup((__be32 *)p);
}

static inline u64 get_unaligned_be64(const void *p)
{
	return be64_to_cpup((__be64 *)p);
}

static inline void put_unaligned_le16(u16 val, void *p)
{
	*((__le16 *)p) = cpu_to_le16(val);
}

static inline void put_unaligned_le32(u32 val, void *p)
{
	*((__le32 *)p) = cpu_to_le32(val);
}

static inline void put_unaligned_le64(u64 val, void *p)
{
	*((__le64 *)p) = cpu_to_le64(val);
}

static inline void put_unaligned_be16(u16 val, void *p)
{
	*((__be16 *)p) = cpu_to_be16(val);
}

static inline void put_unaligned_be32(u32 val, void *p)
{
	*((__be32 *)p) = cpu_to_be32(val);
}

static inline void put_unaligned_be64(u64 val, void *p)
{
	*((__be64 *)p) = cpu_to_be64(val);
}
#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
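
These accessors are what the decompressor below uses to read 16-bit little-endian match distances at arbitrary byte offsets. A small illustration (the record layout here is invented purely to show the helpers and is not part of this patch):

struct example_rec {
	u16 len;
	u32 id;
};

/* Parse a packed, possibly unaligned record: LE u16 followed by BE u32 */
static inline void example_parse(const unsigned char *p, struct example_rec *r)
{
	r->len = get_unaligned_le16(p);		/* bytes 0..1, little endian */
	r->id  = get_unaligned_be32(p + 2);	/* bytes 2..5, big endian */
}
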
#ifndef _LINUX_UNALIGNED_GENERIC_H
#define _LINUX_UNALIGNED_GENERIC_H
/* define __force to nothing in U-Boot */
#define __force
/*
* Cause a link-time error if we try an unaligned access other than
* 1,2,4 or 8 bytes long
*/
extern void __bad_unaligned_access_size(void);
#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \
	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
	__bad_unaligned_access_size())))); \
	}))

#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \
	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
	__bad_unaligned_access_size())))); \
	}))

#define __put_unaligned_le(val, ptr) ({ \
	void *__gu_p = (ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: \
		*(u8 *)__gu_p = (__force u8)(val); \
		break; \
	case 2: \
		put_unaligned_le16((__force u16)(val), __gu_p); \
		break; \
	case 4: \
		put_unaligned_le32((__force u32)(val), __gu_p); \
		break; \
	case 8: \
		put_unaligned_le64((__force u64)(val), __gu_p); \
		break; \
	default: \
		__bad_unaligned_access_size(); \
		break; \
	} \
	(void)0; })

#define __put_unaligned_be(val, ptr) ({ \
	void *__gu_p = (ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: \
		*(u8 *)__gu_p = (__force u8)(val); \
		break; \
	case 2: \
		put_unaligned_be16((__force u16)(val), __gu_p); \
		break; \
	case 4: \
		put_unaligned_be32((__force u32)(val), __gu_p); \
		break; \
	case 8: \
		put_unaligned_be64((__force u64)(val), __gu_p); \
		break; \
	default: \
		__bad_unaligned_access_size(); \
		break; \
	} \
	(void)0; })
#endif /* _LINUX_UNALIGNED_GENERIC_H */
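
Because the dispatch is on sizeof(*(ptr)), the pointer type chosen by the caller selects the right accessor at compile time; the COPY4() macro in lzo1x_decompress.c below relies on exactly that by casting to u32 *. A tiny sketch (hypothetical helper name, and it assumes <asm/unaligned.h> has mapped get_unaligned/put_unaligned to these helpers):

/* Illustrative only: move 4 bytes between two unaligned locations.
 * Casting to u32 * makes __get_unaligned_be/__put_unaligned_be expand
 * to the 32-bit accessors, so the bytes round-trip unchanged. */
static inline void example_copy4(void *dst, const void *src)
{
	put_unaligned(get_unaligned((const u32 *)src), (u32 *)dst);
}
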
#
# (C) Copyright 2008
# Stefan Roese, DENX Software Engineering, sr@denx.de.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
include $(TOPDIR)/config.mk
LIB = $(obj)liblzo.a
SOBJS =
COBJS-$(CONFIG_LZO) += lzo1x_decompress.o
COBJS = $(COBJS-y)
SRCS := $(SOBJS:.o=.S) $(COBJS:.o=.c)
OBJS := $(addprefix $(obj),$(SOBJS) $(COBJS))
$(LIB):	$(obj).depend $(OBJS)
	$(AR) $(ARFLAGS) $@ $(OBJS)
#########################################################################
# defines $(obj).depend target
include $(SRCTREE)/rules.mk
sinclude $(obj).depend
#########################################################################
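
lzo1x_decompress.o is only built when CONFIG_LZO is set. At this point in U-Boot's history that is typically done from the board configuration header; a hedged example (the board header path is a placeholder, not part of this patch):

/* include/configs/<board>.h -- placeholder, not part of this patch */
#define CONFIG_LZO	/* build lib_generic/lzo/liblzo.a (LZO decompressor) */
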
/*
* LZO1X Decompressor from MiniLZO
*
* Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
*
* The full LZO package can be found at:
* http://www.oberhumer.com/opensource/lzo/
*
* Changed for kernel use by:
* Nitin Gupta <nitingupta910@gmail.com>
* Richard Purdie <rpurdie@openedhand.com>
*/
#include <common.h>
#include <linux/lzo.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include "lzodefs.h"
#define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x))
#define HAVE_OP(x, op_end, op) ((size_t)(op_end - op) < (x))
#define HAVE_LB(m_pos, out, op) (m_pos < out || m_pos >= op)

#define COPY4(dst, src) \
	put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))

int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
			unsigned char *out, size_t *out_len)
{
	const unsigned char * const ip_end = in + in_len;
	unsigned char * const op_end = out + *out_len;
	const unsigned char *ip = in, *m_pos;
	unsigned char *op = out;
	size_t t;

	*out_len = 0;

	if (*ip > 17) {
		t = *ip++ - 17;
		if (t < 4)
			goto match_next;
		if (HAVE_OP(t, op_end, op))
			goto output_overrun;
		if (HAVE_IP(t + 1, ip_end, ip))
			goto input_overrun;
		do {
			*op++ = *ip++;
		} while (--t > 0);
		goto first_literal_run;
	}

	while ((ip < ip_end)) {
		t = *ip++;
		if (t >= 16)
			goto match;
		if (t == 0) {
			if (HAVE_IP(1, ip_end, ip))
				goto input_overrun;
			while (*ip == 0) {
				t += 255;
				ip++;
				if (HAVE_IP(1, ip_end, ip))
					goto input_overrun;
			}
			t += 15 + *ip++;
		}
		if (HAVE_OP(t + 3, op_end, op))
			goto output_overrun;
		if (HAVE_IP(t + 4, ip_end, ip))
			goto input_overrun;

		COPY4(op, ip);
		op += 4;
		ip += 4;
		if (--t > 0) {
			if (t >= 4) {
				do {
					COPY4(op, ip);
					op += 4;
					ip += 4;
					t -= 4;
				} while (t >= 4);
				if (t > 0) {
					do {
						*op++ = *ip++;
					} while (--t > 0);
				}
			} else {
				do {
					*op++ = *ip++;
				} while (--t > 0);
			}
		}

first_literal_run:
		t = *ip++;
		if (t >= 16)
			goto match;
		m_pos = op - (1 + M2_MAX_OFFSET);
		m_pos -= t >> 2;
		m_pos -= *ip++ << 2;

		if (HAVE_LB(m_pos, out, op))
			goto lookbehind_overrun;

		if (HAVE_OP(3, op_end, op))
			goto output_overrun;
		*op++ = *m_pos++;
		*op++ = *m_pos++;
		*op++ = *m_pos;

		goto match_done;

		do {
match:
			if (t >= 64) {
				m_pos = op - 1;
				m_pos -= (t >> 2) & 7;
				m_pos -= *ip++ << 3;
				t = (t >> 5) - 1;
				if (HAVE_LB(m_pos, out, op))
					goto lookbehind_overrun;
				if (HAVE_OP(t + 3 - 1, op_end, op))
					goto output_overrun;
				goto copy_match;
			} else if (t >= 32) {
				t &= 31;
				if (t == 0) {
					if (HAVE_IP(1, ip_end, ip))
						goto input_overrun;
					while (*ip == 0) {
						t += 255;
						ip++;
						if (HAVE_IP(1, ip_end, ip))
							goto input_overrun;
					}
					t += 31 + *ip++;
				}
				m_pos = op - 1;
				m_pos -= get_unaligned_le16(ip) >> 2;
				ip += 2;
			} else if (t >= 16) {
				m_pos = op;
				m_pos -= (t & 8) << 11;

				t &= 7;
				if (t == 0) {
					if (HAVE_IP(1, ip_end, ip))
						goto input_overrun;
					while (*ip == 0) {
						t += 255;
						ip++;
						if (HAVE_IP(1, ip_end, ip))
							goto input_overrun;
					}
					t += 7 + *ip++;
				}
				m_pos -= get_unaligned_le16(ip) >> 2;
				ip += 2;
				if (m_pos == op)
					goto eof_found;
				m_pos -= 0x4000;
			} else {
				m_pos = op - 1;
				m_pos -= t >> 2;
				m_pos -= *ip++ << 2;

				if (HAVE_LB(m_pos, out, op))
					goto lookbehind_overrun;
				if (HAVE_OP(2, op_end, op))
					goto output_overrun;

				*op++ = *m_pos++;
				*op++ = *m_pos;
				goto match_done;
			}

			if (HAVE_LB(m_pos, out, op))
				goto lookbehind_overrun;
			if (HAVE_OP(t + 3 - 1, op_end, op))
				goto output_overrun;

			if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) {
				COPY4(op, m_pos);
				op += 4;
				m_pos += 4;
				t -= 4 - (3 - 1);
				do {
					COPY4(op, m_pos);
					op += 4;
					m_pos += 4;
					t -= 4;
				} while (t >= 4);
				if (t > 0)
					do {
						*op++ = *m_pos++;
					} while (--t > 0);
			} else {
copy_match:
				*op++ = *m_pos++;
				*op++ = *m_pos++;
				do {
					*op++ = *m_pos++;
				} while (--t > 0);
			}
match_done:
			t = ip[-2] & 3;
			if (t == 0)
				break;
match_next:
			if (HAVE_OP(t, op_end, op))
				goto output_overrun;
			if (HAVE_IP(t + 1, ip_end, ip))
				goto input_overrun;
			*op++ = *ip++;
			if (t > 1) {
				*op++ = *ip++;
				if (t > 2)
					*op++ = *ip++;
			}
			t = *ip++;
		} while (ip < ip_end);
	}

	*out_len = op - out;
	return LZO_E_EOF_NOT_FOUND;

eof_found:
	*out_len = op - out;
	return (ip == ip_end ? LZO_E_OK :
		(ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN));

input_overrun:
	*out_len = op - out;
	return LZO_E_INPUT_OVERRUN;

output_overrun:
	*out_len = op - out;
	return LZO_E_OUTPUT_OVERRUN;

lookbehind_overrun:
	*out_len = op - out;
	return LZO_E_LOOKBEHIND_OVERRUN;
}
/*
* lzodefs.h -- architecture, OS and compiler specific defines
*
* Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
*
* The full LZO package can be found at:
* http://www.oberhumer.com/opensource/lzo/
*
* Changed for kernel use by:
* Nitin Gupta <nitingupta910@gmail.com>
* Richard Purdie <rpurdie@openedhand.com>
*/
#define LZO_VERSION 0x2020
#define LZO_VERSION_STRING "2.02"
#define LZO_VERSION_DATE "Oct 17 2005"
#define M1_MAX_OFFSET 0x0400
#define M2_MAX_OFFSET 0x0800
#define M3_MAX_OFFSET 0x4000
#define M4_MAX_OFFSET 0xbfff
#define M1_MIN_LEN 2
#define M1_MAX_LEN 2
#define M2_MIN_LEN 3
#define M2_MAX_LEN 8
#define M3_MIN_LEN 3
#define M3_MAX_LEN 33
#define M4_MIN_LEN 3
#define M4_MAX_LEN 9
#define M1_MARKER 0
#define M2_MARKER 64
#define M3_MARKER 32
#define M4_MARKER 16
#define D_BITS 14
#define D_MASK ((1u << D_BITS) - 1)
#define D_HIGH ((D_MASK >> 1) + 1)
#define DX2(p, s1, s2) (((((size_t)((p)[2]) << (s2)) ^ (p)[1]) \
			<< (s1)) ^ (p)[0])
#define DX3(p, s1, s2, s3) ((DX2((p)+1, s2, s3) << (s1)) ^ (p)[0])
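
For orientation, the M*_MARKER values correspond to the instruction-byte thresholds tested in lzo1x_decompress_safe() above; roughly:

/*
 * t >= M2_MARKER (64): short match, distance up to M2_MAX_OFFSET (0x0800)
 * t >= M3_MARKER (32): match with distance up to M3_MAX_OFFSET (0x4000)
 * t >= M4_MARKER (16): far match up to M4_MAX_OFFSET (0xbfff), or the
 *                      end-of-stream marker when the distance field is 0
 * t <  16:             literal run in the main loop, or a 2-byte M1 match
 *                      (distance up to M1_MAX_OFFSET) inside the match loop
 */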