mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-29 17:22:07 +00:00
98988fc8e9
Import upstream zstd v1.5.5 to expose upstream's QAT integration.
Import from upstream commit 58b3ef79 [0]. This is one commit before the
tag v1.5.5-kernel [1], which is signed with upstream's signing key. The
next patch in the series imports from v1.5.5-kernel, and is included in
the series, rather than just importing directly from v1.5.5-kernel,
because it is a non-trivial patch applied to improve the kernel's
decompression speed. This commit contains 3 backported patches on top of
v1.5.5: Two from the Linux copy of zstd, and one from upstream's `dev`
branch.
In addition to keeping the kernel's copy of zstd up to date, this update
was requested by Intel to expose upstream zstd's external match provider
API to the kernel, which allows QAT to accelerate the LZ match finding
stage.
This commit was generated by:
export ZSTD=/path/to/repo/zstd/
export LINUX=/path/to/repo/linux/
cd "$ZSTD/contrib/linux-kernel"
git checkout v1.5.5-kernel~
make import LINUX="$LINUX"
I tested and benchmarked this commit on x86-64 with gcc-13.2.1 on an
Intel i9-9900K by running my benchmark scripts that benchmark zstd's
performance in btrfs and squashfs compressed filesystems. This commit
improves compression speed, especially for higher compression levels,
and regresses decompression speed. But the decompression speed
regression is addressed by the next patch in the series.
Component, Level, C. time delta, size delta, D. time delta
Btrfs , 1, -1.9%, +0.0%, +9.5%
Btrfs , 3, -5.6%, +0.0%, +7.4%
Btrfs , 5, -4.9%, +0.0%, +5.0%
Btrfs , 7, -5.7%, +0.0%, +5.2%
Btrfs , 9, -5.7%, +0.0%, +4.0%
Squashfs , 1, N/A, 0.0%, +11.6%
I also boot tested with a zstd compressed kernel on i386 and aarch64.
Link: 58b3ef79eb
Link: https://github.com/facebook/zstd/tree/v1.5.5-kernel
Signed-off-by: Nick Terrell <terrelln@fb.com>
167 lines
6.7 KiB
C
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
 * hist : Histogram functions
 * part of Finite State Entropy project
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 ****************************************************************** */
|
/* --- dependencies --- */
#include "../common/mem.h"             /* U32, BYTE, etc. */
#include "../common/debug.h"           /* assert, DEBUGLOG */
#include "../common/error_private.h"   /* ERROR */
#include "hist.h"
|
|
|
|
|
|
/* --- Error management --- */
|
|
unsigned HIST_isError(size_t code) { return ERR_isError(code); }
|
|
|
|
/*-**************************************************************
|
|
* Histogram functions
|
|
****************************************************************/
|
|
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
|
|
const void* src, size_t srcSize)
|
|
{
|
|
const BYTE* ip = (const BYTE*)src;
|
|
const BYTE* const end = ip + srcSize;
|
|
unsigned maxSymbolValue = *maxSymbolValuePtr;
|
|
unsigned largestCount=0;
|
|
|
|
ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
|
|
if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
|
|
|
|
while (ip<end) {
|
|
assert(*ip <= maxSymbolValue);
|
|
count[*ip++]++;
|
|
}
|
|
|
|
while (!count[maxSymbolValue]) maxSymbolValue--;
|
|
*maxSymbolValuePtr = maxSymbolValue;
|
|
|
|
{ U32 s;
|
|
for (s=0; s<=maxSymbolValue; s++)
|
|
if (count[s] > largestCount) largestCount = count[s];
|
|
}
|
|
|
|
return largestCount;
|
|
}
|
|
|
|
/* selects whether HIST_count_parallel_wksp() validates symbols against *maxSymbolValuePtr */
typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
|
|
|
|
/* HIST_count_parallel_wksp() :
 * store histogram into 4 intermediate tables, recombined at the end.
 * this design makes better use of OoO cpus,
 * and is noticeably faster when some values are heavily repeated.
 * But it needs some additional workspace for intermediate tables.
 * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
 * @return : largest histogram frequency,
 *           or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
static size_t HIST_count_parallel_wksp(
                          unsigned* count, unsigned* maxSymbolValuePtr,
                          const void* source, size_t sourceSize,
                          HIST_checkInput_e check,
                          U32* const workSpace)
{
    const BYTE* ip = (const BYTE*)source;
    const BYTE* const iend = ip+sourceSize;
    /* output size in bytes, based on the caller-declared alphabet */
    size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
    unsigned max=0;
    /* 4 independent 256-entry tables, carved out of workSpace :
     * increments to distinct tables have no data dependency on each other */
    U32* const Counting1 = workSpace;
    U32* const Counting2 = Counting1 + 256;
    U32* const Counting3 = Counting2 + 256;
    U32* const Counting4 = Counting3 + 256;

    /* safety checks */
    assert(*maxSymbolValuePtr <= 255);
    if (!sourceSize) {
        /* empty input : zero the histogram, report an empty alphabet */
        ZSTD_memset(count, 0, countSize);
        *maxSymbolValuePtr = 0;
        return 0;
    }
    ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));

    /* by stripes of 16 bytes */
    /* `cached` always holds the next 4 bytes while the previous 4 are being
     * binned, hiding load latency behind the table increments.
     * NOTE(review): the initial MEM_read32 overreads `source` when
     * sourceSize < 4 (reachable via HIST_count_wksp) — presumably tolerated
     * by allocation slack upstream; verify. */
    {   U32 cached = MEM_read32(ip); ip += 4;
        while (ip < iend-15) {
            U32 c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[ c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[ c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[ c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[ c>>24 ]++;
        }
        /* the last word loaded into `cached` was never counted : rewind so
         * the tail loop below re-reads those bytes one at a time */
        ip-=4;
    }

    /* finish last symbols */
    while (ip<iend) Counting1[*ip++]++;

    /* fold the 4 partial tables into Counting1, tracking the peak frequency */
    {   U32 s;
        for (s=0; s<256; s++) {
            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
            if (Counting1[s] > max) max = Counting1[s];
    }   }

    {   unsigned maxSymbolValue = 255;
        /* must terminate : sourceSize != 0 guarantees at least one nonzero bucket */
        while (!Counting1[maxSymbolValue]) maxSymbolValue--;
        /* in checked mode, reject inputs whose alphabet exceeds the declared one */
        if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
        *maxSymbolValuePtr = maxSymbolValue;
        ZSTD_memmove(count, Counting1, countSize);   /* in case count & Counting1 are overlapping */
    }
    return (size_t)max;
}
|
|
|
|
/* HIST_countFast_wksp() :
|
|
* Same as HIST_countFast(), but using an externally provided scratch buffer.
|
|
* `workSpace` is a writable buffer which must be 4-bytes aligned,
|
|
* `workSpaceSize` must be >= HIST_WKSP_SIZE
|
|
*/
|
|
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
|
|
const void* source, size_t sourceSize,
|
|
void* workSpace, size_t workSpaceSize)
|
|
{
|
|
if (sourceSize < 1500) /* heuristic threshold */
|
|
return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
|
|
if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
|
|
if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
|
|
return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
|
|
}
|
|
|
|
/* HIST_count_wksp() :
|
|
* Same as HIST_count(), but using an externally provided scratch buffer.
|
|
* `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
|
|
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
|
|
const void* source, size_t sourceSize,
|
|
void* workSpace, size_t workSpaceSize)
|
|
{
|
|
if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
|
|
if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
|
|
if (*maxSymbolValuePtr < 255)
|
|
return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
|
|
*maxSymbolValuePtr = 255;
|
|
return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
|
|
}
|
|
|