sys/contrib/zstd/lib/compress/zstd_cwksp.h

/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/zstd_internal.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Set our tables and aligneds to align by 64 bytes */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                         ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used. These
 *   buffers are each aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned/Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 * (An illustrative usage sketch follows the ZSTD_cwksp struct definition below.)
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
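
/*
 * Illustrative usage sketch (not part of the zstd sources): how a caller might
 * carve allocations out of a caller-provided buffer, following the phase order
 * described above (objects, then buffers, then aligned/tables). The object type
 * exampleObject_t, the sizes, and the function name are hypothetical stand-ins
 * for what the ZSTD_CCtx / ZSTD_CDict setup paths actually reserve.
 *
 *     typedef struct { void* p; size_t n; } exampleObject_t;
 *
 *     static int example_carve(void* mem, size_t memSize)
 *     {
 *         ZSTD_cwksp ws;
 *         // mem is assumed suitably aligned for pointers (see ZSTD_cwksp_init())
 *         ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);
 *
 *         {   // Phase 1: objects, packed at the front of the workspace
 *             exampleObject_t* obj =
 *                 (exampleObject_t*)ZSTD_cwksp_reserve_object(&ws, sizeof(exampleObject_t));
 *
 *             // Phase 2: unaligned buffers, carved from the end of the workspace
 *             BYTE* litBuffer = ZSTD_cwksp_reserve_buffer(&ws, 1 << 17);
 *
 *             // Phase 3: aligned buffers and tables
 *             U32* hashTable = (U32*)ZSTD_cwksp_reserve_table(&ws, (1u << 17) * sizeof(U32));
 *
 *             (void)obj; (void)litBuffer; (void)hashTable;
 *         }
 *         return ZSTD_cwksp_reserve_failed(&ws) ? -1 : 0;
 *     }
 */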

/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
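
/* Worked example (illustrative): ZSTD_cwksp_align(100, 64) == 128, since
 * (100 + 63) & ~63 rounds up to the next multiple of 64; sizes that are
 * already multiples of the alignment are returned unchanged. */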

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is, though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}
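
/* Worked example (illustrative): for a 100-byte request,
 * ZSTD_cwksp_aligned_alloc_size(100) first rounds the size up to 128 bytes;
 * under ASAN (and without ZSTD_ASAN_DONT_POISON_WORKSPACE) both helpers then
 * add 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE = 256 bytes of redzone, so the reported
 * cost becomes 384 bytes instead of 128. */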

/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
     * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
     * to align the beginning of the aligned section.
     *
     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
     * aligneds being sized in multiples of 64 bytes.
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
    return slackSpace;
}


/**
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert((alignBytes & alignBytesMask) == 0);
    assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
    return bytes;
}
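
/* Worked example (illustrative): for a pointer whose address is 48 mod 64 and
 * alignBytes == 64, the function returns (64 - 48) & 63 == 16; for an already
 * 64-byte-aligned pointer it returns (64 - 0) & 63 == 0, never 64. */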

/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating buffers */
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }

        /* Going from allocating buffers to allocating aligneds/tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            {   /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
                size_t const bytesToAlign =
                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
                                memory_allocation, "aligned phase - alignment initial allocation failed!");
            }
            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void* const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void* const objectEnd = (BYTE*)alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;  /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
        }   }   }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            __asan_unpoison_memory_region(alloc, bytes);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory whose size is rounded up to a multiple of
 * ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes) and which is aligned on that boundary.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                            ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
    return ptr;
}

/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc;
    void* end;
    void* top;

    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
        return NULL;
    }
    alloc = ws->tableEnd;
    end = (BYTE *)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
    return alloc;
}
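
/* Illustrative example (not from the zstd sources): the tables reserved here
 * are the matchState hash/chain tables, whose sizes derive from the cparams.
 * For a hypothetical hashLog of 17, a caller would reserve
 * ZSTD_cwksp_reserve_table(ws, (1u << 17) * sizeof(U32)), i.e. 512 KiB, which
 * satisfies both the sizeof(U32)-multiple and 64-byte-multiple assertions
 * above. */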

/**
 * Aligned on sizeof(void*).
 * Note: should happen only once, at first workspace initialization.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        __msan_poison(ws->objectEnd, size);
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
     * every time we start a new compression. */
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
        __msan_poison(ws->tableValidEnd, size);
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}
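
/* Illustrative sketch (not part of the zstd sources): how the clear/clean
 * helpers above might be combined when a workspace is reused for another
 * compression. The sequencing here is a hedged sketch; the authoritative
 * callers are the ZSTD_CCtx reset paths.
 *
 *     // Start a new compression in an already-initialized workspace:
 *     ZSTD_cwksp_clear(ws);            // drop buffer/aligned/table allocations
 *     // ... re-reserve buffers, aligneds, and tables in phase order ...
 *
 *     // If the tables' contents can no longer be trusted to stay in range:
 *     ZSTD_cwksp_mark_tables_dirty(ws);
 *     ZSTD_cwksp_clean_tables(ws);     // zero only the not-yet-clean region
 */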

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}
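
/* Illustrative sketch (not part of the zstd sources): the dynamic lifecycle of
 * a workspace, pairing ZSTD_cwksp_create() with ZSTD_cwksp_free(). The use of
 * ZSTD_defaultCMem (zstd's "no custom allocators" value) and the function name
 * are assumptions for the sake of the example.
 *
 *     static size_t example_dynamic(size_t wsSize)
 *     {
 *         ZSTD_cwksp ws;
 *         size_t const err = ZSTD_cwksp_create(&ws, wsSize, ZSTD_defaultCMem);
 *         if (ZSTD_isError(err)) return err;
 *         // ... reserve objects/buffers/tables and run a compression ...
 *         ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);
 *         return 0;
 *     }
 */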

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable
 * limit of the actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
                                                        size_t const estimatedSpace, int resizedWorkspace) {
    if (resizedWorkspace) {
        /* Resized/newly allocated wksp should have exact bounds */
        return ZSTD_cwksp_used(ws) == estimatedSpace;
    } else {
        /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
         * than estimatedSpace. See the comments in zstd_cwksp.h for details.
         */
        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
    }
}


MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}
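
/* Illustrative note: the heuristic above only reports a workspace as wasteful
 * once its free space has been at least ZSTD_WORKSPACETOOLARGE_FACTOR times
 * the space actually needed for more than ZSTD_WORKSPACETOOLARGE_MAXDURATION
 * consecutive calls to ZSTD_cwksp_bump_oversized_duration(). A hypothetical
 * caller might react by shrinking the workspace:
 *
 *     ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);
 *     if (ZSTD_cwksp_check_wasteful(ws, neededSpace)) {
 *         // free and re-create the workspace at (roughly) neededSpace
 *     }
 */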

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */
