feat: indie status page MVP -- FastAPI + SQLite
- 8 DB models (services, incidents, monitors, subscribers, etc.) - Full CRUD API for services, incidents, monitors - Public status page with live data - Incident detail page with timeline - API key authentication - Uptime monitoring scheduler - 13 tests passing - TECHNICAL_DESIGN.md with full spec
This commit is contained in:
commit
902133edd3
4655 changed files with 1342691 additions and 0 deletions
|
|
@ -0,0 +1,76 @@
|
|||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "../../tables/tables.h"
|
||||
#include "../../codecs.h"
|
||||
#include "config.h"
|
||||
#include "../../env.h"
|
||||
|
||||
#if HAVE_AVX
|
||||
#if defined(__clang__)
|
||||
#pragma clang attribute push (__attribute__((target("avx"))), apply_to=function)
|
||||
#else
|
||||
#pragma GCC target("avx")
|
||||
#endif
|
||||
#include <immintrin.h>
|
||||
|
||||
// Only enable inline assembly on supported compilers and on 64-bit CPUs.
|
||||
#ifndef BASE64_AVX_USE_ASM
|
||||
# if (defined(__GNUC__) || defined(__clang__)) && BASE64_WORDSIZE == 64
|
||||
# define BASE64_AVX_USE_ASM 1
|
||||
# else
|
||||
# define BASE64_AVX_USE_ASM 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#include "../ssse3/dec_reshuffle.c"
|
||||
#include "../ssse3/dec_loop.c"
|
||||
|
||||
#if BASE64_AVX_USE_ASM
|
||||
# include "./enc_loop_asm.c"
|
||||
#else
|
||||
# include "../ssse3/enc_translate.c"
|
||||
# include "../ssse3/enc_reshuffle.c"
|
||||
# include "../ssse3/enc_loop.c"
|
||||
#endif
|
||||
|
||||
#endif // HAVE_AVX
|
||||
|
||||
// Public streaming-encode entry point for the AVX codec.
// The generic head/tail fragments are textually #included so the scalar
// buffer/state handling is shared with the other codecs; only the bulk loop
// in the middle is codec-specific. enc_head.c introduces the local cursors
// (s, slen, o, olen) consumed below — confirm against ../generic/enc_head.c.
void
base64_stream_encode_avx BASE64_ENC_PARAMS
{
#if HAVE_AVX
#include "../generic/enc_head.c"

	// For supported compilers, use a hand-optimized inline assembly
	// encoder. Otherwise fall back on the SSSE3 encoder, but compiled with
	// AVX flags to generate better optimized AVX code.

#if BASE64_AVX_USE_ASM
	enc_loop_avx(&s, &slen, &o, &olen);
#else
	enc_loop_ssse3(&s, &slen, &o, &olen);
#endif

#include "../generic/enc_tail.c"
#else
	// AVX support was not compiled in: delegate to the stub.
	base64_enc_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
||||
// Public streaming-decode entry point for the AVX codec.
// There is no AVX-specific decode loop: the SSSE3 loop is reused, compiled
// under AVX codegen flags. dec_head.c introduces the local cursors
// (s, slen, o, olen) consumed below — confirm against ../generic/dec_head.c.
int
base64_stream_decode_avx BASE64_DEC_PARAMS
{
#if HAVE_AVX
#include "../generic/dec_head.c"
	dec_loop_ssse3(&s, &slen, &o, &olen);
#include "../generic/dec_tail.c"
// Balance the `#pragma clang attribute push` issued near the top of this
// file.
#if defined(__clang__)
#pragma clang attribute pop
#endif
#else
	// AVX support was not compiled in: delegate to the stub.
	return base64_dec_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
|
@ -0,0 +1,264 @@
|
|||
// Apologies in advance for combining the preprocessor with inline assembly,
|
||||
// two notoriously gnarly parts of C, but it was necessary to avoid a lot of
|
||||
// code repetition. The preprocessor is used to template large sections of
|
||||
// inline assembly that differ only in the registers used. If the code was
|
||||
// written out by hand, it would become very large and hard to audit.
|
||||
|
||||
// Generate a block of inline assembly that loads register R0 from memory. The
// offset at which the register is loaded is set by the given round. Each
// round reads 12 source bytes (vlddqu: the source is not alignment-
// guaranteed).
#define LOAD(R0, ROUND) \
	"vlddqu ("#ROUND" * 12)(%[src]), %["R0"] \n\t"

// Generate a block of inline assembly that deinterleaves and shuffles register
// R0 using preloaded constants. Outputs in R0 and R1.
#define SHUF(R0, R1, R2) \
	"vpshufb %[lut0], %["R0"], %["R1"] \n\t" \
	"vpand %["R1"], %[msk0], %["R2"] \n\t" \
	"vpand %["R1"], %[msk2], %["R1"] \n\t" \
	"vpmulhuw %["R2"], %[msk1], %["R2"] \n\t" \
	"vpmullw %["R1"], %[msk3], %["R1"] \n\t" \
	"vpor %["R1"], %["R2"], %["R1"] \n\t"

// Generate a block of inline assembly that takes R0 and R1 and translates
// their contents to the base64 alphabet, using preloaded constants.
#define TRAN(R0, R1, R2) \
	"vpsubusb %[n51], %["R1"], %["R0"] \n\t" \
	"vpcmpgtb %[n25], %["R1"], %["R2"] \n\t" \
	"vpsubb %["R2"], %["R0"], %["R0"] \n\t" \
	"vpshufb %["R0"], %[lut1], %["R2"] \n\t" \
	"vpaddb %["R1"], %["R2"], %["R0"] \n\t"

// Generate a block of inline assembly that stores the given register R0 at an
// offset set by the given round. Each round writes 16 output bytes.
#define STOR(R0, ROUND) \
	"vmovdqu %["R0"], ("#ROUND" * 16)(%[dst]) \n\t"

// Generate a block of inline assembly that generates a single self-contained
// encoder round: fetch the data, process it, and store the result. Then update
// the source and destination pointers.
#define ROUND() \
	LOAD("a", 0) \
	SHUF("a", "b", "c") \
	TRAN("a", "b", "c") \
	STOR("a", 0) \
	"add $12, %[src] \n\t" \
	"add $16, %[dst] \n\t"

// Define a macro that initiates a three-way interleaved encoding round by
// preloading registers a, b and c from memory.
// The register graph shows which registers are in use during each step, and
// is a visual aid for choosing registers for that step. Symbol index:
//
//  + indicates that a register is loaded by that step.
//  | indicates that a register is in use and must not be touched.
//  - indicates that a register is decommissioned by that step.
//  x indicates that a register is used as a temporary by that step.
//  V indicates that a register is an input or output to the macro.
//
#define ROUND_3_INIT()			/* a b c d e f */ \
	LOAD("a", 0)			/* +           */ \
	SHUF("a", "d", "e")		/* |     + x   */ \
	LOAD("b", 1)			/* | +   |     */ \
	TRAN("a", "d", "e")		/* | |   | - x */ \
	LOAD("c", 2)			/* V V V       */

// Define a macro that translates, shuffles and stores the input registers A, B
// and C, and preloads registers D, E and F for the next round.
// This macro can be arbitrarily daisy-chained by feeding output registers D, E
// and F back into the next round as input registers A, B and C. The macro
// carefully interleaves memory operations with data operations for optimal
// pipelined performance.

#define ROUND_3(ROUND, A,B,C,D,E,F)	/* A B C D E F */ \
	LOAD(D, (ROUND + 3))		/* V V V +     */ \
	SHUF(B, E, F)			/* | | | | + x */ \
	STOR(A, (ROUND + 0))		/* - | | | |   */ \
	TRAN(B, E, F)			/*   | | | - x */ \
	LOAD(E, (ROUND + 4))		/*   | | | +   */ \
	SHUF(C, A, F)			/* + | | | | x */ \
	STOR(B, (ROUND + 1))		/* | - | | |   */ \
	TRAN(C, A, F)			/* | - | | | x */ \
	LOAD(F, (ROUND + 5))		/* |   | | | + */ \
	SHUF(D, A, B)			/* + x | | | | */ \
	STOR(C, (ROUND + 2))		/* |   - | | | */ \
	TRAN(D, A, B)			/* - x   V V V */

// Define a macro that terminates a ROUND_3 macro by taking pre-loaded
// registers D, E and F, and translating, shuffling and storing them.
#define ROUND_3_END(ROUND, A,B,C,D,E,F)	/* A B C D E F */ \
	SHUF(E, A, B)			/* + x   V V V */ \
	STOR(D, (ROUND + 3))		/* |     - | | */ \
	TRAN(E, A, B)			/* - x     | | */ \
	SHUF(F, C, D)			/*     + x | | */ \
	STOR(E, (ROUND + 4))		/*     |   - | */ \
	TRAN(F, C, D)			/*     - x   | */ \
	STOR(F, (ROUND + 5))		/*           - */

// Define a type A round. Inputs are a, b, and c, outputs are d, e, and f.
#define ROUND_3_A(ROUND) \
	ROUND_3(ROUND, "a", "b", "c", "d", "e", "f")

// Define a type B round. Inputs and outputs are swapped with regard to type A.
#define ROUND_3_B(ROUND) \
	ROUND_3(ROUND, "d", "e", "f", "a", "b", "c")

// Terminating macro for a type A round.
#define ROUND_3_A_LAST(ROUND) \
	ROUND_3_A(ROUND) \
	ROUND_3_END(ROUND, "a", "b", "c", "d", "e", "f")

// Terminating macro for a type B round.
#define ROUND_3_B_LAST(ROUND) \
	ROUND_3_B(ROUND) \
	ROUND_3_END(ROUND, "d", "e", "f", "a", "b", "c")
|
||||
|
||||
// Suppress clang's warning that the literal string in the asm statement is
// overlong (longer than the ISO-mandated minimum size of 4095 bytes for C99
// compilers). It may be true, but the goal here is not C99 portability.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Woverlength-strings"

// Bulk AVX encoder loop, written with the inline-assembly round templates
// defined above. Consumes 12 input bytes and produces 16 base64 characters
// per round, advancing *s/*o and adjusting *slen/*olen accordingly; the
// leftover bytes are handled by the caller's scalar tail.
static inline void
enc_loop_avx (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	// For a clearer explanation of the algorithm used by this function,
	// please refer to the plain (not inline assembly) implementation. This
	// function follows the same basic logic.

	// Not enough input for one full 16-byte vector load: do nothing.
	if (*slen < 16) {
		return;
	}

	// Process blocks of 12 bytes at a time. Input is read in blocks of 16
	// bytes, so "reserve" four bytes from the input buffer to ensure that
	// we never read beyond the end of the input buffer.
	size_t rounds = (*slen - 4) / 12;

	*slen -= rounds * 12;	// 12 bytes consumed per round
	*olen += rounds * 16;	// 16 bytes produced per round

	// Number of times to go through the 36x loop.
	size_t loops = rounds / 36;

	// Number of rounds remaining after the 36x loop.
	rounds %= 36;

	// Lookup tables.
	const __m128i lut0 = _mm_set_epi8(
		10, 11, 9, 10, 7, 8, 6, 7, 4, 5, 3, 4, 1, 2, 0, 1);

	const __m128i lut1 = _mm_setr_epi8(
		65, 71, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -19, -16, 0, 0);

	// Temporary registers.
	__m128i a, b, c, d, e, f;

	__asm__ volatile (

		// If there are 36 rounds or more, enter a 36x unrolled loop of
		// interleaved encoding rounds. The rounds interleave memory
		// operations (load/store) with data operations (table lookups,
		// etc) to maximize pipeline throughput.
		" test %[loops], %[loops] \n\t"
		" jz 18f \n\t"
		" jmp 36f \n\t"
		" \n\t"
		".balign 64 \n\t"
		"36: " ROUND_3_INIT()
		" " ROUND_3_A( 0)
		" " ROUND_3_B( 3)
		" " ROUND_3_A( 6)
		" " ROUND_3_B( 9)
		" " ROUND_3_A(12)
		" " ROUND_3_B(15)
		" " ROUND_3_A(18)
		" " ROUND_3_B(21)
		" " ROUND_3_A(24)
		" " ROUND_3_B(27)
		" " ROUND_3_A_LAST(30)
		" add $(12 * 36), %[src] \n\t"
		" add $(16 * 36), %[dst] \n\t"
		" dec %[loops] \n\t"
		" jnz 36b \n\t"

		// Enter an 18x unrolled loop for rounds of 18 or more.
		"18: cmp $18, %[rounds] \n\t"
		" jl 9f \n\t"
		" " ROUND_3_INIT()
		" " ROUND_3_A(0)
		" " ROUND_3_B(3)
		" " ROUND_3_A(6)
		" " ROUND_3_B(9)
		" " ROUND_3_A_LAST(12)
		" sub $18, %[rounds] \n\t"
		" add $(12 * 18), %[src] \n\t"
		" add $(16 * 18), %[dst] \n\t"

		// Enter a 9x unrolled loop for rounds of 9 or more.
		"9: cmp $9, %[rounds] \n\t"
		" jl 6f \n\t"
		" " ROUND_3_INIT()
		" " ROUND_3_A(0)
		" " ROUND_3_B_LAST(3)
		" sub $9, %[rounds] \n\t"
		" add $(12 * 9), %[src] \n\t"
		" add $(16 * 9), %[dst] \n\t"

		// Enter a 6x unrolled loop for rounds of 6 or more.
		"6: cmp $6, %[rounds] \n\t"
		" jl 55f \n\t"
		" " ROUND_3_INIT()
		" " ROUND_3_A_LAST(0)
		" sub $6, %[rounds] \n\t"
		" add $(12 * 6), %[src] \n\t"
		" add $(16 * 6), %[dst] \n\t"

		// Dispatch the remaining rounds 0..5.
		"55: cmp $3, %[rounds] \n\t"
		" jg 45f \n\t"
		" je 3f \n\t"
		" cmp $1, %[rounds] \n\t"
		" jg 2f \n\t"
		" je 1f \n\t"
		" jmp 0f \n\t"

		"45: cmp $4, %[rounds] \n\t"
		" je 4f \n\t"

		// Block of non-interlaced encoding rounds, which can each
		// individually be jumped to. Rounds fall through to the next.
		"5: " ROUND()
		"4: " ROUND()
		"3: " ROUND()
		"2: " ROUND()
		"1: " ROUND()
		"0: \n\t"

		// Outputs (modified).
		: [rounds] "+r" (rounds),
		  [loops] "+r" (loops),
		  [src] "+r" (*s),
		  [dst] "+r" (*o),
		  [a] "=&x" (a),
		  [b] "=&x" (b),
		  [c] "=&x" (c),
		  [d] "=&x" (d),
		  [e] "=&x" (e),
		  [f] "=&x" (f)

		// Inputs (not modified).
		: [lut0] "x" (lut0),
		  [lut1] "x" (lut1),
		  [msk0] "x" (_mm_set1_epi32(0x0FC0FC00)),
		  [msk1] "x" (_mm_set1_epi32(0x04000040)),
		  [msk2] "x" (_mm_set1_epi32(0x003F03F0)),
		  [msk3] "x" (_mm_set1_epi32(0x01000010)),
		  [n51] "x" (_mm_set1_epi8(51)),
		  [n25] "x" (_mm_set1_epi8(25))

		// Clobbers.
		: "cc", "memory"
	);
}

#pragma GCC diagnostic pop
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "../../tables/tables.h"
|
||||
#include "../../codecs.h"
|
||||
#include "config.h"
|
||||
#include "../../env.h"
|
||||
|
||||
#if HAVE_AVX2
|
||||
#if defined(__clang__)
|
||||
#pragma clang attribute push (__attribute__((target("avx2"))), apply_to=function)
|
||||
#else
|
||||
#pragma GCC target("avx2")
|
||||
#endif
|
||||
#include <immintrin.h>
|
||||
|
||||
// Only enable inline assembly on supported compilers and on 64-bit CPUs.
|
||||
#ifndef BASE64_AVX2_USE_ASM
|
||||
# if (defined(__GNUC__) || defined(__clang__)) && BASE64_WORDSIZE == 64
|
||||
# define BASE64_AVX2_USE_ASM 1
|
||||
# else
|
||||
# define BASE64_AVX2_USE_ASM 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#include "./dec_reshuffle.c"
|
||||
#include "./dec_loop.c"
|
||||
|
||||
#if BASE64_AVX2_USE_ASM
|
||||
# include "./enc_loop_asm.c"
|
||||
#else
|
||||
# include "./enc_translate.c"
|
||||
# include "./enc_reshuffle.c"
|
||||
# include "./enc_loop.c"
|
||||
#endif
|
||||
|
||||
#endif // HAVE_AVX2
|
||||
|
||||
// Public streaming-encode entry point for the AVX2 codec.
// The generic head/tail fragments are textually #included so the scalar
// buffer/state handling is shared with the other codecs; only the bulk loop
// in the middle is codec-specific. enc_head.c introduces the local cursors
// (s, slen, o, olen) consumed below — confirm against ../generic/enc_head.c.
void
base64_stream_encode_avx2 BASE64_ENC_PARAMS
{
#if HAVE_AVX2
#include "../generic/enc_head.c"
	enc_loop_avx2(&s, &slen, &o, &olen);
#include "../generic/enc_tail.c"
#else
	// AVX2 support was not compiled in: delegate to the stub.
	base64_enc_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
||||
// Public streaming-decode entry point for the AVX2 codec.
// dec_head.c introduces the local cursors (s, slen, o, olen) consumed below —
// confirm against ../generic/dec_head.c.
int
base64_stream_decode_avx2 BASE64_DEC_PARAMS
{
#if HAVE_AVX2
#include "../generic/dec_head.c"
	dec_loop_avx2(&s, &slen, &o, &olen);
#include "../generic/dec_tail.c"
// Balance the `#pragma clang attribute push` issued near the top of this
// file.
#if defined(__clang__)
#pragma clang attribute pop
#endif
#else
	// AVX2 support was not compiled in: delegate to the stub.
	return base64_dec_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
|
@ -0,0 +1,110 @@
|
|||
static BASE64_FORCE_INLINE int
|
||||
dec_loop_avx2_inner (const uint8_t **s, uint8_t **o, size_t *rounds)
|
||||
{
|
||||
const __m256i lut_lo = _mm256_setr_epi8(
|
||||
0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
|
||||
0x11, 0x11, 0x13, 0x1A, 0x1B, 0x1B, 0x1B, 0x1A,
|
||||
0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
|
||||
0x11, 0x11, 0x13, 0x1A, 0x1B, 0x1B, 0x1B, 0x1A);
|
||||
|
||||
const __m256i lut_hi = _mm256_setr_epi8(
|
||||
0x10, 0x10, 0x01, 0x02, 0x04, 0x08, 0x04, 0x08,
|
||||
0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
|
||||
0x10, 0x10, 0x01, 0x02, 0x04, 0x08, 0x04, 0x08,
|
||||
0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10);
|
||||
|
||||
const __m256i lut_roll = _mm256_setr_epi8(
|
||||
0, 16, 19, 4, -65, -65, -71, -71,
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 16, 19, 4, -65, -65, -71, -71,
|
||||
0, 0, 0, 0, 0, 0, 0, 0);
|
||||
|
||||
const __m256i mask_2F = _mm256_set1_epi8(0x2F);
|
||||
|
||||
// Load input:
|
||||
__m256i str = _mm256_loadu_si256((__m256i *) *s);
|
||||
|
||||
// See the SSSE3 decoder for an explanation of the algorithm.
|
||||
const __m256i hi_nibbles = _mm256_and_si256(_mm256_srli_epi32(str, 4), mask_2F);
|
||||
const __m256i lo_nibbles = _mm256_and_si256(str, mask_2F);
|
||||
const __m256i hi = _mm256_shuffle_epi8(lut_hi, hi_nibbles);
|
||||
const __m256i lo = _mm256_shuffle_epi8(lut_lo, lo_nibbles);
|
||||
|
||||
if (!_mm256_testz_si256(lo, hi)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const __m256i eq_2F = _mm256_cmpeq_epi8(str, mask_2F);
|
||||
const __m256i roll = _mm256_shuffle_epi8(lut_roll, _mm256_add_epi8(eq_2F, hi_nibbles));
|
||||
|
||||
// Now simply add the delta values to the input:
|
||||
str = _mm256_add_epi8(str, roll);
|
||||
|
||||
// Reshuffle the input to packed 12-byte output format:
|
||||
str = dec_reshuffle(str);
|
||||
|
||||
// Store the output:
|
||||
_mm256_storeu_si256((__m256i *) *o, str);
|
||||
|
||||
*s += 32;
|
||||
*o += 24;
|
||||
*rounds -= 1;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Bulk AVX2 decoder loop: decodes as many whole 32-byte blocks as can be
// read safely, 24 output bytes per block. Stops at the first invalid block
// and restores the skipped amounts so the scalar decoder can take over.
static inline void
dec_loop_avx2 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 45) {
		return;
	}

	// Process blocks of 32 bytes per round. Because 8 extra zero bytes are
	// written after the output, ensure that there will be at least 13
	// bytes of input data left to cover the gap. (11 data bytes and up to
	// two end-of-string markers.)
	size_t rounds = (*slen - 13) / 32;

	*slen -= rounds * 32;	// 32 bytes consumed per round
	*olen += rounds * 24;	// 24 bytes produced per round

	// Drain the rounds in partially unrolled batches of 8, 4, 2 or 1.
	// A batch ends early when the inner decoder rejects a block; in that
	// case — and after a final single round — leave the loop entirely.
	while (rounds > 0) {
		size_t batch;

		if (rounds >= 8) {
			batch = 8;
		} else if (rounds >= 4) {
			batch = 4;
		} else if (rounds >= 2) {
			batch = 2;
		} else {
			batch = 1;
		}

		// The inner function decrements `rounds` on each success.
		size_t done = 0;
		while (done < batch && dec_loop_avx2_inner(s, o, &rounds)) {
			done++;
		}
		if (done < batch || batch == 1) {
			break;
		}
	}

	// Adjust for any rounds that were skipped:
	*slen += rounds * 32;
	*olen -= rounds * 24;
}
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
static BASE64_FORCE_INLINE __m256i
|
||||
dec_reshuffle (const __m256i in)
|
||||
{
|
||||
// in, lower lane, bits, upper case are most significant bits, lower
|
||||
// case are least significant bits:
|
||||
// 00llllll 00kkkkLL 00jjKKKK 00JJJJJJ
|
||||
// 00iiiiii 00hhhhII 00ggHHHH 00GGGGGG
|
||||
// 00ffffff 00eeeeFF 00ddEEEE 00DDDDDD
|
||||
// 00cccccc 00bbbbCC 00aaBBBB 00AAAAAA
|
||||
|
||||
const __m256i merge_ab_and_bc = _mm256_maddubs_epi16(in, _mm256_set1_epi32(0x01400140));
|
||||
// 0000kkkk LLllllll 0000JJJJ JJjjKKKK
|
||||
// 0000hhhh IIiiiiii 0000GGGG GGggHHHH
|
||||
// 0000eeee FFffffff 0000DDDD DDddEEEE
|
||||
// 0000bbbb CCcccccc 0000AAAA AAaaBBBB
|
||||
|
||||
__m256i out = _mm256_madd_epi16(merge_ab_and_bc, _mm256_set1_epi32(0x00011000));
|
||||
// 00000000 JJJJJJjj KKKKkkkk LLllllll
|
||||
// 00000000 GGGGGGgg HHHHhhhh IIiiiiii
|
||||
// 00000000 DDDDDDdd EEEEeeee FFffffff
|
||||
// 00000000 AAAAAAaa BBBBbbbb CCcccccc
|
||||
|
||||
// Pack bytes together in each lane:
|
||||
out = _mm256_shuffle_epi8(out, _mm256_setr_epi8(
|
||||
2, 1, 0, 6, 5, 4, 10, 9, 8, 14, 13, 12, -1, -1, -1, -1,
|
||||
2, 1, 0, 6, 5, 4, 10, 9, 8, 14, 13, 12, -1, -1, -1, -1));
|
||||
// 00000000 00000000 00000000 00000000
|
||||
// LLllllll KKKKkkkk JJJJJJjj IIiiiiii
|
||||
// HHHHhhhh GGGGGGgg FFffffff EEEEeeee
|
||||
// DDDDDDdd CCcccccc BBBBbbbb AAAAAAaa
|
||||
|
||||
// Pack lanes:
|
||||
return _mm256_permutevar8x32_epi32(out, _mm256_setr_epi32(0, 1, 2, 4, 5, 6, -1, -1));
|
||||
}
|
||||
|
|
@ -0,0 +1,89 @@
|
|||
static BASE64_FORCE_INLINE void
|
||||
enc_loop_avx2_inner_first (const uint8_t **s, uint8_t **o)
|
||||
{
|
||||
// First load is done at s - 0 to not get a segfault:
|
||||
__m256i src = _mm256_loadu_si256((__m256i *) *s);
|
||||
|
||||
// Shift by 4 bytes, as required by enc_reshuffle:
|
||||
src = _mm256_permutevar8x32_epi32(src, _mm256_setr_epi32(0, 0, 1, 2, 3, 4, 5, 6));
|
||||
|
||||
// Reshuffle, translate, store:
|
||||
src = enc_reshuffle(src);
|
||||
src = enc_translate(src);
|
||||
_mm256_storeu_si256((__m256i *) *o, src);
|
||||
|
||||
// Subsequent loads will be done at s - 4, set pointer for next round:
|
||||
*s += 20;
|
||||
*o += 32;
|
||||
}
|
||||
|
||||
static BASE64_FORCE_INLINE void
|
||||
enc_loop_avx2_inner (const uint8_t **s, uint8_t **o)
|
||||
{
|
||||
// Load input:
|
||||
__m256i src = _mm256_loadu_si256((__m256i *) *s);
|
||||
|
||||
// Reshuffle, translate, store:
|
||||
src = enc_reshuffle(src);
|
||||
src = enc_translate(src);
|
||||
_mm256_storeu_si256((__m256i *) *o, src);
|
||||
|
||||
*s += 24;
|
||||
*o += 32;
|
||||
}
|
||||
|
||||
// Bulk AVX2 encoder loop (plain C): encodes as many whole 24-byte blocks as
// can be read safely, producing 32 base64 characters per block. Leftover
// bytes are handled by the caller's scalar tail.
static inline void
enc_loop_avx2 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 32) {
		return;
	}

	// Process blocks of 24 bytes at a time. Because blocks are loaded 32
	// bytes at a time at an offset of -4, ensure that there will be at
	// least 4 remaining bytes after the last round, so that the final
	// read will not pass beyond the bounds of the input buffer:
	size_t rounds = (*slen - 4) / 24;

	*slen -= rounds * 24;	// 24 bytes consumed per round
	*olen += rounds * 32;	// 32 bytes produced per round

	// The first round is special: it loads at offset 0 (not -4) so it
	// cannot underflow the buffer, and leaves *s lagging by 4 bytes.
	enc_loop_avx2_inner_first(s, o);

	// Run the remaining rounds, partially unrolled: batches of eight,
	// then at most one batch each of four, two, and one.
	size_t left = rounds - 1;

	while (left >= 8) {
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		left -= 8;
	}
	if (left >= 4) {
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		left -= 4;
	}
	if (left >= 2) {
		enc_loop_avx2_inner(s, o);
		enc_loop_avx2_inner(s, o);
		left -= 2;
	}
	if (left == 1) {
		enc_loop_avx2_inner(s, o);
	}

	// The source pointer has been lagging the consumed data by 4 bytes
	// since the first round; add the offset back:
	*s += 4;
}
|
||||
|
|
@ -0,0 +1,291 @@
|
|||
// Apologies in advance for combining the preprocessor with inline assembly,
|
||||
// two notoriously gnarly parts of C, but it was necessary to avoid a lot of
|
||||
// code repetition. The preprocessor is used to template large sections of
|
||||
// inline assembly that differ only in the registers used. If the code was
|
||||
// written out by hand, it would become very large and hard to audit.
|
||||
|
||||
// Generate a block of inline assembly that loads register R0 from memory. The
// offset at which the register is loaded is set by the given round and a
// constant offset. Each round reads 24 source bytes (vlddqu: the source is
// not alignment-guaranteed).
#define LOAD(R0, ROUND, OFFSET) \
	"vlddqu ("#ROUND" * 24 + "#OFFSET")(%[src]), %["R0"] \n\t"

// Generate a block of inline assembly that deinterleaves and shuffles register
// R0 using preloaded constants. Outputs in R0 and R1.
#define SHUF(R0, R1, R2) \
	"vpshufb %[lut0], %["R0"], %["R1"] \n\t" \
	"vpand %["R1"], %[msk0], %["R2"] \n\t" \
	"vpand %["R1"], %[msk2], %["R1"] \n\t" \
	"vpmulhuw %["R2"], %[msk1], %["R2"] \n\t" \
	"vpmullw %["R1"], %[msk3], %["R1"] \n\t" \
	"vpor %["R1"], %["R2"], %["R1"] \n\t"

// Generate a block of inline assembly that takes R0 and R1 and translates
// their contents to the base64 alphabet, using preloaded constants.
#define TRAN(R0, R1, R2) \
	"vpsubusb %[n51], %["R1"], %["R0"] \n\t" \
	"vpcmpgtb %[n25], %["R1"], %["R2"] \n\t" \
	"vpsubb %["R2"], %["R0"], %["R0"] \n\t" \
	"vpshufb %["R0"], %[lut1], %["R2"] \n\t" \
	"vpaddb %["R1"], %["R2"], %["R0"] \n\t"

// Generate a block of inline assembly that stores the given register R0 at an
// offset set by the given round. Each round writes 32 output bytes.
#define STOR(R0, ROUND) \
	"vmovdqu %["R0"], ("#ROUND" * 32)(%[dst]) \n\t"

// Generate a block of inline assembly that generates a single self-contained
// encoder round: fetch the data, process it, and store the result. Then update
// the source and destination pointers. The load is done at -4 to match the
// lagging source pointer established by the special first round.
#define ROUND() \
	LOAD("a", 0, -4) \
	SHUF("a", "b", "c") \
	TRAN("a", "b", "c") \
	STOR("a", 0) \
	"add $24, %[src] \n\t" \
	"add $32, %[dst] \n\t"

// Define a macro that initiates a three-way interleaved encoding round by
// preloading registers a, b and c from memory.
// The register graph shows which registers are in use during each step, and
// is a visual aid for choosing registers for that step. Symbol index:
//
//  + indicates that a register is loaded by that step.
//  | indicates that a register is in use and must not be touched.
//  - indicates that a register is decommissioned by that step.
//  x indicates that a register is used as a temporary by that step.
//  V indicates that a register is an input or output to the macro.
//
#define ROUND_3_INIT()			/* a b c d e f */ \
	LOAD("a", 0, -4)		/* +           */ \
	SHUF("a", "d", "e")		/* |     + x   */ \
	LOAD("b", 1, -4)		/* | +   |     */ \
	TRAN("a", "d", "e")		/* | |   | - x */ \
	LOAD("c", 2, -4)		/* V V V       */

// Define a macro that translates, shuffles and stores the input registers A, B
// and C, and preloads registers D, E and F for the next round.
// This macro can be arbitrarily daisy-chained by feeding output registers D, E
// and F back into the next round as input registers A, B and C. The macro
// carefully interleaves memory operations with data operations for optimal
// pipelined performance.

#define ROUND_3(ROUND, A,B,C,D,E,F)	/* A B C D E F */ \
	LOAD(D, (ROUND + 3), -4)	/* V V V +     */ \
	SHUF(B, E, F)			/* | | | | + x */ \
	STOR(A, (ROUND + 0))		/* - | | | |   */ \
	TRAN(B, E, F)			/*   | | | - x */ \
	LOAD(E, (ROUND + 4), -4)	/*   | | | +   */ \
	SHUF(C, A, F)			/* + | | | | x */ \
	STOR(B, (ROUND + 1))		/* | - | | |   */ \
	TRAN(C, A, F)			/* | - | | | x */ \
	LOAD(F, (ROUND + 5), -4)	/* |   | | | + */ \
	SHUF(D, A, B)			/* + x | | | | */ \
	STOR(C, (ROUND + 2))		/* |   - | | | */ \
	TRAN(D, A, B)			/* - x   V V V */

// Define a macro that terminates a ROUND_3 macro by taking pre-loaded
// registers D, E and F, and translating, shuffling and storing them.
#define ROUND_3_END(ROUND, A,B,C,D,E,F)	/* A B C D E F */ \
	SHUF(E, A, B)			/* + x   V V V */ \
	STOR(D, (ROUND + 3))		/* |     - | | */ \
	TRAN(E, A, B)			/* - x     | | */ \
	SHUF(F, C, D)			/*     + x | | */ \
	STOR(E, (ROUND + 4))		/*     |   - | */ \
	TRAN(F, C, D)			/*     - x   | */ \
	STOR(F, (ROUND + 5))		/*           - */

// Define a type A round. Inputs are a, b, and c, outputs are d, e, and f.
#define ROUND_3_A(ROUND) \
	ROUND_3(ROUND, "a", "b", "c", "d", "e", "f")

// Define a type B round. Inputs and outputs are swapped with regard to type A.
#define ROUND_3_B(ROUND) \
	ROUND_3(ROUND, "d", "e", "f", "a", "b", "c")

// Terminating macro for a type A round.
#define ROUND_3_A_LAST(ROUND) \
	ROUND_3_A(ROUND) \
	ROUND_3_END(ROUND, "a", "b", "c", "d", "e", "f")

// Terminating macro for a type B round.
#define ROUND_3_B_LAST(ROUND) \
	ROUND_3_B(ROUND) \
	ROUND_3_END(ROUND, "d", "e", "f", "a", "b", "c")

// Suppress clang's warning that the literal string in the asm statement is
// overlong (longer than the ISO-mandated minimum size of 4095 bytes for C99
// compilers). It may be true, but the goal here is not C99 portability.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Woverlength-strings"
||||
|
||||
static inline void
|
||||
enc_loop_avx2 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
|
||||
{
|
||||
// For a clearer explanation of the algorithm used by this function,
|
||||
// please refer to the plain (not inline assembly) implementation. This
|
||||
// function follows the same basic logic.
|
||||
|
||||
if (*slen < 32) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Process blocks of 24 bytes at a time. Because blocks are loaded 32
|
||||
	// bytes at a time at an offset of -4, ensure that there will be at least
|
||||
// 4 remaining bytes after the last round, so that the final read will
|
||||
// not pass beyond the bounds of the input buffer.
|
||||
size_t rounds = (*slen - 4) / 24;
|
||||
|
||||
*slen -= rounds * 24; // 24 bytes consumed per round
|
||||
*olen += rounds * 32; // 32 bytes produced per round
|
||||
|
||||
// Pre-decrement the number of rounds to get the number of rounds
|
||||
// *after* the first round, which is handled as a special case.
|
||||
rounds--;
|
||||
|
||||
// Number of times to go through the 36x loop.
|
||||
size_t loops = rounds / 36;
|
||||
|
||||
// Number of rounds remaining after the 36x loop.
|
||||
rounds %= 36;
|
||||
|
||||
// Lookup tables.
|
||||
const __m256i lut0 = _mm256_set_epi8(
|
||||
10, 11, 9, 10, 7, 8, 6, 7, 4, 5, 3, 4, 1, 2, 0, 1,
|
||||
14, 15, 13, 14, 11, 12, 10, 11, 8, 9, 7, 8, 5, 6, 4, 5);
|
||||
|
||||
const __m256i lut1 = _mm256_setr_epi8(
|
||||
65, 71, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -19, -16, 0, 0,
|
||||
65, 71, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -19, -16, 0, 0);
|
||||
|
||||
// Temporary registers.
|
||||
__m256i a, b, c, d, e;
|
||||
|
||||
// Temporary register f doubles as the shift mask for the first round.
|
||||
__m256i f = _mm256_setr_epi32(0, 0, 1, 2, 3, 4, 5, 6);
|
||||
|
||||
__asm__ volatile (
|
||||
|
||||
// The first loop iteration requires special handling to ensure
|
||||
// that the read, which is normally done at an offset of -4,
|
||||
// does not underflow the buffer. Load the buffer at an offset
|
||||
// of 0 and permute the input to achieve the same effect.
|
||||
LOAD("a", 0, 0)
|
||||
"vpermd %[a], %[f], %[a] \n\t"
|
||||
|
||||
// Perform the standard shuffling and translation steps.
|
||||
SHUF("a", "b", "c")
|
||||
TRAN("a", "b", "c")
|
||||
|
||||
// Store the result and increment the source and dest pointers.
|
||||
"vmovdqu %[a], (%[dst]) \n\t"
|
||||
"add $24, %[src] \n\t"
|
||||
"add $32, %[dst] \n\t"
|
||||
|
||||
// If there are 36 rounds or more, enter a 36x unrolled loop of
|
||||
// interleaved encoding rounds. The rounds interleave memory
|
||||
// operations (load/store) with data operations (table lookups,
|
||||
// etc) to maximize pipeline throughput.
|
||||
" test %[loops], %[loops] \n\t"
|
||||
" jz 18f \n\t"
|
||||
" jmp 36f \n\t"
|
||||
" \n\t"
|
||||
".balign 64 \n\t"
|
||||
"36: " ROUND_3_INIT()
|
||||
" " ROUND_3_A( 0)
|
||||
" " ROUND_3_B( 3)
|
||||
" " ROUND_3_A( 6)
|
||||
" " ROUND_3_B( 9)
|
||||
" " ROUND_3_A(12)
|
||||
" " ROUND_3_B(15)
|
||||
" " ROUND_3_A(18)
|
||||
" " ROUND_3_B(21)
|
||||
" " ROUND_3_A(24)
|
||||
" " ROUND_3_B(27)
|
||||
" " ROUND_3_A_LAST(30)
|
||||
" add $(24 * 36), %[src] \n\t"
|
||||
" add $(32 * 36), %[dst] \n\t"
|
||||
" dec %[loops] \n\t"
|
||||
" jnz 36b \n\t"
|
||||
|
||||
// Enter an 18x unrolled loop for rounds of 18 or more.
|
||||
"18: cmp $18, %[rounds] \n\t"
|
||||
" jl 9f \n\t"
|
||||
" " ROUND_3_INIT()
|
||||
" " ROUND_3_A(0)
|
||||
" " ROUND_3_B(3)
|
||||
" " ROUND_3_A(6)
|
||||
" " ROUND_3_B(9)
|
||||
" " ROUND_3_A_LAST(12)
|
||||
" sub $18, %[rounds] \n\t"
|
||||
" add $(24 * 18), %[src] \n\t"
|
||||
" add $(32 * 18), %[dst] \n\t"
|
||||
|
||||
// Enter a 9x unrolled loop for rounds of 9 or more.
|
||||
"9: cmp $9, %[rounds] \n\t"
|
||||
" jl 6f \n\t"
|
||||
" " ROUND_3_INIT()
|
||||
" " ROUND_3_A(0)
|
||||
" " ROUND_3_B_LAST(3)
|
||||
" sub $9, %[rounds] \n\t"
|
||||
" add $(24 * 9), %[src] \n\t"
|
||||
" add $(32 * 9), %[dst] \n\t"
|
||||
|
||||
// Enter a 6x unrolled loop for rounds of 6 or more.
|
||||
"6: cmp $6, %[rounds] \n\t"
|
||||
" jl 55f \n\t"
|
||||
" " ROUND_3_INIT()
|
||||
" " ROUND_3_A_LAST(0)
|
||||
" sub $6, %[rounds] \n\t"
|
||||
" add $(24 * 6), %[src] \n\t"
|
||||
" add $(32 * 6), %[dst] \n\t"
|
||||
|
||||
// Dispatch the remaining rounds 0..5.
|
||||
"55: cmp $3, %[rounds] \n\t"
|
||||
" jg 45f \n\t"
|
||||
" je 3f \n\t"
|
||||
" cmp $1, %[rounds] \n\t"
|
||||
" jg 2f \n\t"
|
||||
" je 1f \n\t"
|
||||
" jmp 0f \n\t"
|
||||
|
||||
"45: cmp $4, %[rounds] \n\t"
|
||||
" je 4f \n\t"
|
||||
|
||||
// Block of non-interlaced encoding rounds, which can each
|
||||
// individually be jumped to. Rounds fall through to the next.
|
||||
"5: " ROUND()
|
||||
"4: " ROUND()
|
||||
"3: " ROUND()
|
||||
"2: " ROUND()
|
||||
"1: " ROUND()
|
||||
"0: \n\t"
|
||||
|
||||
// Outputs (modified).
|
||||
: [rounds] "+r" (rounds),
|
||||
[loops] "+r" (loops),
|
||||
[src] "+r" (*s),
|
||||
[dst] "+r" (*o),
|
||||
[a] "=&x" (a),
|
||||
[b] "=&x" (b),
|
||||
[c] "=&x" (c),
|
||||
[d] "=&x" (d),
|
||||
[e] "=&x" (e),
|
||||
[f] "+x" (f)
|
||||
|
||||
// Inputs (not modified).
|
||||
: [lut0] "x" (lut0),
|
||||
[lut1] "x" (lut1),
|
||||
[msk0] "x" (_mm256_set1_epi32(0x0FC0FC00)),
|
||||
[msk1] "x" (_mm256_set1_epi32(0x04000040)),
|
||||
[msk2] "x" (_mm256_set1_epi32(0x003F03F0)),
|
||||
[msk3] "x" (_mm256_set1_epi32(0x01000010)),
|
||||
[n51] "x" (_mm256_set1_epi8(51)),
|
||||
[n25] "x" (_mm256_set1_epi8(25))
|
||||
|
||||
// Clobbers.
|
||||
: "cc", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
// Reshuffle a 256-bit register holding 24 payload bytes (loaded with a
// 4-byte shift, see the diagram below) so that every output byte holds
// one 6-bit group in its low bits, ready for translation to ASCII.
static BASE64_FORCE_INLINE __m256i
enc_reshuffle (const __m256i input)
{
	// Translation of the SSSE3 reshuffling algorithm to AVX2. This one
	// works with shifted (4 bytes) input in order to be able to work
	// efficiently in the two 128-bit lanes.

	// Input, bytes MSB to LSB:
	// 0 0 0 0 x w v u t s r q p o n m
	// l k j i h g f e d c b a 0 0 0 0
	const __m256i in = _mm256_shuffle_epi8(input, _mm256_set_epi8(
		10, 11,  9, 10,
		 7,  8,  6,  7,
		 4,  5,  3,  4,
		 1,  2,  0,  1,

		14, 15, 13, 14,
		11, 12, 10, 11,
		 8,  9,  7,  8,
		 5,  6,  4,  5));
	// in, bytes MSB to LSB:
	// w x v w
	// t u s t
	// q r p q
	// n o m n
	// k l j k
	// h i g h
	// e f d e
	// b c a b

	const __m256i t0 = _mm256_and_si256(in, _mm256_set1_epi32(0x0FC0FC00));
	// bits, upper case are most significant bits, lower case are least
	// significant bits.
	// 0000wwww XX000000 VVVVVV00 00000000
	// 0000tttt UU000000 SSSSSS00 00000000
	// 0000qqqq RR000000 PPPPPP00 00000000
	// 0000nnnn OO000000 MMMMMM00 00000000
	// 0000kkkk LL000000 JJJJJJ00 00000000
	// 0000hhhh II000000 GGGGGG00 00000000
	// 0000eeee FF000000 DDDDDD00 00000000
	// 0000bbbb CC000000 AAAAAA00 00000000

	// Multiply-high shifts each 16-bit half right into place:
	const __m256i t1 = _mm256_mulhi_epu16(t0, _mm256_set1_epi32(0x04000040));
	// 00000000 00wwwwXX 00000000 00VVVVVV
	// 00000000 00ttttUU 00000000 00SSSSSS
	// 00000000 00qqqqRR 00000000 00PPPPPP
	// 00000000 00nnnnOO 00000000 00MMMMMM
	// 00000000 00kkkkLL 00000000 00JJJJJJ
	// 00000000 00hhhhII 00000000 00GGGGGG
	// 00000000 00eeeeFF 00000000 00DDDDDD
	// 00000000 00bbbbCC 00000000 00AAAAAA

	const __m256i t2 = _mm256_and_si256(in, _mm256_set1_epi32(0x003F03F0));
	// 00000000 00xxxxxx 000000vv WWWW0000
	// 00000000 00uuuuuu 000000ss TTTT0000
	// 00000000 00rrrrrr 000000pp QQQQ0000
	// 00000000 00oooooo 000000mm NNNN0000
	// 00000000 00llllll 000000jj KKKK0000
	// 00000000 00iiiiii 000000gg HHHH0000
	// 00000000 00ffffff 000000dd EEEE0000
	// 00000000 00cccccc 000000aa BBBB0000

	// Multiply-low shifts each 16-bit half left into place:
	const __m256i t3 = _mm256_mullo_epi16(t2, _mm256_set1_epi32(0x01000010));
	// 00xxxxxx 00000000 00vvWWWW 00000000
	// 00uuuuuu 00000000 00ssTTTT 00000000
	// 00rrrrrr 00000000 00ppQQQQ 00000000
	// 00oooooo 00000000 00mmNNNN 00000000
	// 00llllll 00000000 00jjKKKK 00000000
	// 00iiiiii 00000000 00ggHHHH 00000000
	// 00ffffff 00000000 00ddEEEE 00000000
	// 00cccccc 00000000 00aaBBBB 00000000

	return _mm256_or_si256(t1, t3);
	// 00xxxxxx 00wwwwXX 00vvWWWW 00VVVVVV
	// 00uuuuuu 00ttttUU 00ssTTTT 00SSSSSS
	// 00rrrrrr 00qqqqRR 00ppQQQQ 00PPPPPP
	// 00oooooo 00nnnnOO 00mmNNNN 00MMMMMM
	// 00llllll 00kkkkLL 00jjKKKK 00JJJJJJ
	// 00iiiiii 00hhhhII 00ggHHHH 00GGGGGG
	// 00ffffff 00eeeeFF 00ddEEEE 00DDDDDD
	// 00cccccc 00bbbbCC 00aaBBBB 00AAAAAA
}
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
static BASE64_FORCE_INLINE __m256i
|
||||
enc_translate (const __m256i in)
|
||||
{
|
||||
// A lookup table containing the absolute offsets for all ranges:
|
||||
const __m256i lut = _mm256_setr_epi8(
|
||||
65, 71, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -19, -16, 0, 0,
|
||||
65, 71, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -19, -16, 0, 0);
|
||||
|
||||
// Translate values 0..63 to the Base64 alphabet. There are five sets:
|
||||
// # From To Abs Index Characters
|
||||
// 0 [0..25] [65..90] +65 0 ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
// 1 [26..51] [97..122] +71 1 abcdefghijklmnopqrstuvwxyz
|
||||
// 2 [52..61] [48..57] -4 [2..11] 0123456789
|
||||
// 3 [62] [43] -19 12 +
|
||||
// 4 [63] [47] -16 13 /
|
||||
|
||||
// Create LUT indices from the input. The index for range #0 is right,
|
||||
// others are 1 less than expected:
|
||||
__m256i indices = _mm256_subs_epu8(in, _mm256_set1_epi8(51));
|
||||
|
||||
// mask is 0xFF (-1) for range #[1..4] and 0x00 for range #0:
|
||||
const __m256i mask = _mm256_cmpgt_epi8(in, _mm256_set1_epi8(25));
|
||||
|
||||
// Subtract -1, so add 1 to indices for range #[1..4]. All indices are
|
||||
// now correct:
|
||||
indices = _mm256_sub_epi8(indices, mask);
|
||||
|
||||
// Add offsets to input values:
|
||||
return _mm256_add_epi8(in, _mm256_shuffle_epi8(lut, indices));
|
||||
}
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "../../tables/tables.h"
|
||||
#include "../../codecs.h"
|
||||
#include "config.h"
|
||||
#include "../../env.h"
|
||||
|
||||
#if HAVE_AVX512
|
||||
#if defined(__clang__)
|
||||
#pragma clang attribute push (__attribute__((target("avx512vbmi"))), apply_to=function)
|
||||
#else
|
||||
#pragma GCC target("avx512vbmi")
|
||||
#endif
|
||||
#include <immintrin.h>
|
||||
|
||||
#include "../avx2/dec_reshuffle.c"
|
||||
#include "../avx2/dec_loop.c"
|
||||
#include "enc_reshuffle_translate.c"
|
||||
#include "enc_loop.c"
|
||||
|
||||
#endif // HAVE_AVX512
|
||||
|
||||
// Public AVX-512 streaming encoder entry point. When the translation
// unit was built without AVX-512 support, delegate to the stub.
void
base64_stream_encode_avx512 BASE64_ENC_PARAMS
{
#if HAVE_AVX512
	// Shared prologue: unpacks the codec state into locals s/slen/o/olen.
#include "../generic/enc_head.c"
	enc_loop_avx512(&s, &slen, &o, &olen);
	// Shared epilogue: encodes the remaining 0..2 bytes and saves state.
#include "../generic/enc_tail.c"
#else
	base64_enc_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
||||
// Reuse AVX2 decoding. Not supporting AVX512 at present
|
||||
int
|
||||
base64_stream_decode_avx512 BASE64_DEC_PARAMS
|
||||
{
|
||||
#if HAVE_AVX512
|
||||
#include "../generic/dec_head.c"
|
||||
dec_loop_avx2(&s, &slen, &o, &olen);
|
||||
#include "../generic/dec_tail.c"
|
||||
#if defined(__clang__)
|
||||
#pragma clang attribute pop
|
||||
#endif
|
||||
#else
|
||||
return base64_dec_stub(state, src, srclen, out, outlen);
|
||||
#endif
|
||||
}
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
static BASE64_FORCE_INLINE void
|
||||
enc_loop_avx512_inner (const uint8_t **s, uint8_t **o)
|
||||
{
|
||||
// Load input.
|
||||
__m512i src = _mm512_loadu_si512((__m512i *) *s);
|
||||
|
||||
// Reshuffle, translate, store.
|
||||
src = enc_reshuffle_translate(src);
|
||||
_mm512_storeu_si512((__m512i *) *o, src);
|
||||
|
||||
*s += 48;
|
||||
*o += 64;
|
||||
}
|
||||
|
||||
// Bulk AVX-512 encode loop: consumes as many whole 48-byte blocks from
// the input as can be read safely, updating all pointers and lengths.
static inline void
enc_loop_avx512 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 64) {
		return;
	}

	// Process blocks of 48 bytes at a time. Because blocks are loaded 64
	// bytes at a time, ensure that there will be at least 24 remaining
	// bytes after the last round, so that the final read will not pass
	// beyond the bounds of the input buffer.
	size_t rounds = (*slen - 24) / 48;

	*slen -= rounds * 48;	// 48 bytes consumed per round
	*olen += rounds * 64;	// 64 bytes produced per round

	// Drain eight rounds per iteration while possible, then finish with
	// at most one batch each of four, two and one round(s):
	while (rounds >= 8) {
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		rounds -= 8;
	}
	if (rounds >= 4) {
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		rounds -= 4;
	}
	if (rounds >= 2) {
		enc_loop_avx512_inner(s, o);
		enc_loop_avx512_inner(s, o);
		rounds -= 2;
	}
	if (rounds == 1) {
		enc_loop_avx512_inner(s, o);
	}
}
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
// AVX512 algorithm is based on permutevar and multishift. The code is based on
// https://github.com/WojciechMula/base64simd which is under BSD-2 license.

// Combined reshuffle-and-translate step: takes 48 payload bytes in a
// 512-bit register (the byte permutation below reads indices 0..0x2f
// only, so the upper 16 bytes are ignored) and returns 64 Base64 ASCII
// characters.
static BASE64_FORCE_INLINE __m512i
enc_reshuffle_translate (const __m512i input)
{
	// 32-bit input
	// [ 0  0  0  0  0  0  0  0|c1 c0 d5 d4 d3 d2 d1 d0|
	//  b3 b2 b1 b0 c5 c4 c3 c2|a5 a4 a3 a2 a1 a0 b5 b4]
	// output order  [1, 2, 0, 1]
	// [b3 b2 b1 b0 c5 c4 c3 c2|c1 c0 d5 d4 d3 d2 d1 d0|
	//  a5 a4 a3 a2 a1 a0 b5 b4|b3 b2 b1 b0 c3 c2 c1 c0]

	const __m512i shuffle_input = _mm512_setr_epi32(
		0x01020001, 0x04050304, 0x07080607, 0x0a0b090a,
		0x0d0e0c0d, 0x10110f10, 0x13141213, 0x16171516,
		0x191a1819, 0x1c1d1b1c, 0x1f201e1f, 0x22232122,
		0x25262425, 0x28292728, 0x2b2c2a2b, 0x2e2f2d2e);

	// Reorder bytes
	// [b3 b2 b1 b0 c5 c4 c3 c2|c1 c0 d5 d4 d3 d2 d1 d0|
	//  a5 a4 a3 a2 a1 a0 b5 b4|b3 b2 b1 b0 c3 c2 c1 c0]
	const __m512i in = _mm512_permutexvar_epi8(shuffle_input, input);

	// After multishift a single 32-bit lane has following layout
	// [c1 c0 d5 d4 d3 d2 d1 d0|b1 b0 c5 c4 c3 c2 c1 c0|
	//  a1 a0 b5 b4 b3 b2 b1 b0|d1 d0 a5 a4 a3 a2 a1 a0]
	// (a = [10:17], b = [4:11], c = [22:27], d = [16:21])

	// Per-byte shift amounts: 48, 54, 36, 42, 16, 22, 4, 10
	const __m512i shifts = _mm512_set1_epi64(0x3036242a1016040alu);
	__m512i shuffled_in = _mm512_multishift_epi64_epi8(shifts, in);

	// Translate immediately after reshuffled.
	const __m512i lookup = _mm512_loadu_si512(base64_table_enc_6bit);

	// Translation 6-bit values to ASCII.
	return _mm512_permutexvar_epi8(shuffled_in, lookup);
}
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
// Decode one block of 4 Base64 characters into 3 output bytes using the
// precomposed 32-bit lookup tables. Returns 1 on success and advances
// all pointers. Returns 0 without advancing anything when an input byte
// is invalid, so the caller's scalar tail code can re-examine and report
// the offending input.
static BASE64_FORCE_INLINE int
dec_loop_generic_32_inner (const uint8_t **s, uint8_t **o, size_t *rounds)
{
	// Each table maps one input character to its 6-bit value pre-shifted
	// into place; OR-ing the four lookups composes the 24-bit result
	// (plus an error flag bit for invalid characters):
	const uint32_t str
		= base64_table_dec_32bit_d0[(*s)[0]]
		| base64_table_dec_32bit_d1[(*s)[1]]
		| base64_table_dec_32bit_d2[(*s)[2]]
		| base64_table_dec_32bit_d3[(*s)[3]];

#if BASE64_LITTLE_ENDIAN

	// LUTs for little-endian set MSB in case of invalid character:
	if (str & UINT32_C(0x80000000)) {
		return 0;
	}
#else
	// LUTs for big-endian set LSB in case of invalid character:
	if (str & UINT32_C(1)) {
		return 0;
	}
#endif
	// Store the output. Writes all 4 bytes of `str` though only 3 are
	// payload; the caller reserves the extra byte of output slack:
	memcpy(*o, &str, sizeof (str));

	*s += 4;
	*o += 3;
	*rounds -= 1;

	return 1;
}
|
||||
|
||||
// Bulk scalar decode loop: consumes whole 4-character blocks until the
// input is nearly exhausted or an invalid character is met. On exit,
// slen/olen reflect only the rounds actually completed.
static inline void
dec_loop_generic_32 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 8) {
		return;
	}

	// Process blocks of 4 bytes per round. Because one extra zero byte is
	// written after the output, ensure that there will be at least 4 bytes
	// of input data left to cover the gap. (Two data bytes and up to two
	// end-of-string markers.)
	size_t rounds = (*slen - 4) / 4;

	*slen -= rounds * 4;	// 4 bytes consumed per round
	*olen += rounds * 3;	// 3 bytes produced per round

	// Drain the remaining rounds in batches of 8, 4, 2 and finally 1;
	// stop at the first invalid input so the tail code can report it:
	while (rounds > 0) {
		const size_t batch
			= (rounds >= 8) ? 8
			: (rounds >= 4) ? 4
			: (rounds >= 2) ? 2
			: 1;

		int ok = 1;
		for (size_t i = 0; ok && i < batch; i++) {
			ok = dec_loop_generic_32_inner(s, o, &rounds);
		}
		if (!ok || batch == 1) {
			break;
		}
	}

	// Adjust for any rounds that were skipped:
	*slen += rounds * 4;
	*olen -= rounds * 3;
}
|
||||
|
|
@ -0,0 +1,73 @@
|
|||
static BASE64_FORCE_INLINE void
|
||||
enc_loop_generic_32_inner (const uint8_t **s, uint8_t **o)
|
||||
{
|
||||
uint32_t src;
|
||||
|
||||
// Load input:
|
||||
memcpy(&src, *s, sizeof (src));
|
||||
|
||||
// Reorder to 32-bit big-endian, if not already in that format. The
|
||||
// workset must be in big-endian, otherwise the shifted bits do not
|
||||
// carry over properly among adjacent bytes:
|
||||
src = BASE64_HTOBE32(src);
|
||||
|
||||
// Two indices for the 12-bit lookup table:
|
||||
const size_t index0 = (src >> 20) & 0xFFFU;
|
||||
const size_t index1 = (src >> 8) & 0xFFFU;
|
||||
|
||||
// Table lookup and store:
|
||||
memcpy(*o + 0, base64_table_enc_12bit + index0, 2);
|
||||
memcpy(*o + 2, base64_table_enc_12bit + index1, 2);
|
||||
|
||||
*s += 3;
|
||||
*o += 4;
|
||||
}
|
||||
|
||||
// Bulk 32-bit scalar encode loop: consumes as many whole 3-byte blocks
// as can be read safely, updating all pointers and lengths.
static inline void
enc_loop_generic_32 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 4) {
		return;
	}

	// Process blocks of 3 bytes at a time. Because blocks are loaded 4
	// bytes at a time, ensure that there will be at least one remaining
	// byte after the last round, so that the final read will not pass
	// beyond the bounds of the input buffer:
	size_t rounds = (*slen - 1) / 3;

	*slen -= rounds * 3;	// 3 bytes consumed per round
	*olen += rounds * 4;	// 4 bytes produced per round

	// Drain eight rounds per iteration while possible, then finish with
	// at most one batch each of four, two and one round(s):
	while (rounds >= 8) {
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		rounds -= 8;
	}
	if (rounds >= 4) {
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		rounds -= 4;
	}
	if (rounds >= 2) {
		enc_loop_generic_32_inner(s, o);
		enc_loop_generic_32_inner(s, o);
		rounds -= 2;
	}
	if (rounds == 1) {
		enc_loop_generic_32_inner(s, o);
	}
}
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
// Encode one block of 6 input bytes into 8 output characters via the
// 12-bit lookup table. Reads 8 bytes from *s (two bytes of overread,
// which the caller accounts for).
static BASE64_FORCE_INLINE void
enc_loop_generic_64_inner (const uint8_t **s, uint8_t **o)
{
	uint64_t src;

	// Load input:
	memcpy(&src, *s, sizeof (src));

	// Reorder to 64-bit big-endian, if not already in that format. The
	// workset must be in big-endian, otherwise the shifted bits do not
	// carry over properly among adjacent bytes:
	src = BASE64_HTOBE64(src);

	// Four indices for the 12-bit lookup table, taken from the top 48
	// bits in 12-bit slices:
	const size_t index0 = (src >> 52) & 0xFFFU;
	const size_t index1 = (src >> 40) & 0xFFFU;
	const size_t index2 = (src >> 28) & 0xFFFU;
	const size_t index3 = (src >> 16) & 0xFFFU;

	// Table lookup and store; each entry is two ASCII characters:
	memcpy(*o + 0, base64_table_enc_12bit + index0, 2);
	memcpy(*o + 2, base64_table_enc_12bit + index1, 2);
	memcpy(*o + 4, base64_table_enc_12bit + index2, 2);
	memcpy(*o + 6, base64_table_enc_12bit + index3, 2);

	*s += 6;
	*o += 8;
}
|
||||
|
||||
// Bulk 64-bit scalar encode loop: consumes as many whole 6-byte blocks
// as can be read safely, updating all pointers and lengths.
static inline void
enc_loop_generic_64 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 8) {
		return;
	}

	// Process blocks of 6 bytes at a time. Because blocks are loaded 8
	// bytes at a time, ensure that there will be at least 2 remaining
	// bytes after the last round, so that the final read will not pass
	// beyond the bounds of the input buffer:
	size_t rounds = (*slen - 2) / 6;

	*slen -= rounds * 6;	// 6 bytes consumed per round
	*olen += rounds * 8;	// 8 bytes produced per round

	// Drain eight rounds per iteration while possible, then finish with
	// at most one batch each of four, two and one round(s):
	while (rounds >= 8) {
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		rounds -= 8;
	}
	if (rounds >= 4) {
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		rounds -= 4;
	}
	if (rounds >= 2) {
		enc_loop_generic_64_inner(s, o);
		enc_loop_generic_64_inner(s, o);
		rounds -= 2;
	}
	if (rounds == 1) {
		enc_loop_generic_64_inner(s, o);
	}
}
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "../../tables/tables.h"
|
||||
#include "../../codecs.h"
|
||||
#include "config.h"
|
||||
#include "../../env.h"
|
||||
|
||||
#if BASE64_WORDSIZE == 32
|
||||
# include "32/enc_loop.c"
|
||||
#elif BASE64_WORDSIZE == 64
|
||||
# include "64/enc_loop.c"
|
||||
#endif
|
||||
|
||||
#if BASE64_WORDSIZE >= 32
|
||||
# include "32/dec_loop.c"
|
||||
#endif
|
||||
|
||||
// Plain (non-SIMD) streaming encoder entry point. Picks the widest
// generic word-at-a-time loop the build's word size allows.
void
base64_stream_encode_plain BASE64_ENC_PARAMS
{
	// Shared prologue: unpacks the codec state into locals s/slen/o/olen.
#include "enc_head.c"
#if BASE64_WORDSIZE == 32
	enc_loop_generic_32(&s, &slen, &o, &olen);
#elif BASE64_WORDSIZE == 64
	enc_loop_generic_64(&s, &slen, &o, &olen);
#endif
	// Shared epilogue: encodes the remaining 0..2 bytes and saves state.
#include "enc_tail.c"
}
|
||||
|
||||
// Plain (non-SIMD) streaming decoder entry point. The return statement
// lives in the included dec_tail.c fragment.
int
base64_stream_decode_plain BASE64_DEC_PARAMS
{
	// Shared prologue: unpacks the codec state into locals s/slen/o/olen.
#include "dec_head.c"
#if BASE64_WORDSIZE >= 32
	dec_loop_generic_32(&s, &slen, &o, &olen);
#endif
	// Shared epilogue: decodes the tail bytewise and returns the result.
#include "dec_tail.c"
}
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
// Decoder prologue, textually #included into each arch's stream-decode
// function. Expects the BASE64_DEC_PARAMS names (state, src, srclen,
// out, outlen) in scope. Opens a Duff's-device switch that the matching
// dec_tail.c fragment closes.
int ret = 0;
const uint8_t *s = (const uint8_t *) src;
uint8_t *o = (uint8_t *) out;
uint8_t q;

// Use local temporaries to avoid cache thrashing:
size_t olen = 0;
size_t slen = srclen;
struct base64_state st;
st.eof = state->eof;
st.bytes = state->bytes;
st.carry = state->carry;

// If we previously saw an EOF or an invalid character, bail out:
if (st.eof) {
	*outlen = 0;
	ret = 0;
	// If there was a trailing '=' to check, check it:
	if (slen && (st.eof == BASE64_AEOF)) {
		state->bytes = 0;
		state->eof = BASE64_EOF;
		// 254 is the decode-table code for '='; valid only if it is
		// the sole remaining input byte:
		ret = ((base64_table_dec_8bit[*s++] == 254) && (slen == 1)) ? 1 : 0;
	}
	return ret;
}

// Turn four 6-bit numbers into three bytes:
// out[0] = 11111122
// out[1] = 22223333
// out[2] = 33444444

// Duff's device again:
switch (st.bytes)
{
	for (;;)
	{
	case 0:
|
||||
|
|
@ -0,0 +1,91 @@
|
|||
		// Decoder epilogue, textually #included after the fast loop.
		// Continues the Duff's-device switch opened in dec_head.c:
		// decodes remaining input bytewise, handles '=' padding and
		// EOF, then writes back the state. Decode-table codes: 254 is
		// '=', 255 is an invalid character.
		if (slen-- == 0) {
			ret = 1;
			break;
		}
		if ((q = base64_table_dec_8bit[*s++]) >= 254) {
			st.eof = BASE64_EOF;
			// Treat character '=' as invalid for byte 0:
			break;
		}
		st.carry = q << 2;
		st.bytes++;

		// Deliberate fallthrough:
		BASE64_FALLTHROUGH

	case 1:	if (slen-- == 0) {
			ret = 1;
			break;
		}
		if ((q = base64_table_dec_8bit[*s++]) >= 254) {
			st.eof = BASE64_EOF;
			// Treat character '=' as invalid for byte 1:
			break;
		}
		*o++ = st.carry | (q >> 4);
		st.carry = q << 4;
		st.bytes++;
		olen++;

		// Deliberate fallthrough:
		BASE64_FALLTHROUGH

	case 2:	if (slen-- == 0) {
			ret = 1;
			break;
		}
		if ((q = base64_table_dec_8bit[*s++]) >= 254) {
			st.bytes++;
			// When q == 254, the input char is '='.
			// Check if next byte is also '=':
			if (q == 254) {
				if (slen-- != 0) {
					st.bytes = 0;
					// EOF:
					st.eof = BASE64_EOF;
					q = base64_table_dec_8bit[*s++];
					// Valid only if the second '=' ends the input:
					ret = ((q == 254) && (slen == 0)) ? 1 : 0;
					break;
				}
				else {
					// Almost EOF: a single trailing '=' seen at the
					// end of this chunk; the next call resolves it.
					st.eof = BASE64_AEOF;
					ret = 1;
					break;
				}
			}
			// If we get here, there was an error:
			break;
		}
		*o++ = st.carry | (q >> 2);
		st.carry = q << 6;
		st.bytes++;
		olen++;

		// Deliberate fallthrough:
		BASE64_FALLTHROUGH

	case 3:	if (slen-- == 0) {
			ret = 1;
			break;
		}
		if ((q = base64_table_dec_8bit[*s++]) >= 254) {
			st.bytes = 0;
			st.eof = BASE64_EOF;
			// When q == 254, the input char is '='. Return 1 and EOF.
			// When q == 255, the input char is invalid. Return 0 and EOF.
			ret = ((q == 254) && (slen == 0)) ? 1 : 0;
			break;
		}
		*o++ = st.carry | q;
		st.carry = 0;
		st.bytes = 0;
		olen++;
	}
}

// Write back the decoder state and the number of bytes produced:
state->eof = st.eof;
state->bytes = st.bytes;
state->carry = st.carry;
*outlen = olen;
return ret;
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
// Encoder prologue, textually #included into each arch's stream-encode
// function. Expects the BASE64_ENC_PARAMS names (state, src, srclen,
// out, outlen) in scope. Opens a Duff's-device switch that the matching
// enc_tail.c fragment closes.

// Assume that *out is large enough to contain the output.
// Theoretically it should be 4/3 the length of src.
const uint8_t *s = (const uint8_t *) src;
uint8_t *o = (uint8_t *) out;

// Use local temporaries to avoid cache thrashing:
size_t olen = 0;
size_t slen = srclen;
struct base64_state st;
st.bytes = state->bytes;
st.carry = state->carry;

// Turn three bytes into four 6-bit numbers:
// in[0] = 00111111
// in[1] = 00112222
// in[2] = 00222233
// in[3] = 00333333

// Duff's device, a for() loop inside a switch() statement. Legal!
switch (st.bytes)
{
	for (;;)
	{
	case 0:
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
		// Encoder epilogue, textually #included after the fast loop.
		// Continues the Duff's-device switch opened in enc_head.c:
		// encodes the remaining 0..2 input bytes bytewise, then writes
		// back the state. `st.carry` holds the leftover bits between
		// output characters.
		if (slen-- == 0) {
			break;
		}
		*o++ = base64_table_enc_6bit[*s >> 2];
		st.carry = (*s++ << 4) & 0x30;
		st.bytes++;
		olen += 1;

		// Deliberate fallthrough:
		BASE64_FALLTHROUGH

	case 1:	if (slen-- == 0) {
			break;
		}
		*o++ = base64_table_enc_6bit[st.carry | (*s >> 4)];
		st.carry = (*s++ << 2) & 0x3C;
		st.bytes++;
		olen += 1;

		// Deliberate fallthrough:
		BASE64_FALLTHROUGH

	case 2:	if (slen-- == 0) {
			break;
		}
		*o++ = base64_table_enc_6bit[st.carry | (*s >> 6)];
		*o++ = base64_table_enc_6bit[*s++ & 0x3F];
		st.bytes = 0;
		olen += 2;
	}
}

// Write back the encoder state and the number of bytes produced:
state->bytes = st.bytes;
state->carry = st.carry;
*outlen = olen;
|
||||
|
|
@ -0,0 +1,79 @@
|
|||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "../../tables/tables.h"
|
||||
#include "../../codecs.h"
|
||||
#include "config.h"
|
||||
#include "../../env.h"
|
||||
|
||||
#ifdef __arm__
|
||||
# if (defined(__ARM_NEON__) || defined(__ARM_NEON)) && HAVE_NEON32
|
||||
# define BASE64_USE_NEON32
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef BASE64_USE_NEON32
|
||||
#include <arm_neon.h>
|
||||
|
||||
// Only enable inline assembly on supported compilers.
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#define BASE64_NEON32_USE_ASM
|
||||
#endif
|
||||
|
||||
// 128-bit wide table lookup for 32-bit NEON.
static BASE64_FORCE_INLINE uint8x16_t
vqtbl1q_u8 (const uint8x16_t lut, const uint8x16_t indices)
{
	// NEON32 only supports 64-bit wide lookups in 128-bit tables. Emulate
	// the NEON64 `vqtbl1q_u8` intrinsic to do 128-bit wide lookups.
	uint8x8x2_t lut2;
	uint8x8x2_t result;

	// Split the 128-bit table into the two-halves form vtbl2_u8 wants:
	lut2.val[0] = vget_low_u8(lut);
	lut2.val[1] = vget_high_u8(lut);

	// Look up each 64-bit half of the indices independently:
	result.val[0] = vtbl2_u8(lut2, vget_low_u8(indices));
	result.val[1] = vtbl2_u8(lut2, vget_high_u8(indices));

	return vcombine_u8(result.val[0], result.val[1]);
}
|
||||
|
||||
#include "../generic/32/dec_loop.c"
|
||||
#include "../generic/32/enc_loop.c"
|
||||
#include "dec_loop.c"
|
||||
#include "enc_reshuffle.c"
|
||||
#include "enc_translate.c"
|
||||
#include "enc_loop.c"
|
||||
|
||||
#endif // BASE64_USE_NEON32
|
||||
|
||||
// Stride size is so large on these NEON 32-bit functions
|
||||
// (48 bytes encode, 32 bytes decode) that we inline the
|
||||
// uint32 codec to stay performant on smaller inputs.
|
||||
|
||||
// Public NEON32 streaming encoder entry point. Runs the wide NEON loop
// first, then the generic 32-bit loop on what remains (see the stride
// note above), and falls back on the stub when NEON32 is unavailable.
void
base64_stream_encode_neon32 BASE64_ENC_PARAMS
{
#ifdef BASE64_USE_NEON32
	// Shared prologue: unpacks the codec state into locals s/slen/o/olen.
#include "../generic/enc_head.c"
	enc_loop_neon32(&s, &slen, &o, &olen);
	enc_loop_generic_32(&s, &slen, &o, &olen);
	// Shared epilogue: encodes the remaining 0..2 bytes and saves state.
#include "../generic/enc_tail.c"
#else
	base64_enc_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
||||
// Public NEON32 streaming decoder entry point. Runs the wide NEON loop
// first, then the generic 32-bit loop on what remains, and falls back
// on the stub when NEON32 is unavailable.
int
base64_stream_decode_neon32 BASE64_DEC_PARAMS
{
#ifdef BASE64_USE_NEON32
	// Shared prologue: unpacks the codec state into locals s/slen/o/olen.
#include "../generic/dec_head.c"
	dec_loop_neon32(&s, &slen, &o, &olen);
	dec_loop_generic_32(&s, &slen, &o, &olen);
	// Shared epilogue: decodes the tail bytewise and returns the result.
#include "../generic/dec_tail.c"
#else
	return base64_dec_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
|
@ -0,0 +1,106 @@
|
|||
// Return nonzero when any byte of the 16-byte vector is nonzero.
static BASE64_FORCE_INLINE int
is_nonzero (const uint8x16_t v)
{
	uint64_t u64;
	const uint64x2_t v64 = vreinterpretq_u64_u8(v);
	// Saturating-narrow the two 64-bit halves to 32 bits each; any set
	// bit in the input survives (saturates) into the narrowed result:
	const uint32x2_t v32 = vqmovn_u64(v64);

	vst1_u64(&u64, vreinterpret_u64_u32(v32));
	return u64 != 0;
}
|
||||
|
||||
// Map range-classification values to the additive deltas that translate
// decoded character classes back to their 6-bit values (see the SSSE3
// decoder for the classification scheme).
static BASE64_FORCE_INLINE uint8x16_t
delta_lookup (const uint8x16_t v)
{
	const uint8x8_t lut = {
		0, 16, 19, 4, (uint8_t) -65, (uint8_t) -65, (uint8_t) -71, (uint8_t) -71,
	};

	// vtbl1_u8 only handles 64-bit lookups; process each half:
	return vcombine_u8(
		vtbl1_u8(lut, vget_low_u8(v)),
		vtbl1_u8(lut, vget_high_u8(v)));
}
|
||||
|
||||
// Decode one 16-byte lane of Base64 characters in place. Returns a
// classification mask; the caller treats any nonzero byte in the mask
// as a signal that this block contained invalid input.
static BASE64_FORCE_INLINE uint8x16_t
dec_loop_neon32_lane (uint8x16_t *lane)
{
	// See the SSSE3 decoder for an explanation of the algorithm.
	const uint8x16_t lut_lo = {
		0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
		0x11, 0x11, 0x13, 0x1A, 0x1B, 0x1B, 0x1B, 0x1A
	};

	const uint8x16_t lut_hi = {
		0x10, 0x10, 0x01, 0x02, 0x04, 0x08, 0x04, 0x08,
		0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10
	};

	const uint8x16_t mask_0F = vdupq_n_u8(0x0F);
	const uint8x16_t mask_2F = vdupq_n_u8(0x2F);

	// Classify each byte by its high nibble, low nibble, and whether it
	// equals '/' (0x2F):
	const uint8x16_t hi_nibbles = vshrq_n_u8(*lane, 4);
	const uint8x16_t lo_nibbles = vandq_u8(*lane, mask_0F);
	const uint8x16_t eq_2F      = vceqq_u8(*lane, mask_2F);

	const uint8x16_t hi = vqtbl1q_u8(lut_hi, hi_nibbles);
	const uint8x16_t lo = vqtbl1q_u8(lut_lo, lo_nibbles);

	// Now simply add the delta values to the input:
	*lane = vaddq_u8(*lane, delta_lookup(vaddq_u8(eq_2F, hi_nibbles)));

	// Return the validity mask:
	return vandq_u8(lo, hi);
}
|
||||
|
||||
// Bulk NEON32 decode loop: decodes whole 64-byte blocks into 48 output
// bytes each, stopping early (and rolling back the length accounting)
// when an invalid input byte is detected so that the scalar code can
// report the error precisely.
static inline void
dec_loop_neon32 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 64) {
		return;
	}

	// Process blocks of 64 bytes per round. Unlike the SSE codecs, no
	// extra trailing zero bytes are written, so it is not necessary to
	// reserve extra input bytes:
	size_t rounds = *slen / 64;

	*slen -= rounds * 64;	// 64 bytes consumed per round
	*olen += rounds * 48;	// 48 bytes produced per round

	do {
		uint8x16x3_t dec;

		// Load 64 bytes and deinterleave:
		uint8x16x4_t str = vld4q_u8(*s);

		// Decode each lane, collect a mask of invalid inputs:
		const uint8x16_t classified
			= dec_loop_neon32_lane(&str.val[0])
			| dec_loop_neon32_lane(&str.val[1])
			| dec_loop_neon32_lane(&str.val[2])
			| dec_loop_neon32_lane(&str.val[3]);

		// Check for invalid input: if any of the delta values are
		// zero, fall back on bytewise code to do error checking and
		// reporting:
		if (is_nonzero(classified)) {
			break;
		}

		// Compress four bytes into three:
		dec.val[0] = vorrq_u8(vshlq_n_u8(str.val[0], 2), vshrq_n_u8(str.val[1], 4));
		dec.val[1] = vorrq_u8(vshlq_n_u8(str.val[1], 4), vshrq_n_u8(str.val[2], 2));
		dec.val[2] = vorrq_u8(vshlq_n_u8(str.val[2], 6), str.val[3]);

		// Interleave and store decoded result:
		vst3q_u8(*o, dec);

		*s += 64;
		*o += 48;

	} while (--rounds > 0);

	// Adjust for any rounds that were skipped:
	*slen += rounds * 64;
	*olen -= rounds * 48;
}
|
||||
|
|
@ -0,0 +1,170 @@
|
|||
#ifdef BASE64_NEON32_USE_ASM
|
||||
static BASE64_FORCE_INLINE void
|
||||
enc_loop_neon32_inner_asm (const uint8_t **s, uint8_t **o)
|
||||
{
|
||||
// This function duplicates the functionality of enc_loop_neon32_inner,
|
||||
// but entirely with inline assembly. This gives a significant speedup
|
||||
// over using NEON intrinsics, which do not always generate very good
|
||||
// code. The logic of the assembly is directly lifted from the
|
||||
// intrinsics version, so it can be used as a guide to this code.
|
||||
|
||||
// Temporary registers, used as scratch space.
|
||||
uint8x16_t tmp0, tmp1, tmp2, tmp3;
|
||||
uint8x16_t mask0, mask1, mask2, mask3;
|
||||
|
||||
// A lookup table containing the absolute offsets for all ranges.
|
||||
const uint8x16_t lut = {
|
||||
65U, 71U, 252U, 252U,
|
||||
252U, 252U, 252U, 252U,
|
||||
252U, 252U, 252U, 252U,
|
||||
237U, 240U, 0U, 0U
|
||||
};
|
||||
|
||||
// Numeric constants.
|
||||
const uint8x16_t n51 = vdupq_n_u8(51);
|
||||
const uint8x16_t n25 = vdupq_n_u8(25);
|
||||
const uint8x16_t n63 = vdupq_n_u8(63);
|
||||
|
||||
__asm__ (
|
||||
|
||||
// Load 48 bytes and deinterleave. The bytes are loaded to
|
||||
// hard-coded registers q12, q13 and q14, to ensure that they
|
||||
// are contiguous. Increment the source pointer.
|
||||
"vld3.8 {d24, d26, d28}, [%[src]]! \n\t"
|
||||
"vld3.8 {d25, d27, d29}, [%[src]]! \n\t"
|
||||
|
||||
// Reshuffle the bytes using temporaries.
|
||||
"vshr.u8 %q[t0], q12, #2 \n\t"
|
||||
"vshr.u8 %q[t1], q13, #4 \n\t"
|
||||
"vshr.u8 %q[t2], q14, #6 \n\t"
|
||||
"vsli.8 %q[t1], q12, #4 \n\t"
|
||||
"vsli.8 %q[t2], q13, #2 \n\t"
|
||||
"vand.u8 %q[t1], %q[t1], %q[n63] \n\t"
|
||||
"vand.u8 %q[t2], %q[t2], %q[n63] \n\t"
|
||||
"vand.u8 %q[t3], q14, %q[n63] \n\t"
|
||||
|
||||
// t0..t3 are the reshuffled inputs. Create LUT indices.
|
||||
"vqsub.u8 q12, %q[t0], %q[n51] \n\t"
|
||||
"vqsub.u8 q13, %q[t1], %q[n51] \n\t"
|
||||
"vqsub.u8 q14, %q[t2], %q[n51] \n\t"
|
||||
"vqsub.u8 q15, %q[t3], %q[n51] \n\t"
|
||||
|
||||
// Create the mask for range #0.
|
||||
"vcgt.u8 %q[m0], %q[t0], %q[n25] \n\t"
|
||||
"vcgt.u8 %q[m1], %q[t1], %q[n25] \n\t"
|
||||
"vcgt.u8 %q[m2], %q[t2], %q[n25] \n\t"
|
||||
"vcgt.u8 %q[m3], %q[t3], %q[n25] \n\t"
|
||||
|
||||
// Subtract -1 to correct the LUT indices.
|
||||
"vsub.u8 q12, %q[m0] \n\t"
|
||||
"vsub.u8 q13, %q[m1] \n\t"
|
||||
"vsub.u8 q14, %q[m2] \n\t"
|
||||
"vsub.u8 q15, %q[m3] \n\t"
|
||||
|
||||
// Lookup the delta values.
|
||||
"vtbl.u8 d24, {%q[lut]}, d24 \n\t"
|
||||
"vtbl.u8 d25, {%q[lut]}, d25 \n\t"
|
||||
"vtbl.u8 d26, {%q[lut]}, d26 \n\t"
|
||||
"vtbl.u8 d27, {%q[lut]}, d27 \n\t"
|
||||
"vtbl.u8 d28, {%q[lut]}, d28 \n\t"
|
||||
"vtbl.u8 d29, {%q[lut]}, d29 \n\t"
|
||||
"vtbl.u8 d30, {%q[lut]}, d30 \n\t"
|
||||
"vtbl.u8 d31, {%q[lut]}, d31 \n\t"
|
||||
|
||||
// Add the delta values.
|
||||
"vadd.u8 q12, %q[t0] \n\t"
|
||||
"vadd.u8 q13, %q[t1] \n\t"
|
||||
"vadd.u8 q14, %q[t2] \n\t"
|
||||
"vadd.u8 q15, %q[t3] \n\t"
|
||||
|
||||
// Store 64 bytes and interleave. Increment the dest pointer.
|
||||
"vst4.8 {d24, d26, d28, d30}, [%[dst]]! \n\t"
|
||||
"vst4.8 {d25, d27, d29, d31}, [%[dst]]! \n\t"
|
||||
|
||||
// Outputs (modified).
|
||||
: [src] "+r" (*s),
|
||||
[dst] "+r" (*o),
|
||||
[t0] "=&w" (tmp0),
|
||||
[t1] "=&w" (tmp1),
|
||||
[t2] "=&w" (tmp2),
|
||||
[t3] "=&w" (tmp3),
|
||||
[m0] "=&w" (mask0),
|
||||
[m1] "=&w" (mask1),
|
||||
[m2] "=&w" (mask2),
|
||||
[m3] "=&w" (mask3)
|
||||
|
||||
// Inputs (not modified).
|
||||
: [lut] "w" (lut),
|
||||
[n25] "w" (n25),
|
||||
[n51] "w" (n51),
|
||||
[n63] "w" (n63)
|
||||
|
||||
// Clobbers.
|
||||
: "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
|
||||
"cc", "memory"
|
||||
);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Encode one block: 48 input bytes -> 64 Base64 characters. Dispatches
// to the inline-assembly implementation when available; otherwise uses
// the NEON intrinsics path (load/reshuffle/translate/store).
static BASE64_FORCE_INLINE void
enc_loop_neon32_inner (const uint8_t **s, uint8_t **o)
{
#ifdef BASE64_NEON32_USE_ASM
	// The asm variant advances the pointers itself via post-increment
	// addressing.
	enc_loop_neon32_inner_asm(s, o);
#else
	// Load 48 bytes and deinterleave:
	uint8x16x3_t src = vld3q_u8(*s);

	// Reshuffle:
	uint8x16x4_t out = enc_reshuffle(src);

	// Translate reshuffled bytes to the Base64 alphabet:
	out = enc_translate(out);

	// Interleave and store output:
	vst4q_u8(*o, out);

	*s += 48;
	*o += 64;
#endif
}
|
||||
|
||||
// NEON32 encoding loop: convert every whole 48-byte input block into a
// 64-byte Base64 output block. The rounds are dispatched in unrolled
// batches of 8, 4, 2 and 1 to keep loop overhead low; any input tail
// shorter than 48 bytes is left for the generic encoder to handle.
static inline void
enc_loop_neon32 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	size_t rounds = *slen / 48;

	*slen -= rounds * 48;	// 48 bytes consumed per round
	*olen += rounds * 64;	// 64 bytes produced per round

	// Batches of eight rounds:
	while (rounds >= 8) {
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		rounds -= 8;
	}

	// At most one batch of four:
	if (rounds >= 4) {
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		rounds -= 4;
	}

	// At most one batch of two:
	if (rounds >= 2) {
		enc_loop_neon32_inner(s, o);
		enc_loop_neon32_inner(s, o);
		rounds -= 2;
	}

	// Final single round, if any:
	if (rounds == 1) {
		enc_loop_neon32_inner(s, o);
	}
}
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
// Spread the bits of three input bytes over four 6-bit output bytes,
// per the bit layout shown below. Operates on 16 independent 3-byte
// groups at once (one per SIMD lane).
static BASE64_FORCE_INLINE uint8x16x4_t
enc_reshuffle (uint8x16x3_t in)
{
	uint8x16x4_t out;

	// Input:
	// in[0]  = a7 a6 a5 a4 a3 a2 a1 a0
	// in[1]  = b7 b6 b5 b4 b3 b2 b1 b0
	// in[2]  = c7 c6 c5 c4 c3 c2 c1 c0

	// Output:
	// out[0] = 00 00 a7 a6 a5 a4 a3 a2
	// out[1] = 00 00 a1 a0 b7 b6 b5 b4
	// out[2] = 00 00 b3 b2 b1 b0 c7 c6
	// out[3] = 00 00 c5 c4 c3 c2 c1 c0

	// Move the input bits to where they need to be in the outputs. Except
	// for the first output, the high two bits are not cleared. The
	// shift-left-and-insert (vsli) merges the low bits of one input into
	// the shifted-down bits of the next.
	out.val[0] = vshrq_n_u8(in.val[0], 2);
	out.val[1] = vshrq_n_u8(in.val[1], 4);
	out.val[2] = vshrq_n_u8(in.val[2], 6);
	out.val[1] = vsliq_n_u8(out.val[1], in.val[0], 4);
	out.val[2] = vsliq_n_u8(out.val[2], in.val[1], 2);

	// Clear the high two bits in the second, third and fourth output.
	out.val[1] = vandq_u8(out.val[1], vdupq_n_u8(0x3F));
	out.val[2] = vandq_u8(out.val[2], vdupq_n_u8(0x3F));
	out.val[3] = vandq_u8(in.val[2], vdupq_n_u8(0x3F));

	return out;
}
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
// Translate 6-bit values (0..63) in all four lane sets to ASCII
// characters of the Base64 alphabet. A saturating subtract classifies
// each byte into one of five ranges; a 16-byte table lookup then yields
// the per-range delta to add to the input value.
static BASE64_FORCE_INLINE uint8x16x4_t
enc_translate (const uint8x16x4_t in)
{
	// A lookup table containing the absolute offsets for all ranges:
	const uint8x16_t lut = {
		65U, 71U, 252U, 252U,
		252U, 252U, 252U, 252U,
		252U, 252U, 252U, 252U,
		237U, 240U, 0U, 0U
	};

	const uint8x16_t offset = vdupq_n_u8(51);

	uint8x16x4_t indices, mask, delta, out;

	// Translate values 0..63 to the Base64 alphabet. There are five sets:
	// #  From      To        Abs    Index  Characters
	// 0  [0..25]   [65..90]  +65        0  ABCDEFGHIJKLMNOPQRSTUVWXYZ
	// 1  [26..51]  [97..122] +71        1  abcdefghijklmnopqrstuvwxyz
	// 2  [52..61]  [48..57]   -4  [2..11]  0123456789
	// 3  [62]      [43]      -19       12  +
	// 4  [63]      [47]      -16       13  /

	// Create LUT indices from input:
	// the index for range #0 is right, others are 1 less than expected:
	indices.val[0] = vqsubq_u8(in.val[0], offset);
	indices.val[1] = vqsubq_u8(in.val[1], offset);
	indices.val[2] = vqsubq_u8(in.val[2], offset);
	indices.val[3] = vqsubq_u8(in.val[3], offset);

	// mask is 0xFF (-1) for range #[1..4] and 0x00 for range #0:
	mask.val[0] = vcgtq_u8(in.val[0], vdupq_n_u8(25));
	mask.val[1] = vcgtq_u8(in.val[1], vdupq_n_u8(25));
	mask.val[2] = vcgtq_u8(in.val[2], vdupq_n_u8(25));
	mask.val[3] = vcgtq_u8(in.val[3], vdupq_n_u8(25));

	// Subtract -1, so add 1 to indices for range #[1..4], All indices are
	// now correct:
	indices.val[0] = vsubq_u8(indices.val[0], mask.val[0]);
	indices.val[1] = vsubq_u8(indices.val[1], mask.val[1]);
	indices.val[2] = vsubq_u8(indices.val[2], mask.val[2]);
	indices.val[3] = vsubq_u8(indices.val[3], mask.val[3]);

	// Lookup delta values:
	delta.val[0] = vqtbl1q_u8(lut, indices.val[0]);
	delta.val[1] = vqtbl1q_u8(lut, indices.val[1]);
	delta.val[2] = vqtbl1q_u8(lut, indices.val[2]);
	delta.val[3] = vqtbl1q_u8(lut, indices.val[3]);

	// Add delta values:
	out.val[0] = vaddq_u8(in.val[0], delta.val[0]);
	out.val[1] = vaddq_u8(in.val[1], delta.val[1]);
	out.val[2] = vaddq_u8(in.val[2], delta.val[2]);
	out.val[3] = vaddq_u8(in.val[3], delta.val[3]);

	return out;
}
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "../../tables/tables.h"
|
||||
#include "../../codecs.h"
|
||||
#include "config.h"
|
||||
#include "../../env.h"
|
||||
|
||||
#if HAVE_NEON64
|
||||
#include <arm_neon.h>
|
||||
|
||||
// Only enable inline assembly on supported compilers.
|
||||
#if !defined(__wasm__) && (defined(__GNUC__) || defined(__clang__))
|
||||
#define BASE64_NEON64_USE_ASM
|
||||
#endif
|
||||
|
||||
// Load a 64-byte lookup table from `p` into a uint8x16x4_t. On the asm
// path, the table is forced into the contiguous registers v8..v11 so
// that the TBL instructions in the asm encoder can address it as a
// register range.
static BASE64_FORCE_INLINE uint8x16x4_t
load_64byte_table (const uint8_t *p)
{
#ifdef BASE64_NEON64_USE_ASM

	// Force the table to be loaded into contiguous registers. GCC will not
	// normally allocate contiguous registers for a `uint8x16x4_t'. These
	// registers are chosen to not conflict with the ones in the enc loop.
	register uint8x16_t t0 __asm__ ("v8");
	register uint8x16_t t1 __asm__ ("v9");
	register uint8x16_t t2 __asm__ ("v10");
	register uint8x16_t t3 __asm__ ("v11");

	// NOTE(review): this asm reads 64 bytes through %[src] but declares
	// no "memory" clobber or memory input operand; presumably safe
	// because the tables are static const data — confirm.
	__asm__ (
		"ld1 {%[t0].16b, %[t1].16b, %[t2].16b, %[t3].16b}, [%[src]], #64 \n\t"
		: [src] "+r" (p),
		  [t0] "=w" (t0),
		  [t1] "=w" (t1),
		  [t2] "=w" (t2),
		  [t3] "=w" (t3)
	);

	return (uint8x16x4_t) {
		.val[0] = t0,
		.val[1] = t1,
		.val[2] = t2,
		.val[3] = t3,
	};
#else
	return vld1q_u8_x4(p);
#endif
}
|
||||
|
||||
#include "../generic/32/dec_loop.c"
|
||||
#include "../generic/64/enc_loop.c"
|
||||
#include "dec_loop.c"
|
||||
|
||||
#ifdef BASE64_NEON64_USE_ASM
|
||||
# include "enc_loop_asm.c"
|
||||
#else
|
||||
# include "enc_reshuffle.c"
|
||||
# include "enc_loop.c"
|
||||
#endif
|
||||
|
||||
#endif // HAVE_NEON64
|
||||
|
||||
// Stride size is so large on these NEON 64-bit functions
|
||||
// (48 bytes encode, 64 bytes decode) that we inline the
|
||||
// uint64 codec to stay performant on smaller inputs.
|
||||
|
||||
// Public entry point: streaming Base64 encoder using the NEON64 codec.
// The SIMD loop handles whole 48-byte blocks; the textually-included
// generic head/tail code manages stream state and leftovers, and the
// generic 64-bit loop picks up what the SIMD loop leaves behind.
void
base64_stream_encode_neon64 BASE64_ENC_PARAMS
{
#if HAVE_NEON64
#include "../generic/enc_head.c"
	enc_loop_neon64(&s, &slen, &o, &olen);
	enc_loop_generic_64(&s, &slen, &o, &olen);
#include "../generic/enc_tail.c"
#else
	// Codec not compiled in; the stub reports the codec as unavailable.
	base64_enc_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
||||
// Public entry point: streaming Base64 decoder using the NEON64 codec.
// Mirrors the encoder: SIMD loop for bulk 64-byte blocks, generic
// 32-bit loop for the remainder, generic head/tail for stream state
// and error reporting.
int
base64_stream_decode_neon64 BASE64_DEC_PARAMS
{
#if HAVE_NEON64
#include "../generic/dec_head.c"
	dec_loop_neon64(&s, &slen, &o, &olen);
	dec_loop_generic_32(&s, &slen, &o, &olen);
#include "../generic/dec_tail.c"
#else
	// Codec not compiled in; the stub reports the codec as unavailable.
	return base64_dec_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
|
@ -0,0 +1,129 @@
|
|||
// The input consists of five valid character sets in the Base64 alphabet,
|
||||
// which we need to map back to the 6-bit values they represent.
|
||||
// There are three ranges, two singles, and then there's the rest.
|
||||
//
|
||||
// # From To LUT Characters
|
||||
// 1 [0..42] [255] #1 invalid input
|
||||
// 2 [43] [62] #1 +
|
||||
// 3 [44..46] [255] #1 invalid input
|
||||
// 4 [47] [63] #1 /
|
||||
// 5 [48..57] [52..61] #1 0..9
|
||||
// 6 [58..63] [255] #1 invalid input
|
||||
// 7 [64] [255] #2 invalid input
|
||||
// 8 [65..90] [0..25] #2 A..Z
|
||||
// 9 [91..96] [255] #2 invalid input
|
||||
// 10 [97..122] [26..51] #2 a..z
|
||||
// 11 [123..126] [255] #2 invalid input
|
||||
// (12) Everything else => invalid input
|
||||
|
||||
// The first LUT will use the VTBL instruction (out of range indices are set to
|
||||
// 0 in destination).
|
||||
// Decode LUT #1: indexed (via VTBL) directly by the input byte, covers
// input values [0..63]. Maps '+', '/' and '0'..'9' to their 6-bit
// values; every other in-range value maps to 255 (invalid).
static const uint8_t dec_lut1[] = {
	255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U,
	255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U,
	255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 255U, 62U, 255U, 255U, 255U, 63U,
	52U, 53U, 54U, 55U, 56U, 57U, 58U, 59U, 60U, 61U, 255U, 255U, 255U, 255U, 255U, 255U,
};
|
||||
|
||||
// The second LUT will use the VTBX instruction (out of range indices will be
|
||||
// unchanged in destination). Input [64..126] will be mapped to index [1..63]
|
||||
// in this LUT. Index 0 means that value comes from LUT #1.
|
||||
// Decode LUT #2: indexed (via VTBX) by input minus 63, covers input
// values [64..126] at indices [1..63]. Maps 'A'..'Z' and 'a'..'z' to
// their 6-bit values; index 0 means the value comes from LUT #1.
static const uint8_t dec_lut2[] = {
	0U, 255U, 0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U, 8U, 9U, 10U, 11U, 12U, 13U,
	14U, 15U, 16U, 17U, 18U, 19U, 20U, 21U, 22U, 23U, 24U, 25U, 255U, 255U, 255U, 255U,
	255U, 255U, 26U, 27U, 28U, 29U, 30U, 31U, 32U, 33U, 34U, 35U, 36U, 37U, 38U, 39U,
	40U, 41U, 42U, 43U, 44U, 45U, 46U, 47U, 48U, 49U, 50U, 51U, 255U, 255U, 255U, 255U,
};
|
||||
|
||||
// All input values in range for the first look-up will be 0U in the second
|
||||
// look-up result. All input values out of range for the first look-up will be
|
||||
// 0U in the first look-up result. Thus, the two results can be ORed without
|
||||
// conflicts.
|
||||
//
|
||||
// Invalid characters that are in the valid range for either look-up will be
|
||||
// set to 255U in the combined result. Other invalid characters will just be
|
||||
// passed through with the second look-up result (using the VTBX instruction).
|
||||
// Since the second LUT is 64 bytes, those passed-through values are guaranteed
|
||||
// to have a value greater than 63U. Therefore, valid characters will be mapped
|
||||
// to the valid [0..63] range and all invalid characters will be mapped to
|
||||
// values greater than 63.
|
||||
|
||||
// NEON64 decoding loop. Decodes whole 64-character input blocks into 48
// output bytes per round, using a two-stage 64-byte table lookup (VTBL
// for bytes [0..63], VTBX for [64..126]) whose results can be ORed
// without conflicts. Returns early when fewer than 64 input bytes
// remain, or when a block contains an invalid character; the pointers
// and lengths are adjusted so a scalar decoder can finish and report
// errors precisely.
static inline void
dec_loop_neon64 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 64) {
		return;
	}

	// Process blocks of 64 bytes per round. Unlike the SSE codecs, no
	// extra trailing zero bytes are written, so it is not necessary to
	// reserve extra input bytes:
	size_t rounds = *slen / 64;

	*slen -= rounds * 64;	// 64 bytes consumed per round
	*olen += rounds * 48;	// 48 bytes produced per round

	const uint8x16x4_t tbl_dec1 = load_64byte_table(dec_lut1);
	const uint8x16x4_t tbl_dec2 = load_64byte_table(dec_lut2);

	do {
		const uint8x16_t offset = vdupq_n_u8(63U);
		uint8x16x4_t dec1, dec2;
		uint8x16x3_t dec;

		// Load 64 bytes and deinterleave:
		uint8x16x4_t str = vld4q_u8((uint8_t *) *s);

		// Get indices for second LUT (saturating subtract maps all
		// bytes <= 63 to index 0, which passes through to LUT #1):
		dec2.val[0] = vqsubq_u8(str.val[0], offset);
		dec2.val[1] = vqsubq_u8(str.val[1], offset);
		dec2.val[2] = vqsubq_u8(str.val[2], offset);
		dec2.val[3] = vqsubq_u8(str.val[3], offset);

		// Get values from first LUT:
		dec1.val[0] = vqtbl4q_u8(tbl_dec1, str.val[0]);
		dec1.val[1] = vqtbl4q_u8(tbl_dec1, str.val[1]);
		dec1.val[2] = vqtbl4q_u8(tbl_dec1, str.val[2]);
		dec1.val[3] = vqtbl4q_u8(tbl_dec1, str.val[3]);

		// Get values from second LUT (VTBX leaves out-of-range
		// indices unchanged, passing invalid bytes through):
		dec2.val[0] = vqtbx4q_u8(dec2.val[0], tbl_dec2, dec2.val[0]);
		dec2.val[1] = vqtbx4q_u8(dec2.val[1], tbl_dec2, dec2.val[1]);
		dec2.val[2] = vqtbx4q_u8(dec2.val[2], tbl_dec2, dec2.val[2]);
		dec2.val[3] = vqtbx4q_u8(dec2.val[3], tbl_dec2, dec2.val[3]);

		// Get final values:
		str.val[0] = vorrq_u8(dec1.val[0], dec2.val[0]);
		str.val[1] = vorrq_u8(dec1.val[1], dec2.val[1]);
		str.val[2] = vorrq_u8(dec1.val[2], dec2.val[2]);
		str.val[3] = vorrq_u8(dec1.val[3], dec2.val[3]);

		// Check for invalid input, any value larger than 63:
		const uint8x16_t classified
			= vorrq_u8(
				vorrq_u8(vcgtq_u8(str.val[0], vdupq_n_u8(63)), vcgtq_u8(str.val[1], vdupq_n_u8(63))),
				vorrq_u8(vcgtq_u8(str.val[2], vdupq_n_u8(63)), vcgtq_u8(str.val[3], vdupq_n_u8(63)))
			);

		// Check that all bits are zero:
		if (vmaxvq_u8(classified) != 0U) {
			break;
		}

		// Compress four bytes into three:
		dec.val[0] = vorrq_u8(vshlq_n_u8(str.val[0], 2), vshrq_n_u8(str.val[1], 4));
		dec.val[1] = vorrq_u8(vshlq_n_u8(str.val[1], 4), vshrq_n_u8(str.val[2], 2));
		dec.val[2] = vorrq_u8(vshlq_n_u8(str.val[2], 6), str.val[3]);

		// Interleave and store decoded result:
		vst3q_u8((uint8_t *) *o, dec);

		*s += 64;
		*o += 48;

	} while (--rounds > 0);

	// Adjust for any rounds that were skipped (early break on invalid
	// input leaves `rounds` nonzero):
	*slen += rounds * 64;
	*olen -= rounds * 48;
}
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
// Encode one block with intrinsics: 48 input bytes -> 64 Base64
// characters, translating through the 64-byte encode table `tbl_enc`.
static BASE64_FORCE_INLINE void
enc_loop_neon64_inner (const uint8_t **s, uint8_t **o, const uint8x16x4_t tbl_enc)
{
	// Load 48 bytes and deinterleave:
	uint8x16x3_t src = vld3q_u8(*s);

	// Divide bits of three input bytes over four output bytes:
	uint8x16x4_t out = enc_reshuffle(src);

	// The bits have now been shifted to the right locations;
	// translate their values 0..63 to the Base64 alphabet.
	// Use a 64-byte table lookup:
	out.val[0] = vqtbl4q_u8(tbl_enc, out.val[0]);
	out.val[1] = vqtbl4q_u8(tbl_enc, out.val[1]);
	out.val[2] = vqtbl4q_u8(tbl_enc, out.val[2]);
	out.val[3] = vqtbl4q_u8(tbl_enc, out.val[3]);

	// Interleave and store output:
	vst4q_u8(*o, out);

	*s += 48;
	*o += 64;
}
|
||||
|
||||
static inline void
|
||||
enc_loop_neon64 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
|
||||
{
|
||||
size_t rounds = *slen / 48;
|
||||
|
||||
*slen -= rounds * 48; // 48 bytes consumed per round
|
||||
*olen += rounds * 64; // 64 bytes produced per round
|
||||
|
||||
// Load the encoding table:
|
||||
const uint8x16x4_t tbl_enc = load_64byte_table(base64_table_enc_6bit);
|
||||
|
||||
while (rounds > 0) {
|
||||
if (rounds >= 8) {
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
rounds -= 8;
|
||||
continue;
|
||||
}
|
||||
if (rounds >= 4) {
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
rounds -= 4;
|
||||
continue;
|
||||
}
|
||||
if (rounds >= 2) {
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
rounds -= 2;
|
||||
continue;
|
||||
}
|
||||
enc_loop_neon64_inner(s, o, tbl_enc);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,168 @@
|
|||
// Apologies in advance for combining the preprocessor with inline assembly,
|
||||
// two notoriously gnarly parts of C, but it was necessary to avoid a lot of
|
||||
// code repetition. The preprocessor is used to template large sections of
|
||||
// inline assembly that differ only in the registers used. If the code was
|
||||
// written out by hand, it would become very large and hard to audit.
|
||||
|
||||
// Generate a block of inline assembly that loads three user-defined registers
|
||||
// A, B, C from memory and deinterleaves them, post-incrementing the src
|
||||
// pointer. The register set should be sequential.
|
||||
// Generate a block of inline assembly that loads three user-defined registers
// A, B, C from memory and deinterleaves them, post-incrementing the src
// pointer. The register set should be sequential.
#define LOAD(A, B, C) \
	"ld3 {"A".16b, "B".16b, "C".16b}, [%[src]], #48 \n\t"

// Generate a block of inline assembly that takes three deinterleaved registers
// and shuffles the bytes. The output is in temporary registers t0..t3.
// (Same bit layout as the intrinsics enc_reshuffle: shift down, insert
// the low bits of the previous byte, mask to six bits.)
#define SHUF(A, B, C) \
	"ushr %[t0].16b, "A".16b, #2 \n\t" \
	"ushr %[t1].16b, "B".16b, #4 \n\t" \
	"ushr %[t2].16b, "C".16b, #6 \n\t" \
	"sli %[t1].16b, "A".16b, #4 \n\t" \
	"sli %[t2].16b, "B".16b, #2 \n\t" \
	"and %[t1].16b, %[t1].16b, %[n63].16b \n\t" \
	"and %[t2].16b, %[t2].16b, %[n63].16b \n\t" \
	"and %[t3].16b, "C".16b, %[n63].16b \n\t"

// Generate a block of inline assembly that takes temporary registers t0..t3
// and translates them to the base64 alphabet, using a table loaded into
// v8..v11. The output is in user-defined registers A..D.
#define TRAN(A, B, C, D) \
	"tbl "A".16b, {v8.16b-v11.16b}, %[t0].16b \n\t" \
	"tbl "B".16b, {v8.16b-v11.16b}, %[t1].16b \n\t" \
	"tbl "C".16b, {v8.16b-v11.16b}, %[t2].16b \n\t" \
	"tbl "D".16b, {v8.16b-v11.16b}, %[t3].16b \n\t"

// Generate a block of inline assembly that interleaves four registers and
// stores them, post-incrementing the destination pointer.
#define STOR(A, B, C, D) \
	"st4 {"A".16b, "B".16b, "C".16b, "D".16b}, [%[dst]], #64 \n\t"

// Generate a block of inline assembly that generates a single self-contained
// encoder round: fetch the data, process it, and store the result.
#define ROUND() \
	LOAD("v12", "v13", "v14") \
	SHUF("v12", "v13", "v14") \
	TRAN("v12", "v13", "v14", "v15") \
	STOR("v12", "v13", "v14", "v15")

// Generate a block of assembly that generates a type A interleaved encoder
// round. It uses registers that were loaded by the previous type B round, and
// in turn loads registers for the next type B round.
#define ROUND_A() \
	SHUF("v2",  "v3",  "v4") \
	LOAD("v12", "v13", "v14") \
	TRAN("v2",  "v3",  "v4",  "v5") \
	STOR("v2",  "v3",  "v4",  "v5")

// Type B interleaved encoder round. Same as type A, but register sets swapped.
#define ROUND_B() \
	SHUF("v12", "v13", "v14") \
	LOAD("v2",  "v3",  "v4") \
	TRAN("v12", "v13", "v14", "v15") \
	STOR("v12", "v13", "v14", "v15")

// The first type A round needs to load its own registers.
#define ROUND_A_FIRST() \
	LOAD("v2", "v3", "v4") \
	ROUND_A()

// The last type B round omits the load for the next step.
#define ROUND_B_LAST() \
	SHUF("v12", "v13", "v14") \
	TRAN("v12", "v13", "v14", "v15") \
	STOR("v12", "v13", "v14", "v15")
|
||||
|
||||
// Suppress clang's warning that the literal string in the asm statement is
|
||||
// overlong (longer than the ISO-mandated minimum size of 4095 bytes for C99
|
||||
// compilers). It may be true, but the goal here is not C99 portability.
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Woverlength-strings"
|
||||
|
||||
static inline void
|
||||
enc_loop_neon64 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
|
||||
{
|
||||
size_t rounds = *slen / 48;
|
||||
|
||||
if (rounds == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
*slen -= rounds * 48; // 48 bytes consumed per round.
|
||||
*olen += rounds * 64; // 64 bytes produced per round.
|
||||
|
||||
// Number of times to go through the 8x loop.
|
||||
size_t loops = rounds / 8;
|
||||
|
||||
// Number of rounds remaining after the 8x loop.
|
||||
rounds %= 8;
|
||||
|
||||
// Temporary registers, used as scratch space.
|
||||
uint8x16_t tmp0, tmp1, tmp2, tmp3;
|
||||
|
||||
__asm__ volatile (
|
||||
|
||||
// Load the encoding table into v8..v11.
|
||||
" ld1 {v8.16b-v11.16b}, [%[tbl]] \n\t"
|
||||
|
||||
// If there are eight rounds or more, enter an 8x unrolled loop
|
||||
// of interleaved encoding rounds. The rounds interleave memory
|
||||
// operations (load/store) with data operations to maximize
|
||||
// pipeline throughput.
|
||||
" cbz %[loops], 4f \n\t"
|
||||
|
||||
// The SIMD instructions do not touch the flags.
|
||||
"88: subs %[loops], %[loops], #1 \n\t"
|
||||
" " ROUND_A_FIRST()
|
||||
" " ROUND_B()
|
||||
" " ROUND_A()
|
||||
" " ROUND_B()
|
||||
" " ROUND_A()
|
||||
" " ROUND_B()
|
||||
" " ROUND_A()
|
||||
" " ROUND_B_LAST()
|
||||
" b.ne 88b \n\t"
|
||||
|
||||
// Enter a 4x unrolled loop for rounds of 4 or more.
|
||||
"4: cmp %[rounds], #4 \n\t"
|
||||
" b.lt 30f \n\t"
|
||||
" " ROUND_A_FIRST()
|
||||
" " ROUND_B()
|
||||
" " ROUND_A()
|
||||
" " ROUND_B_LAST()
|
||||
" sub %[rounds], %[rounds], #4 \n\t"
|
||||
|
||||
// Dispatch the remaining rounds 0..3.
|
||||
"30: cbz %[rounds], 0f \n\t"
|
||||
" cmp %[rounds], #2 \n\t"
|
||||
" b.eq 2f \n\t"
|
||||
" b.lt 1f \n\t"
|
||||
|
||||
// Block of non-interlaced encoding rounds, which can each
|
||||
// individually be jumped to. Rounds fall through to the next.
|
||||
"3: " ROUND()
|
||||
"2: " ROUND()
|
||||
"1: " ROUND()
|
||||
"0: \n\t"
|
||||
|
||||
// Outputs (modified).
|
||||
: [loops] "+r" (loops),
|
||||
[src] "+r" (*s),
|
||||
[dst] "+r" (*o),
|
||||
[t0] "=&w" (tmp0),
|
||||
[t1] "=&w" (tmp1),
|
||||
[t2] "=&w" (tmp2),
|
||||
[t3] "=&w" (tmp3)
|
||||
|
||||
// Inputs (not modified).
|
||||
: [rounds] "r" (rounds),
|
||||
[tbl] "r" (base64_table_enc_6bit),
|
||||
[n63] "w" (vdupq_n_u8(63))
|
||||
|
||||
// Clobbers.
|
||||
: "v2", "v3", "v4", "v5",
|
||||
"v8", "v9", "v10", "v11",
|
||||
"v12", "v13", "v14", "v15",
|
||||
"cc", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
// Spread the bits of three input bytes over four 6-bit output bytes,
// per the bit layout shown below. Operates on 16 independent 3-byte
// groups at once (one per SIMD lane).
static BASE64_FORCE_INLINE uint8x16x4_t
enc_reshuffle (const uint8x16x3_t in)
{
	uint8x16x4_t out;

	// Input:
	// in[0]  = a7 a6 a5 a4 a3 a2 a1 a0
	// in[1]  = b7 b6 b5 b4 b3 b2 b1 b0
	// in[2]  = c7 c6 c5 c4 c3 c2 c1 c0

	// Output:
	// out[0] = 00 00 a7 a6 a5 a4 a3 a2
	// out[1] = 00 00 a1 a0 b7 b6 b5 b4
	// out[2] = 00 00 b3 b2 b1 b0 c7 c6
	// out[3] = 00 00 c5 c4 c3 c2 c1 c0

	// Move the input bits to where they need to be in the outputs. Except
	// for the first output, the high two bits are not cleared. The
	// shift-left-and-insert (vsli) merges the low bits of one input into
	// the shifted-down bits of the next.
	out.val[0] = vshrq_n_u8(in.val[0], 2);
	out.val[1] = vshrq_n_u8(in.val[1], 4);
	out.val[2] = vshrq_n_u8(in.val[2], 6);
	out.val[1] = vsliq_n_u8(out.val[1], in.val[0], 4);
	out.val[2] = vsliq_n_u8(out.val[2], in.val[1], 2);

	// Clear the high two bits in the second, third and fourth output.
	out.val[1] = vandq_u8(out.val[1], vdupq_n_u8(0x3F));
	out.val[2] = vandq_u8(out.val[2], vdupq_n_u8(0x3F));
	out.val[3] = vandq_u8(in.val[2], vdupq_n_u8(0x3F));

	return out;
}
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "../../tables/tables.h"
|
||||
#include "../../codecs.h"
|
||||
#include "config.h"
|
||||
#include "../../env.h"
|
||||
|
||||
#if HAVE_SSE41
|
||||
#if defined(__clang__)
|
||||
#pragma clang attribute push (__attribute__((target("sse4.1"))), apply_to=function)
|
||||
#else
|
||||
#pragma GCC target("sse4.1")
|
||||
#endif
|
||||
#include <smmintrin.h>
|
||||
|
||||
// Only enable inline assembly on supported compilers and on 64-bit CPUs.
|
||||
#ifndef BASE64_SSE41_USE_ASM
|
||||
# if (defined(__GNUC__) || defined(__clang__)) && BASE64_WORDSIZE == 64
|
||||
# define BASE64_SSE41_USE_ASM 1
|
||||
# else
|
||||
# define BASE64_SSE41_USE_ASM 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#include "../ssse3/dec_reshuffle.c"
|
||||
#include "../ssse3/dec_loop.c"
|
||||
|
||||
#if BASE64_SSE41_USE_ASM
|
||||
# include "../ssse3/enc_loop_asm.c"
|
||||
#else
|
||||
# include "../ssse3/enc_translate.c"
|
||||
# include "../ssse3/enc_reshuffle.c"
|
||||
# include "../ssse3/enc_loop.c"
|
||||
#endif
|
||||
|
||||
#endif // HAVE_SSE41
|
||||
|
||||
// Public entry point: streaming Base64 encoder, SSE4.1 build. Reuses
// the SSSE3 encoding loop, recompiled under SSE4.1 codegen flags; the
// textually-included generic head/tail manage stream state and the
// sub-16-byte remainder.
void
base64_stream_encode_sse41 BASE64_ENC_PARAMS
{
#if HAVE_SSE41
#include "../generic/enc_head.c"
	enc_loop_ssse3(&s, &slen, &o, &olen);
#include "../generic/enc_tail.c"
#else
	// Codec not compiled in; the stub reports the codec as unavailable.
	base64_enc_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
||||
// Public entry point: streaming Base64 decoder, SSE4.1 build. Reuses
// the SSSE3 decoding loop under SSE4.1 codegen flags.
int
base64_stream_decode_sse41 BASE64_DEC_PARAMS
{
#if HAVE_SSE41
#include "../generic/dec_head.c"
	dec_loop_ssse3(&s, &slen, &o, &olen);
#include "../generic/dec_tail.c"
// Balances the `clang attribute push' at the top of this file; it is
// placed after the last function compiled with the sse4.1 target.
#if defined(__clang__)
#pragma clang attribute pop
#endif
#else
	// Codec not compiled in; the stub reports the codec as unavailable.
	return base64_dec_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "../../tables/tables.h"
|
||||
#include "../../codecs.h"
|
||||
#include "config.h"
|
||||
#include "../../env.h"
|
||||
|
||||
#if HAVE_SSE42
|
||||
#if defined(__clang__)
|
||||
#pragma clang attribute push (__attribute__((target("sse4.2"))), apply_to=function)
|
||||
#else
|
||||
#pragma GCC target("sse4.2")
|
||||
#endif
|
||||
#include <nmmintrin.h>
|
||||
|
||||
// Only enable inline assembly on supported compilers and on 64-bit CPUs.
|
||||
#ifndef BASE64_SSE42_USE_ASM
|
||||
# if (defined(__GNUC__) || defined(__clang__)) && BASE64_WORDSIZE == 64
|
||||
# define BASE64_SSE42_USE_ASM 1
|
||||
# else
|
||||
# define BASE64_SSE42_USE_ASM 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#include "../ssse3/dec_reshuffle.c"
|
||||
#include "../ssse3/dec_loop.c"
|
||||
|
||||
#if BASE64_SSE42_USE_ASM
|
||||
# include "../ssse3/enc_loop_asm.c"
|
||||
#else
|
||||
# include "../ssse3/enc_translate.c"
|
||||
# include "../ssse3/enc_reshuffle.c"
|
||||
# include "../ssse3/enc_loop.c"
|
||||
#endif
|
||||
|
||||
#endif // HAVE_SSE42
|
||||
|
||||
// Public entry point: streaming Base64 encoder, SSE4.2 build. Reuses
// the SSSE3 encoding loop, recompiled under SSE4.2 codegen flags; the
// textually-included generic head/tail manage stream state and the
// sub-16-byte remainder.
void
base64_stream_encode_sse42 BASE64_ENC_PARAMS
{
#if HAVE_SSE42
#include "../generic/enc_head.c"
	enc_loop_ssse3(&s, &slen, &o, &olen);
#include "../generic/enc_tail.c"
#else
	// Codec not compiled in; the stub reports the codec as unavailable.
	base64_enc_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
||||
// Public entry point: streaming Base64 decoder, SSE4.2 build. Reuses
// the SSSE3 decoding loop under SSE4.2 codegen flags.
int
base64_stream_decode_sse42 BASE64_DEC_PARAMS
{
#if HAVE_SSE42
#include "../generic/dec_head.c"
	dec_loop_ssse3(&s, &slen, &o, &olen);
#include "../generic/dec_tail.c"
// Balances the `clang attribute push' at the top of this file; it is
// placed after the last function compiled with the sse4.2 target.
#if defined(__clang__)
#pragma clang attribute pop
#endif
#else
	// Codec not compiled in; the stub reports the codec as unavailable.
	return base64_dec_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "../../tables/tables.h"
|
||||
#include "../../codecs.h"
|
||||
#include "config.h"
|
||||
#include "../../env.h"
|
||||
|
||||
#if HAVE_SSSE3
|
||||
#if defined(__clang__)
|
||||
#pragma clang attribute push (__attribute__((target("ssse3"))), apply_to=function)
|
||||
#else
|
||||
#pragma GCC target("ssse3")
|
||||
#endif
|
||||
#include <tmmintrin.h>
|
||||
|
||||
// Only enable inline assembly on supported compilers and on 64-bit CPUs.
|
||||
// 32-bit CPUs with SSSE3 support, such as low-end Atoms, only have eight XMM
|
||||
// registers, which is not enough to run the inline assembly.
|
||||
#ifndef BASE64_SSSE3_USE_ASM
|
||||
# if (defined(__GNUC__) || defined(__clang__)) && BASE64_WORDSIZE == 64
|
||||
# define BASE64_SSSE3_USE_ASM 1
|
||||
# else
|
||||
# define BASE64_SSSE3_USE_ASM 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#include "dec_reshuffle.c"
|
||||
#include "dec_loop.c"
|
||||
|
||||
#if BASE64_SSSE3_USE_ASM
|
||||
# include "enc_loop_asm.c"
|
||||
#else
|
||||
# include "enc_reshuffle.c"
|
||||
# include "enc_translate.c"
|
||||
# include "enc_loop.c"
|
||||
#endif
|
||||
|
||||
#endif // HAVE_SSSE3
|
||||
|
||||
// Streaming Base64 encoder, SSSE3 build. Parameter list comes from the
// BASE64_ENC_PARAMS macro (see codecs.h). The generic enc_head.c/enc_tail.c
// fragments supply the state handling and scalar remainder around the SIMD
// bulk loop; when SSSE3 support is compiled out, delegates to the stub.
void
base64_stream_encode_ssse3 BASE64_ENC_PARAMS
{
#if HAVE_SSSE3
	// Sets up locals s/slen/o/olen from the stream state:
	#include "../generic/enc_head.c"
	enc_loop_ssse3(&s, &slen, &o, &olen);
	// Encodes the scalar tail and saves state:
	#include "../generic/enc_tail.c"
#else
	base64_enc_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
||||
// Streaming Base64 decoder, SSSE3 build. Parameter list comes from the
// BASE64_DEC_PARAMS macro (see codecs.h). Returns via the generic dec_tail.c
// fragment; when SSSE3 support is compiled out, delegates to the stub.
int
base64_stream_decode_ssse3 BASE64_DEC_PARAMS
{
#if HAVE_SSSE3
	// Sets up locals s/slen/o/olen from the stream state:
	#include "../generic/dec_head.c"
	dec_loop_ssse3(&s, &slen, &o, &olen);
	// Decodes the scalar tail, saves state and returns:
	#include "../generic/dec_tail.c"
// Balances the "clang attribute push" emitted at the top of this file; it is
// a pragma, so its position inside the function body is harmless.
#if defined(__clang__)
#pragma clang attribute pop
#endif
#else
	return base64_dec_stub(state, src, srclen, out, outlen);
#endif
}
|
||||
|
|
@ -0,0 +1,173 @@
|
|||
// The input consists of six character sets in the Base64 alphabet, which we
|
||||
// need to map back to the 6-bit values they represent. There are three ranges,
|
||||
// two singles, and then there's the rest.
|
||||
//
|
||||
// # From To Add Characters
|
||||
// 1 [43] [62] +19 +
|
||||
// 2 [47] [63] +16 /
|
||||
// 3 [48..57] [52..61] +4 0..9
|
||||
// 4 [65..90] [0..25] -65 A..Z
|
||||
// 5 [97..122] [26..51] -71 a..z
|
||||
// (6) Everything else => invalid input
|
||||
//
|
||||
// We will use lookup tables for character validation and offset computation.
|
||||
// Remember that 0x2X and 0x0X are the same index for _mm_shuffle_epi8, this
|
||||
// allows to mask with 0x2F instead of 0x0F and thus save one constant
|
||||
// declaration (register and/or memory access).
|
||||
//
|
||||
// For offsets:
|
||||
// Perfect hash for lut = ((src >> 4) & 0x2F) + ((src == 0x2F) ? 0xFF : 0x00)
|
||||
// 0000 = garbage
|
||||
// 0001 = /
|
||||
// 0010 = +
|
||||
// 0011 = 0-9
|
||||
// 0100 = A-Z
|
||||
// 0101 = A-Z
|
||||
// 0110 = a-z
|
||||
// 0111 = a-z
|
||||
// 1000 >= garbage
|
||||
//
|
||||
// For validation, here's the table.
|
||||
// A character is valid if and only if the AND of the 2 lookups equals 0:
|
||||
//
|
||||
// hi \ lo 0000 0001 0010 0011 0100 0101 0110 0111 1000 1001 1010 1011 1100 1101 1110 1111
|
||||
// LUT 0x15 0x11 0x11 0x11 0x11 0x11 0x11 0x11 0x11 0x11 0x13 0x1A 0x1B 0x1B 0x1B 0x1A
|
||||
//
|
||||
// 0000 0x10 char NUL SOH STX ETX EOT ENQ ACK BEL BS HT LF VT FF CR SO SI
|
||||
// andlut 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10
|
||||
//
|
||||
// 0001 0x10 char DLE DC1 DC2 DC3 DC4 NAK SYN ETB CAN EM SUB ESC FS GS RS US
|
||||
// andlut 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10
|
||||
//
|
||||
// 0010 0x01 char ! " # $ % & ' ( ) * + , - . /
|
||||
// andlut 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x01 0x00 0x01 0x01 0x01 0x00
|
||||
//
|
||||
// 0011 0x02 char 0 1 2 3 4 5 6 7 8 9 : ; < = > ?
|
||||
// andlut 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x02 0x02 0x02 0x02 0x02 0x02
|
||||
//
|
||||
// 0100 0x04 char @ A B C D E F G H I J K L M N O
|
||||
// andlut 0x04 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
|
||||
//
|
||||
// 0101 0x08 char P Q R S T U V W X Y Z [ \ ] ^ _
|
||||
// andlut 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x08 0x08 0x08 0x08 0x08
|
||||
//
|
||||
// 0110 0x04 char ` a b c d e f g h i j k l m n o
|
||||
// andlut  0x04 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
//
// 0111 0x08 char    p    q    r    s    t    u    v    w    x    y    z    {    |    }    ~
|
||||
// andlut 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x08 0x08 0x08 0x08 0x08
|
||||
//
|
||||
// 1000 0x10 andlut 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10
|
||||
// 1001 0x10 andlut 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10
|
||||
// 1010 0x10 andlut 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10
|
||||
// 1011 0x10 andlut 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10
|
||||
// 1100 0x10 andlut 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10
|
||||
// 1101 0x10 andlut 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10
|
||||
// 1110 0x10 andlut 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10
|
||||
// 1111 0x10 andlut 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10 0x10
|
||||
|
||||
// Decode one 16-byte block of Base64 characters into 12 output bytes.
// On success, advances *s by 16 and *o by 12, decrements *rounds, and
// returns 1. If any byte in the block is not a valid Base64 character,
// returns 0 with all pointers untouched, so the caller can fall back on
// the bytewise decoder for exact error reporting. Note that the 16-byte
// store writes 4 zero bytes past the 12 data bytes (dec_reshuffle zeroes
// the top lane); the caller guarantees room for that overspill.
static BASE64_FORCE_INLINE int
dec_loop_ssse3_inner (const uint8_t **s, uint8_t **o, size_t *rounds)
{
	// Validation table indexed by low nibble (see the table above):
	const __m128i lut_lo = _mm_setr_epi8(
		0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
		0x11, 0x11, 0x13, 0x1A, 0x1B, 0x1B, 0x1B, 0x1A);

	// Validation table indexed by high nibble:
	const __m128i lut_hi = _mm_setr_epi8(
		0x10, 0x10, 0x01, 0x02, 0x04, 0x08, 0x04, 0x08,
		0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10);

	// Per-range deltas mapping ASCII back to 6-bit values (see the
	// "perfect hash" comment above for the index construction):
	const __m128i lut_roll = _mm_setr_epi8(
		0, 16, 19, 4, -65, -65, -71, -71,
		0, 0, 0, 0, 0, 0, 0, 0);

	const __m128i mask_2F = _mm_set1_epi8(0x2F);

	// Load input:
	__m128i str = _mm_loadu_si128((__m128i *) *s);

	// Table lookups:
	const __m128i hi_nibbles = _mm_and_si128(_mm_srli_epi32(str, 4), mask_2F);
	const __m128i lo_nibbles = _mm_and_si128(str, mask_2F);
	const __m128i hi = _mm_shuffle_epi8(lut_hi, hi_nibbles);
	const __m128i lo = _mm_shuffle_epi8(lut_lo, lo_nibbles);

	// Check for invalid input: if any "and" values from lo and hi are not
	// zero, fall back on bytewise code to do error checking and reporting:
	if (_mm_movemask_epi8(_mm_cmpgt_epi8(_mm_and_si128(lo, hi), _mm_setzero_si128())) != 0) {
		return 0;
	}

	// '/' (0x2F) needs its own roll index; eq_2F is -1 for those bytes:
	const __m128i eq_2F = _mm_cmpeq_epi8(str, mask_2F);
	const __m128i roll = _mm_shuffle_epi8(lut_roll, _mm_add_epi8(eq_2F, hi_nibbles));

	// Now simply add the delta values to the input:
	str = _mm_add_epi8(str, roll);

	// Reshuffle the input to packed 12-byte output format:
	str = dec_reshuffle(str);

	// Store the output:
	_mm_storeu_si128((__m128i *) *o, str);

	*s += 16;
	*o += 12;
	*rounds -= 1;

	return 1;
}
|
||||
|
||||
// Bulk SSSE3 decode driver: consumes as many whole 16-byte input blocks as
// possible, 12 output bytes per block, and leaves the remainder (plus any
// invalid block) for the scalar tail code. Updates *s/*slen/*o/*olen in
// place.
static inline void
dec_loop_ssse3 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 24) {
		return;
	}

	// Process blocks of 16 bytes per round. Because 4 extra zero bytes are
	// written after the output, ensure that there will be at least 8 bytes
	// of input data left to cover the gap. (6 data bytes and up to two
	// end-of-string markers.)
	size_t rounds = (*slen - 8) / 16;

	// Optimistically account for all rounds; the successful inner calls
	// decrement `rounds`, and whatever remains is credited back below.
	*slen -= rounds * 16;	// 16 bytes consumed per round
	*olen += rounds * 12;	// 12 bytes produced per round

	// Run the rounds in unrolled batches of 8, 4, 2 and 1. The first
	// block containing an invalid character clears `ok` and stops all
	// further processing.
	int ok = 1;

	while (ok && rounds >= 8) {
		for (int i = 0; ok && i < 8; i++) {
			ok = dec_loop_ssse3_inner(s, o, &rounds);
		}
	}
	if (ok && rounds >= 4) {
		for (int i = 0; ok && i < 4; i++) {
			ok = dec_loop_ssse3_inner(s, o, &rounds);
		}
	}
	if (ok && rounds >= 2) {
		ok = dec_loop_ssse3_inner(s, o, &rounds)
		  && dec_loop_ssse3_inner(s, o, &rounds);
	}
	if (ok && rounds == 1) {
		dec_loop_ssse3_inner(s, o, &rounds);
	}

	// Adjust for any rounds that were skipped:
	*slen += rounds * 16;
	*olen -= rounds * 12;
}
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
// Pack sixteen decoded 6-bit values (one per byte, as produced by the
// lookup/roll step) into twelve contiguous output bytes in the low lanes of
// the result; the top four bytes are zeroed by the final shuffle's -1
// indices.
static BASE64_FORCE_INLINE __m128i
dec_reshuffle (const __m128i in)
{
	// in, bits, upper case are most significant bits, lower case are least significant bits
	// 00llllll 00kkkkLL 00jjKKKK 00JJJJJJ
	// 00iiiiii 00hhhhII 00ggHHHH 00GGGGGG
	// 00ffffff 00eeeeFF 00ddEEEE 00DDDDDD
	// 00cccccc 00bbbbCC 00aaBBBB 00AAAAAA

	// Multiply-add adjacent byte pairs to merge each pair of 6-bit fields:
	const __m128i merge_ab_and_bc = _mm_maddubs_epi16(in, _mm_set1_epi32(0x01400140));
	// 0000kkkk LLllllll 0000JJJJ JJjjKKKK
	// 0000hhhh IIiiiiii 0000GGGG GGggHHHH
	// 0000eeee FFffffff 0000DDDD DDddEEEE
	// 0000bbbb CCcccccc 0000AAAA AAaaBBBB

	// Multiply-add adjacent 16-bit pairs to merge into 24-bit triples:
	const __m128i out = _mm_madd_epi16(merge_ab_and_bc, _mm_set1_epi32(0x00011000));
	// 00000000 JJJJJJjj KKKKkkkk LLllllll
	// 00000000 GGGGGGgg HHHHhhhh IIiiiiii
	// 00000000 DDDDDDdd EEEEeeee FFffffff
	// 00000000 AAAAAAaa BBBBbbbb CCcccccc

	// Pack bytes together:
	return _mm_shuffle_epi8(out, _mm_setr_epi8(
		 2,  1,  0,
		 6,  5,  4,
		10,  9,  8,
		14, 13, 12,
		-1, -1, -1, -1));
	// 00000000 00000000 00000000 00000000
	// LLllllll KKKKkkkk JJJJJJjj IIiiiiii
	// HHHHhhhh GGGGGGgg FFffffff EEEEeeee
	// DDDDDDdd CCcccccc BBBBbbbb AAAAAAaa
}
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
static BASE64_FORCE_INLINE void
|
||||
enc_loop_ssse3_inner (const uint8_t **s, uint8_t **o)
|
||||
{
|
||||
// Load input:
|
||||
__m128i str = _mm_loadu_si128((__m128i *) *s);
|
||||
|
||||
// Reshuffle:
|
||||
str = enc_reshuffle(str);
|
||||
|
||||
// Translate reshuffled bytes to the Base64 alphabet:
|
||||
str = enc_translate(str);
|
||||
|
||||
// Store:
|
||||
_mm_storeu_si128((__m128i *) *o, str);
|
||||
|
||||
*s += 12;
|
||||
*o += 16;
|
||||
}
|
||||
|
||||
// Bulk SSSE3 encode driver: encodes as many whole 12-byte input blocks as
// possible (16 output bytes each), leaving the remainder for the scalar
// tail code. Updates *s/*slen/*o/*olen in place.
static inline void
enc_loop_ssse3 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	if (*slen < 16) {
		return;
	}

	// Process blocks of 12 bytes at a time. Because blocks are loaded 16
	// bytes at a time, ensure that there will be at least 4 remaining
	// bytes after the last round, so that the final read will not pass
	// beyond the bounds of the input buffer:
	size_t rounds = (*slen - 4) / 12;

	*slen -= rounds * 12;	// 12 bytes consumed per round
	*olen += rounds * 16;	// 16 bytes produced per round

	// Drain the rounds in unrolled batches of 8, then at most one batch
	// each of 4, 2 and 1 (after the 8x loop fewer than 8 remain, so each
	// smaller batch can run at most once).
	while (rounds >= 8) {
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		rounds -= 8;
	}
	if (rounds >= 4) {
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		rounds -= 4;
	}
	if (rounds >= 2) {
		enc_loop_ssse3_inner(s, o);
		enc_loop_ssse3_inner(s, o);
		rounds -= 2;
	}
	if (rounds == 1) {
		enc_loop_ssse3_inner(s, o);
	}
}
|
||||
|
|
@ -0,0 +1,268 @@
|
|||
// Apologies in advance for combining the preprocessor with inline assembly,
// two notoriously gnarly parts of C, but it was necessary to avoid a lot of
// code repetition. The preprocessor is used to template large sections of
// inline assembly that differ only in the registers used. If the code was
// written out by hand, it would become very large and hard to audit.

// Generate a block of inline assembly that loads register R0 from memory. The
// offset at which the register is loaded is set by the given round.
// Note: #ROUND stringizes the round number so the scaled displacement is
// computed by the assembler, not at runtime.
#define LOAD(R0, ROUND) \
	"lddqu ("#ROUND" * 12)(%[src]), %["R0"] \n\t"

// Generate a block of inline assembly that deinterleaves and shuffles register
// R0 using preloaded constants. Outputs in R0 and R1.
#define SHUF(R0, R1) \
	"pshufb %[lut0], %["R0"] \n\t" \
	"movdqa %["R0"], %["R1"] \n\t" \
	"pand %[msk0], %["R0"] \n\t" \
	"pand %[msk2], %["R1"] \n\t" \
	"pmulhuw %[msk1], %["R0"] \n\t" \
	"pmullw %[msk3], %["R1"] \n\t" \
	"por %["R1"], %["R0"] \n\t"

// Generate a block of inline assembly that takes R0 and R1 and translates
// their contents to the base64 alphabet, using preloaded constants.
#define TRAN(R0, R1, R2) \
	"movdqa %["R0"], %["R1"] \n\t" \
	"movdqa %["R0"], %["R2"] \n\t" \
	"psubusb %[n51], %["R1"] \n\t" \
	"pcmpgtb %[n25], %["R2"] \n\t" \
	"psubb %["R2"], %["R1"] \n\t" \
	"movdqa %[lut1], %["R2"] \n\t" \
	"pshufb %["R1"], %["R2"] \n\t" \
	"paddb %["R2"], %["R0"] \n\t"

// Generate a block of inline assembly that stores the given register R0 at an
// offset set by the given round.
#define STOR(R0, ROUND) \
	"movdqu %["R0"], ("#ROUND" * 16)(%[dst]) \n\t"

// Generate a block of inline assembly that generates a single self-contained
// encoder round: fetch the data, process it, and store the result. Then update
// the source and destination pointers.
#define ROUND() \
	LOAD("a", 0) \
	SHUF("a", "b") \
	TRAN("a", "b", "c") \
	STOR("a", 0) \
	"add $12, %[src] \n\t" \
	"add $16, %[dst] \n\t"

// Define a macro that initiates a three-way interleaved encoding round by
// preloading registers a, b and c from memory.
// The register graph shows which registers are in use during each step, and
// is a visual aid for choosing registers for that step. Symbol index:
//
// + indicates that a register is loaded by that step.
// | indicates that a register is in use and must not be touched.
// - indicates that a register is decommissioned by that step.
// x indicates that a register is used as a temporary by that step.
// V indicates that a register is an input or output to the macro.
//
#define ROUND_3_INIT()			/* a b c d e f */ \
	LOAD("a", 0)			/* +           */ \
	SHUF("a", "d")			/* |     +     */ \
	LOAD("b", 1)			/* | +   |     */ \
	TRAN("a", "d", "e")		/* | |   - x   */ \
	LOAD("c", 2)			/* V V V       */

// Define a macro that translates, shuffles and stores the input registers A, B
// and C, and preloads registers D, E and F for the next round.
// This macro can be arbitrarily daisy-chained by feeding output registers D, E
// and F back into the next round as input registers A, B and C. The macro
// carefully interleaves memory operations with data operations for optimal
// pipelined performance.

#define ROUND_3(ROUND, A,B,C,D,E,F)	/* A B C D E F */ \
	LOAD(D, (ROUND + 3))		/* V V V +     */ \
	SHUF(B, E)			/* | | | | +   */ \
	STOR(A, (ROUND + 0))		/* - | | | |   */ \
	TRAN(B, E, F)			/*   | | | - x */ \
	LOAD(E, (ROUND + 4))		/*   | | | +   */ \
	SHUF(C, A)			/* + | | | |   */ \
	STOR(B, (ROUND + 1))		/* | - | | |   */ \
	TRAN(C, A, F)			/* -   | | | x */ \
	LOAD(F, (ROUND + 5))		/*     | | | + */ \
	SHUF(D, A)			/* +   | | | | */ \
	STOR(C, (ROUND + 2))		/* |   - | | | */ \
	TRAN(D, A, B)			/* - x   V V V */

// Define a macro that terminates a ROUND_3 macro by taking pre-loaded
// registers D, E and F, and translating, shuffling and storing them.
#define ROUND_3_END(ROUND, A,B,C,D,E,F)	/* A B C D E F */ \
	SHUF(E, A)			/* +     V V V */ \
	STOR(D, (ROUND + 3))		/* |     - | | */ \
	TRAN(E, A, B)			/* - x     | | */ \
	SHUF(F, C)			/*     +   | | */ \
	STOR(E, (ROUND + 4))		/*     |   - | */ \
	TRAN(F, C, D)			/*     - x   | */ \
	STOR(F, (ROUND + 5))		/*           - */

// Define a type A round. Inputs are a, b, and c, outputs are d, e, and f.
#define ROUND_3_A(ROUND) \
	ROUND_3(ROUND, "a", "b", "c", "d", "e", "f")

// Define a type B round. Inputs and outputs are swapped with regard to type A.
#define ROUND_3_B(ROUND) \
	ROUND_3(ROUND, "d", "e", "f", "a", "b", "c")

// Terminating macro for a type A round.
#define ROUND_3_A_LAST(ROUND) \
	ROUND_3_A(ROUND) \
	ROUND_3_END(ROUND, "a", "b", "c", "d", "e", "f")

// Terminating macro for a type B round.
#define ROUND_3_B_LAST(ROUND) \
	ROUND_3_B(ROUND) \
	ROUND_3_END(ROUND, "d", "e", "f", "a", "b", "c")
|
||||
|
||||
// Suppress clang's warning that the literal string in the asm statement is
// overlong (longer than the ISO-mandated minimum size of 4095 bytes for C99
// compilers). It may be true, but the goal here is not C99 portability.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Woverlength-strings"

// Hand-optimized inline-assembly version of the SSSE3 bulk encoder. Same
// contract as the intrinsics version in enc_loop.c: encodes whole 12-byte
// input blocks into 16-byte output blocks and updates the pointers/lengths
// in place; the scalar tail handles the remainder. Only compiled when
// BASE64_SSSE3_USE_ASM is set (GCC/clang on 64-bit x86).
static inline void
enc_loop_ssse3 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
	// For a clearer explanation of the algorithm used by this function,
	// please refer to the plain (not inline assembly) implementation. This
	// function follows the same basic logic.

	if (*slen < 16) {
		return;
	}

	// Process blocks of 12 bytes at a time. Input is read in blocks of 16
	// bytes, so "reserve" four bytes from the input buffer to ensure that
	// we never read beyond the end of the input buffer.
	size_t rounds = (*slen - 4) / 12;

	*slen -= rounds * 12;	// 12 bytes consumed per round
	*olen += rounds * 16;	// 16 bytes produced per round

	// Number of times to go through the 36x loop.
	size_t loops = rounds / 36;

	// Number of rounds remaining after the 36x loop.
	rounds %= 36;

	// Lookup tables.
	const __m128i lut0 = _mm_set_epi8(
		10, 11, 9, 10, 7, 8, 6, 7, 4, 5, 3, 4, 1, 2, 0, 1);

	const __m128i lut1 = _mm_setr_epi8(
		65, 71, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -19, -16, 0, 0);

	// Temporary registers.
	__m128i a, b, c, d, e, f;

	__asm__ volatile (

		// If there are 36 rounds or more, enter a 36x unrolled loop of
		// interleaved encoding rounds. The rounds interleave memory
		// operations (load/store) with data operations (table lookups,
		// etc) to maximize pipeline throughput.
		" test %[loops], %[loops] \n\t"
		" jz 18f \n\t"
		" jmp 36f \n\t"
		" \n\t"
		".balign 64 \n\t"
		"36: " ROUND_3_INIT()
		" " ROUND_3_A( 0)
		" " ROUND_3_B( 3)
		" " ROUND_3_A( 6)
		" " ROUND_3_B( 9)
		" " ROUND_3_A(12)
		" " ROUND_3_B(15)
		" " ROUND_3_A(18)
		" " ROUND_3_B(21)
		" " ROUND_3_A(24)
		" " ROUND_3_B(27)
		" " ROUND_3_A_LAST(30)
		" add $(12 * 36), %[src] \n\t"
		" add $(16 * 36), %[dst] \n\t"
		" dec %[loops] \n\t"
		" jnz 36b \n\t"

		// Enter an 18x unrolled loop for rounds of 18 or more.
		"18: cmp $18, %[rounds] \n\t"
		" jl 9f \n\t"
		" " ROUND_3_INIT()
		" " ROUND_3_A(0)
		" " ROUND_3_B(3)
		" " ROUND_3_A(6)
		" " ROUND_3_B(9)
		" " ROUND_3_A_LAST(12)
		" sub $18, %[rounds] \n\t"
		" add $(12 * 18), %[src] \n\t"
		" add $(16 * 18), %[dst] \n\t"

		// Enter a 9x unrolled loop for rounds of 9 or more.
		"9: cmp $9, %[rounds] \n\t"
		" jl 6f \n\t"
		" " ROUND_3_INIT()
		" " ROUND_3_A(0)
		" " ROUND_3_B_LAST(3)
		" sub $9, %[rounds] \n\t"
		" add $(12 * 9), %[src] \n\t"
		" add $(16 * 9), %[dst] \n\t"

		// Enter a 6x unrolled loop for rounds of 6 or more.
		"6: cmp $6, %[rounds] \n\t"
		" jl 55f \n\t"
		" " ROUND_3_INIT()
		" " ROUND_3_A_LAST(0)
		" sub $6, %[rounds] \n\t"
		" add $(12 * 6), %[src] \n\t"
		" add $(16 * 6), %[dst] \n\t"

		// Dispatch the remaining rounds 0..5.
		"55: cmp $3, %[rounds] \n\t"
		" jg 45f \n\t"
		" je 3f \n\t"
		" cmp $1, %[rounds] \n\t"
		" jg 2f \n\t"
		" je 1f \n\t"
		" jmp 0f \n\t"

		"45: cmp $4, %[rounds] \n\t"
		" je 4f \n\t"

		// Block of non-interlaced encoding rounds, which can each
		// individually be jumped to. Rounds fall through to the next.
		"5: " ROUND()
		"4: " ROUND()
		"3: " ROUND()
		"2: " ROUND()
		"1: " ROUND()
		"0: \n\t"

		// Outputs (modified).
		: [rounds] "+r" (rounds),
		  [loops] "+r" (loops),
		  [src] "+r" (*s),
		  [dst] "+r" (*o),
		  [a] "=&x" (a),
		  [b] "=&x" (b),
		  [c] "=&x" (c),
		  [d] "=&x" (d),
		  [e] "=&x" (e),
		  [f] "=&x" (f)

		// Inputs (not modified).
		: [lut0] "x" (lut0),
		  [lut1] "x" (lut1),
		  [msk0] "x" (_mm_set1_epi32(0x0FC0FC00)),
		  [msk1] "x" (_mm_set1_epi32(0x04000040)),
		  [msk2] "x" (_mm_set1_epi32(0x003F03F0)),
		  [msk3] "x" (_mm_set1_epi32(0x01000010)),
		  [n51] "x" (_mm_set1_epi8(51)),
		  [n25] "x" (_mm_set1_epi8(25))

		// Clobbers.
		: "cc", "memory"
	);
}

#pragma GCC diagnostic pop
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
// Spread the low 12 input bytes into sixteen bytes, each holding one 6-bit
// Base64 value in its low bits (with the two high bits zero), ready for
// enc_translate. The top 4 input bytes are ignored.
static BASE64_FORCE_INLINE __m128i
enc_reshuffle (__m128i in)
{
	// Input, bytes MSB to LSB:
	// 0 0 0 0 l k j i h g f e d c b a

	in = _mm_shuffle_epi8(in, _mm_set_epi8(
		10, 11, 9, 10,
		7, 8, 6, 7,
		4, 5, 3, 4,
		1, 2, 0, 1));
	// in, bytes MSB to LSB:
	// k l j k
	// h i g h
	// e f d e
	// b c a b

	const __m128i t0 = _mm_and_si128(in, _mm_set1_epi32(0x0FC0FC00));
	// bits, upper case are most significant bits, lower case are least significant bits
	// 0000kkkk LL000000 JJJJJJ00 00000000
	// 0000hhhh II000000 GGGGGG00 00000000
	// 0000eeee FF000000 DDDDDD00 00000000
	// 0000bbbb CC000000 AAAAAA00 00000000

	const __m128i t1 = _mm_mulhi_epu16(t0, _mm_set1_epi32(0x04000040));
	// 00000000 00kkkkLL 00000000 00JJJJJJ
	// 00000000 00hhhhII 00000000 00GGGGGG
	// 00000000 00eeeeFF 00000000 00DDDDDD
	// 00000000 00bbbbCC 00000000 00AAAAAA

	const __m128i t2 = _mm_and_si128(in, _mm_set1_epi32(0x003F03F0));
	// 00000000 00llllll 000000jj KKKK0000
	// 00000000 00iiiiii 000000gg HHHH0000
	// 00000000 00ffffff 000000dd EEEE0000
	// 00000000 00cccccc 000000aa BBBB0000

	const __m128i t3 = _mm_mullo_epi16(t2, _mm_set1_epi32(0x01000010));
	// 00llllll 00000000 00jjKKKK 00000000
	// 00iiiiii 00000000 00ggHHHH 00000000
	// 00ffffff 00000000 00ddEEEE 00000000
	// 00cccccc 00000000 00aaBBBB 00000000

	return _mm_or_si128(t1, t3);
	// 00llllll 00kkkkLL 00jjKKKK 00JJJJJJ
	// 00iiiiii 00hhhhII 00ggHHHH 00GGGGGG
	// 00ffffff 00eeeeFF 00ddEEEE 00DDDDDD
	// 00cccccc 00bbbbCC 00aaBBBB 00AAAAAA
}
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
static BASE64_FORCE_INLINE __m128i
|
||||
enc_translate (const __m128i in)
|
||||
{
|
||||
// A lookup table containing the absolute offsets for all ranges:
|
||||
const __m128i lut = _mm_setr_epi8(
|
||||
65, 71, -4, -4,
|
||||
-4, -4, -4, -4,
|
||||
-4, -4, -4, -4,
|
||||
-19, -16, 0, 0
|
||||
);
|
||||
|
||||
// Translate values 0..63 to the Base64 alphabet. There are five sets:
|
||||
// # From To Abs Index Characters
|
||||
// 0 [0..25] [65..90] +65 0 ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
// 1 [26..51] [97..122] +71 1 abcdefghijklmnopqrstuvwxyz
|
||||
// 2 [52..61] [48..57] -4 [2..11] 0123456789
|
||||
// 3 [62] [43] -19 12 +
|
||||
// 4 [63] [47] -16 13 /
|
||||
|
||||
// Create LUT indices from the input. The index for range #0 is right,
|
||||
// others are 1 less than expected:
|
||||
__m128i indices = _mm_subs_epu8(in, _mm_set1_epi8(51));
|
||||
|
||||
// mask is 0xFF (-1) for range #[1..4] and 0x00 for range #0:
|
||||
__m128i mask = _mm_cmpgt_epi8(in, _mm_set1_epi8(25));
|
||||
|
||||
// Subtract -1, so add 1 to indices for range #[1..4]. All indices are
|
||||
// now correct:
|
||||
indices = _mm_sub_epi8(indices, mask);
|
||||
|
||||
// Add offsets to input values:
|
||||
return _mm_add_epi8(in, _mm_shuffle_epi8(lut, indices));
|
||||
}
|
||||
|
|
@ -0,0 +1,314 @@
|
|||
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "codecs.h"
|
||||
#include "config.h"
|
||||
#include "env.h"
|
||||
|
||||
// BASE64_X86 is defined when compiling for any x86 target (GCC/clang or MSVC
// predefined macros); BASE64_X86_SIMD additionally requires at least one x86
// SIMD codec to be enabled at build time, and gates the cpuid-based runtime
// dispatch below.
#if (__x86_64__ || __i386__ || _M_X86 || _M_X64)
#define BASE64_X86
#if (HAVE_SSSE3 || HAVE_SSE41 || HAVE_SSE42 || HAVE_AVX || HAVE_AVX2 || HAVE_AVX512)
#define BASE64_X86_SIMD
#endif
#endif
|
||||
|
||||
#ifdef BASE64_X86
#ifdef _MSC_VER
// MSVC has no <cpuid.h>; emulate GCC's __cpuid/__cpuid_count macros on top of
// the intrinsic __cpuidex.
#include <intrin.h>
#define __cpuid_count(__level, __count, __eax, __ebx, __ecx, __edx) \
	{ \
		int info[4]; \
		__cpuidex(info, __level, __count); \
		__eax = info[0]; \
		__ebx = info[1]; \
		__ecx = info[2]; \
		__edx = info[3]; \
	}
#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \
	__cpuid_count(__level, 0, __eax, __ebx, __ecx, __edx)
#else
#include <cpuid.h>
#if HAVE_AVX512 || HAVE_AVX2 || HAVE_AVX
#if ((__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 2) || (__clang_major__ >= 3))
// Read an extended control register (XGETBV); used to check that the OS
// saves/restores the wide vector registers on context switch.
static inline uint64_t _xgetbv (uint32_t index)
{
	uint32_t eax, edx;
	__asm__ __volatile__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(index));
	return ((uint64_t)edx << 32) | eax;
}
#else
#error "Platform not supported"
#endif
#endif
#endif

// CPUID feature bits; only defined here when <cpuid.h> did not already
// provide them.
#ifndef bit_AVX512vl
// Use an unsigned literal: (1 << 31) overflows a signed int, which is
// undefined behavior in C.
#define bit_AVX512vl (1u << 31)
#endif
#ifndef bit_AVX512vbmi
#define bit_AVX512vbmi (1 << 1)
#endif
#ifndef bit_AVX2
#define bit_AVX2 (1 << 5)
#endif
#ifndef bit_SSSE3
#define bit_SSSE3 (1 << 9)
#endif
#ifndef bit_SSE41
#define bit_SSE41 (1 << 19)
#endif
#ifndef bit_SSE42
#define bit_SSE42 (1 << 20)
#endif
#ifndef bit_AVX
#define bit_AVX (1 << 28)
#endif

// CPUID leaf 1, ECX: OS uses XSAVE/XRSTOR.
#define bit_XSAVE_XRSTORE (1 << 27)

#ifndef _XCR_XFEATURE_ENABLED_MASK
#define _XCR_XFEATURE_ENABLED_MASK 0
#endif

// XCR0 state-component bits:
#define bit_XMM (1 << 1)
#define bit_YMM (1 << 2)
#define bit_OPMASK (1 << 5)
#define bit_ZMM (1 << 6)
#define bit_HIGH_ZMM (1 << 7)

// OS saves XMM and YMM state (required for AVX/AVX2):
#define _XCR_XMM_AND_YMM_STATE_ENABLED_BY_OS (bit_XMM | bit_YMM)

// OS additionally saves opmask and ZMM state (required for AVX-512):
#define _AVX_512_ENABLED_BY_OS (bit_XMM | bit_YMM | bit_OPMASK | bit_ZMM | bit_HIGH_ZMM)

#endif
|
||||
|
||||
// Function declarations: declare the per-architecture encode/decode entry
// points (defined in the arch-specific codec.c files; unsupported arches are
// compiled as stubs). Parameter lists come from the BASE64_ENC_PARAMS /
// BASE64_DEC_PARAMS macros in codecs.h.
#define BASE64_CODEC_FUNCS(arch)	\
	extern void base64_stream_encode_ ## arch BASE64_ENC_PARAMS; \
	extern int base64_stream_decode_ ## arch BASE64_DEC_PARAMS;

BASE64_CODEC_FUNCS(avx512)
BASE64_CODEC_FUNCS(avx2)
BASE64_CODEC_FUNCS(neon32)
BASE64_CODEC_FUNCS(neon64)
BASE64_CODEC_FUNCS(plain)
BASE64_CODEC_FUNCS(ssse3)
BASE64_CODEC_FUNCS(sse41)
BASE64_CODEC_FUNCS(sse42)
BASE64_CODEC_FUNCS(avx)
|
||||
|
||||
static bool
|
||||
codec_choose_forced (struct codec *codec, int flags)
|
||||
{
|
||||
// If the user wants to use a certain codec,
|
||||
// always allow it, even if the codec is a no-op.
|
||||
// For testing purposes.
|
||||
|
||||
if (!(flags & 0xFFFF)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (flags & BASE64_FORCE_AVX2) {
|
||||
codec->enc = base64_stream_encode_avx2;
|
||||
codec->dec = base64_stream_decode_avx2;
|
||||
return true;
|
||||
}
|
||||
if (flags & BASE64_FORCE_NEON32) {
|
||||
codec->enc = base64_stream_encode_neon32;
|
||||
codec->dec = base64_stream_decode_neon32;
|
||||
return true;
|
||||
}
|
||||
if (flags & BASE64_FORCE_NEON64) {
|
||||
codec->enc = base64_stream_encode_neon64;
|
||||
codec->dec = base64_stream_decode_neon64;
|
||||
return true;
|
||||
}
|
||||
if (flags & BASE64_FORCE_PLAIN) {
|
||||
codec->enc = base64_stream_encode_plain;
|
||||
codec->dec = base64_stream_decode_plain;
|
||||
return true;
|
||||
}
|
||||
if (flags & BASE64_FORCE_SSSE3) {
|
||||
codec->enc = base64_stream_encode_ssse3;
|
||||
codec->dec = base64_stream_decode_ssse3;
|
||||
return true;
|
||||
}
|
||||
if (flags & BASE64_FORCE_SSE41) {
|
||||
codec->enc = base64_stream_encode_sse41;
|
||||
codec->dec = base64_stream_decode_sse41;
|
||||
return true;
|
||||
}
|
||||
if (flags & BASE64_FORCE_SSE42) {
|
||||
codec->enc = base64_stream_encode_sse42;
|
||||
codec->dec = base64_stream_decode_sse42;
|
||||
return true;
|
||||
}
|
||||
if (flags & BASE64_FORCE_AVX) {
|
||||
codec->enc = base64_stream_encode_avx;
|
||||
codec->dec = base64_stream_decode_avx;
|
||||
return true;
|
||||
}
|
||||
if (flags & BASE64_FORCE_AVX512) {
|
||||
codec->enc = base64_stream_encode_avx512;
|
||||
codec->dec = base64_stream_decode_avx512;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Select the NEON codec on ARM targets. Returns true when a NEON codec was
// installed into `codec`, false otherwise (caller falls back to plain).
static bool
codec_choose_arm (struct codec *codec)
{
#if HAVE_NEON64 || ((defined(__ARM_NEON__) || defined(__ARM_NEON)) && HAVE_NEON32)

	// Unfortunately there is no portable way to check for NEON
	// support at runtime from userland in the same way that x86
	// has cpuid, so just stick to the compile-time configuration:

#if HAVE_NEON64
	codec->enc = base64_stream_encode_neon64;
	codec->dec = base64_stream_decode_neon64;
#else
	codec->enc = base64_stream_encode_neon32;
	codec->dec = base64_stream_decode_neon32;
#endif

	return true;

#else
	(void)codec;
	return false;
#endif
}
|
||||
|
||||
// Select the best available x86 SIMD codec at runtime, probing CPUID (and
// XGETBV for the AVX family) from best to worst: AVX-512 > AVX2 > AVX >
// SSE4.2 > SSE4.1 > SSSE3. Returns true when a codec was installed into
// `codec`, false when no SIMD support was found (caller falls back to plain).
static bool
codec_choose_x86 (struct codec *codec)
{
#ifdef BASE64_X86_SIMD

	unsigned int eax, ebx = 0, ecx = 0, edx;
	unsigned int max_level;

#ifdef _MSC_VER
	int info[4];
	__cpuidex(info, 0, 0);
	max_level = info[0];
#else
	max_level = __get_cpuid_max(0, NULL);
#endif

#if HAVE_AVX512 || HAVE_AVX2 || HAVE_AVX
	// Check for AVX/AVX2/AVX512 support:
	// Checking for AVX requires 3 things:
	// 1) CPUID indicates that the OS uses XSAVE and XRSTORE instructions
	//    (allowing saving YMM registers on context switch)
	// 2) CPUID indicates support for AVX
	// 3) XGETBV indicates the AVX registers will be saved and restored on
	//    context switch
	//
	// Note that XGETBV is only available on 686 or later CPUs, so the
	// instruction needs to be conditionally run.
	if (max_level >= 1) {
		__cpuid_count(1, 0, eax, ebx, ecx, edx);
		if (ecx & bit_XSAVE_XRSTORE) {
			uint64_t xcr_mask;
			xcr_mask = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
			if ((xcr_mask & _XCR_XMM_AND_YMM_STATE_ENABLED_BY_OS) == _XCR_XMM_AND_YMM_STATE_ENABLED_BY_OS) { // check multiple bits at once
#if HAVE_AVX512
				if (max_level >= 7 && ((xcr_mask & _AVX_512_ENABLED_BY_OS) == _AVX_512_ENABLED_BY_OS)) {
					__cpuid_count(7, 0, eax, ebx, ecx, edx);
					if ((ebx & bit_AVX512vl) && (ecx & bit_AVX512vbmi)) {
						codec->enc = base64_stream_encode_avx512;
						codec->dec = base64_stream_decode_avx512;
						return true;
					}
				}
#endif
#if HAVE_AVX2
				if (max_level >= 7) {
					__cpuid_count(7, 0, eax, ebx, ecx, edx);
					if (ebx & bit_AVX2) {
						codec->enc = base64_stream_encode_avx2;
						codec->dec = base64_stream_decode_avx2;
						return true;
					}
				}
#endif
#if HAVE_AVX
				// Re-run leaf 1: ebx/ecx/edx may have been
				// clobbered by the leaf-7 queries above.
				__cpuid_count(1, 0, eax, ebx, ecx, edx);
				if (ecx & bit_AVX) {
					codec->enc = base64_stream_encode_avx;
					codec->dec = base64_stream_decode_avx;
					return true;
				}
#endif
			}
		}
	}
#endif

#if HAVE_SSE42
	// Check for SSE42 support:
	if (max_level >= 1) {
		__cpuid(1, eax, ebx, ecx, edx);
		if (ecx & bit_SSE42) {
			codec->enc = base64_stream_encode_sse42;
			codec->dec = base64_stream_decode_sse42;
			return true;
		}
	}
#endif

#if HAVE_SSE41
	// Check for SSE41 support:
	if (max_level >= 1) {
		__cpuid(1, eax, ebx, ecx, edx);
		if (ecx & bit_SSE41) {
			codec->enc = base64_stream_encode_sse41;
			codec->dec = base64_stream_decode_sse41;
			return true;
		}
	}
#endif

#if HAVE_SSSE3
	// Check for SSSE3 support:
	if (max_level >= 1) {
		__cpuid(1, eax, ebx, ecx, edx);
		if (ecx & bit_SSSE3) {
			codec->enc = base64_stream_encode_ssse3;
			codec->dec = base64_stream_decode_ssse3;
			return true;
		}
	}
#endif

#else
	(void)codec;
#endif

	return false;
}
|
||||
|
||||
void
|
||||
codec_choose (struct codec *codec, int flags)
|
||||
{
|
||||
// User forced a codec:
|
||||
if (codec_choose_forced(codec, flags)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Runtime feature detection:
|
||||
if (codec_choose_arm(codec)) {
|
||||
return;
|
||||
}
|
||||
if (codec_choose_x86(codec)) {
|
||||
return;
|
||||
}
|
||||
codec->enc = base64_stream_encode_plain;
|
||||
codec->dec = base64_stream_decode_plain;
|
||||
}
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
#include "libbase64.h"

// Internal codec plumbing: shared parameter lists, no-op stubs for codecs
// that are compiled out, and the `struct codec` function-pointer pair.

// Function parameters for encoding functions:
#define BASE64_ENC_PARAMS \
	( struct base64_state *state \
	, const char *src \
	, size_t srclen \
	, char *out \
	, size_t *outlen \
	)

// Function parameters for decoding functions:
#define BASE64_DEC_PARAMS \
	( struct base64_state *state \
	, const char *src \
	, size_t srclen \
	, char *out \
	, size_t *outlen \
	)

// This function is used as a stub when a certain encoder is not compiled in.
// It discards the inputs and returns zero output bytes.
static inline void
base64_enc_stub BASE64_ENC_PARAMS
{
	(void) state;
	(void) src;
	(void) srclen;
	(void) out;

	*outlen = 0;
}

// This function is used as a stub when a certain decoder is not compiled in.
// It discards the inputs and returns an invalid decoding result.
static inline int
base64_dec_stub BASE64_DEC_PARAMS
{
	(void) state;
	(void) src;
	(void) srclen;
	(void) out;
	(void) outlen;

	// -1 is the documented "codec not available in this build" result.
	return -1;
}

// Pointer types for a codec's encode and decode entry points:
typedef void (* base64_enc_fn) BASE64_ENC_PARAMS;
typedef int (* base64_dec_fn) BASE64_DEC_PARAMS;

// A codec is a matched pair of encode and decode functions:
struct codec
{
	base64_enc_fn enc;
	base64_dec_fn dec;
};

// Fill `codec` with the best implementation for this CPU and `flags`:
extern void codec_choose (struct codec *, int flags);
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
#ifndef BASE64_CONFIG_H
#define BASE64_CONFIG_H

// Compile-time codec availability. This vendored copy hard-codes the
// configuration per target platform instead of probing from a build system.

// 64-bit x86, excluding macOS: enable the SSE*/AVX* codecs (the actual
// choice is made at runtime via CPUID). AVX-512 is compiled out here.
// NOTE(review): the __APPLE__ exclusion presumably works around a macOS
// build/toolchain issue — confirm before changing.
#if !defined(__APPLE__) && ((defined(__x86_64__) && defined(__LP64__)) || defined(_M_X64))
#define HAVE_SSSE3 1
#define HAVE_SSE41 1
#define HAVE_SSE42 1
#define HAVE_AVX 1
#define HAVE_AVX2 1
#define HAVE_AVX512 0
// Apple Silicon: NEON is always present on AArch64.
#elif (defined(__APPLE__) && defined(__aarch64__))
#define HAVE_NEON64 1
// WebAssembly with 128-bit SIMD: which NEON codec variant builds depends on
// the Emscripten major version.
#elif (defined(__wasm__) && defined(__wasm_simd128__))
#include "emscripten/version.h"
#if __EMSCRIPTEN_major__ == 3
#define HAVE_NEON32 1
#elif __EMSCRIPTEN_major__ > 3
#define HAVE_NEON64 1
#endif
#endif

#endif // BASE64_CONFIG_H
|
||||
84
venv/lib/python3.11/site-packages/mypyc/lib-rt/base64/env.h
Normal file
84
venv/lib/python3.11/site-packages/mypyc/lib-rt/base64/env.h
Normal file
|
|
@ -0,0 +1,84 @@
|
|||
#ifndef BASE64_ENV_H
#define BASE64_ENV_H

#include <stdint.h>

// This header file contains macro definitions that describe certain aspects of
// the compile-time environment. Compatibility and portability macros go here.

// Define machine endianness. This is for GCC:
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
# define BASE64_LITTLE_ENDIAN 1
#else
# define BASE64_LITTLE_ENDIAN 0
#endif

// This is for Clang:
// (redefining a macro to an identical replacement list is well-formed, so
// these refinements are safe when both macro families are defined)
#ifdef __LITTLE_ENDIAN__
# define BASE64_LITTLE_ENDIAN 1
#endif

#ifdef __BIG_ENDIAN__
# define BASE64_LITTLE_ENDIAN 0
#endif

// MSVC++ needs intrin.h for _byteswap_uint64 (issue #68):
#if BASE64_LITTLE_ENDIAN && defined(_MSC_VER)
# include <intrin.h>
#endif

// Endian conversion functions:
#if BASE64_LITTLE_ENDIAN
# ifdef _MSC_VER
// Microsoft Visual C++:
# define BASE64_HTOBE32(x) _byteswap_ulong(x)
# define BASE64_HTOBE64(x) _byteswap_uint64(x)
# else
// GCC and Clang:
# define BASE64_HTOBE32(x) __builtin_bswap32(x)
# define BASE64_HTOBE64(x) __builtin_bswap64(x)
# endif
#else
// No conversion needed:
# define BASE64_HTOBE32(x) (x)
# define BASE64_HTOBE64(x) (x)
#endif

// Detect word size:
#if defined (__x86_64__)
// This also works for the x32 ABI, which has a 64-bit word size.
# define BASE64_WORDSIZE 64
#elif SIZE_MAX == UINT32_MAX
# define BASE64_WORDSIZE 32
#elif SIZE_MAX == UINT64_MAX
# define BASE64_WORDSIZE 64
#else
# error BASE64_WORDSIZE_NOT_DEFINED
#endif

// End-of-file definitions.
// Almost end-of-file when waiting for the last '=' character:
#define BASE64_AEOF 1
// End-of-file when stream end has been reached or invalid input provided:
#define BASE64_EOF 2

// GCC 7 defaults to issuing a warning for fallthrough in switch statements,
// unless the fallthrough cases are marked with an attribute. As we use
// fallthrough deliberately, define an alias for the attribute:
#if __GNUC__ >= 7
# define BASE64_FALLTHROUGH __attribute__((fallthrough));
#else
# define BASE64_FALLTHROUGH
#endif

// Declare macros to ensure that functions that are intended to be inlined, are
// actually inlined, even when no optimization is applied. A lot of inner loop
// code is factored into separate functions for reasons of readability, but
// that code should always be inlined (and optimized) in the main loop.
#ifdef _MSC_VER
# define BASE64_FORCE_INLINE __forceinline
#else
# define BASE64_FORCE_INLINE inline __attribute__((always_inline))
#endif

#endif // BASE64_ENV_H
|
||||
164
venv/lib/python3.11/site-packages/mypyc/lib-rt/base64/lib.c
Normal file
164
venv/lib/python3.11/site-packages/mypyc/lib-rt/base64/lib.c
Normal file
|
|
@ -0,0 +1,164 @@
|
|||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#ifdef _OPENMP
|
||||
#include <omp.h>
|
||||
#endif
|
||||
|
||||
#include "libbase64.h"
|
||||
#include "tables/tables.h"
|
||||
#include "codecs.h"
|
||||
#include "env.h"
|
||||
|
||||
// These static function pointers are initialized once when the library is
|
||||
// first used, and remain in use for the remaining lifetime of the program.
|
||||
// The idea being that CPU features don't change at runtime.
|
||||
static struct codec codec = { NULL, NULL };
|
||||
|
||||
void
|
||||
base64_stream_encode_init (struct base64_state *state, int flags)
|
||||
{
|
||||
// If any of the codec flags are set, redo choice:
|
||||
if (codec.enc == NULL || flags & 0xFF) {
|
||||
codec_choose(&codec, flags);
|
||||
}
|
||||
state->eof = 0;
|
||||
state->bytes = 0;
|
||||
state->carry = 0;
|
||||
state->flags = flags;
|
||||
}
|
||||
|
||||
// Streaming encode: feed one chunk of input through the runtime-selected
// codec. `*outlen` receives the number of bytes written to `out`; partial
// 3-byte groups are carried in `state` for the next call.
void
base64_stream_encode
	( struct base64_state *state
	, const char *src
	, size_t srclen
	, char *out
	, size_t *outlen
	)
{
	codec.enc(state, src, srclen, out, outlen);
}
|
||||
|
||||
void
|
||||
base64_stream_encode_final
|
||||
( struct base64_state *state
|
||||
, char *out
|
||||
, size_t *outlen
|
||||
)
|
||||
{
|
||||
uint8_t *o = (uint8_t *)out;
|
||||
|
||||
if (state->bytes == 1) {
|
||||
*o++ = base64_table_enc_6bit[state->carry];
|
||||
*o++ = '=';
|
||||
*o++ = '=';
|
||||
*outlen = 3;
|
||||
return;
|
||||
}
|
||||
if (state->bytes == 2) {
|
||||
*o++ = base64_table_enc_6bit[state->carry];
|
||||
*o++ = '=';
|
||||
*outlen = 2;
|
||||
return;
|
||||
}
|
||||
*outlen = 0;
|
||||
}
|
||||
|
||||
// Initialize the streaming-decoder state and (re)select the codec.
void
base64_stream_decode_init (struct base64_state *state, int flags)
{
	// If any of the codec flags are set, redo choice:
	// (0xFFFF covers all BASE64_FORCE_* bits, including AVX512 at 1 << 8)
	if (codec.dec == NULL || flags & 0xFFFF) {
		codec_choose(&codec, flags);
	}
	state->eof = 0;
	state->bytes = 0;
	state->carry = 0;
	state->flags = flags;
}
|
||||
|
||||
// Streaming decode: feed one chunk through the runtime-selected codec.
// Returns 1 on success, 0 on a decoding error, -1 when the chosen codec is
// not compiled into this build. *outlen receives the bytes written.
int
base64_stream_decode
	( struct base64_state *state
	, const char *src
	, size_t srclen
	, char *out
	, size_t *outlen
	)
{
	return codec.dec(state, src, srclen, out, outlen);
}
|
||||
|
||||
#ifdef _OPENMP
|
||||
|
||||
// Due to the overhead of initializing OpenMP and creating a team of
|
||||
// threads, we require the data length to be larger than a threshold:
|
||||
#define OMP_THRESHOLD 20000
|
||||
|
||||
// Conditionally include OpenMP-accelerated codec implementations:
|
||||
#include "lib_openmp.c"
|
||||
#endif
|
||||
|
||||
// One-shot convenience wrapper: encode a complete buffer in a single call.
// The caller provides `out` with room for at least 4/3 * srclen bytes plus
// margin; the number of bytes actually written is stored in *outlen.
void
base64_encode
	( const char *src
	, size_t srclen
	, char *out
	, size_t *outlen
	, int flags
	)
{
	size_t s;
	size_t t;
	struct base64_state state;

#ifdef _OPENMP
	// Large inputs amortize OpenMP startup cost; see OMP_THRESHOLD.
	if (srclen >= OMP_THRESHOLD) {
		base64_encode_openmp(src, srclen, out, outlen, flags);
		return;
	}
#endif

	// Init the stream reader:
	base64_stream_encode_init(&state, flags);

	// Feed the whole string to the stream reader:
	base64_stream_encode(&state, src, srclen, out, &s);

	// Finalize the stream by writing trailer if any:
	base64_stream_encode_final(&state, out + s, &t);

	// Final output length is stream length plus tail:
	*outlen = s + t;
}
|
||||
|
||||
// One-shot convenience wrapper: decode a complete buffer in a single call.
// Returns nonzero on success and 0 on error; input that ends in the middle
// of a 4-character group (state.bytes != 0) is treated as an error.
int
base64_decode
	( const char *src
	, size_t srclen
	, char *out
	, size_t *outlen
	, int flags
	)
{
	int ret;
	struct base64_state state;

#ifdef _OPENMP
	// Large inputs amortize OpenMP startup cost; see OMP_THRESHOLD.
	if (srclen >= OMP_THRESHOLD) {
		return base64_decode_openmp(src, srclen, out, outlen, flags);
	}
#endif

	// Init the stream reader:
	base64_stream_decode_init(&state, flags);

	// Feed the whole string to the stream reader:
	ret = base64_stream_decode(&state, src, srclen, out, outlen);

	// If when decoding a whole block, we're still waiting for input then fail:
	if (ret && (state.bytes == 0)) {
		return ret;
	}
	return 0;
}
|
||||
|
|
@ -0,0 +1,149 @@
|
|||
// This code makes some assumptions on the implementation of
|
||||
// base64_stream_encode_init(), base64_stream_encode() and base64_stream_decode().
|
||||
// Basically these assumptions boil down to that when breaking the src into
|
||||
// parts, out parts can be written without side effects.
|
||||
// This is met when:
|
||||
// 1) base64_stream_encode() and base64_stream_decode() don't use globals;
|
||||
// 2) the shared variables src and out are not read or written outside of the
|
||||
// bounds of their parts, i.e. when base64_stream_encode() reads a multiple
|
||||
// of 3 bytes, it must write no more then a multiple of 4 bytes, not even
|
||||
// temporarily;
|
||||
// 3) the state flag can be discarded after base64_stream_encode() and
|
||||
// base64_stream_decode() on the parts.
|
||||
|
||||
// Parallel one-shot encoder. Splits the input into num_threads chunks that
// are each a multiple of 3 bytes — so every chunk maps to an independent,
// fixed-position 4/3-sized output slice — encodes the chunks in parallel,
// then serially encodes the < 3 * num_threads leftover bytes and the tail.
static inline void
base64_encode_openmp
	( const char *src
	, size_t srclen
	, char *out
	, size_t *outlen
	, int flags
	)
{
	size_t s;
	size_t t;
	size_t sum = 0, len, last_len;
	struct base64_state state, initial_state;
	int num_threads, i;

	// Request a number of threads but not necessarily get them:
	#pragma omp parallel
	{
		// Get the number of threads used from one thread only,
		// as num_threads is a shared var:
		#pragma omp single
		{
			num_threads = omp_get_num_threads();

			// Split the input string into num_threads parts, each
			// part a multiple of 3 bytes. The remaining bytes will
			// be done later:
			len = srclen / (num_threads * 3);
			len *= 3;
			last_len = srclen - num_threads * len;

			// Init the stream reader:
			base64_stream_encode_init(&state, flags);
			initial_state = state;
		}

		// Single has an implicit barrier for all threads to wait here
		// for the above to complete:
		#pragma omp for firstprivate(state) private(s) reduction(+:sum) schedule(static,1)
		for (i = 0; i < num_threads; i++)
		{
			// Feed each part of the string to the stream reader:
			base64_stream_encode(&state, src + i * len, len, out + i * len * 4 / 3, &s);
			sum += s;
		}
	}

	// As encoding should never fail and we encode an exact multiple
	// of 3 bytes, we can discard state:
	state = initial_state;

	// Encode the remaining bytes:
	base64_stream_encode(&state, src + num_threads * len, last_len, out + num_threads * len * 4 / 3, &s);

	// Finalize the stream by writing trailer if any:
	base64_stream_encode_final(&state, out + num_threads * len * 4 / 3 + s, &t);

	// Final output length is stream length plus tail:
	sum += s + t;
	*outlen = sum;
}
|
||||
|
||||
// Parallel one-shot decoder. Splits the input into num_threads chunks that
// are each a multiple of 4 characters (each chunk decodes to a fixed 3/4
// output slice), decodes them in parallel, then serially decodes whatever
// remains. Per-thread results are summed into `result` so both "all threads
// reported codec-missing" and "some thread hit a decode error" can be
// distinguished afterward.
static inline int
base64_decode_openmp
	( const char *src
	, size_t srclen
	, char *out
	, size_t *outlen
	, int flags
	)
{
	int num_threads, result = 0, i;
	size_t sum = 0, len, last_len, s;
	struct base64_state state, initial_state;

	// Request a number of threads but not necessarily get them:
	#pragma omp parallel
	{
		// Get the number of threads used from one thread only,
		// as num_threads is a shared var:
		#pragma omp single
		{
			num_threads = omp_get_num_threads();

			// Split the input string into num_threads parts, each
			// part a multiple of 4 bytes. The remaining bytes will
			// be done later:
			len = srclen / (num_threads * 4);
			len *= 4;
			last_len = srclen - num_threads * len;

			// Init the stream reader:
			base64_stream_decode_init(&state, flags);

			initial_state = state;
		}

		// Single has an implicit barrier to wait here for the above to
		// complete:
		#pragma omp for firstprivate(state) private(s) reduction(+:sum, result) schedule(static,1)
		for (i = 0; i < num_threads; i++)
		{
			int this_result;

			// Feed each part of the string to the stream reader:
			this_result = base64_stream_decode(&state, src + i * len, len, out + i * len * 3 / 4, &s);
			sum += s;
			result += this_result;
		}
	}

	// If `result' equals `-num_threads', then all threads returned -1,
	// indicating that the requested codec is not available:
	if (result == -num_threads) {
		return -1;
	}

	// If `result' does not equal `num_threads', then at least one of the
	// threads hit a decode error:
	if (result != num_threads) {
		return 0;
	}

	// So far so good, now decode whatever remains in the buffer. Reuse the
	// initial state, since we are at a 4-byte boundary:
	state = initial_state;
	result = base64_stream_decode(&state, src + num_threads * len, last_len, out + num_threads * len * 3 / 4, &s);
	sum += s;
	*outlen = sum;

	// If when decoding a whole block, we're still waiting for input then fail:
	if (result && (state.bytes == 0)) {
		return result;
	}
	return 0;
}
|
||||
|
|
@ -0,0 +1,146 @@
|
|||
#ifndef LIBBASE64_H
#define LIBBASE64_H

#include <stddef.h>	/* size_t */


// Symbol visibility / DLL import-export plumbing:
#if defined(_WIN32) || defined(__CYGWIN__)
#define BASE64_SYMBOL_IMPORT __declspec(dllimport)
#define BASE64_SYMBOL_EXPORT __declspec(dllexport)
#define BASE64_SYMBOL_PRIVATE

#elif __GNUC__ >= 4
#define BASE64_SYMBOL_IMPORT __attribute__ ((visibility ("default")))
#define BASE64_SYMBOL_EXPORT __attribute__ ((visibility ("default")))
#define BASE64_SYMBOL_PRIVATE __attribute__ ((visibility ("hidden")))

#else
#define BASE64_SYMBOL_IMPORT
#define BASE64_SYMBOL_EXPORT
#define BASE64_SYMBOL_PRIVATE
#endif

#if defined(BASE64_STATIC_DEFINE)
#define BASE64_EXPORT
#define BASE64_NO_EXPORT

#else
#if defined(BASE64_EXPORTS) // defined if we are building the shared library
#define BASE64_EXPORT BASE64_SYMBOL_EXPORT

#else
#define BASE64_EXPORT BASE64_SYMBOL_IMPORT
#endif

#define BASE64_NO_EXPORT BASE64_SYMBOL_PRIVATE
#endif


#ifdef __cplusplus
extern "C" {
#endif

/* These are the flags that can be passed in the `flags` argument. The values
 * below force the use of a given codec, even if that codec is a no-op in the
 * current build. Used in testing. Set to 0 for the default behavior, which is
 * runtime feature detection on x86, a compile-time fixed codec on ARM, and
 * the plain codec on other platforms: */
#define BASE64_FORCE_AVX2	(1 << 0)
#define BASE64_FORCE_NEON32	(1 << 1)
#define BASE64_FORCE_NEON64	(1 << 2)
#define BASE64_FORCE_PLAIN	(1 << 3)
#define BASE64_FORCE_SSSE3	(1 << 4)
#define BASE64_FORCE_SSE41	(1 << 5)
#define BASE64_FORCE_SSE42	(1 << 6)
#define BASE64_FORCE_AVX	(1 << 7)
#define BASE64_FORCE_AVX512	(1 << 8)

/* Streaming codec state. `bytes` counts the position inside the current
 * 3-byte (encode) or 4-char (decode) group; `carry` holds leftover bits: */
struct base64_state {
	int eof;
	int bytes;
	int flags;
	unsigned char carry;
};

/* Wrapper function to encode a plain string of given length. Output is written
 * to *out without trailing zero. Output length in bytes is written to *outlen.
 * The buffer in `out` has been allocated by the caller and is at least 4/3 the
 * size of the input. See above for `flags`; set to 0 for default operation: */
void BASE64_EXPORT base64_encode
	( const char *src
	, size_t srclen
	, char *out
	, size_t *outlen
	, int flags
	) ;

/* Call this before calling base64_stream_encode() to init the state. See above
 * for `flags`; set to 0 for default operation: */
void BASE64_EXPORT base64_stream_encode_init
	( struct base64_state *state
	, int flags
	) ;

/* Encodes the block of data of given length at `src`, into the buffer at
 * `out`. Caller is responsible for allocating a large enough out-buffer; it
 * must be at least 4/3 the size of the in-buffer, but take some margin. Places
 * the number of new bytes written into `outlen` (which is set to zero when the
 * function starts). Does not zero-terminate or finalize the output. */
void BASE64_EXPORT base64_stream_encode
	( struct base64_state *state
	, const char *src
	, size_t srclen
	, char *out
	, size_t *outlen
	) ;

/* Finalizes the output begun by previous calls to `base64_stream_encode()`.
 * Adds the required end-of-stream markers if appropriate. `outlen` is modified
 * and will contain the number of new bytes written at `out` (which will quite
 * often be zero). */
void BASE64_EXPORT base64_stream_encode_final
	( struct base64_state *state
	, char *out
	, size_t *outlen
	) ;

/* Wrapper function to decode a plain string of given length. Output is written
 * to *out without trailing zero. Output length in bytes is written to *outlen.
 * The buffer in `out` has been allocated by the caller and is at least 3/4 the
 * size of the input. See above for `flags`, set to 0 for default operation: */
int BASE64_EXPORT base64_decode
	( const char *src
	, size_t srclen
	, char *out
	, size_t *outlen
	, int flags
	) ;

/* Call this before calling base64_stream_decode() to init the state. See above
 * for `flags`; set to 0 for default operation: */
void BASE64_EXPORT base64_stream_decode_init
	( struct base64_state *state
	, int flags
	) ;

/* Decodes the block of data of given length at `src`, into the buffer at
 * `out`. Caller is responsible for allocating a large enough out-buffer; it
 * must be at least 3/4 the size of the in-buffer, but take some margin. Places
 * the number of new bytes written into `outlen` (which is set to zero when the
 * function starts). Does not zero-terminate the output. Returns 1 if all is
 * well, and 0 if a decoding error was found, such as an invalid character.
 * Returns -1 if the chosen codec is not included in the current build. Used by
 * the test harness to check whether a codec is available for testing. */
int BASE64_EXPORT base64_stream_decode
	( struct base64_state *state
	, const char *src
	, size_t srclen
	, char *out
	, size_t *outlen
	) ;

#ifdef __cplusplus
}
#endif

#endif /* LIBBASE64_H */
|
||||
|
|
@ -0,0 +1,387 @@
|
|||
#define PY_SSIZE_T_CLEAN
|
||||
#include <Python.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
#define BASE64_EXPORTS
|
||||
|
||||
#include "librt_base64.h"
|
||||
#include "libbase64.h"
|
||||
#include "pythoncapi_compat.h"
|
||||
|
||||
static PyObject *
|
||||
b64decode_handle_invalid_input(
|
||||
PyObject *out_bytes, char *outbuf, size_t max_out, const char *src, size_t srclen, bool freesrc);
|
||||
|
||||
#define BASE64_MAXBIN ((PY_SSIZE_T_MAX - 3) / 2)
|
||||
|
||||
#define STACK_BUFFER_SIZE 1024
|
||||
|
||||
// Rewrite a standard-alphabet base64 buffer in place, mapping '+' to '-'
// and '/' to '_' (the URL-safe alphabet of RFC 4648 section 5). All other
// bytes pass through unchanged. The simple per-byte loop is kept friendly
// to compiler auto-vectorization, as in the original.
static void
convert_encoded_to_urlsafe(char *buf, size_t len) {
    for (size_t idx = 0; idx < len; idx++) {
        char byte = buf[idx];
        buf[idx] = (byte == '+') ? '-' : (byte == '/') ? '_' : byte;
    }
}
|
||||
|
||||
// Copy `len` bytes from `src` into `buf`, translating the URL-safe base64
// alphabet back to the standard one: '-' becomes '+', '_' becomes '/'.
// `buf` must have room for `len` bytes; no terminator is written. The
// per-byte loop is kept friendly to compiler auto-vectorization.
static void
convert_urlsafe_to_encoded(const char *src, size_t len, char *buf) {
    for (size_t idx = 0; idx < len; idx++) {
        char byte = src[idx];
        buf[idx] = (byte == '-') ? '+' : (byte == '_') ? '/' : byte;
    }
}
|
||||
|
||||
// Encode a bytes object with the accelerated base64 codec, optionally
// translating the result into the URL-safe alphabet. Returns a new bytes
// object, or NULL with an exception set (TypeError for non-bytes input,
// ValueError for oversized input, MemoryError on allocation failure).
static PyObject *
b64encode_internal(PyObject *obj, bool urlsafe) {
    if (!PyBytes_Check(obj)) {
        PyErr_SetString(PyExc_TypeError, "base64() expects a bytes object");
        return NULL;
    }

    char *bin_data = PyBytes_AS_STRING(obj);
    Py_ssize_t bin_len = PyBytes_GET_SIZE(obj);
    assert(bin_len >= 0);

    if (bin_len > BASE64_MAXBIN) {
        PyErr_SetString(PyExc_ValueError, "Too much data for base64 line");
        return NULL;
    }

    // Worst-case output is 4 chars per 3 input bytes. Computed as
    // (bin_len / 3 + 1) * 4 rather than 4 * bin_len / 3 + 4: the latter
    // overflows Py_ssize_t for bin_len > PY_SSIZE_T_MAX / 4, which the
    // BASE64_MAXBIN bound (~PY_SSIZE_T_MAX / 2) still permits.
    Py_ssize_t buflen = (bin_len / 3 + 1) * 4;
    char *buf;
    char stack_buf[STACK_BUFFER_SIZE];
    if (buflen <= STACK_BUFFER_SIZE) {
        // Small outputs avoid a heap allocation entirely.
        buf = stack_buf;
    } else {
        buf = PyMem_Malloc(buflen);
        if (buf == NULL) {
            return PyErr_NoMemory();
        }
    }

    size_t actual_len;
    base64_encode(bin_data, bin_len, buf, &actual_len, 0);

    if (urlsafe) {
        convert_encoded_to_urlsafe(buf, actual_len);
    }

    PyObject *res = PyBytes_FromStringAndSize(buf, actual_len);
    if (buflen > STACK_BUFFER_SIZE) {
        PyMem_Free(buf);
    }
    return res;
}
|
||||
|
||||
static PyObject*
|
||||
b64encode(PyObject *self, PyObject *const *args, size_t nargs) {
|
||||
if (nargs != 1) {
|
||||
PyErr_SetString(PyExc_TypeError, "b64encode() takes exactly one argument");
|
||||
return 0;
|
||||
}
|
||||
return b64encode_internal(args[0], false);
|
||||
}
|
||||
|
||||
static PyObject*
|
||||
urlsafe_b64encode(PyObject *self, PyObject *const *args, size_t nargs) {
|
||||
if (nargs != 1) {
|
||||
PyErr_SetString(PyExc_TypeError, "urlsafe_b64encode() takes exactly one argument");
|
||||
return 0;
|
||||
}
|
||||
return b64encode_internal(args[0], true);
|
||||
}
|
||||
|
||||
// Membership test for the standard base64 alphabet (A-Z, a-z, 0-9, '+',
// '/'); the padding character '=' is accepted only when `allow_padding`
// is set. Returns 1 for a member, 0 otherwise.
static inline int
is_valid_base64_char(char c, bool allow_padding) {
    if (c == '=') {
        return allow_padding ? 1 : 0;
    }
    if (c == '+' || c == '/') {
        return 1;
    }
    return (c >= 'A' && c <= 'Z') ||
           (c >= 'a' && c <= 'z') ||
           (c >= '0' && c <= '9');
}
|
||||
|
||||
// Decode base64 input (bytes or ASCII str) into a new bytes object.
// When `urlsafe` is true, the input is first translated from the URL-safe
// alphabet ('-'/'_') into a freshly allocated standard-alphabet copy; that
// copy replaces `src` and must be freed on every exit path below.
// Invalid characters fall through to a slower cleanup path for
// compatibility with stdlib base64.b64decode.
static PyObject *
b64decode_internal(PyObject *arg, bool urlsafe) {
    const char *src;
    Py_ssize_t srclen_ssz;

    // Get input pointer and length
    if (PyBytes_Check(arg)) {
        src = PyBytes_AS_STRING(arg);
        srclen_ssz = PyBytes_GET_SIZE(arg);
    } else if (PyUnicode_Check(arg)) {
        if (!PyUnicode_IS_ASCII(arg)) {
            PyErr_SetString(PyExc_ValueError,
                "string argument should contain only ASCII characters");
            return NULL;
        }
        // ASCII-only str is stored as 1-byte data, so it can be read
        // directly without an intermediate encode to bytes.
        src = (const char *)PyUnicode_1BYTE_DATA(arg);
        srclen_ssz = PyUnicode_GET_LENGTH(arg);
    } else {
        PyErr_SetString(PyExc_TypeError,
            "argument should be a bytes-like object or ASCII string");
        return NULL;
    }

    // Fast-path: empty input
    if (srclen_ssz == 0) {
        return PyBytes_FromStringAndSize(NULL, 0);
    }

    if (urlsafe) {
        // NOTE(review): srclen_ssz + 1 bytes are allocated but only
        // srclen_ssz are written; the extra byte appears unused — confirm.
        char *new_src = PyMem_Malloc(srclen_ssz + 1);
        if (new_src == NULL) {
            return PyErr_NoMemory();
        }
        convert_urlsafe_to_encoded(src, srclen_ssz, new_src);
        src = new_src;
    }

    // Quickly ignore invalid characters at the end. Other invalid characters
    // are also accepted, but they need a slow path.
    while (srclen_ssz > 0 && !is_valid_base64_char(src[srclen_ssz - 1], true)) {
        srclen_ssz--;
    }

    // Compute an output capacity that's at least 3/4 of input, without overflow:
    // ceil(3/4 * N) == N - floor(N/4)
    size_t srclen = (size_t)srclen_ssz;
    size_t max_out = srclen - (srclen / 4);
    if (max_out == 0) {
        max_out = 1; // defensive (srclen > 0 implies >= 1 anyway)
    }
    if (max_out > (size_t)PY_SSIZE_T_MAX) {
        PyErr_SetString(PyExc_OverflowError, "input too large");
        return NULL;
    }

    // Allocate output bytes (uninitialized) of the max capacity
    PyObject *out_bytes = PyBytes_FromStringAndSize(NULL, (Py_ssize_t)max_out);
    if (out_bytes == NULL) {
        if (urlsafe) {
            PyMem_Free((void *)src);
        }
        return NULL; // Propagate memory error
    }

    char *outbuf = PyBytes_AS_STRING(out_bytes);
    size_t outlen = max_out;

    int ret = base64_decode(src, srclen, outbuf, &outlen, 0);

    if (ret != 1) {
        if (ret == 0) {
            // Slow path: handle non-base64 input. Ownership of out_bytes
            // (and of src when urlsafe) transfers to the helper.
            return b64decode_handle_invalid_input(out_bytes, outbuf, max_out, src, srclen, urlsafe);
        }
        Py_DECREF(out_bytes);
        if (urlsafe) {
            PyMem_Free((void *)src);
        }
        if (ret == -1) {
            PyErr_SetString(PyExc_NotImplementedError, "base64 codec not available in this build");
        } else {
            PyErr_SetString(PyExc_RuntimeError, "base64_decode failed");
        }
        return NULL;
    }

    if (urlsafe) {
        PyMem_Free((void *)src);
    }

    // Sanity-check contract (decoder must not overflow our buffer)
    if (outlen > max_out) {
        Py_DECREF(out_bytes);
        PyErr_SetString(PyExc_RuntimeError, "decoder wrote past output buffer");
        return NULL;
    }

    // Shrink in place to the actual decoded length
    if (_PyBytes_Resize(&out_bytes, (Py_ssize_t)outlen) < 0) {
        // _PyBytes_Resize sets an exception and may free the old object
        return NULL;
    }
    return out_bytes;
}
|
||||
|
||||
// Process non-base64 input by ignoring non-base64 characters, for compatibility
|
||||
// with stdlib b64decode.
|
||||
static PyObject *
|
||||
b64decode_handle_invalid_input(
|
||||
PyObject *out_bytes, char *outbuf, size_t max_out, const char *src, size_t srclen, bool freesrc)
|
||||
{
|
||||
// Copy input to a temporary buffer, with non-base64 characters and extra suffix
|
||||
// characters removed
|
||||
size_t newbuf_len = 0;
|
||||
char *newbuf = PyMem_Malloc(srclen);
|
||||
if (newbuf == NULL) {
|
||||
Py_DECREF(out_bytes);
|
||||
if (freesrc) {
|
||||
PyMem_Free((void *)src);
|
||||
}
|
||||
return PyErr_NoMemory();
|
||||
}
|
||||
|
||||
// Copy base64 characters and some padding to the new buffer
|
||||
for (size_t i = 0; i < srclen; i++) {
|
||||
char c = src[i];
|
||||
if (is_valid_base64_char(c, false)) {
|
||||
newbuf[newbuf_len++] = c;
|
||||
} else if (c == '=') {
|
||||
// Copy a necessary amount of padding
|
||||
int remainder = newbuf_len % 4;
|
||||
if (remainder == 0) {
|
||||
// No padding needed
|
||||
break;
|
||||
}
|
||||
int numpad = 4 - remainder;
|
||||
// Check that there is at least the required amount padding (CPython ignores
|
||||
// extra padding)
|
||||
while (numpad > 0) {
|
||||
if (i == srclen || src[i] != '=') {
|
||||
break;
|
||||
}
|
||||
newbuf[newbuf_len++] = '=';
|
||||
i++;
|
||||
numpad--;
|
||||
// Skip non-base64 alphabet characters within padding
|
||||
while (i < srclen && !is_valid_base64_char(src[i], true)) {
|
||||
i++;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Stdlib always performs a non-strict padding check
|
||||
if (newbuf_len % 4 != 0) {
|
||||
if (freesrc) {
|
||||
PyMem_Free((void *)src);
|
||||
}
|
||||
Py_DECREF(out_bytes);
|
||||
PyMem_Free(newbuf);
|
||||
PyErr_SetString(PyExc_ValueError, "Incorrect padding");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
size_t outlen = max_out;
|
||||
int ret = base64_decode(newbuf, newbuf_len, outbuf, &outlen, 0);
|
||||
PyMem_Free(newbuf);
|
||||
if (freesrc) {
|
||||
PyMem_Free((void *)src);
|
||||
}
|
||||
|
||||
if (ret != 1) {
|
||||
Py_DECREF(out_bytes);
|
||||
if (ret == 0) {
|
||||
PyErr_SetString(PyExc_ValueError, "Only base64 data is allowed");
|
||||
}
|
||||
if (ret == -1) {
|
||||
PyErr_SetString(PyExc_NotImplementedError, "base64 codec not available in this build");
|
||||
} else {
|
||||
PyErr_SetString(PyExc_RuntimeError, "base64_decode failed");
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Shrink in place to the actual decoded length
|
||||
if (_PyBytes_Resize(&out_bytes, (Py_ssize_t)outlen) < 0) {
|
||||
// _PyBytes_Resize sets an exception and may free the old object
|
||||
return NULL;
|
||||
}
|
||||
return out_bytes;
|
||||
}
|
||||
|
||||
static PyObject*
|
||||
b64decode(PyObject *self, PyObject *const *args, size_t nargs) {
|
||||
if (nargs != 1) {
|
||||
PyErr_SetString(PyExc_TypeError, "b64decode() takes exactly one argument");
|
||||
return 0;
|
||||
}
|
||||
return b64decode_internal(args[0], false);
|
||||
}
|
||||
|
||||
static PyObject*
|
||||
urlsafe_b64decode(PyObject *self, PyObject *const *args, size_t nargs) {
|
||||
if (nargs != 1) {
|
||||
PyErr_SetString(PyExc_TypeError, "urlsafe_b64decode() takes exactly one argument");
|
||||
return 0;
|
||||
}
|
||||
return b64decode_internal(args[0], true);
|
||||
}
|
||||
|
||||
// Method table for the librt.base64 module. Every entry point uses
// METH_FASTCALL (PyObject *const *args + nargs) to minimize call overhead.
static PyMethodDef librt_base64_module_methods[] = {
    {"b64encode", (PyCFunction)b64encode, METH_FASTCALL, PyDoc_STR("Encode bytes object using Base64.")},
    {"b64decode", (PyCFunction)b64decode, METH_FASTCALL, PyDoc_STR("Decode a Base64 encoded bytes object or ASCII string.")},
    {"urlsafe_b64encode", (PyCFunction)urlsafe_b64encode, METH_FASTCALL, PyDoc_STR("Encode bytes object using URL and file system safe Base64 alphabet.")},
    {"urlsafe_b64decode", (PyCFunction)urlsafe_b64decode, METH_FASTCALL, PyDoc_STR("Decode bytes or ASCII string using URL and file system safe Base64 alphabet.")},
    {NULL, NULL, 0, NULL}  // sentinel
};
|
||||
|
||||
static int
|
||||
base64_abi_version(void) {
|
||||
return LIBRT_BASE64_ABI_VERSION;
|
||||
}
|
||||
|
||||
static int
|
||||
base64_api_version(void) {
|
||||
return LIBRT_BASE64_API_VERSION;
|
||||
}
|
||||
|
||||
static int
|
||||
librt_base64_module_exec(PyObject *m)
|
||||
{
|
||||
// Export mypy internal C API, be careful with the order!
|
||||
static void *base64_api[LIBRT_BASE64_API_LEN] = {
|
||||
(void *)base64_abi_version,
|
||||
(void *)base64_api_version,
|
||||
(void *)b64encode_internal,
|
||||
(void *)b64decode_internal,
|
||||
};
|
||||
PyObject *c_api_object = PyCapsule_New((void *)base64_api, "librt.base64._C_API", NULL);
|
||||
if (PyModule_Add(m, "_C_API", c_api_object) < 0) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Multi-phase initialization slots: run the exec hook after module creation,
// and declare the module safe under free-threaded (no-GIL) builds when the
// interpreter supports that flag.
static PyModuleDef_Slot librt_base64_module_slots[] = {
    {Py_mod_exec, librt_base64_module_exec},
#ifdef Py_MOD_GIL_NOT_USED
    {Py_mod_gil, Py_MOD_GIL_NOT_USED},
#endif
    {0, NULL}  // sentinel
};
|
||||
|
||||
// Module definition for librt.base64. m_size == 0: the module keeps no
// per-interpreter state (the exported API table is a static array).
static PyModuleDef librt_base64_module = {
    .m_base = PyModuleDef_HEAD_INIT,
    .m_name = "base64",
    .m_doc = "Fast base64 encoding and decoding optimized for mypyc",
    .m_size = 0,
    .m_methods = librt_base64_module_methods,
    .m_slots = librt_base64_module_slots,
};
|
||||
|
||||
PyMODINIT_FUNC
|
||||
PyInit_base64(void)
|
||||
{
|
||||
return PyModuleDef_Init(&librt_base64_module);
|
||||
}
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
#ifndef LIBRT_BASE64_H
#define LIBRT_BASE64_H

#include <Python.h>

// Version handshake constants, checked by import_librt_base64() below:
// - ABI must match exactly; bump on any incompatible table-layout change.
// - API may be newer on the provider side; bump when entries are appended.
#define LIBRT_BASE64_ABI_VERSION 1
#define LIBRT_BASE64_API_VERSION 2
#define LIBRT_BASE64_API_LEN 4

// Per-translation-unit function-pointer table, filled from the
// "librt.base64._C_API" capsule by import_librt_base64().
static void *LibRTBase64_API[LIBRT_BASE64_API_LEN];

// Typed accessors into the capsule table. Index order must match the
// provider's base64_api array in the module implementation.
#define LibRTBase64_ABIVersion (*(int (*)(void)) LibRTBase64_API[0])
#define LibRTBase64_APIVersion (*(int (*)(void)) LibRTBase64_API[1])
#define LibRTBase64_b64encode_internal (*(PyObject* (*)(PyObject *source, bool urlsafe)) LibRTBase64_API[2])
#define LibRTBase64_b64decode_internal (*(PyObject* (*)(PyObject *source, bool urlsafe)) LibRTBase64_API[3])
||||
static int
|
||||
import_librt_base64(void)
|
||||
{
|
||||
PyObject *mod = PyImport_ImportModule("librt.base64");
|
||||
if (mod == NULL)
|
||||
return -1;
|
||||
Py_DECREF(mod); // we import just for the side effect of making the below work.
|
||||
void *capsule = PyCapsule_Import("librt.base64._C_API", 0);
|
||||
if (capsule == NULL)
|
||||
return -1;
|
||||
memcpy(LibRTBase64_API, capsule, sizeof(LibRTBase64_API));
|
||||
if (LibRTBase64_ABIVersion() != LIBRT_BASE64_ABI_VERSION) {
|
||||
char err[128];
|
||||
snprintf(err, sizeof(err), "ABI version conflict for librt.base64, expected %d, found %d",
|
||||
LIBRT_BASE64_ABI_VERSION,
|
||||
LibRTBase64_ABIVersion()
|
||||
);
|
||||
PyErr_SetString(PyExc_ValueError, err);
|
||||
return -1;
|
||||
}
|
||||
if (LibRTBase64_APIVersion() < LIBRT_BASE64_API_VERSION) {
|
||||
char err[128];
|
||||
snprintf(err, sizeof(err),
|
||||
"API version conflict for librt.base64, expected %d or newer, found %d (hint: upgrade librt)",
|
||||
LIBRT_BASE64_API_VERSION,
|
||||
LibRTBase64_APIVersion()
|
||||
);
|
||||
PyErr_SetString(PyExc_ValueError, err);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif // LIBRT_BASE64_H
|
||||
|
|
@ -0,0 +1,393 @@
|
|||
#include <stdint.h>

// Standard base64 alphabet specials and padding character (RFC 4648, sec. 4).
#define CHAR62 '+'
#define CHAR63 '/'
#define CHARPAD '='
|
||||
|
||||
|
||||
#if BASE64_LITTLE_ENDIAN
|
||||
|
||||
|
||||
/* SPECIAL DECODE TABLES FOR LITTLE ENDIAN (INTEL) CPUS */
|
||||
|
||||
// Little-endian decode table, input byte 0: maps an ASCII byte to its 6-bit
// base64 value pre-positioned for the 32-bit assembly step; 0xffffffff marks
// bytes outside the base64 alphabet. Reflowed to 8 entries per row.
const uint32_t base64_table_dec_32bit_d0[256] = {
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0x000000f8, 0xffffffff, 0xffffffff, 0xffffffff, 0x000000fc,
    0x000000d0, 0x000000d4, 0x000000d8, 0x000000dc, 0x000000e0, 0x000000e4, 0x000000e8, 0x000000ec,
    0x000000f0, 0x000000f4, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x00000000, 0x00000004, 0x00000008, 0x0000000c, 0x00000010, 0x00000014, 0x00000018,
    0x0000001c, 0x00000020, 0x00000024, 0x00000028, 0x0000002c, 0x00000030, 0x00000034, 0x00000038,
    0x0000003c, 0x00000040, 0x00000044, 0x00000048, 0x0000004c, 0x00000050, 0x00000054, 0x00000058,
    0x0000005c, 0x00000060, 0x00000064, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x00000068, 0x0000006c, 0x00000070, 0x00000074, 0x00000078, 0x0000007c, 0x00000080,
    0x00000084, 0x00000088, 0x0000008c, 0x00000090, 0x00000094, 0x00000098, 0x0000009c, 0x000000a0,
    0x000000a4, 0x000000a8, 0x000000ac, 0x000000b0, 0x000000b4, 0x000000b8, 0x000000bc, 0x000000c0,
    0x000000c4, 0x000000c8, 0x000000cc, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};
|
||||
|
||||
|
||||
// Little-endian decode table, input byte 1: pre-positioned 6-bit values for
// the second character of a base64 quad; 0xffffffff marks invalid bytes.
// Reflowed to 8 entries per row.
const uint32_t base64_table_dec_32bit_d1[256] = {
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0x0000e003, 0xffffffff, 0xffffffff, 0xffffffff, 0x0000f003,
    0x00004003, 0x00005003, 0x00006003, 0x00007003, 0x00008003, 0x00009003, 0x0000a003, 0x0000b003,
    0x0000c003, 0x0000d003, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x00000000, 0x00001000, 0x00002000, 0x00003000, 0x00004000, 0x00005000, 0x00006000,
    0x00007000, 0x00008000, 0x00009000, 0x0000a000, 0x0000b000, 0x0000c000, 0x0000d000, 0x0000e000,
    0x0000f000, 0x00000001, 0x00001001, 0x00002001, 0x00003001, 0x00004001, 0x00005001, 0x00006001,
    0x00007001, 0x00008001, 0x00009001, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x0000a001, 0x0000b001, 0x0000c001, 0x0000d001, 0x0000e001, 0x0000f001, 0x00000002,
    0x00001002, 0x00002002, 0x00003002, 0x00004002, 0x00005002, 0x00006002, 0x00007002, 0x00008002,
    0x00009002, 0x0000a002, 0x0000b002, 0x0000c002, 0x0000d002, 0x0000e002, 0x0000f002, 0x00000003,
    0x00001003, 0x00002003, 0x00003003, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};
|
||||
|
||||
|
||||
// Little-endian decode table, input byte 2: pre-positioned 6-bit values for
// the third character of a base64 quad; 0xffffffff marks invalid bytes.
// Reflowed to 8 entries per row.
const uint32_t base64_table_dec_32bit_d2[256] = {
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0x00800f00, 0xffffffff, 0xffffffff, 0xffffffff, 0x00c00f00,
    0x00000d00, 0x00400d00, 0x00800d00, 0x00c00d00, 0x00000e00, 0x00400e00, 0x00800e00, 0x00c00e00,
    0x00000f00, 0x00400f00, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x00000000, 0x00400000, 0x00800000, 0x00c00000, 0x00000100, 0x00400100, 0x00800100,
    0x00c00100, 0x00000200, 0x00400200, 0x00800200, 0x00c00200, 0x00000300, 0x00400300, 0x00800300,
    0x00c00300, 0x00000400, 0x00400400, 0x00800400, 0x00c00400, 0x00000500, 0x00400500, 0x00800500,
    0x00c00500, 0x00000600, 0x00400600, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x00800600, 0x00c00600, 0x00000700, 0x00400700, 0x00800700, 0x00c00700, 0x00000800,
    0x00400800, 0x00800800, 0x00c00800, 0x00000900, 0x00400900, 0x00800900, 0x00c00900, 0x00000a00,
    0x00400a00, 0x00800a00, 0x00c00a00, 0x00000b00, 0x00400b00, 0x00800b00, 0x00c00b00, 0x00000c00,
    0x00400c00, 0x00800c00, 0x00c00c00, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};
|
||||
|
||||
|
||||
// Little-endian decode table, input byte 3: pre-positioned 6-bit values for
// the fourth character of a base64 quad; 0xffffffff marks invalid bytes.
// Reflowed to 8 entries per row.
const uint32_t base64_table_dec_32bit_d3[256] = {
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0x003e0000, 0xffffffff, 0xffffffff, 0xffffffff, 0x003f0000,
    0x00340000, 0x00350000, 0x00360000, 0x00370000, 0x00380000, 0x00390000, 0x003a0000, 0x003b0000,
    0x003c0000, 0x003d0000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x00000000, 0x00010000, 0x00020000, 0x00030000, 0x00040000, 0x00050000, 0x00060000,
    0x00070000, 0x00080000, 0x00090000, 0x000a0000, 0x000b0000, 0x000c0000, 0x000d0000, 0x000e0000,
    0x000f0000, 0x00100000, 0x00110000, 0x00120000, 0x00130000, 0x00140000, 0x00150000, 0x00160000,
    0x00170000, 0x00180000, 0x00190000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x001a0000, 0x001b0000, 0x001c0000, 0x001d0000, 0x001e0000, 0x001f0000, 0x00200000,
    0x00210000, 0x00220000, 0x00230000, 0x00240000, 0x00250000, 0x00260000, 0x00270000, 0x00280000,
    0x00290000, 0x002a0000, 0x002b0000, 0x002c0000, 0x002d0000, 0x002e0000, 0x002f0000, 0x00300000,
    0x00310000, 0x00320000, 0x00330000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};
|
||||
|
||||
|
||||
#else
|
||||
|
||||
|
||||
/* SPECIAL DECODE TABLES FOR BIG ENDIAN (IBM/MOTOROLA/SUN) CPUS */
|
||||
|
||||
// Big-endian decode table, input byte 0: the 6-bit base64 value shifted into
// bits 31..26; 0xffffffff marks bytes outside the alphabet. Reflowed to
// 8 entries per row.
const uint32_t base64_table_dec_32bit_d0[256] = {
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xf8000000, 0xffffffff, 0xffffffff, 0xffffffff, 0xfc000000,
    0xd0000000, 0xd4000000, 0xd8000000, 0xdc000000, 0xe0000000, 0xe4000000, 0xe8000000, 0xec000000,
    0xf0000000, 0xf4000000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x00000000, 0x04000000, 0x08000000, 0x0c000000, 0x10000000, 0x14000000, 0x18000000,
    0x1c000000, 0x20000000, 0x24000000, 0x28000000, 0x2c000000, 0x30000000, 0x34000000, 0x38000000,
    0x3c000000, 0x40000000, 0x44000000, 0x48000000, 0x4c000000, 0x50000000, 0x54000000, 0x58000000,
    0x5c000000, 0x60000000, 0x64000000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x68000000, 0x6c000000, 0x70000000, 0x74000000, 0x78000000, 0x7c000000, 0x80000000,
    0x84000000, 0x88000000, 0x8c000000, 0x90000000, 0x94000000, 0x98000000, 0x9c000000, 0xa0000000,
    0xa4000000, 0xa8000000, 0xac000000, 0xb0000000, 0xb4000000, 0xb8000000, 0xbc000000, 0xc0000000,
    0xc4000000, 0xc8000000, 0xcc000000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};
|
||||
|
||||
|
||||
// Big-endian decode table, input byte 1: the 6-bit base64 value shifted into
// bits 25..20; 0xffffffff marks bytes outside the alphabet. Reflowed to
// 8 entries per row.
const uint32_t base64_table_dec_32bit_d1[256] = {
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0x03e00000, 0xffffffff, 0xffffffff, 0xffffffff, 0x03f00000,
    0x03400000, 0x03500000, 0x03600000, 0x03700000, 0x03800000, 0x03900000, 0x03a00000, 0x03b00000,
    0x03c00000, 0x03d00000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x00000000, 0x00100000, 0x00200000, 0x00300000, 0x00400000, 0x00500000, 0x00600000,
    0x00700000, 0x00800000, 0x00900000, 0x00a00000, 0x00b00000, 0x00c00000, 0x00d00000, 0x00e00000,
    0x00f00000, 0x01000000, 0x01100000, 0x01200000, 0x01300000, 0x01400000, 0x01500000, 0x01600000,
    0x01700000, 0x01800000, 0x01900000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x01a00000, 0x01b00000, 0x01c00000, 0x01d00000, 0x01e00000, 0x01f00000, 0x02000000,
    0x02100000, 0x02200000, 0x02300000, 0x02400000, 0x02500000, 0x02600000, 0x02700000, 0x02800000,
    0x02900000, 0x02a00000, 0x02b00000, 0x02c00000, 0x02d00000, 0x02e00000, 0x02f00000, 0x03000000,
    0x03100000, 0x03200000, 0x03300000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};
|
||||
|
||||
|
||||
// Big-endian decode table, input byte 2: the 6-bit base64 value shifted into
// bits 19..14; 0xffffffff marks bytes outside the alphabet. Reflowed to
// 8 entries per row.
const uint32_t base64_table_dec_32bit_d2[256] = {
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0x000f8000, 0xffffffff, 0xffffffff, 0xffffffff, 0x000fc000,
    0x000d0000, 0x000d4000, 0x000d8000, 0x000dc000, 0x000e0000, 0x000e4000, 0x000e8000, 0x000ec000,
    0x000f0000, 0x000f4000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x00000000, 0x00004000, 0x00008000, 0x0000c000, 0x00010000, 0x00014000, 0x00018000,
    0x0001c000, 0x00020000, 0x00024000, 0x00028000, 0x0002c000, 0x00030000, 0x00034000, 0x00038000,
    0x0003c000, 0x00040000, 0x00044000, 0x00048000, 0x0004c000, 0x00050000, 0x00054000, 0x00058000,
    0x0005c000, 0x00060000, 0x00064000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0x00068000, 0x0006c000, 0x00070000, 0x00074000, 0x00078000, 0x0007c000, 0x00080000,
    0x00084000, 0x00088000, 0x0008c000, 0x00090000, 0x00094000, 0x00098000, 0x0009c000, 0x000a0000,
    0x000a4000, 0x000a8000, 0x000ac000, 0x000b0000, 0x000b4000, 0x000b8000, 0x000bc000, 0x000c0000,
    0x000c4000, 0x000c8000, 0x000cc000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
    0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};
|
||||
|
||||
|
||||
const uint32_t base64_table_dec_32bit_d3[256] = {
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0x00003e00, 0xffffffff, 0xffffffff, 0xffffffff, 0x00003f00,
|
||||
0x00003400, 0x00003500, 0x00003600, 0x00003700, 0x00003800, 0x00003900,
|
||||
0x00003a00, 0x00003b00, 0x00003c00, 0x00003d00, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000,
|
||||
0x00000100, 0x00000200, 0x00000300, 0x00000400, 0x00000500, 0x00000600,
|
||||
0x00000700, 0x00000800, 0x00000900, 0x00000a00, 0x00000b00, 0x00000c00,
|
||||
0x00000d00, 0x00000e00, 0x00000f00, 0x00001000, 0x00001100, 0x00001200,
|
||||
0x00001300, 0x00001400, 0x00001500, 0x00001600, 0x00001700, 0x00001800,
|
||||
0x00001900, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0x00001a00, 0x00001b00, 0x00001c00, 0x00001d00, 0x00001e00,
|
||||
0x00001f00, 0x00002000, 0x00002100, 0x00002200, 0x00002300, 0x00002400,
|
||||
0x00002500, 0x00002600, 0x00002700, 0x00002800, 0x00002900, 0x00002a00,
|
||||
0x00002b00, 0x00002c00, 0x00002d00, 0x00002e00, 0x00002f00, 0x00003000,
|
||||
0x00003100, 0x00003200, 0x00003300, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
|
||||
};
|
||||
|
||||
|
||||
#endif
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,40 @@
|
|||
#include "tables.h"
|
||||
|
||||
// The canonical base64 alphabet: maps each 6-bit value (0..63) to its
// ASCII character. Stored as a string literal, so a terminating NUL
// follows at index 64.
const uint8_t
base64_table_enc_6bit[] =
	"ABCDEFGHIJKLMNOPQRSTUVWXYZ"  // values  0..25
	"abcdefghijklmnopqrstuvwxyz"  // values 26..51
	"0123456789"                  // values 52..61
	"+/";                         // values 62..63
|
||||
|
||||
// Decode table mapping an ASCII byte to its 6-bit base64 value. Note that
// the padding character '=' (ASCII 61) maps to 254, not 255: it is used
// for in-band signaling of the end of the datastream and is handled
// separately by the decoders. A-Z, a-z, 0-9, '+' and '/' map to their
// decoded values 0..63; every other byte maps to 255, which flags it as
// invalid input.
const uint8_t
base64_table_dec_8bit[] =
{
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, //   0..15
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, //  16..31
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,  62, 255, 255, 255,  63, //  32..47
	 52,  53,  54,  55,  56,  57,  58,  59,  60,  61, 255, 255, 255, 254, 255, 255, //  48..63
	255,   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,  14, //  64..79
	 15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25, 255, 255, 255, 255, 255, //  80..95
	255,  26,  27,  28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40, //  96..111
	 41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51, 255, 255, 255, 255, 255, // 112..127
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, // 128..143
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, // 144..159
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, // 160..175
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, // 176..191
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, // 192..207
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, // 208..223
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, // 224..239
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, // 240..255
};
|
||||
|
||||
#if BASE64_WORDSIZE >= 32
|
||||
# include "table_dec_32bit.h"
|
||||
# include "table_enc_12bit.h"
|
||||
#endif
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
#ifndef BASE64_TABLES_H
#define BASE64_TABLES_H

#include <stdint.h>

#include "../env.h"

// Shared lookup tables used by every codec's plain (scalar) fallback
// encoder and decoder.
extern const uint8_t base64_table_enc_6bit[];
extern const uint8_t base64_table_dec_8bit[];

// Per-byte decode tables for the generic 32-bit and 64-bit decoders.
// Only declared when the word size supports them.
#if BASE64_WORDSIZE >= 32
extern const uint32_t base64_table_dec_32bit_d0[];
extern const uint32_t base64_table_dec_32bit_d1[];
extern const uint32_t base64_table_dec_32bit_d2[];
extern const uint32_t base64_table_dec_32bit_d3[];

// Encode table used by the generic 32-bit and 64-bit encoders.
extern const uint16_t base64_table_enc_12bit[];
#endif

#endif // BASE64_TABLES_H
|
||||
Loading…
Add table
Add a link
Reference in a new issue