LCOV - code coverage report
Current view: top level - cipher - bufhelp.h (source / functions)
Test: coverage.info    Date: 2016-11-29 14:56:30
Coverage: Lines: 0 / 40 (0.0 %)    Functions: 0 / 3 (0.0 %)

/* bufhelp.h  -  Some buffer manipulation helpers
 * Copyright (C) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef GCRYPT_BUFHELP_H
#define GCRYPT_BUFHELP_H


#include "bithelp.h"


#undef BUFHELP_FAST_UNALIGNED_ACCESS
#if defined(HAVE_GCC_ATTRIBUTE_PACKED) && \
    defined(HAVE_GCC_ATTRIBUTE_ALIGNED) && \
    (defined(__i386__) || defined(__x86_64__) || \
     (defined(__arm__) && defined(__ARM_FEATURE_UNALIGNED)) || \
     defined(__aarch64__))
/* These architectures are capable of unaligned memory accesses and can
   handle them fast.
 */
# define BUFHELP_FAST_UNALIGNED_ACCESS 1
#endif


#ifdef BUFHELP_FAST_UNALIGNED_ACCESS
/* Define type with one-byte alignment on architectures with fast unaligned
   memory accesses.
 */
typedef struct bufhelp_int_s
{
  uintptr_t a;
} __attribute__((packed, aligned(1))) bufhelp_int_t;
#else
/* Define type with default alignment for other architectures (unaligned
   accesses handled in per-byte loops).
 */
typedef struct bufhelp_int_s
{
  uintptr_t a;
} bufhelp_int_t;
#endif
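
/* Illustrative sketch (not part of the original header): with the packed,
   aligned(1) attributes above, dereferencing a bufhelp_int_t pointer lets
   the compiler issue a single unaligned word access, e.g.

     const bufhelp_int_t *p = (const bufhelp_int_t *)(const void *)(buf + 3);
     uintptr_t w = p->a;

   performs one unaligned load on i386/AMD64/AArch64, where `buf' is assumed
   to be a byte pointer.  On other architectures the helpers below fall back
   to per-byte loops for unaligned buffers instead.  */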


/* Optimized function for small buffer copying */
static inline void
buf_cpy(void *_dst, const void *_src, size_t len)
{
#if __GNUC__ >= 4 && (defined(__x86_64__) || defined(__i386__))
  /* For AMD64 and i386, memcpy is faster.  */
  memcpy(_dst, _src, len);
#else
  byte *dst = _dst;
  const byte *src = _src;
  bufhelp_int_t *ldst;
  const bufhelp_int_t *lsrc;
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
  const unsigned int longmask = sizeof(bufhelp_int_t) - 1;

  /* Skip fast processing if buffers are unaligned.  */
  if (((uintptr_t)dst | (uintptr_t)src) & longmask)
    goto do_bytes;
#endif

  ldst = (bufhelp_int_t *)(void *)dst;
  lsrc = (const bufhelp_int_t *)(const void *)src;

  for (; len >= sizeof(bufhelp_int_t); len -= sizeof(bufhelp_int_t))
    (ldst++)->a = (lsrc++)->a;

  dst = (byte *)ldst;
  src = (const byte *)lsrc;

#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
do_bytes:
#endif
  /* Handle tail.  */
  for (; len; len--)
    *dst++ = *src++;
#endif /*__GNUC__ >= 4 && (__x86_64__ || __i386__)*/
}
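
/* Usage sketch (illustrative, not part of the original header): copy one
   cipher block into an IV buffer; on i386/AMD64 with GCC this is a plain
   memcpy, elsewhere the word/byte loops above are used.  `iv' and `inbuf'
   are assumed to be caller-provided byte buffers of `blocksize' bytes.

     buf_cpy (iv, inbuf, blocksize);
 */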


/* Optimized function for buffer xoring */
static inline void
buf_xor(void *_dst, const void *_src1, const void *_src2, size_t len)
{
  byte *dst = _dst;
  const byte *src1 = _src1;
  const byte *src2 = _src2;
  bufhelp_int_t *ldst;
  const bufhelp_int_t *lsrc1, *lsrc2;
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
  const unsigned int longmask = sizeof(bufhelp_int_t) - 1;

  /* Skip fast processing if buffers are unaligned.  */
  if (((uintptr_t)dst | (uintptr_t)src1 | (uintptr_t)src2) & longmask)
    goto do_bytes;
#endif

  ldst = (bufhelp_int_t *)(void *)dst;
  lsrc1 = (const bufhelp_int_t *)(const void *)src1;
  lsrc2 = (const bufhelp_int_t *)(const void *)src2;

  for (; len >= sizeof(bufhelp_int_t); len -= sizeof(bufhelp_int_t))
    (ldst++)->a = (lsrc1++)->a ^ (lsrc2++)->a;

  dst = (byte *)ldst;
  src1 = (const byte *)lsrc1;
  src2 = (const byte *)lsrc2;

#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
do_bytes:
#endif
  /* Handle tail.  */
  for (; len; len--)
    *dst++ = *src1++ ^ *src2++;
}
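
/* Usage sketch (illustrative, not part of the original header): XOR a
   keystream block with a plaintext block to produce ciphertext, as a
   CTR/OFB-style mode would do.  `outbuf', `inbuf' and `keystream' are
   assumed to be byte buffers of `blocksize' bytes and may be unaligned.

     buf_xor (outbuf, inbuf, keystream, blocksize);
 */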


/* Optimized function for in-place buffer xoring. */
static inline void
buf_xor_1(void *_dst, const void *_src, size_t len)
{
  byte *dst = _dst;
  const byte *src = _src;
  bufhelp_int_t *ldst;
  const bufhelp_int_t *lsrc;
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
  const unsigned int longmask = sizeof(bufhelp_int_t) - 1;

  /* Skip fast processing if buffers are unaligned.  */
  if (((uintptr_t)dst | (uintptr_t)src) & longmask)
    goto do_bytes;
#endif

  ldst = (bufhelp_int_t *)(void *)dst;
  lsrc = (const bufhelp_int_t *)(const void *)src;

  for (; len >= sizeof(bufhelp_int_t); len -= sizeof(bufhelp_int_t))
    (ldst++)->a ^= (lsrc++)->a;

  dst = (byte *)ldst;
  src = (const byte *)lsrc;

#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
do_bytes:
#endif
  /* Handle tail.  */
  for (; len; len--)
    *dst++ ^= *src++;
}
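
/* Usage sketch (illustrative, not part of the original header): XOR a
   plaintext block into the IV buffer in place, e.g. as the input whitening
   step of CBC encryption before the block cipher is applied to `iv'.

     buf_xor_1 (iv, inbuf, blocksize);
 */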


/* Optimized function for buffer xoring with two destination buffers.  Used
   mainly by CFB mode encryption.  */
static inline void
buf_xor_2dst(void *_dst1, void *_dst2, const void *_src, size_t len)
{
  byte *dst1 = _dst1;
  byte *dst2 = _dst2;
  const byte *src = _src;
  bufhelp_int_t *ldst1, *ldst2;
  const bufhelp_int_t *lsrc;
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
  const unsigned int longmask = sizeof(bufhelp_int_t) - 1;

  /* Skip fast processing if buffers are unaligned.  */
  if (((uintptr_t)src | (uintptr_t)dst1 | (uintptr_t)dst2) & longmask)
    goto do_bytes;
#endif

  ldst1 = (bufhelp_int_t *)(void *)dst1;
  ldst2 = (bufhelp_int_t *)(void *)dst2;
  lsrc = (const bufhelp_int_t *)(const void *)src;

  for (; len >= sizeof(bufhelp_int_t); len -= sizeof(bufhelp_int_t))
    (ldst1++)->a = ((ldst2++)->a ^= (lsrc++)->a);

  dst1 = (byte *)ldst1;
  dst2 = (byte *)ldst2;
  src = (const byte *)lsrc;

#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
do_bytes:
#endif
  /* Handle tail.  */
  for (; len; len--)
    *dst1++ = (*dst2++ ^= *src++);
}
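
/* Usage sketch (illustrative, not part of the original header): one CFB
   encryption step.  Here `ivbuf' is assumed to hold the block-cipher
   encryption of the current IV; afterwards `ivbuf' holds the new ciphertext
   block (which becomes the next IV) and `outbuf' receives a copy of it.

     buf_xor_2dst (outbuf, ivbuf, inbuf, blocksize);
 */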


/* Optimized function for combined buffer xoring and copying.  Used mainly
   by CBC mode decryption.  */
static inline void
buf_xor_n_copy_2(void *_dst_xor, const void *_src_xor, void *_srcdst_cpy,
                 const void *_src_cpy, size_t len)
{
  byte *dst_xor = _dst_xor;
  byte *srcdst_cpy = _srcdst_cpy;
  const byte *src_xor = _src_xor;
  const byte *src_cpy = _src_cpy;
  byte temp;
  bufhelp_int_t *ldst_xor, *lsrcdst_cpy;
  const bufhelp_int_t *lsrc_cpy, *lsrc_xor;
  uintptr_t ltemp;
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
  const unsigned int longmask = sizeof(bufhelp_int_t) - 1;

  /* Skip fast processing if buffers are unaligned.  */
  if (((uintptr_t)src_cpy | (uintptr_t)src_xor | (uintptr_t)dst_xor |
       (uintptr_t)srcdst_cpy) & longmask)
    goto do_bytes;
#endif

  ldst_xor = (bufhelp_int_t *)(void *)dst_xor;
  lsrc_xor = (const bufhelp_int_t *)(void *)src_xor;
  lsrcdst_cpy = (bufhelp_int_t *)(void *)srcdst_cpy;
  lsrc_cpy = (const bufhelp_int_t *)(const void *)src_cpy;

  for (; len >= sizeof(bufhelp_int_t); len -= sizeof(bufhelp_int_t))
    {
      ltemp = (lsrc_cpy++)->a;
      (ldst_xor++)->a = (lsrcdst_cpy)->a ^ (lsrc_xor++)->a;
      (lsrcdst_cpy++)->a = ltemp;
    }

  dst_xor = (byte *)ldst_xor;
  src_xor = (const byte *)lsrc_xor;
  srcdst_cpy = (byte *)lsrcdst_cpy;
  src_cpy = (const byte *)lsrc_cpy;

#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
do_bytes:
#endif
  /* Handle tail.  */
  for (; len; len--)
    {
      temp = *src_cpy++;
      *dst_xor++ = *srcdst_cpy ^ *src_xor++;
      *srcdst_cpy++ = temp;
    }
}
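
/* Usage sketch (illustrative, not part of the original header): one CBC
   decryption step.  `decrypted' is assumed to hold the block-cipher
   decryption of the current ciphertext block, `iv' the previous ciphertext
   block and `inbuf' the current ciphertext block.

     buf_xor_n_copy_2 (outbuf, decrypted, iv, inbuf, blocksize);

   Afterwards `outbuf' holds the plaintext (iv XOR decrypted) and `iv' holds
   the current ciphertext block for the next iteration; saving the copied
   value in a temporary first allows `inbuf' to alias `outbuf' for in-place
   operation.
 */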


/* Optimized function for combined buffer xoring and copying.  Used mainly
   by CFB mode decryption.  */
static inline void
buf_xor_n_copy(void *_dst_xor, void *_srcdst_cpy, const void *_src, size_t len)
{
  buf_xor_n_copy_2(_dst_xor, _src, _srcdst_cpy, _src, len);
}
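
/* Usage sketch (illustrative, not part of the original header): one CFB
   decryption step.  `ivbuf' is assumed to hold the block-cipher encryption
   of the current IV; afterwards `outbuf' holds the plaintext and `ivbuf'
   holds the ciphertext block that becomes the next IV.

     buf_xor_n_copy (outbuf, ivbuf, inbuf, blocksize);
 */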


/* Constant-time compare of two buffers.  Returns 1 if buffers are equal,
   and 0 if buffers differ.  */
static inline int
buf_eq_const(const void *_a, const void *_b, size_t len)
{
  const byte *a = _a;
  const byte *b = _b;
  size_t diff, i;

  /* Constant-time compare. */
  for (i = 0, diff = 0; i < len; i++)
    diff -= !!(a[i] - b[i]);

  return !diff;
}
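
/* Usage sketch (illustrative, not part of the original header): compare a
   computed authentication tag against the one received with a message
   without leaking, through timing, the position of the first difference.
   The names and the error code are only examples.

     if (!buf_eq_const (calculated_tag, received_tag, taglen))
       return GPG_ERR_CHECKSUM;
 */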


#ifndef BUFHELP_FAST_UNALIGNED_ACCESS

/* Functions for loading and storing unaligned u32 values of different
   endianness.  */
static inline u32 buf_get_be32(const void *_buf)
{
  const byte *in = _buf;
  return ((u32)in[0] << 24) | ((u32)in[1] << 16) | \
         ((u32)in[2] << 8) | (u32)in[3];
}

static inline u32 buf_get_le32(const void *_buf)
{
  const byte *in = _buf;
  return ((u32)in[3] << 24) | ((u32)in[2] << 16) | \
         ((u32)in[1] << 8) | (u32)in[0];
}

static inline void buf_put_be32(void *_buf, u32 val)
{
  byte *out = _buf;
  out[0] = val >> 24;
  out[1] = val >> 16;
  out[2] = val >> 8;
  out[3] = val;
}

static inline void buf_put_le32(void *_buf, u32 val)
{
  byte *out = _buf;
  out[3] = val >> 24;
  out[2] = val >> 16;
  out[1] = val >> 8;
  out[0] = val;
}


/* Functions for loading and storing unaligned u64 values of different
   endianness.  */
static inline u64 buf_get_be64(const void *_buf)
{
  const byte *in = _buf;
  return ((u64)in[0] << 56) | ((u64)in[1] << 48) | \
         ((u64)in[2] << 40) | ((u64)in[3] << 32) | \
         ((u64)in[4] << 24) | ((u64)in[5] << 16) | \
         ((u64)in[6] << 8) | (u64)in[7];
}

static inline u64 buf_get_le64(const void *_buf)
{
  const byte *in = _buf;
  return ((u64)in[7] << 56) | ((u64)in[6] << 48) | \
         ((u64)in[5] << 40) | ((u64)in[4] << 32) | \
         ((u64)in[3] << 24) | ((u64)in[2] << 16) | \
         ((u64)in[1] << 8) | (u64)in[0];
}

static inline void buf_put_be64(void *_buf, u64 val)
{
  byte *out = _buf;
  out[0] = val >> 56;
  out[1] = val >> 48;
  out[2] = val >> 40;
  out[3] = val >> 32;
  out[4] = val >> 24;
  out[5] = val >> 16;
  out[6] = val >> 8;
  out[7] = val;
}

static inline void buf_put_le64(void *_buf, u64 val)
{
  byte *out = _buf;
  out[7] = val >> 56;
  out[6] = val >> 48;
  out[5] = val >> 40;
  out[4] = val >> 32;
  out[3] = val >> 24;
  out[2] = val >> 16;
  out[1] = val >> 8;
  out[0] = val;
}

#else /*BUFHELP_FAST_UNALIGNED_ACCESS*/

typedef struct bufhelp_u32_s
{
  u32 a;
} __attribute__((packed, aligned(1))) bufhelp_u32_t;

/* Functions for loading and storing unaligned u32 values of different
   endianness.  */
static inline u32 buf_get_be32(const void *_buf)
{
  return be_bswap32(((const bufhelp_u32_t *)_buf)->a);
}

static inline u32 buf_get_le32(const void *_buf)
{
  return le_bswap32(((const bufhelp_u32_t *)_buf)->a);
}

static inline void buf_put_be32(void *_buf, u32 val)
{
  bufhelp_u32_t *out = _buf;
  out->a = be_bswap32(val);
}

static inline void buf_put_le32(void *_buf, u32 val)
{
  bufhelp_u32_t *out = _buf;
  out->a = le_bswap32(val);
}


typedef struct bufhelp_u64_s
{
  u64 a;
} __attribute__((packed, aligned(1))) bufhelp_u64_t;

/* Functions for loading and storing unaligned u64 values of different
   endianness.  */
static inline u64 buf_get_be64(const void *_buf)
{
  return be_bswap64(((const bufhelp_u64_t *)_buf)->a);
}

static inline u64 buf_get_le64(const void *_buf)
{
  return le_bswap64(((const bufhelp_u64_t *)_buf)->a);
}

static inline void buf_put_be64(void *_buf, u64 val)
{
  bufhelp_u64_t *out = _buf;
  out->a = be_bswap64(val);
}

static inline void buf_put_le64(void *_buf, u64 val)
{
  bufhelp_u64_t *out = _buf;
  out->a = le_bswap64(val);
}


#endif /*BUFHELP_FAST_UNALIGNED_ACCESS*/
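
/* Usage sketch (illustrative, not part of the original header): both
   branches above provide the same accessors, so callers can read and write
   potentially unaligned, endian-specific words without caring which
   implementation is active.  `inbuf' and `outbuf' are assumed to be byte
   buffers of at least 12 bytes.

     u32 w = buf_get_be32 (inbuf);
     buf_put_le32 (outbuf, w);
     u64 q = buf_get_le64 (inbuf + 4);
     buf_put_be64 (outbuf + 4, q);
 */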

#endif /*GCRYPT_BUFHELP_H*/
