/* bufhelp.h  -  Some buffer manipulation helpers
 * Copyright (C) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef GCRYPT_BUFHELP_H
#define GCRYPT_BUFHELP_H


#include "bithelp.h"


#undef BUFHELP_FAST_UNALIGNED_ACCESS
#if defined(HAVE_GCC_ATTRIBUTE_PACKED) && \
    defined(HAVE_GCC_ATTRIBUTE_ALIGNED) && \
    (defined(__i386__) || defined(__x86_64__) || \
     defined(__powerpc__) || defined(__powerpc64__) || \
     (defined(__arm__) && defined(__ARM_FEATURE_UNALIGNED)) || \
     defined(__aarch64__))
/* These architectures are able to perform unaligned memory accesses
   and handle them fast.
 */
# define BUFHELP_FAST_UNALIGNED_ACCESS 1
#endif


#ifdef BUFHELP_FAST_UNALIGNED_ACCESS
/* Define type with one-byte alignment on architectures with fast unaligned
   memory accesses.
 */
typedef struct bufhelp_int_s
{
  uintptr_t a;
} __attribute__((packed, aligned(1))) bufhelp_int_t;
#else
/* Define type with default alignment for other architectures (unaligned
   accesses are handled in per-byte loops).
 */
typedef struct bufhelp_int_s
{
  uintptr_t a;
} bufhelp_int_t;
#endif
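
/* How the packed type above is used: casting a possibly misaligned byte
   pointer to 'bufhelp_int_t *' and dereferencing it tells GCC, through the
   packed/aligned(1) attributes, that the access may be unaligned, so on the
   architectures listed above a plain word-sized load or store is emitted.
   A minimal sketch of the idea, with a hypothetical helper name:

     static inline uintptr_t
     load_word_unaligned (const void *p)
     {
       return ((const bufhelp_int_t *)p)->a;   (single word access)
     }

   Without BUFHELP_FAST_UNALIGNED_ACCESS the struct has default alignment,
   so the helpers below first check pointer alignment and otherwise fall
   back to per-byte loops.  */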


/* Optimized function for small buffer copying */
static inline void
buf_cpy(void *_dst, const void *_src, size_t len)
{
#if __GNUC__ >= 4 && (defined(__x86_64__) || defined(__i386__))
  /* For AMD64 and i386, memcpy is faster.  */
  memcpy(_dst, _src, len);
#else
  byte *dst = _dst;
  const byte *src = _src;
  bufhelp_int_t *ldst;
  const bufhelp_int_t *lsrc;
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
  const unsigned int longmask = sizeof(bufhelp_int_t) - 1;

  /* Skip fast processing if buffers are unaligned.  */
  if (((uintptr_t)dst | (uintptr_t)src) & longmask)
    goto do_bytes;
#endif

  ldst = (bufhelp_int_t *)(void *)dst;
  lsrc = (const bufhelp_int_t *)(const void *)src;

  for (; len >= sizeof(bufhelp_int_t); len -= sizeof(bufhelp_int_t))
    (ldst++)->a = (lsrc++)->a;

  dst = (byte *)ldst;
  src = (const byte *)lsrc;

#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
do_bytes:
#endif
  /* Handle tail.  */
  for (; len; len--)
    *dst++ = *src++;
#endif /*__GNUC__ >= 4 && (__x86_64__ || __i386__)*/
}
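
/* Illustrative usage sketch (hypothetical buffer names): copy one 16-byte
   block into a scratch buffer.

     byte block[16], tmp[16];
     buf_cpy (tmp, block, sizeof (tmp));

   On i386/AMD64 with GCC 4 or newer this is a plain memcpy; otherwise the
   copy runs word-wise (after an alignment check on architectures without
   fast unaligned access) and finishes with a byte-wise tail loop.  */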


/* Optimized function for buffer xoring */
static inline void
buf_xor(void *_dst, const void *_src1, const void *_src2, size_t len)
{
  byte *dst = _dst;
  const byte *src1 = _src1;
  const byte *src2 = _src2;
  bufhelp_int_t *ldst;
  const bufhelp_int_t *lsrc1, *lsrc2;
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
  const unsigned int longmask = sizeof(bufhelp_int_t) - 1;

  /* Skip fast processing if buffers are unaligned.  */
  if (((uintptr_t)dst | (uintptr_t)src1 | (uintptr_t)src2) & longmask)
    goto do_bytes;
#endif

  ldst = (bufhelp_int_t *)(void *)dst;
  lsrc1 = (const bufhelp_int_t *)(const void *)src1;
  lsrc2 = (const bufhelp_int_t *)(const void *)src2;

  for (; len >= sizeof(bufhelp_int_t); len -= sizeof(bufhelp_int_t))
    (ldst++)->a = (lsrc1++)->a ^ (lsrc2++)->a;

  dst = (byte *)ldst;
  src1 = (const byte *)lsrc1;
  src2 = (const byte *)lsrc2;

#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
do_bytes:
#endif
  /* Handle tail.  */
  for (; len; len--)
    *dst++ = *src1++ ^ *src2++;
}
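
/* Illustrative usage sketch (hypothetical buffer names): XOR a keystream
   block into plaintext to produce ciphertext, as a CTR-style mode would.

     byte keystream[16], plain[16], cipher[16];
     buf_xor (cipher, plain, keystream, 16);

   For a fully in-place XOR, buf_xor_1 below needs one pointer less.  */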


/* Optimized function for in-place buffer xoring. */
static inline void
buf_xor_1(void *_dst, const void *_src, size_t len)
{
  byte *dst = _dst;
  const byte *src = _src;
  bufhelp_int_t *ldst;
  const bufhelp_int_t *lsrc;
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
  const unsigned int longmask = sizeof(bufhelp_int_t) - 1;

  /* Skip fast processing if buffers are unaligned.  */
  if (((uintptr_t)dst | (uintptr_t)src) & longmask)
    goto do_bytes;
#endif

  ldst = (bufhelp_int_t *)(void *)dst;
  lsrc = (const bufhelp_int_t *)(const void *)src;

  for (; len >= sizeof(bufhelp_int_t); len -= sizeof(bufhelp_int_t))
    (ldst++)->a ^= (lsrc++)->a;

  dst = (byte *)ldst;
  src = (const byte *)lsrc;

#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
do_bytes:
#endif
  /* Handle tail.  */
  for (; len; len--)
    *dst++ ^= *src++;
}
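
/* Illustrative usage sketch (hypothetical buffer names): fold a pad or mask
   into a buffer in place.

     byte buf[16], pad[16];
     buf_xor_1 (buf, pad, sizeof (buf));   (buf[i] ^= pad[i] for all bytes)
*/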


/* Optimized function for buffer xoring with two destination buffers.  Used
   mainly by CFB mode encryption.  */
static inline void
buf_xor_2dst(void *_dst1, void *_dst2, const void *_src, size_t len)
{
  byte *dst1 = _dst1;
  byte *dst2 = _dst2;
  const byte *src = _src;
  bufhelp_int_t *ldst1, *ldst2;
  const bufhelp_int_t *lsrc;
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
  const unsigned int longmask = sizeof(bufhelp_int_t) - 1;

  /* Skip fast processing if buffers are unaligned.  */
  if (((uintptr_t)src | (uintptr_t)dst1 | (uintptr_t)dst2) & longmask)
    goto do_bytes;
#endif

  ldst1 = (bufhelp_int_t *)(void *)dst1;
  ldst2 = (bufhelp_int_t *)(void *)dst2;
  lsrc = (const bufhelp_int_t *)(const void *)src;

  for (; len >= sizeof(bufhelp_int_t); len -= sizeof(bufhelp_int_t))
    (ldst1++)->a = ((ldst2++)->a ^= (lsrc++)->a);

  dst1 = (byte *)ldst1;
  dst2 = (byte *)ldst2;
  src = (const byte *)lsrc;

#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
do_bytes:
#endif
  /* Handle tail.  */
  for (; len; len--)
    *dst1++ = (*dst2++ ^= *src++);
}
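
/* Illustrative usage sketch (hypothetical names): one CFB-style encryption
   step, where the XOR result must become both the ciphertext and the new
   feedback value.  Assuming 'iv' already holds the block-cipher encryption
   of the previous feedback value:

     byte iv[16], plain[16], cipher[16];
     buf_xor_2dst (cipher, iv, plain, 16);
       (now iv[i] ^= plain[i], and cipher[i] holds that same result)
*/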


/* Optimized function for combined buffer xoring and copying.  Used mainly
   by CBC mode decryption.  */
static inline void
buf_xor_n_copy_2(void *_dst_xor, const void *_src_xor, void *_srcdst_cpy,
                 const void *_src_cpy, size_t len)
{
  byte *dst_xor = _dst_xor;
  byte *srcdst_cpy = _srcdst_cpy;
  const byte *src_xor = _src_xor;
  const byte *src_cpy = _src_cpy;
  byte temp;
  bufhelp_int_t *ldst_xor, *lsrcdst_cpy;
  const bufhelp_int_t *lsrc_cpy, *lsrc_xor;
  uintptr_t ltemp;
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
  const unsigned int longmask = sizeof(bufhelp_int_t) - 1;

  /* Skip fast processing if buffers are unaligned.  */
  if (((uintptr_t)src_cpy | (uintptr_t)src_xor | (uintptr_t)dst_xor |
       (uintptr_t)srcdst_cpy) & longmask)
    goto do_bytes;
#endif

  ldst_xor = (bufhelp_int_t *)(void *)dst_xor;
  lsrc_xor = (const bufhelp_int_t *)(const void *)src_xor;
  lsrcdst_cpy = (bufhelp_int_t *)(void *)srcdst_cpy;
  lsrc_cpy = (const bufhelp_int_t *)(const void *)src_cpy;

  for (; len >= sizeof(bufhelp_int_t); len -= sizeof(bufhelp_int_t))
    {
      ltemp = (lsrc_cpy++)->a;
      (ldst_xor++)->a = (lsrcdst_cpy)->a ^ (lsrc_xor++)->a;
      (lsrcdst_cpy++)->a = ltemp;
    }

  dst_xor = (byte *)ldst_xor;
  src_xor = (const byte *)lsrc_xor;
  srcdst_cpy = (byte *)lsrcdst_cpy;
  src_cpy = (const byte *)lsrc_cpy;

#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
do_bytes:
#endif
  /* Handle tail.  */
  for (; len; len--)
    {
      temp = *src_cpy++;
      *dst_xor++ = *srcdst_cpy ^ *src_xor++;
      *srcdst_cpy++ = temp;
    }
}
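
/* Illustrative usage sketch (hypothetical names): one CBC-style decryption
   step.  'decrypted' holds the block-cipher decryption of the current
   ciphertext block, 'iv' the previous ciphertext block (or the IV):

     byte iv[16], decrypted[16], plain[16];
     const byte *cipher_in = ...;             (current ciphertext block)
     buf_xor_n_copy_2 (plain, decrypted, iv, cipher_in, 16);
       (plain = iv ^ decrypted, then iv is overwritten with cipher_in)
*/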


/* Optimized function for combined buffer xoring and copying.  Used mainly
   by CFB mode decryption.  */
static inline void
buf_xor_n_copy(void *_dst_xor, void *_srcdst_cpy, const void *_src, size_t len)
{
  buf_xor_n_copy_2(_dst_xor, _src, _srcdst_cpy, _src, len);
}
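
/* Illustrative usage sketch (hypothetical names): one CFB-style decryption
   step, where 'iv' holds the encrypted feedback value:

     byte iv[16], plain[16];
     const byte *cipher_in = ...;             (current ciphertext block)
     buf_xor_n_copy (plain, iv, cipher_in, 16);
       (plain = iv ^ cipher_in, then iv is overwritten with cipher_in)
*/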


/* Constant-time compare of two buffers.  Returns 1 if buffers are equal,
   and 0 if buffers differ.  */
static inline int
buf_eq_const(const void *_a, const void *_b, size_t len)
{
  const byte *a = _a;
  const byte *b = _b;
  size_t diff, i;

  /* Constant-time compare. */
  for (i = 0, diff = 0; i < len; i++)
    diff -= !!(a[i] - b[i]);

  return !diff;
}
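
/* Illustrative usage sketch (hypothetical names): check a received
   authentication tag against the locally computed one without leaking,
   through timing, where the first mismatching byte sits:

     byte calc_tag[16], recv_tag[16];
     if (!buf_eq_const (calc_tag, recv_tag, sizeof (calc_tag)))
       return GPG_ERR_CHECKSUM;               (tags differ: reject)
*/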


#ifndef BUFHELP_FAST_UNALIGNED_ACCESS

/* Functions for loading and storing unaligned u32 values of different
   endianness.  */
static inline u32 buf_get_be32(const void *_buf)
{
  const byte *in = _buf;
  return ((u32)in[0] << 24) | ((u32)in[1] << 16) | \
         ((u32)in[2] << 8) | (u32)in[3];
}

static inline u32 buf_get_le32(const void *_buf)
{
  const byte *in = _buf;
  return ((u32)in[3] << 24) | ((u32)in[2] << 16) | \
         ((u32)in[1] << 8) | (u32)in[0];
}

static inline void buf_put_be32(void *_buf, u32 val)
{
  byte *out = _buf;
  out[0] = val >> 24;
  out[1] = val >> 16;
  out[2] = val >> 8;
  out[3] = val;
}

static inline void buf_put_le32(void *_buf, u32 val)
{
  byte *out = _buf;
  out[3] = val >> 24;
  out[2] = val >> 16;
  out[1] = val >> 8;
  out[0] = val;
}

#ifdef HAVE_U64_TYPEDEF
/* Functions for loading and storing unaligned u64 values of different
   endianness.  */
static inline u64 buf_get_be64(const void *_buf)
{
  const byte *in = _buf;
  return ((u64)in[0] << 56) | ((u64)in[1] << 48) | \
         ((u64)in[2] << 40) | ((u64)in[3] << 32) | \
         ((u64)in[4] << 24) | ((u64)in[5] << 16) | \
         ((u64)in[6] << 8) | (u64)in[7];
}

static inline u64 buf_get_le64(const void *_buf)
{
  const byte *in = _buf;
  return ((u64)in[7] << 56) | ((u64)in[6] << 48) | \
         ((u64)in[5] << 40) | ((u64)in[4] << 32) | \
         ((u64)in[3] << 24) | ((u64)in[2] << 16) | \
         ((u64)in[1] << 8) | (u64)in[0];
}

static inline void buf_put_be64(void *_buf, u64 val)
{
  byte *out = _buf;
  out[0] = val >> 56;
  out[1] = val >> 48;
  out[2] = val >> 40;
  out[3] = val >> 32;
  out[4] = val >> 24;
  out[5] = val >> 16;
  out[6] = val >> 8;
  out[7] = val;
}

static inline void buf_put_le64(void *_buf, u64 val)
{
  byte *out = _buf;
  out[7] = val >> 56;
  out[6] = val >> 48;
  out[5] = val >> 40;
  out[4] = val >> 32;
  out[3] = val >> 24;
  out[2] = val >> 16;
  out[1] = val >> 8;
  out[0] = val;
}
#endif /*HAVE_U64_TYPEDEF*/

#else /*BUFHELP_FAST_UNALIGNED_ACCESS*/

typedef struct bufhelp_u32_s
{
  u32 a;
} __attribute__((packed, aligned(1))) bufhelp_u32_t;

/* Functions for loading and storing unaligned u32 values of different
   endianness.  */
static inline u32 buf_get_be32(const void *_buf)
{
  return be_bswap32(((const bufhelp_u32_t *)_buf)->a);
}

static inline u32 buf_get_le32(const void *_buf)
{
  return le_bswap32(((const bufhelp_u32_t *)_buf)->a);
}

static inline void buf_put_be32(void *_buf, u32 val)
{
  bufhelp_u32_t *out = _buf;
  out->a = be_bswap32(val);
}

static inline void buf_put_le32(void *_buf, u32 val)
{
  bufhelp_u32_t *out = _buf;
  out->a = le_bswap32(val);
}

#ifdef HAVE_U64_TYPEDEF

typedef struct bufhelp_u64_s
{
  u64 a;
} __attribute__((packed, aligned(1))) bufhelp_u64_t;

/* Functions for loading and storing unaligned u64 values of different
   endianness.  */
static inline u64 buf_get_be64(const void *_buf)
{
  return be_bswap64(((const bufhelp_u64_t *)_buf)->a);
}

static inline u64 buf_get_le64(const void *_buf)
{
  return le_bswap64(((const bufhelp_u64_t *)_buf)->a);
}

static inline void buf_put_be64(void *_buf, u64 val)
{
  bufhelp_u64_t *out = _buf;
  out->a = be_bswap64(val);
}

static inline void buf_put_le64(void *_buf, u64 val)
{
  bufhelp_u64_t *out = _buf;
  out->a = le_bswap64(val);
}
#endif /*HAVE_U64_TYPEDEF*/

#endif /*BUFHELP_FAST_UNALIGNED_ACCESS*/
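
/* Illustrative usage sketch for the load/store helpers above (hypothetical
   names): both implementations behave identically, so a caller can read and
   write a big-endian counter at an arbitrary, possibly unaligned offset:

     byte ctr[16];
     u32 n = buf_get_be32 (ctr + 12);
     buf_put_be32 (ctr + 12, n + 1);          (bump the low 32 counter bits)
*/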

#endif /*GCRYPT_BUFHELP_H*/
