/* crypto/md32_common.h */
/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com). This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

/*
 * This is a generic 32-bit "collector" for message digest algorithms.
 * Whenever needed it collects the input character stream into chunks of
 * 32-bit values and invokes a block function that performs the actual
 * hash calculations.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *	this macro defines the byte order of the input stream.
 * HASH_CBLOCK
 *	size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *	has to be at least 32 bits wide; if it's wider, then
 *	HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *	context structure that at least contains the following
 *	members:
 *		typedef struct {
 *			...
 *			HASH_LONG	Nl,Nh;
 *			HASH_LONG	data[HASH_LBLOCK];
 *			unsigned int	num;
 *			...
 *			} HASH_CTX;
 * HASH_UPDATE
 *	name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *	name of "Transform" function, implemented here.
 * HASH_FINAL
 *	name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *	name of "block" function treating *aligned* input message
 *	in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *	name of "block" function treating *unaligned* input message
 *	in original (data) byte order, implemented externally (it
 *	actually is optional if data and host are of the same
 *	"endianness").
 * HASH_MAKE_STRING
 *	macro converting context variables to the digest string.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *	defines host byte order.
 * HASH_LONG_LOG2
 *	defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *	alternative "block" function capable of treating
 *	aligned input message in original (data) order,
 *	implemented externally.
 *
 * MD5 example:
 *
 *	#define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *	#define HASH_LONG		MD5_LONG
 *	#define HASH_LONG_LOG2		MD5_LONG_LOG2
 *	#define HASH_CTX		MD5_CTX
 *	#define HASH_CBLOCK		MD5_CBLOCK
 *	#define HASH_LBLOCK		MD5_LBLOCK
 *	#define HASH_UPDATE		MD5_Update
 *	#define HASH_TRANSFORM		MD5_Transform
 *	#define HASH_FINAL		MD5_Final
 *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
 *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
 *
 * <appro@fy.chalmers.se>
 */
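/*
 * To make the recipe above concrete, here is a minimal sketch of how a
 * hypothetical digest with a 64-byte block and big-endian data order
 * would instantiate this template. TOY_CTX, TOY_Update,
 * toy_block_host_order and toy_block_data_order are placeholder names
 * for illustration only, not part of this header or of OpenSSL:
 *
 *	#define DATA_ORDER_IS_BIG_ENDIAN
 *
 *	typedef struct {
 *		unsigned int	h[5];		(chaining state)
 *		unsigned int	Nl,Nh;		(64-bit message bit count)
 *		unsigned int	data[16];	(collected input block)
 *		unsigned int	num;		(bytes pending in data[])
 *		} TOY_CTX;
 *
 *	#define HASH_LONG		unsigned int
 *	#define HASH_CTX		TOY_CTX
 *	#define HASH_CBLOCK		64
 *	#define HASH_UPDATE		TOY_Update
 *	#define HASH_TRANSFORM		TOY_Transform
 *	#define HASH_FINAL		TOY_Final
 *	#define HASH_BLOCK_HOST_ORDER	toy_block_host_order
 *	#define HASH_BLOCK_DATA_ORDER	toy_block_data_order
 *	#define HASH_MAKE_STRING(c,s)	do { ... } while (0)
 *
 *	#include "md32_common.h"
 *
 * with the two block functions implemented in a separate translation
 * unit.
 */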

#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#if 0
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 */
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK	(HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2	2
#endif

/*
 * Engage compiler specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER) || defined(__ICC)
#  define ROTATE(a,n)	_lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)	__rlwinm(a,n,0,31)
#  elif defined(__MC68K__)
    /* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)	( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)	__rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by *constant* number of bits! But that's exactly
   * what we need here...
   * <appro@fy.chalmers.se>
   */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"roll %1,%0"		\
				: "=r"(ret)		\
				: "I"(n), "0"(a)	\
				: "cc");		\
			   ret;				\
			})
#  elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"rlwinm %0,%1,%2,0,31"	\
				: "=r"(ret)		\
				: "r"(a), "I"(n));	\
			   ret;				\
			})
#  endif
# endif
#endif /* PEDANTIC */

#if HASH_LONG_LOG2==2 /* Engage only if sizeof(HASH_LONG)==4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)	(					\
		l=*(const HASH_LONG *)(a),				\
		((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))	\
				)
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)	(					\
		l=*(const HASH_LONG *)(a),				\
		l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),		\
		ROTATE(l,16)						\
				)
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It's rewritten as above for two reasons:
 *	- RISCs aren't good at long constants and have to explicitly
 *	  compose 'em with several (well, usually 2) instructions in a
 *	  register before performing the actual operation and (as you
 *	  already realized:-) having the same constant should inspire the
 *	  compiler to permanently allocate the only register for it;
 *	- most modern CPUs have two ALUs, but usually only one has
 *	  circuitry for shifts:-( this minor tweak inspires the compiler
 *	  to schedule shift instructions in a better way...
 *
 * <appro@fy.chalmers.se>
 */
#endif
#endif

#ifndef ROTATE
#define ROTATE(a,n)	(((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
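/*
 * A quick sanity check of the fallback: ROTATE(0x80000001,1) gives
 * (0x00000002|0x00000001) == 0x00000003, i.e. the bit shifted out on
 * the left re-enters on the right. The (a)&0xffffffff mask keeps the
 * right shift well-defined when HASH_LONG is wider than 32 bits, and
 * n is expected to satisfy 0<n<32 (a rotate count of 0 would trigger
 * an undefined shift by 32).
 */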

/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 * <appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
# if defined(DATA_ORDER_IS_BIG_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#  endif
# endif
#elif defined(L_ENDIAN)
# if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#  endif
# endif
#endif

#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
    /*
     * This gives ~30-40% performance improvement in SHA-256 compiled
     * with gcc [on P4]. Well, first macro to be frank. We can pull
     * this trick on x86* platforms only, because these CPUs can fetch
     * unaligned data without raising an exception.
     */
#   define HOST_c2l(c,l)	({ unsigned int r=*((const unsigned int *)(c));	\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   (c)+=4; (l)=r;			})
#   define HOST_l2c(l,c)	({ unsigned int r=(l);			\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   *((unsigned int *)(c))=r; (c)+=4; r;	})
#  endif
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))    ),		\
			 l)
#endif
/* the cases in the switch statements below fall through by design */
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 3: l|=((unsigned long)(*((c)++)));		\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
			 l)
#endif
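/*
 * For reference, the portable big-endian HOST_c2l/HOST_l2c pair above
 * is equivalent to the following plain functions (a sketch for
 * illustration only; these names are not used anywhere in this file):
 *
 *	static unsigned long c2l_be(const unsigned char **c)
 *		{
 *		const unsigned char *p=*c; *c+=4;
 *		return ((unsigned long)p[0]<<24)|((unsigned long)p[1]<<16)|
 *		       ((unsigned long)p[2]<< 8)|((unsigned long)p[3]);
 *		}
 *
 *	static void l2c_be(unsigned long l, unsigned char **c)
 *		{
 *		unsigned char *p=*c; *c+=4;
 *		p[0]=(unsigned char)(l>>24); p[1]=(unsigned char)(l>>16);
 *		p[2]=(unsigned char)(l>> 8); p[3]=(unsigned char)(l);
 *		}
 */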

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
  /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
# define HOST_c2l(c,l)	((l)=*((const unsigned int *)(c)), (c)+=4, l)
# define HOST_l2c(l,c)	(*((unsigned int *)(c))=(l), (c)+=4, l)
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))    ),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<<24),		\
			 l)
#endif
/* the cases in the switch statements below fall through by design */
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<<16;	\
			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
			case 1: l|=((unsigned long)(*(--(c))));		\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 l)
#endif

#endif

/*
 * Time for some action:-)
 */

int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
	{
	const unsigned char *data=data_;
	register HASH_LONG * p;
	register HASH_LONG l;
	size_t sw,sc,ew,ec;

	if (len==0) return 1;

	l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh+=(len>>29);	/* might cause compiler warning on 16-bit */
	c->Nl=l;

	if (c->num != 0)
		{
		p=c->data;
		sw=c->num>>2;
		sc=c->num&0x03;

		if ((c->num+len) >= HASH_CBLOCK)
			{
			l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
			for (; sw<HASH_LBLOCK; sw++)
				{
				HOST_c2l(data,l); p[sw]=l;
				}
			HASH_BLOCK_HOST_ORDER (c,p,1);
			len-=(HASH_CBLOCK-c->num);
			c->num=0;
			/* drop through and do the rest */
			}
		else
			{
			c->num+=(unsigned int)len;
			if ((sc+len) < 4) /* ugly, add char's to a word */
				{
				l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
				}
			else
				{
				ew=(c->num>>2);
				ec=(c->num&0x03);
				if (sc)
					l=p[sw];
				HOST_p_c2l(data,l,sc);
				p[sw++]=l;
				for (; sw < ew; sw++)
					{
					HOST_c2l(data,l); p[sw]=l;
					}
				if (ec)
					{
					HOST_c2l_p(data,l,ec); p[sw]=l;
					}
				}
			return 1;
			}
		}

	sw=len/HASH_CBLOCK;
	if (sw > 0)
		{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
		/*
		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
		 * only if sizeof(HASH_LONG)==4.
		 */
		if ((((size_t)data)%4) == 0)
			{
			/* data is properly aligned so that we can cast it: */
			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
		else
#if !defined(HASH_BLOCK_DATA_ORDER)
			while (sw--)
				{
				memcpy (p=c->data,data,HASH_CBLOCK);
				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
				data+=HASH_CBLOCK;
				len-=HASH_CBLOCK;
				}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
			{
			HASH_BLOCK_DATA_ORDER(c,data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
#endif
		}

	if (len!=0)
		{
		p = c->data;
		c->num = len;
		ew=len>>2;	/* words to copy */
		ec=len&0x03;
		for (; ew; ew--,p++)
			{
			HOST_c2l(data,l); *p=l;
			}
		HOST_c2l_p(data,l,ec);
		*p=l;
		}
	return 1;
	}
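/*
 * From the caller's point of view the function generated here is the
 * usual incremental "Update". E.g. with the MD5 instantiation from the
 * header comment, hashing a message in two pieces looks like:
 *
 *	MD5_CTX ctx;
 *	unsigned char md[MD5_DIGEST_LENGTH];
 *
 *	MD5_Init(&ctx);
 *	MD5_Update(&ctx,"hello ",6);
 *	MD5_Update(&ctx,"world",5);
 *	MD5_Final(md,&ctx);
 *
 * which yields the same digest as a single MD5_Update over the whole
 * 11-byte message.
 */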


void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
	{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
	if ((((size_t)data)%4) == 0)
		/* data is properly aligned so that we can cast it: */
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
	else
#if !defined(HASH_BLOCK_DATA_ORDER)
		{
		memcpy (c->data,data,HASH_CBLOCK);
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
		}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
	HASH_BLOCK_DATA_ORDER (c,data,1);
#endif
	}


int HASH_FINAL (unsigned char *md, HASH_CTX *c)
	{
	register HASH_LONG *p;
	register unsigned long l;
	register int i,j;
	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
	const unsigned char *cp=end;

	/* c->num should definitely have room for at least one more byte. */
	p=c->data;
	i=c->num>>2;
	j=c->num&0x03;

#if 0
	/* purify often complains about the following line as an
	 * Uninitialized Memory Read. While this can be true, the
	 * following p_c2l macro will reset l when that case is true.
	 * This is because j&0x03 contains the number of 'valid' bytes
	 * already in p[i]. If and only if j&0x03 == 0, the UMR will
	 * occur but this is also the only time p_c2l will do
	 * l= *(cp++) instead of l|= *(cp++)
	 * Many thanks to Alex Tang <altitude@cic.net> for picking up
	 * this 'potential bug' */
#ifdef PURIFY
	if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
#endif
	l=p[i];
#else
	l = (j==0) ? 0 : p[i];
#endif
	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */

	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
		{
		if (i<HASH_LBLOCK) p[i]=0;
		HASH_BLOCK_HOST_ORDER (c,p,1);
		i=0;
		}
	for (; i<(HASH_LBLOCK-2); i++)
		p[i]=0;

#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nh;
	p[HASH_LBLOCK-1]=c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nl;
	p[HASH_LBLOCK-1]=c->Nh;
#endif
	HASH_BLOCK_HOST_ORDER (c,p,1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
	HASH_MAKE_STRING(c,md);
#endif

	c->num=0;
	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
	 * but I'm not worried :-)
	OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
	 */
	return 1;
	}
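/*
 * The padding performed above is the classic Merkle-Damgard
 * strengthening: one 0x80 byte, then zero bytes up to the last two
 * words, then the 64-bit bit count Nh:Nl. For a 64-byte block a
 * message with n (mod 64) residual bytes therefore needs (55-n) mod 64
 * zero bytes: e.g. n==55 needs none, while n==56 spills into a second
 * block.
 */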

#ifndef MD32_REG_T
#define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to a common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG: doing so results in a slight performance
 * boost on LP64 architectures. The catch is we don't
 * really care if 32 MSBs of a 64-bit register get polluted
 * with eventual overflows as we *save* only 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping 32 MSBs zeroed, resulting in 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 * <appro@fy.chalmers.se>
 * Apparently there are LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 * <appro@fy.chalmers.se>
 */
#endif
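/*
 * A block function would accordingly keep its working variables in
 * MD32_REG_T, e.g. (a sketch; the variable names are arbitrary):
 *
 *	register MD32_REG_T A,B,C,D;
 *
 * and rely on the final 32-bit store into the context to discard any
 * garbage accumulated in the upper half of the register.
 */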
|