### Generated by hash_md5_sha_x86-64.S.sh ###
#if defined(__GNUC__) && defined(__x86_64__)
        .section .text.sha1_process_block64,"ax",@progbits
        .globl sha1_process_block64
        .hidden sha1_process_block64
        .type sha1_process_block64, @function
sha1_process_block64:
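# Input: %rdi = ctx (first argument per the System V AMD64 ABI).
# The 64-byte message block sits at ctx offset 0 and ctx->hash[0..4]
# at offset 80, as the loads below and the epilogue show.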
        pushq %r15 #
        pushq %r14 #
        pushq %r13 #
        pushq %r12 #
        pushq %rbp #
        pushq %rbx #
        pushq %rdi # we need ctx at the end

# Register and stack use:
# eax..edx: a..d
# ebp: e
# esi,edi: temps
# -32+4*n(%rsp),r8...r15: W[0..7,8..15]

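# Load the block's words 8..15, big-endian, into %r8d..%r15d.
# Each movq fetches two words at once; after bswapq the odd word is
# in the low half and the even word in the high half, so a movl plus
# shrq $32 splits the pair into two registers.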
        movq 4*8(%rdi), %r8
        bswapq %r8
        movl %r8d, %r9d
        shrq $32, %r8
        movq 4*10(%rdi), %r10
        bswapq %r10
        movl %r10d, %r11d
        shrq $32, %r10
        movq 4*12(%rdi), %r12
        bswapq %r12
        movl %r12d, %r13d
        shrq $32, %r12
        movq 4*14(%rdi), %r14
        bswapq %r14
        movl %r14d, %r15d
        shrq $32, %r14

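# Byte-swap words 0..7 into the red zone at -32(%rsp)..-4(%rsp),
# eight bytes per iteration; rolq $32 swaps the two words back into
# ascending order after bswapq reversed them. The last iteration
# leaves W[0] in %esi, which round 0 uses directly.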
        movl $3, %eax
1:
        movq (%rdi,%rax,8), %rsi
        bswapq %rsi
        rolq $32, %rsi
        movq %rsi, -32(%rsp,%rax,8)
        decl %eax
        jns 1b
        movl 80(%rdi), %eax # a = ctx->hash[0]
        movl 84(%rdi), %ebx # b = ctx->hash[1]
        movl 88(%rdi), %ecx # c = ctx->hash[2]
        movl 92(%rdi), %edx # d = ctx->hash[3]
        movl 96(%rdi), %ebp # e = ctx->hash[4]

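# Rounds 0..19: e += rotl32(a,5) + RCONST + W[n] + ch(b,c,d), with
# RCONST = 0x5A827999 and ch(b,c,d) = (((c ^ d) & b) ^ d), i.e. b
# selects between c and d. leal folds RCONST + W[n] into one add;
# only the low 32 bits of its 64-bit result are kept. Rather than
# moving a..e between registers, each round rotates their roles,
# and rorl $2 implements b = rotl32(b,30).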
# 0
# W[0], already in %esi
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        andl %ebx, %edi # &b
        xorl %edx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rbp,%rsi),%ebp # e += RCONST + W[n]
        addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 1
        movl -32+4*1(%rsp), %esi # W[n]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        andl %eax, %edi # &b
        xorl %ecx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rdx,%rsi),%edx # e += RCONST + W[n]
        addl %edi, %edx # e += (((c ^ d) & b) ^ d)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 2
        movl -32+4*2(%rsp), %esi # W[n]
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        andl %ebp, %edi # &b
        xorl %ebx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rcx,%rsi),%ecx # e += RCONST + W[n]
        addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 3
        movl -32+4*3(%rsp), %esi # W[n]
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        andl %edx, %edi # &b
        xorl %eax, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rbx,%rsi),%ebx # e += RCONST + W[n]
        addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 4
        movl -32+4*4(%rsp), %esi # W[n]
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        andl %ecx, %edi # &b
        xorl %ebp, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rax,%rsi),%eax # e += RCONST + W[n]
        addl %edi, %eax # e += (((c ^ d) & b) ^ d)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 5
        movl -32+4*5(%rsp), %esi # W[n]
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        andl %ebx, %edi # &b
        xorl %edx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rbp,%rsi),%ebp # e += RCONST + W[n]
        addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 6
        movl -32+4*6(%rsp), %esi # W[n]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        andl %eax, %edi # &b
        xorl %ecx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rdx,%rsi),%edx # e += RCONST + W[n]
        addl %edi, %edx # e += (((c ^ d) & b) ^ d)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 7
        movl -32+4*7(%rsp), %esi # W[n]
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        andl %ebp, %edi # &b
        xorl %ebx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rcx,%rsi),%ecx # e += RCONST + W[n]
        addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 8
# W[n], in %r8
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        andl %edx, %edi # &b
        xorl %eax, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rbx,%r8),%ebx # e += RCONST + W[n]
        addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 9
# W[n], in %r9
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        andl %ecx, %edi # &b
        xorl %ebp, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rax,%r9),%eax # e += RCONST + W[n]
        addl %edi, %eax # e += (((c ^ d) & b) ^ d)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 10
# W[n], in %r10
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        andl %ebx, %edi # &b
        xorl %edx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rbp,%r10),%ebp # e += RCONST + W[n]
        addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 11
# W[n], in %r11
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        andl %eax, %edi # &b
        xorl %ecx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rdx,%r11),%edx # e += RCONST + W[n]
        addl %edi, %edx # e += (((c ^ d) & b) ^ d)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 12
# W[n], in %r12
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        andl %ebp, %edi # &b
        xorl %ebx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rcx,%r12),%ecx # e += RCONST + W[n]
        addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 13
# W[n], in %r13
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        andl %edx, %edi # &b
        xorl %eax, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rbx,%r13),%ebx # e += RCONST + W[n]
        addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 14
# W[n], in %r14
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        andl %ecx, %edi # &b
        xorl %ebp, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rax,%r14),%eax # e += RCONST + W[n]
        addl %edi, %eax # e += (((c ^ d) & b) ^ d)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 15
# W[n], in %r15
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        andl %ebx, %edi # &b
        xorl %edx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rbp,%r15),%ebp # e += RCONST + W[n]
        addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
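# From round 16 on, W[] is expanded in place over a 16-entry window:
# W[n & 15] = rotl32(W[(n+13) & 15] ^ W[(n+8) & 15] ^ W[(n+2) & 15]
# ^ W[n & 15], 1). roll without an immediate rotates left by one.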
# 16
        movl %r13d, %esi # W[(n+13) & 15]
        xorl %r8d, %esi # ^W[(n+8) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*0(%rsp) # store to W[n & 15]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        andl %eax, %edi # &b
        xorl %ecx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rdx,%rsi),%edx # e += RCONST + W[n]
        addl %edi, %edx # e += (((c ^ d) & b) ^ d)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 17
        movl %r14d, %esi # W[(n+13) & 15]
        xorl %r9d, %esi # ^W[(n+8) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*1(%rsp) # store to W[n & 15]
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        andl %ebp, %edi # &b
        xorl %ebx, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rcx,%rsi),%ecx # e += RCONST + W[n]
        addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 18
        movl %r15d, %esi # W[(n+13) & 15]
        xorl %r10d, %esi # ^W[(n+8) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*2(%rsp) # store to W[n & 15]
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        andl %edx, %edi # &b
        xorl %eax, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rbx,%rsi),%ebx # e += RCONST + W[n]
        addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 19
        movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
        xorl %r11d, %esi # ^W[(n+8) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*3(%rsp) # store to W[n & 15]
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        andl %ecx, %edi # &b
        xorl %ebp, %edi # (((c ^ d) & b) ^ d)
        leal 0x5A827999(%rax,%rsi),%eax # e += RCONST + W[n]
        addl %edi, %eax # e += (((c ^ d) & b) ^ d)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
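# Rounds 20..39: the round function becomes parity, (c ^ d ^ b),
# and RCONST changes to 0x6ED9EBA1.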
# 20
        movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
        xorl %r12d, %esi # ^W[(n+8) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*4(%rsp) # store to W[n & 15]
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        xorl %ebx, %edi # ^b
        leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        addl %edi, %ebp # e += (c ^ d ^ b)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 21
        movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
        xorl %r13d, %esi # ^W[(n+8) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*5(%rsp) # store to W[n & 15]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        xorl %eax, %edi # ^b
        leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
        addl %edi, %edx # e += (c ^ d ^ b)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 22
        movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
        xorl %r14d, %esi # ^W[(n+8) & 15]
        xorl %r8d, %esi # ^W[(n+2) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*6(%rsp) # store to W[n & 15]
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        xorl %ebp, %edi # ^b
        leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        addl %edi, %ecx # e += (c ^ d ^ b)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 23
        movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
        xorl %r15d, %esi # ^W[(n+8) & 15]
        xorl %r9d, %esi # ^W[(n+2) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*7(%rsp) # store to W[n & 15]
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        xorl %edx, %edi # ^b
        leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        addl %edi, %ebx # e += (c ^ d ^ b)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 24
        movl -32+4*5(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r10d, %esi # ^W[(n+2) & 15]
        xorl %r8d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r8d # store to W[n & 15]
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        xorl %ecx, %edi # ^b
        leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
        addl %edi, %eax # e += (c ^ d ^ b)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 25
        movl -32+4*6(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r11d, %esi # ^W[(n+2) & 15]
        xorl %r9d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r9d # store to W[n & 15]
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        xorl %ebx, %edi # ^b
        leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        addl %edi, %ebp # e += (c ^ d ^ b)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 26
        movl -32+4*7(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r12d, %esi # ^W[(n+2) & 15]
        xorl %r10d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r10d # store to W[n & 15]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        xorl %eax, %edi # ^b
        leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
        addl %edi, %edx # e += (c ^ d ^ b)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 27
        movl %r8d, %esi # W[(n+13) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r13d, %esi # ^W[(n+2) & 15]
        xorl %r11d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r11d # store to W[n & 15]
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        xorl %ebp, %edi # ^b
        leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        addl %edi, %ecx # e += (c ^ d ^ b)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 28
        movl %r9d, %esi # W[(n+13) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r14d, %esi # ^W[(n+2) & 15]
        xorl %r12d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r12d # store to W[n & 15]
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        xorl %edx, %edi # ^b
        leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        addl %edi, %ebx # e += (c ^ d ^ b)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 29
        movl %r10d, %esi # W[(n+13) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r15d, %esi # ^W[(n+2) & 15]
        xorl %r13d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r13d # store to W[n & 15]
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        xorl %ecx, %edi # ^b
        leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
        addl %edi, %eax # e += (c ^ d ^ b)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 30
        movl %r11d, %esi # W[(n+13) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[(n+8) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[(n+2) & 15]
        xorl %r14d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r14d # store to W[n & 15]
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        xorl %ebx, %edi # ^b
        leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        addl %edi, %ebp # e += (c ^ d ^ b)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 31
        movl %r12d, %esi # W[(n+13) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[(n+8) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[(n+2) & 15]
        xorl %r15d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r15d # store to W[n & 15]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        xorl %eax, %edi # ^b
        leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
        addl %edi, %edx # e += (c ^ d ^ b)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 32
        movl %r13d, %esi # W[(n+13) & 15]
        xorl %r8d, %esi # ^W[(n+8) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*0(%rsp) # store to W[n & 15]
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        xorl %ebp, %edi # ^b
        leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        addl %edi, %ecx # e += (c ^ d ^ b)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 33
        movl %r14d, %esi # W[(n+13) & 15]
        xorl %r9d, %esi # ^W[(n+8) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*1(%rsp) # store to W[n & 15]
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        xorl %edx, %edi # ^b
        leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        addl %edi, %ebx # e += (c ^ d ^ b)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 34
        movl %r15d, %esi # W[(n+13) & 15]
        xorl %r10d, %esi # ^W[(n+8) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*2(%rsp) # store to W[n & 15]
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        xorl %ecx, %edi # ^b
        leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
        addl %edi, %eax # e += (c ^ d ^ b)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 35
        movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
        xorl %r11d, %esi # ^W[(n+8) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*3(%rsp) # store to W[n & 15]
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        xorl %ebx, %edi # ^b
        leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        addl %edi, %ebp # e += (c ^ d ^ b)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 36
        movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
        xorl %r12d, %esi # ^W[(n+8) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*4(%rsp) # store to W[n & 15]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        xorl %eax, %edi # ^b
        leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + mixed_W
        addl %edi, %edx # e += (c ^ d ^ b)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 37
        movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
        xorl %r13d, %esi # ^W[(n+8) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*5(%rsp) # store to W[n & 15]
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        xorl %ebp, %edi # ^b
        leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        addl %edi, %ecx # e += (c ^ d ^ b)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 38
        movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
        xorl %r14d, %esi # ^W[(n+8) & 15]
        xorl %r8d, %esi # ^W[(n+2) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*6(%rsp) # store to W[n & 15]
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        xorl %edx, %edi # ^b
        leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        addl %edi, %ebx # e += (c ^ d ^ b)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 39
        movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
        xorl %r15d, %esi # ^W[(n+8) & 15]
        xorl %r9d, %esi # ^W[(n+2) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*7(%rsp) # store to W[n & 15]
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        xorl %ecx, %edi # ^b
        leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + mixed_W
        addl %edi, %eax # e += (c ^ d ^ b)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
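# Rounds 40..59: majority function, computed as ((b | c) & d) | (b & c),
# with RCONST = 0x8F1BBCDC, which leal encodes as the equivalent
# signed 32-bit displacement -0x70e44324.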
# 40
        movl %ebx, %edi # di: b
        movl %ebx, %esi # si: b
        orl %ecx, %edi # di: b | c
        andl %ecx, %esi # si: b & c
        andl %edx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*5(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r10d, %esi # ^W[(n+2) & 15]
        xorl %r8d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r8d # store to W[n & 15]
        addl %edi, %ebp # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 41
        movl %eax, %edi # di: b
        movl %eax, %esi # si: b
        orl %ebx, %edi # di: b | c
        andl %ebx, %esi # si: b & c
        andl %ecx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*6(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r11d, %esi # ^W[(n+2) & 15]
        xorl %r9d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r9d # store to W[n & 15]
        addl %edi, %edx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 42
        movl %ebp, %edi # di: b
        movl %ebp, %esi # si: b
        orl %eax, %edi # di: b | c
        andl %eax, %esi # si: b & c
        andl %ebx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*7(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r12d, %esi # ^W[(n+2) & 15]
        xorl %r10d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r10d # store to W[n & 15]
        addl %edi, %ecx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 43
        movl %edx, %edi # di: b
        movl %edx, %esi # si: b
        orl %ebp, %edi # di: b | c
        andl %ebp, %esi # si: b & c
        andl %eax, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl %r8d, %esi # W[(n+13) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r13d, %esi # ^W[(n+2) & 15]
        xorl %r11d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r11d # store to W[n & 15]
        addl %edi, %ebx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 44
        movl %ecx, %edi # di: b
        movl %ecx, %esi # si: b
        orl %edx, %edi # di: b | c
        andl %edx, %esi # si: b & c
        andl %ebp, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl %r9d, %esi # W[(n+13) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r14d, %esi # ^W[(n+2) & 15]
        xorl %r12d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r12d # store to W[n & 15]
        addl %edi, %eax # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 45
        movl %ebx, %edi # di: b
        movl %ebx, %esi # si: b
        orl %ecx, %edi # di: b | c
        andl %ecx, %esi # si: b & c
        andl %edx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl %r10d, %esi # W[(n+13) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r15d, %esi # ^W[(n+2) & 15]
        xorl %r13d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r13d # store to W[n & 15]
        addl %edi, %ebp # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 46
        movl %eax, %edi # di: b
        movl %eax, %esi # si: b
        orl %ebx, %edi # di: b | c
        andl %ebx, %esi # si: b & c
        andl %ecx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl %r11d, %esi # W[(n+13) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[(n+8) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[(n+2) & 15]
        xorl %r14d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r14d # store to W[n & 15]
        addl %edi, %edx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 47
        movl %ebp, %edi # di: b
        movl %ebp, %esi # si: b
        orl %eax, %edi # di: b | c
        andl %eax, %esi # si: b & c
        andl %ebx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl %r12d, %esi # W[(n+13) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[(n+8) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[(n+2) & 15]
        xorl %r15d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r15d # store to W[n & 15]
        addl %edi, %ecx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 48
        movl %edx, %edi # di: b
        movl %edx, %esi # si: b
        orl %ebp, %edi # di: b | c
        andl %ebp, %esi # si: b & c
        andl %eax, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl %r13d, %esi # W[(n+13) & 15]
        xorl %r8d, %esi # ^W[(n+8) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*0(%rsp) # store to W[n & 15]
        addl %edi, %ebx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 49
        movl %ecx, %edi # di: b
        movl %ecx, %esi # si: b
        orl %edx, %edi # di: b | c
        andl %edx, %esi # si: b & c
        andl %ebp, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl %r14d, %esi # W[(n+13) & 15]
        xorl %r9d, %esi # ^W[(n+8) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*1(%rsp) # store to W[n & 15]
        addl %edi, %eax # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 50
        movl %ebx, %edi # di: b
        movl %ebx, %esi # si: b
        orl %ecx, %edi # di: b | c
        andl %ecx, %esi # si: b & c
        andl %edx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl %r15d, %esi # W[(n+13) & 15]
        xorl %r10d, %esi # ^W[(n+8) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*2(%rsp) # store to W[n & 15]
        addl %edi, %ebp # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 51
        movl %eax, %edi # di: b
        movl %eax, %esi # si: b
        orl %ebx, %edi # di: b | c
        andl %ebx, %esi # si: b & c
        andl %ecx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
        xorl %r11d, %esi # ^W[(n+8) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*3(%rsp) # store to W[n & 15]
        addl %edi, %edx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 52
        movl %ebp, %edi # di: b
        movl %ebp, %esi # si: b
        orl %eax, %edi # di: b | c
        andl %eax, %esi # si: b & c
        andl %ebx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
        xorl %r12d, %esi # ^W[(n+8) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*4(%rsp) # store to W[n & 15]
        addl %edi, %ecx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 53
        movl %edx, %edi # di: b
        movl %edx, %esi # si: b
        orl %ebp, %edi # di: b | c
        andl %ebp, %esi # si: b & c
        andl %eax, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
        xorl %r13d, %esi # ^W[(n+8) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*5(%rsp) # store to W[n & 15]
        addl %edi, %ebx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 54
        movl %ecx, %edi # di: b
        movl %ecx, %esi # si: b
        orl %edx, %edi # di: b | c
        andl %edx, %esi # si: b & c
        andl %ebp, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
        xorl %r14d, %esi # ^W[(n+8) & 15]
        xorl %r8d, %esi # ^W[(n+2) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*6(%rsp) # store to W[n & 15]
        addl %edi, %eax # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 55
        movl %ebx, %edi # di: b
        movl %ebx, %esi # si: b
        orl %ecx, %edi # di: b | c
        andl %ecx, %esi # si: b & c
        andl %edx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
        xorl %r15d, %esi # ^W[(n+8) & 15]
        xorl %r9d, %esi # ^W[(n+2) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*7(%rsp) # store to W[n & 15]
        addl %edi, %ebp # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 56
        movl %eax, %edi # di: b
        movl %eax, %esi # si: b
        orl %ebx, %edi # di: b | c
        andl %ebx, %esi # si: b & c
        andl %ecx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*5(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r10d, %esi # ^W[(n+2) & 15]
        xorl %r8d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r8d # store to W[n & 15]
        addl %edi, %edx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rdx,%rsi), %edx # e += RCONST + mixed_W
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 57
        movl %ebp, %edi # di: b
        movl %ebp, %esi # si: b
        orl %eax, %edi # di: b | c
        andl %eax, %esi # si: b & c
        andl %ebx, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*6(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r11d, %esi # ^W[(n+2) & 15]
        xorl %r9d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r9d # store to W[n & 15]
        addl %edi, %ecx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 58
        movl %edx, %edi # di: b
        movl %edx, %esi # si: b
        orl %ebp, %edi # di: b | c
        andl %ebp, %esi # si: b & c
        andl %eax, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl -32+4*7(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r12d, %esi # ^W[(n+2) & 15]
        xorl %r10d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r10d # store to W[n & 15]
        addl %edi, %ebx # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 59
        movl %ecx, %edi # di: b
        movl %ecx, %esi # si: b
        orl %edx, %edi # di: b | c
        andl %edx, %esi # si: b & c
        andl %ebp, %edi # di: (b | c) & d
        orl %esi, %edi # ((b | c) & d) | (b & c)
        movl %r8d, %esi # W[(n+13) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r13d, %esi # ^W[(n+2) & 15]
        xorl %r11d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r11d # store to W[n & 15]
        addl %edi, %eax # += ((b | c) & d) | (b & c)
        leal -0x70e44324(%rax,%rsi), %eax # e += RCONST + mixed_W
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
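# Rounds 60..79: parity again, with RCONST = 0xCA62C1D6 (encoded as
# the signed displacement -0x359d3e2a). The final three rounds skip
# the W[] store because those slots are never read again.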
# 60
        movl %r9d, %esi # W[(n+13) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r14d, %esi # ^W[(n+2) & 15]
        xorl %r12d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r12d # store to W[n & 15]
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        xorl %ebx, %edi # ^b
        leal -0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        addl %edi, %ebp # e += (c ^ d ^ b)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 61
        movl %r10d, %esi # W[(n+13) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r15d, %esi # ^W[(n+2) & 15]
        xorl %r13d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r13d # store to W[n & 15]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        xorl %eax, %edi # ^b
        leal -0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
        addl %edi, %edx # e += (c ^ d ^ b)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 62
        movl %r11d, %esi # W[(n+13) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[(n+8) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[(n+2) & 15]
        xorl %r14d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r14d # store to W[n & 15]
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        xorl %ebp, %edi # ^b
        leal -0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        addl %edi, %ecx # e += (c ^ d ^ b)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 63
        movl %r12d, %esi # W[(n+13) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[(n+8) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[(n+2) & 15]
        xorl %r15d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r15d # store to W[n & 15]
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        xorl %edx, %edi # ^b
        leal -0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        addl %edi, %ebx # e += (c ^ d ^ b)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 64
        movl %r13d, %esi # W[(n+13) & 15]
        xorl %r8d, %esi # ^W[(n+8) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*0(%rsp) # store to W[n & 15]
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        xorl %ecx, %edi # ^b
        leal -0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
        addl %edi, %eax # e += (c ^ d ^ b)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 65
        movl %r14d, %esi # W[(n+13) & 15]
        xorl %r9d, %esi # ^W[(n+8) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*1(%rsp) # store to W[n & 15]
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        xorl %ebx, %edi # ^b
        leal -0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        addl %edi, %ebp # e += (c ^ d ^ b)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 66
        movl %r15d, %esi # W[(n+13) & 15]
        xorl %r10d, %esi # ^W[(n+8) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*2(%rsp) # store to W[n & 15]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        xorl %eax, %edi # ^b
        leal -0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
        addl %edi, %edx # e += (c ^ d ^ b)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 67
        movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
        xorl %r11d, %esi # ^W[(n+8) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*3(%rsp) # store to W[n & 15]
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        xorl %ebp, %edi # ^b
        leal -0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        addl %edi, %ecx # e += (c ^ d ^ b)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 68
        movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
        xorl %r12d, %esi # ^W[(n+8) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*4(%rsp) # store to W[n & 15]
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        xorl %edx, %edi # ^b
        leal -0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        addl %edi, %ebx # e += (c ^ d ^ b)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 69
        movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
        xorl %r13d, %esi # ^W[(n+8) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*5(%rsp) # store to W[n & 15]
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        xorl %ecx, %edi # ^b
        leal -0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
        addl %edi, %eax # e += (c ^ d ^ b)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 70
        movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
        xorl %r14d, %esi # ^W[(n+8) & 15]
        xorl %r8d, %esi # ^W[(n+2) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*6(%rsp) # store to W[n & 15]
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        xorl %ebx, %edi # ^b
        leal -0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        addl %edi, %ebp # e += (c ^ d ^ b)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 71
        movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
        xorl %r15d, %esi # ^W[(n+8) & 15]
        xorl %r9d, %esi # ^W[(n+2) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[n & 15]
        roll %esi #
        movl %esi, -32+4*7(%rsp) # store to W[n & 15]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        xorl %eax, %edi # ^b
        leal -0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
        addl %edi, %edx # e += (c ^ d ^ b)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 72
        movl -32+4*5(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r10d, %esi # ^W[(n+2) & 15]
        xorl %r8d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r8d # store to W[n & 15]
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        xorl %ebp, %edi # ^b
        leal -0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        addl %edi, %ecx # e += (c ^ d ^ b)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 73
        movl -32+4*6(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r11d, %esi # ^W[(n+2) & 15]
        xorl %r9d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r9d # store to W[n & 15]
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        xorl %edx, %edi # ^b
        leal -0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        addl %edi, %ebx # e += (c ^ d ^ b)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 74
        movl -32+4*7(%rsp), %esi # W[(n+13) & 15]
        xorl -32+4*2(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r12d, %esi # ^W[(n+2) & 15]
        xorl %r10d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r10d # store to W[n & 15]
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        xorl %ecx, %edi # ^b
        leal -0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
        addl %edi, %eax # e += (c ^ d ^ b)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)
# 75
        movl %r8d, %esi # W[(n+13) & 15]
        xorl -32+4*3(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r13d, %esi # ^W[(n+2) & 15]
        xorl %r11d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r11d # store to W[n & 15]
        movl %ecx, %edi # c
        xorl %edx, %edi # ^d
        xorl %ebx, %edi # ^b
        leal -0x359d3e2a(%rbp,%rsi), %ebp # e += RCONST + mixed_W
        addl %edi, %ebp # e += (c ^ d ^ b)
        movl %eax, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebp # e += rotl32(a,5)
        rorl $2, %ebx # b = rotl32(b,30)
# 76
        movl %r9d, %esi # W[(n+13) & 15]
        xorl -32+4*4(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r14d, %esi # ^W[(n+2) & 15]
        xorl %r12d, %esi # ^W[n & 15]
        roll %esi #
        movl %esi, %r12d # store to W[n & 15]
        movl %ebx, %edi # c
        xorl %ecx, %edi # ^d
        xorl %eax, %edi # ^b
        leal -0x359d3e2a(%rdx,%rsi), %edx # e += RCONST + mixed_W
        addl %edi, %edx # e += (c ^ d ^ b)
        movl %ebp, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %edx # e += rotl32(a,5)
        rorl $2, %eax # b = rotl32(b,30)
# 77
        movl %r10d, %esi # W[(n+13) & 15]
        xorl -32+4*5(%rsp), %esi # ^W[(n+8) & 15]
        xorl %r15d, %esi # ^W[(n+2) & 15]
        xorl %r13d, %esi # ^W[n & 15]
        roll %esi #
# store to W[n & 15] - unused, not done
        movl %eax, %edi # c
        xorl %ebx, %edi # ^d
        xorl %ebp, %edi # ^b
        leal -0x359d3e2a(%rcx,%rsi), %ecx # e += RCONST + mixed_W
        addl %edi, %ecx # e += (c ^ d ^ b)
        movl %edx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ecx # e += rotl32(a,5)
        rorl $2, %ebp # b = rotl32(b,30)
# 78
        movl %r11d, %esi # W[(n+13) & 15]
        xorl -32+4*6(%rsp), %esi # ^W[(n+8) & 15]
        xorl -32+4*0(%rsp), %esi # ^W[(n+2) & 15]
        xorl %r14d, %esi # ^W[n & 15]
        roll %esi #
# store to W[n & 15] - unused, not done
        movl %ebp, %edi # c
        xorl %eax, %edi # ^d
        xorl %edx, %edi # ^b
        leal -0x359d3e2a(%rbx,%rsi), %ebx # e += RCONST + mixed_W
        addl %edi, %ebx # e += (c ^ d ^ b)
        movl %ecx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %ebx # e += rotl32(a,5)
        rorl $2, %edx # b = rotl32(b,30)
# 79
        movl %r12d, %esi # W[(n+13) & 15]
        xorl -32+4*7(%rsp), %esi # ^W[(n+8) & 15]
        xorl -32+4*1(%rsp), %esi # ^W[(n+2) & 15]
        xorl %r15d, %esi # ^W[n & 15]
        roll %esi #
# store to W[n & 15] - unused, not done
        movl %edx, %edi # c
        xorl %ebp, %edi # ^d
        xorl %ecx, %edi # ^b
        leal -0x359d3e2a(%rax,%rsi), %eax # e += RCONST + mixed_W
        addl %edi, %eax # e += (c ^ d ^ b)
        movl %ebx, %esi #
        roll $5, %esi # rotl32(a,5)
        addl %esi, %eax # e += rotl32(a,5)
        rorl $2, %ecx # b = rotl32(b,30)

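# Restore the ctx pointer pushed in the prologue and fold the
# working variables back into ctx->hash[0..4].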
        popq %rdi #
        addl %eax, 80(%rdi) # ctx->hash[0] += a
        addl %ebx, 84(%rdi) # ctx->hash[1] += b
        addl %ecx, 88(%rdi) # ctx->hash[2] += c
        addl %edx, 92(%rdi) # ctx->hash[3] += d
        addl %ebp, 96(%rdi) # ctx->hash[4] += e
        popq %rbx #
        popq %rbp #
        popq %r12 #
        popq %r13 #
        popq %r14 #
        popq %r15 #

        ret
        .size sha1_process_block64, .-sha1_process_block64
#endif