Commit 05dc84b8 authored by Ben Laurie

Fix DWP when only given three parameters.

Parent 7d3ce7ba
@@ -90,7 +90,12 @@ sub main'DWP
 	$reg2="$regs{$reg2}" if defined($regs{$reg2});
 	$ret.=$addr if ($addr ne "") && ($addr ne 0);
 	if ($reg2 ne "")
-		{ $ret.="($reg1,$reg2,$idx)"; }
+		{
+		if ($idx ne "")
+			{ $ret.="($reg1,$reg2,$idx)"; }
+		else
+			{ $ret.="($reg1,$reg2)"; }
+		}
 	else
 		{ $ret.="($reg1)" }
 	return($ret);
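A minimal standalone sketch of the fixed DWP() behaviour (a hypothetical cut-down version for illustration only; the real perlasm routine also maps register names through %regs): with an empty or missing index argument the operand is now emitted as addr(reg1,reg2) instead of the malformed addr(reg1,reg2,).

#!/usr/bin/perl
# Sketch only: mirrors the branch added above; register-name translation omitted.
sub DWP
	{
	my ($addr, $reg1, $reg2, $idx) = @_;
	my $ret = "";

	$idx = "" unless defined $idx;	# the three-parameter case
	$ret .= $addr if ($addr ne "") && ($addr ne 0);
	if ($reg2 ne "")
		{
		if ($idx ne "")
			{ $ret .= "($reg1,$reg2,$idx)"; }
		else
			{ $ret .= "($reg1,$reg2)"; }
		}
	else
		{ $ret .= "($reg1)"; }
	return $ret;
	}

print DWP(8, "%ebp", "%ecx", 4), "\n";	# 8(%ebp,%ecx,4)
print DWP(-8, "%ebx", "%esi"), "\n";	# -8(%ebx,%esi); before the fix this came out as -8(%ebx,%esi,)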
@@ -65,7 +65,7 @@ asm/rx86-out.o: asm/rx86unix.cpp
 asm/rx86bsdi.o: asm/rx86unix.cpp
 	$(CPP) -DBSDI asm/rx86unix.cpp | sed 's/ :/:/' | as -o asm/rx86bsdi.o
 
-asm/rx86unix.cpp:
+asm/rx86unix.cpp: asm/rc4-586.pl
 	(cd asm; perl rc4-586.pl cpp >rx86unix.cpp)
 
 files:
/* Run the C pre-processor over this file with one of the following defined
* ELF - elf object files,
* OUT - a.out object files,
* BSDI - BSDI style a.out object files
* SOL - Solaris style elf
*/
#define TYPE(a,b) .type a,b
#define SIZE(a,b) .size a,b
#if defined(OUT) || defined(BSDI)
#define RC4 _RC4
#endif
#ifdef OUT
#define OK 1
#define ALIGN 4
#endif
#ifdef BSDI
#define OK 1
#define ALIGN 4
#undef SIZE
#undef TYPE
#define SIZE(a,b)
#define TYPE(a,b)
#endif
#if defined(ELF) || defined(SOL)
#define OK 1
#define ALIGN 16
#endif
#ifndef OK
You need to define one of
ELF - elf systems - linux-elf, NetBSD and DG-UX
OUT - a.out systems - linux-a.out and FreeBSD
SOL - solaris systems, which are elf with strange comment lines
 BSDI - a.out with a very primitive version of as.
#endif
/* Let the Assembler begin :-) */
/* Don't even think of reading this code */
/* It was automatically generated by rc4-586.pl */
/* Which is a perl program used to generate the x86 assembler for */
/* any of elf, a.out, BSDI, Win32, or Solaris */
/* eric <eay@cryptsoft.com> */
.file "rc4-586.s"
.version "01.01"
gcc2_compiled.:
.text
.align ALIGN
.globl RC4
TYPE(RC4,@function)
RC4:
pushl %ebp
pushl %ebx
movl 12(%esp), %ebp
movl 16(%esp), %ebx
pushl %esi
pushl %edi
movl (%ebp), %ecx
movl 4(%ebp), %edx
movl 28(%esp), %esi
incl %ecx
subl $12, %esp
addl $8, %ebp
andl $255, %ecx
leal -8(%ebx,%esi,), %ebx
movl 44(%esp), %edi
movl %ebx, 8(%esp)
movl (%ebp,%ecx,4), %eax
cmpl %esi, %ebx
jl .L000end
.L001start:
addl $8, %esi
/* Round 0 */
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb %bl, (%esp)
/* Round 1 */
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb %bl, 1(%esp)
/* Round 2 */
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb %bl, 2(%esp)
/* Round 3 */
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb %bl, 3(%esp)
/* Round 4 */
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb %bl, 4(%esp)
/* Round 5 */
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb %bl, 5(%esp)
/* Round 6 */
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb %bl, 6(%esp)
/* Round 7 */
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
addl $8, %edi
movb %bl, 7(%esp)
/* apply the cipher text */
movl (%esp), %eax
movl -8(%esi), %ebx
xorl %ebx, %eax
movl -4(%esi), %ebx
movl %eax, -8(%edi)
movl 4(%esp), %eax
xorl %ebx, %eax
movl 8(%esp), %ebx
movl %eax, -4(%edi)
movl (%ebp,%ecx,4), %eax
cmpl %ebx, %esi
jle .L001start
.L000end:
/* Round 0 */
addl $8, %ebx
incl %esi
cmpl %esi, %ebx
jl .L002finished
movl %ebx, 8(%esp)
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb -1(%esi), %bh
xorb %bh, %bl
movb %bl, (%edi)
/* Round 1 */
movl 8(%esp), %ebx
cmpl %esi, %ebx
jle .L002finished
incl %esi
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb -1(%esi), %bh
xorb %bh, %bl
movb %bl, 1(%edi)
/* Round 2 */
movl 8(%esp), %ebx
cmpl %esi, %ebx
jle .L002finished
incl %esi
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb -1(%esi), %bh
xorb %bh, %bl
movb %bl, 2(%edi)
/* Round 3 */
movl 8(%esp), %ebx
cmpl %esi, %ebx
jle .L002finished
incl %esi
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb -1(%esi), %bh
xorb %bh, %bl
movb %bl, 3(%edi)
/* Round 4 */
movl 8(%esp), %ebx
cmpl %esi, %ebx
jle .L002finished
incl %esi
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb -1(%esi), %bh
xorb %bh, %bl
movb %bl, 4(%edi)
/* Round 5 */
movl 8(%esp), %ebx
cmpl %esi, %ebx
jle .L002finished
incl %esi
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movl (%ebp,%ecx,4), %eax
movb -1(%esi), %bh
xorb %bh, %bl
movb %bl, 5(%edi)
/* Round 6 */
movl 8(%esp), %ebx
cmpl %esi, %ebx
jle .L002finished
incl %esi
addl %eax, %edx
andl $255, %edx
incl %ecx
movl (%ebp,%edx,4), %ebx
movl %ebx, -4(%ebp,%ecx,4)
addl %eax, %ebx
andl $255, %ecx
andl $255, %ebx
movl %eax, (%ebp,%edx,4)
nop
movl (%ebp,%ebx,4), %ebx
movb -1(%esi), %bh
xorb %bh, %bl
movb %bl, 6(%edi)
.L002finished:
decl %ecx
addl $12, %esp
movl %edx, -4(%ebp)
movb %cl, -8(%ebp)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.RC4_end:
SIZE(RC4,.RC4_end-RC4)
.ident "RC4"
@@ -61,7 +61,7 @@ asm/rm86-out.o: asm/rm86unix.cpp
 asm/rm86bsdi.o: asm/rm86unix.cpp
 	$(CPP) -DBSDI asm/rm86unix.cpp | sed 's/ :/:/' | as -o asm/rm86bsdi.o
 
-asm/rm86unix.cpp:
+asm/rm86unix.cpp: asm/rmd-586.pl
 	(cd asm; perl rmd-586.pl cpp >rm86unix.cpp)
 
 files:
This diff is collapsed.
@@ -530,7 +530,7 @@ sub ripemd160_block
 #	&mov($tmp2,	&wparam(0)); # Moved into last round
 	&mov($tmp1,	&DWP( 4,$tmp2,"",0)); # ctx->B
 	&add($D,	$tmp1);
 	&mov($tmp1,	&swtmp(1+18)); # $c
 	&add($D,	$tmp1);