
* cleanup: remove the disabled {$if false} early-exit code paths from the varset containment helpers

florian, 9 months ago (commit 54dcfa78f8)
2 changed files with 0 additions and 58 deletions
  1. rtl/i386/set.inc: 0 additions, 33 deletions
  2. rtl/x86_64/set.inc: 0 additions, 25 deletions

rtl/i386/set.inc (+0, -33)

@@ -415,32 +415,6 @@ asm
     sub    $16, %ecx
     jl     .LFallback  { probably dead branch... }
 
-{$if false}
-{ Scans 16 bytes at a time left to right with early exits.
-  Would be better for large enough sets (maybe around 64 bytes or even more) — if they existed, but worse for actually existing 32.
-  Kept for the future. }
-    push   %ebx
-    pxor   %xmm2, %xmm2 { xmm2 = 0 }
-    add    %ecx, %eax { set1 += size }
-    add    %ecx, %edx { set2 += size }
-    neg    %ecx { Now "size" (ecx) = -(orig.size - 16), "set1" (eax) points to orig.set1 + orig.size - 16, "set2" (edx) points to orig.set2 + orig.size - 16.
-                 Loop ends on "size" >= 0, leaving up to 16 tail bytes. }
-.L16x_Loop:
-    movdqu (%eax,%ecx), %xmm1
-    movdqu (%edx,%ecx), %xmm0
-    pandn  %xmm1, %xmm0
-    pcmpeqb %xmm2, %xmm0
-    pmovmskb %xmm0, %ebx
-    inc    %bx
-    jnz    .LNo
-    add    $16, %ecx
-    js     .L16x_Loop
-    pop    %ebx
-
-    movdqu (%eax), %xmm1
-    movdqu (%edx), %xmm0
-    pandn  %xmm1, %xmm0
-{$else}
 { Folds all 16-byte "set1 and not set2" chunks with OR and checks the final result for zero. Better for small enough sets. }
     movdqu (%eax), %xmm1
     movdqu (%edx), %xmm2
@@ -455,7 +429,6 @@ asm
     ja     .L16x_Loop
 
     pxor   %xmm0, %xmm0
-{$endif}
     pcmpeqb %xmm2,%xmm0
     pmovmskb %xmm0, %ecx
     xor    %eax, %eax
@@ -466,12 +439,6 @@ asm
 .LFallback:
     add    $16, %ecx
     jmp    fpc_varset_contains_sets_plain
-
-{$if false}
-.LNo:
-    xor    %eax, %eax
-    pop    %ebx
-{$endif}
 end;
 
 function fpc_varset_contains_sets_dispatch(const set1,set2;size : ptrint):boolean; forward;
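
Aside: both the removed and the retained paths compute the same predicate: set1 is a subset of set2 exactly when every byte of "set1 and not set2" is zero. A minimal byte-wise sketch of the removed early-exit strategy, in Pascal (the function name is hypothetical, for illustration only):

    { Hedged sketch of the early-exit strategy that the removed branch
      vectorised; not part of the RTL. }
    function ContainsEarlyExit(const set1, set2; size: PtrInt): Boolean;
    var
      p1, p2: PByte;
      i: PtrInt;
    begin
      p1 := PByte(@set1);
      p2 := PByte(@set2);
      for i := 0 to size - 1 do
        if (p1[i] and not p2[i]) <> 0 then { a bit of set1 missing from set2 }
          exit(False);                     { early exit, like the jnz .LNo above }
      Result := True;
    end;

The removed SSE2 branch did the same 16 bytes at a time: pcmpeqb/pmovmskb plus the "inc %bx" trick (an all-ones 0xFFFF mask wraps to 0, so jnz fires only when some byte was non-zero) stood in for the per-byte test.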

rtl/x86_64/set.inc (+0, -25)

@@ -163,30 +163,6 @@ asm
     sub    size, 16
     jl     @Bytewise_Prepare { probably dead branch... }
 
-{$if false}
-{ Scans 16 bytes at a time left to right with early exits.
-  Would be better for large enough sets (maybe around 64 bytes or even more) — if they existed, but worse for actually existing 32.
-  Kept for the future. }
-    pxor   xmm2, xmm2 { xmm2 = 0 }
-    add    set1, size
-    add    set2, size
-    neg    size { Now "size" = -(orig.size - 16), "set1" points to orig.set1 + orig.size - 16, "set2" points to orig.set2 + orig.size - 16.
-                  Loop ends on "size" >= 0, leaving up to 16 tail bytes. }
-@16x_Loop:
-    movdqu xmm1, xmmword ptr [set1 + size]
-    movdqu xmm0, xmmword ptr [set2 + size]
-    pandn  xmm0, xmm1
-    pcmpeqb xmm0, xmm2
-    pmovmskb eax, xmm0
-    inc    ax
-    jnz    @No
-    add    size, 16
-    js     @16x_Loop
-
-    movdqu xmm1, xmmword ptr [set1]
-    movdqu xmm0, xmmword ptr [set2]
-    pandn  xmm0, xmm1
-{$else}
 { Folds all 16-byte "set1 and not set2" chunks with OR and checks the final result for zero. Better for small enough sets. }
     movdqu xmm1, xmmword ptr [set1]
     movdqu xmm2, xmmword ptr [set2]
@@ -201,7 +177,6 @@ asm
     ja     @16x_Loop
 
     pxor   xmm0, xmm0
-{$endif}
     pcmpeqb xmm0, xmm2
     pmovmskb ecx, xmm0
     xor    eax, eax
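
The retained path in both files avoids the per-iteration branch altogether: it ORs every 16-byte "set1 and not set2" chunk into one accumulator and performs a single zero test at the end (the pcmpeqb/pmovmskb pair after the loop). A byte-wise sketch of that fold, again with a hypothetical name:

    { Hedged sketch of the fold-with-OR strategy used by the retained path;
      not part of the RTL. }
    function ContainsFold(const set1, set2; size: PtrInt): Boolean;
    var
      p1, p2: PByte;
      acc: Byte;
      i: PtrInt;
    begin
      p1 := PByte(@set1);
      p2 := PByte(@set2);
      acc := 0;
      for i := 0 to size - 1 do
        acc := acc or (p1[i] and not p2[i]); { fold instead of branching per chunk }
      Result := acc = 0;                     { single test, like pcmpeqb/pmovmskb }
    end;

Per the comment in the removed code, the early-exit variant would only pay off for sets much larger than the 32-byte ones that actually occur, which is why the branch-free fold was kept and the disabled path is now deleted.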