diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1a4be3be34199..3661de89e8070 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -47324,6 +47324,19 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
   if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
     return R;
 
+  // Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y), COND_B) iff Y is not a constant.
+  // This avoids a slow variable shift (moving the shift amount to ECX etc.).
+  if (isOneConstant(N1) && N0->hasOneUse()) {
+    SDValue Src = N0;
+    while ((Src.getOpcode() == ISD::ZERO_EXTEND ||
+            Src.getOpcode() == ISD::TRUNCATE) &&
+           Src.getOperand(0)->hasOneUse())
+      Src = Src.getOperand(0);
+    if (Src.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(Src.getOperand(1)))
+      if (SDValue BT = getBT(Src.getOperand(0), Src.getOperand(1), dl, DAG))
+        return getSETCC(X86::COND_B, BT, dl, DAG);
+  }
+
   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
     // Attempt to recursively combine a bitmask AND with shuffles.
     SDValue Op(N, 0);
diff --git a/llvm/test/CodeGen/X86/setcc.ll b/llvm/test/CodeGen/X86/setcc.ll
index 57431887f58c6..229632b25dcf5 100644
--- a/llvm/test/CodeGen/X86/setcc.ll
+++ b/llvm/test/CodeGen/X86/setcc.ll
@@ -139,19 +139,17 @@ define zeroext i1 @t6(i32 %a) #0 {
 define zeroext i1 @t7(i32 %0) {
 ; X86-LABEL: t7:
 ; X86:       ## %bb.0:
-; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-NEXT:    movb $19, %al
-; X86-NEXT:    shrb %cl, %al
-; X86-NEXT:    andb $1, %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $19, %ecx
+; X86-NEXT:    btl %eax, %ecx
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t7:
 ; X64:       ## %bb.0:
-; X64-NEXT:    movl %edi, %ecx
-; X64-NEXT:    movb $19, %al
-; X64-NEXT:    ## kill: def $cl killed $cl killed $ecx
-; X64-NEXT:    shrb %cl, %al
-; X64-NEXT:    andb $1, %al
+; X64-NEXT:    movl $19, %eax
+; X64-NEXT:    btl %edi, %eax
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %2 = trunc i32 %0 to i5
   %3 = lshr i5 -13, %2
@@ -163,20 +161,16 @@ define zeroext i1 @t7(i32 %0) {
 define zeroext i1 @t8(i8 %0, i8 %1) {
 ; X86-LABEL: t8:
 ; X86:       ## %bb.0:
-; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    shrb %cl, %al
-; X86-NEXT:    andb $1, %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    btl %eax, %ecx
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t8:
 ; X64:       ## %bb.0:
-; X64-NEXT:    movl %esi, %ecx
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    ## kill: def $cl killed $cl killed $ecx
-; X64-NEXT:    shrb %cl, %al
-; X64-NEXT:    andb $1, %al
-; X64-NEXT:    ## kill: def $al killed $al killed $eax
+; X64-NEXT:    btl %esi, %edi
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %3 = lshr i8 %0, %1
   %4 = and i8 %3, 1
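
Note (illustration only, not part of the patch): C++ source along these lines is
the kind of code that produces the AND(SRL(X,Y),1) pattern with a non-constant
shift amount that the new fold targets; the function name is made up for this
example, and the exact asm depends on target and register allocation.

    // Hypothetical repro for the fold, mirroring the @t8 test above.
    // Without the fold (x86-64): the shift amount must be moved into CL
    // for SHR, followed by AND $1. With the fold: btl %esi, %edi; setb %al.
    bool testBit(unsigned X, unsigned Y) {
      return (X >> Y) & 1; // and(srl(X, Y), 1) with variable Y
    }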