Skip to content

Missed optimization with autovectorized saturating truncation (clamp) #104875

@okaneco

Description

@okaneco

In Rust, trying to clamp and truncate from a slice of i32 to a slice of u8 using the standard library's clamp function produces more instructions than manually clamping with max and min.

pub fn clamp(input: &[i32], output: &mut [u8]) {
    for (&i, o) in input.iter().zip(output.iter_mut()) {
        *o = i.clamp(0, 255) as u8;
    }
}
pub fn manual_clamp(input: &[i32], output: &mut [u8]) {
    for (&i, o) in input.iter().zip(output.iter_mut()) {
        *o = i.max(0).min(255) as u8;
    }
}

https://rust.godbolt.org/z/zf73jsqjq

Assembly instructions

Clamp

.LBB0_4:
        movdqu  xmm6, xmmword ptr [rdi + 4*r8]
        movdqu  xmm5, xmmword ptr [rdi + 4*r8 + 16]
        pxor    xmm3, xmm3
        pcmpgtd xmm3, xmm6
        packssdw        xmm3, xmm3
        packsswb        xmm3, xmm3
        pxor    xmm4, xmm4
        pcmpgtd xmm4, xmm5
        packssdw        xmm4, xmm4
        packsswb        xmm4, xmm4
        movdqa  xmm7, xmm6
        pxor    xmm7, xmm0
        movdqa  xmm8, xmm1
        pcmpgtd xmm8, xmm7
        pand    xmm6, xmm8
        pandn   xmm8, xmm2
        por     xmm8, xmm6
        packuswb        xmm8, xmm8
        packuswb        xmm8, xmm8
        pandn   xmm3, xmm8
        movdqa  xmm6, xmm5
        pxor    xmm6, xmm0
        movdqa  xmm7, xmm1
        pcmpgtd xmm7, xmm6
        pand    xmm5, xmm7
        pandn   xmm7, xmm2
        por     xmm7, xmm5
        packuswb        xmm7, xmm7
        packuswb        xmm7, xmm7
        pandn   xmm4, xmm7
        movd    dword ptr [rdx + r8], xmm3
        movd    dword ptr [rdx + r8 + 4], xmm4
        add     r8, 8
        cmp     rsi, r8
        jne     .LBB0_4

Manual clamp

.LBB0_4:
        movdqu  xmm0, xmmword ptr [rdi + 4*r8]
        packssdw        xmm0, xmm0
        packuswb        xmm0, xmm0
        movdqu  xmm1, xmmword ptr [rdi + 4*r8 + 16]
        packssdw        xmm1, xmm1
        packuswb        xmm1, xmm1
        movd    dword ptr [rdx + r8], xmm0
        movd    dword ptr [rdx + r8 + 4], xmm1
        add     r8, 8
        cmp     rsi, r8
        jne     .LBB0_4

Emitted IR - https://alive2.llvm.org/ce/z/hbU88w

Clamp

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, ptr %input.0, i64 %index
  %1 = getelementptr inbounds i8, ptr %output.0, i64 %index
  %2 = getelementptr inbounds i8, ptr %0, i64 16
  %wide.load = load <4 x i32>, ptr %0, align 4
  %wide.load6 = load <4 x i32>, ptr %2, align 4
  %3 = icmp slt <4 x i32> %wide.load, zeroinitializer
  %4 = icmp slt <4 x i32> %wide.load6, zeroinitializer
  %5 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %wide.load, <4 x i32> <i32 255, i32 255, i32 255, i32 255>)
  %6 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %wide.load6, <4 x i32> <i32 255, i32 255, i32 255, i32 255>)
  %7 = trunc nuw <4 x i32> %5 to <4 x i8>
  %8 = trunc nuw <4 x i32> %6 to <4 x i8>
  %9 = select <4 x i1> %3, <4 x i8> zeroinitializer, <4 x i8> %7
  %10 = select <4 x i1> %4, <4 x i8> zeroinitializer, <4 x i8> %8
  %11 = getelementptr inbounds i8, ptr %1, i64 4
  store <4 x i8> %9, ptr %1, align 1
  store <4 x i8> %10, ptr %11, align 1
  %index.next = add nuw i64 %index, 8
  %12 = icmp eq i64 %index.next, %n.vec
  br i1 %12, label %middle.block, label %vector.body, !llvm.loop !3

Manual clamp

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, ptr %input.0, i64 %index
  %1 = getelementptr inbounds i8, ptr %output.0, i64 %index
  %2 = getelementptr inbounds i8, ptr %0, i64 16
  %wide.load = load <4 x i32>, ptr %0, align 4
  %wide.load7 = load <4 x i32>, ptr %2, align 4
  %3 = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %wide.load, <4 x i32> zeroinitializer)
  %4 = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %wide.load7, <4 x i32> zeroinitializer)
  %5 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %3, <4 x i32> <i32 255, i32 255, i32 255, i32 255>)
  %6 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %4, <4 x i32> <i32 255, i32 255, i32 255, i32 255>)
  %7 = trunc nuw <4 x i32> %5 to <4 x i8>
  %8 = trunc nuw <4 x i32> %6 to <4 x i8>
  %9 = getelementptr inbounds i8, ptr %1, i64 4
  store <4 x i8> %7, ptr %1, align 1
  store <4 x i8> %8, ptr %9, align 1
  %index.next = add nuw i64 %index, 8
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body, !llvm.loop !8

The standard library's clamp is implemented as follows.

fn clamp(n: i32, min: i32, max: i32) -> i32 {
    assert!(min <= max);
    if n < min {
        min
    } else if n > max {
        max
    } else {
        n
    }
}

It seems OK to transform the standard library clamp into the manual clamp form; a minimized IR reproduction is shown below.
alive2 proof - https://alive2.llvm.org/ce/z/pRRVhU
Rust source - https://rust.godbolt.org/z/3PxW1xWqo

define i8 @src(i32 %input) {
  %1 = icmp slt i32 %input, 0
  %2 = tail call i32 @llvm.umin.i32(i32 %input, i32 255)
  %3 = trunc nuw i32 %2 to i8
  %4 = select i1 %1, i8 0, i8 %3
  ret i8 %4
}

define i8 @tgt(i32 %input) {
  %1 = tail call i32 @llvm.smax.i32(i32 %input, i32 0)
  %2 = tail call i32 @llvm.umin.i32(i32 %1, i32 255)
  %3 = trunc nuw i32 %2 to i8
  ret i8 %3
}

Real-world examples from functions in the image-webp crate (Rust source on the left of each link):
https://rust.godbolt.org/z/veGzv1dPx - source
https://rust.godbolt.org/z/sf4v6ceGM - source

Originally reported rust-lang/rust#125738

Metadata

Metadata

Assignees

Type

No type

Projects

No projects

Milestone

No milestone

Relationships

None yet

Development

No branches or pull requests

Issue actions