;
;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;

    EXPORT |vp8_subtract_b_neon|
    EXPORT |vp8_subtract_mby_neon|
    EXPORT |vp8_subtract_mbuv_neon|

    INCLUDE vp8_asm_enc_offsets.asm

    ARM
    REQUIRE8
    PRESERVE8

    AREA ||.text||, CODE, READONLY, ALIGN=2

;void vp8_subtract_b_neon(BLOCK *be, BLOCKD *bd, int pitch)
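; For reference, a rough plain-C sketch of what this routine computes (not
; part of the original source; the BLOCK/BLOCKD field names are assumed from
; the vp8_block_* / vp8_blockd_* offset symbols used below):
;
;   void vp8_subtract_b_sketch(BLOCK *be, BLOCKD *bd, int pitch)
;   {
;       unsigned char *src  = *(be->base_src) + be->src;
;       unsigned char *pred = bd->predictor;
;       short         *diff = be->src_diff;
;       int r, c;
;
;       for (r = 0; r < 4; r++)             /* 4x4 block */
;       {
;           for (c = 0; c < 4; c++)
;               diff[c] = src[c] - pred[c];
;           diff += pitch;                  /* diff and pred advance by pitch */
;           pred += pitch;
;           src  += be->src_stride;
;       }
;   }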
|vp8_subtract_b_neon| PROC

    stmfd   sp!, {r4-r7}

    ldr     r3, [r0, #vp8_block_base_src]
    ldr     r4, [r0, #vp8_block_src]
    ldr     r5, [r0, #vp8_block_src_diff]
    ldr     r3, [r3]
    ldr     r6, [r0, #vp8_block_src_stride]
    add     r3, r3, r4                      ; src = *base_src + src
    ldr     r7, [r1, #vp8_blockd_predictor]

    vld1.8          {d0}, [r3], r6          ;load src
    vld1.8          {d1}, [r7], r2          ;load pred
    vld1.8          {d2}, [r3], r6
    vld1.8          {d3}, [r7], r2
    vld1.8          {d4}, [r3], r6
    vld1.8          {d5}, [r7], r2
    vld1.8          {d6}, [r3], r6
    vld1.8          {d7}, [r7], r2

    vsubl.u8        q10, d0, d1
    vsubl.u8        q11, d2, d3
    vsubl.u8        q12, d4, d5
    vsubl.u8        q13, d6, d7

    mov             r2, r2, lsl #1          ; pitch *= 2: diff rows are 16-bit

    vst1.16         {d20}, [r5], r2         ;store diff
    vst1.16         {d22}, [r5], r2
    vst1.16         {d24}, [r5], r2
    vst1.16         {d26}, [r5], r2

    ldmfd   sp!, {r4-r7}
    bx              lr

    ENDP


;==========================================
;void vp8_subtract_mby_neon(short *diff, unsigned char *src, int src_stride,
;                           unsigned char *pred, int pred_stride)
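;
; Rough C sketch of the operation (not from the original source; parameter
; names follow the prototype comment above):
;
;   void vp8_subtract_mby_sketch(short *diff, unsigned char *src, int src_stride,
;                                unsigned char *pred, int pred_stride)
;   {
;       int r, c;
;
;       for (r = 0; r < 16; r++)            /* 16x16 luma macroblock */
;       {
;           for (c = 0; c < 16; c++)
;               diff[c] = src[c] - pred[c];
;           diff += 16;                     /* diff rows are 16 shorts wide */
;           src  += src_stride;
;           pred += pred_stride;
;       }
;   }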
|vp8_subtract_mby_neon| PROC
    push            {r4-r7}
    vpush           {d8-d15}

    mov             r12, #4                 ; 4 iterations x 4 rows = 16 rows
    ldr             r4, [sp, #80]           ; pred_stride (5th arg: stack args follow 16 + 64 bytes of saved regs)
    mov             r6, #32                 ; "diff" stride x2
    add             r5, r0, #16             ; second diff pointer

subtract_mby_loop
    vld1.8          {q0}, [r1], r2          ;load src
    vld1.8          {q1}, [r3], r4          ;load pred
    vld1.8          {q2}, [r1], r2
    vld1.8          {q3}, [r3], r4
    vld1.8          {q4}, [r1], r2
    vld1.8          {q5}, [r3], r4
    vld1.8          {q6}, [r1], r2
    vld1.8          {q7}, [r3], r4

    vsubl.u8        q8, d0, d2
    vsubl.u8        q9, d1, d3
    vsubl.u8        q10, d4, d6
    vsubl.u8        q11, d5, d7
    vsubl.u8        q12, d8, d10
    vsubl.u8        q13, d9, d11
    vsubl.u8        q14, d12, d14
    vsubl.u8        q15, d13, d15

    vst1.16         {q8}, [r0], r6          ;store diff
    vst1.16         {q9}, [r5], r6
    vst1.16         {q10}, [r0], r6
    vst1.16         {q11}, [r5], r6
    vst1.16         {q12}, [r0], r6
    vst1.16         {q13}, [r5], r6
    vst1.16         {q14}, [r0], r6
    vst1.16         {q15}, [r5], r6

    subs            r12, r12, #1
    bne             subtract_mby_loop

    vpop            {d8-d15}
    pop             {r4-r7}
    bx              lr
    ENDP

;=================================
;void vp8_subtract_mbuv_neon(short *diff, unsigned char *usrc, unsigned char *vsrc,
;                            int src_stride, unsigned char *upred,
;                            unsigned char *vpred, int pred_stride)
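;
; Rough C sketch (not from the original source; parameter names follow the
; prototype comment above). The 8x8 U and V diff blocks sit at diff + 256 and
; diff + 320 shorts, matching the #512-byte offset added below:
;
;   void vp8_subtract_mbuv_sketch(short *diff, unsigned char *usrc,
;                                 unsigned char *vsrc, int src_stride,
;                                 unsigned char *upred, unsigned char *vpred,
;                                 int pred_stride)
;   {
;       short *udiff = diff + 256;
;       short *vdiff = diff + 320;
;       int r, c;
;
;       for (r = 0; r < 8; r++)             /* 8x8 U plane */
;       {
;           for (c = 0; c < 8; c++)
;               udiff[c] = usrc[c] - upred[c];
;           udiff += 8;  usrc += src_stride;  upred += pred_stride;
;       }
;       for (r = 0; r < 8; r++)             /* 8x8 V plane */
;       {
;           for (c = 0; c < 8; c++)
;               vdiff[c] = vsrc[c] - vpred[c];
;           vdiff += 8;  vsrc += src_stride;  vpred += pred_stride;
;       }
;   }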

|vp8_subtract_mbuv_neon| PROC
    push            {r4-r7}
    vpush           {d8-d15}

    ldr             r4, [sp, #80]       ; upred (stack args follow 16 + 64 bytes of saved regs)
    ldr             r5, [sp, #84]       ; vpred
    ldr             r6, [sp, #88]       ; pred_stride
    add             r0, r0, #512        ; short *udiff = diff + 256;
    mov             r12, #32            ; "diff" stride x2
    add             r7, r0, #16         ; second diff pointer

;u
    vld1.8          {d0}, [r1], r3      ;load usrc
    vld1.8          {d1}, [r4], r6      ;load upred
    vld1.8          {d2}, [r1], r3
    vld1.8          {d3}, [r4], r6
    vld1.8          {d4}, [r1], r3
    vld1.8          {d5}, [r4], r6
    vld1.8          {d6}, [r1], r3
    vld1.8          {d7}, [r4], r6
    vld1.8          {d8}, [r1], r3
    vld1.8          {d9}, [r4], r6
    vld1.8          {d10}, [r1], r3
    vld1.8          {d11}, [r4], r6
    vld1.8          {d12}, [r1], r3
    vld1.8          {d13}, [r4], r6
    vld1.8          {d14}, [r1], r3
    vld1.8          {d15}, [r4], r6

    vsubl.u8        q8, d0, d1
    vsubl.u8        q9, d2, d3
    vsubl.u8        q10, d4, d5
    vsubl.u8        q11, d6, d7
    vsubl.u8        q12, d8, d9
    vsubl.u8        q13, d10, d11
    vsubl.u8        q14, d12, d13
    vsubl.u8        q15, d14, d15

    vst1.16         {q8}, [r0], r12     ;store diff
    vst1.16         {q9}, [r7], r12
    vst1.16         {q10}, [r0], r12
    vst1.16         {q11}, [r7], r12
    vst1.16         {q12}, [r0], r12
    vst1.16         {q13}, [r7], r12
    vst1.16         {q14}, [r0], r12
    vst1.16         {q15}, [r7], r12

;v
    vld1.8          {d0}, [r2], r3      ;load vsrc
    vld1.8          {d1}, [r5], r6      ;load vpred
    vld1.8          {d2}, [r2], r3
    vld1.8          {d3}, [r5], r6
    vld1.8          {d4}, [r2], r3
    vld1.8          {d5}, [r5], r6
    vld1.8          {d6}, [r2], r3
    vld1.8          {d7}, [r5], r6
    vld1.8          {d8}, [r2], r3
    vld1.8          {d9}, [r5], r6
    vld1.8          {d10}, [r2], r3
    vld1.8          {d11}, [r5], r6
    vld1.8          {d12}, [r2], r3
    vld1.8          {d13}, [r5], r6
    vld1.8          {d14}, [r2], r3
    vld1.8          {d15}, [r5], r6

    vsubl.u8        q8, d0, d1
    vsubl.u8        q9, d2, d3
    vsubl.u8        q10, d4, d5
    vsubl.u8        q11, d6, d7
    vsubl.u8        q12, d8, d9
    vsubl.u8        q13, d10, d11
    vsubl.u8        q14, d12, d13
    vsubl.u8        q15, d14, d15

    vst1.16         {q8}, [r0], r12     ;store diff
    vst1.16         {q9}, [r7], r12
    vst1.16         {q10}, [r0], r12
    vst1.16         {q11}, [r7], r12
    vst1.16         {q12}, [r0], r12
    vst1.16         {q13}, [r7], r12
    vst1.16         {q14}, [r0], r12
    vst1.16         {q15}, [r7], r12

    vpop            {d8-d15}
    pop             {r4-r7}
    bx              lr

    ENDP

    END