4761
|
1 //
|
|
2 // Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
|
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 //
|
|
5 // This code is free software; you can redistribute it and/or modify it
|
|
6 // under the terms of the GNU General Public License version 2 only, as
|
|
7 // published by the Free Software Foundation.
|
|
8 //
|
|
9 // This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 // version 2 for more details (a copy is included in the LICENSE file that
|
|
13 // accompanied this code).
|
|
14 //
|
|
15 // You should have received a copy of the GNU General Public License version
|
|
16 // 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 //
|
|
19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
20 // or visit www.oracle.com if you need additional information or have any
|
|
21 // questions.
|
|
22 //
|
|
23 //
|
|
24
|
|
25 // X86 Common Architecture Description File
|
|
26
|
|
source %{
// Float masks come from different places depending on platform.
// These helpers return the address of the 128-bit constant patterns used by
// the Abs* (sign-mask AND) and Neg* (sign-flip XOR) instructs further down.
// On 64-bit (_LP64) the constants live in generated StubRoutines; on 32-bit
// they come from statically allocated constant pools.
#ifdef _LP64
static address float_signmask() { return StubRoutines::x86::float_sign_mask(); }
static address float_signflip() { return StubRoutines::x86::float_sign_flip(); }
static address double_signmask() { return StubRoutines::x86::double_sign_mask(); }
static address double_signflip() { return StubRoutines::x86::double_sign_flip(); }
#else
static address float_signmask() { return (address)float_signmask_pool; }
static address float_signflip() { return (address)float_signflip_pool; }
static address double_signmask() { return (address)double_signmask_pool; }
static address double_signflip() { return (address)double_signflip_pool; }
#endif
%}
|
|
41
|
|
42 // INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit)
|
|
43
|
|
// Scalar float add, register form: dst = dst + src via SSE 'addss'.
// Selected only when SSE is available and AVX is off (AVX uses vaddF_* below).
instruct addF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst src));

  format %{ "addss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
55
|
|
// Scalar float add with the load folded into the instruction's memory operand:
// dst = dst + [src].
instruct addF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst (LoadF src)));

  format %{ "addss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
67
|
|
// Scalar float add with an immediate operand; the constant is materialized in
// the constant table and addressed via $constantaddress.
instruct addF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst con));
  format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
78
|
|
// AVX scalar float add: non-destructive three-operand encoding,
// dst = src1 + src2 ('vaddss').
instruct vaddF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src1 src2));

  format %{ "vaddss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
90
|
|
// AVX scalar float add with folded memory load: dst = src1 + [src2].
instruct vaddF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src1 (LoadF src2)));

  format %{ "vaddss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
102
|
|
// AVX scalar float add with an immediate fetched from the constant table.
instruct vaddF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src con));

  format %{ "vaddss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
114
|
|
// Scalar double add, register form: dst = dst + src via SSE2 'addsd'.
// Doubles require SSE>=2 (floats above only need SSE>=1).
instruct addD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst src));

  format %{ "addsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
126
|
|
// Scalar double add with folded memory load: dst = dst + [src].
instruct addD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst (LoadD src)));

  format %{ "addsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
138
|
|
// Scalar double add with an immediate loaded from the constant table.
instruct addD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst con));
  format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
149
|
|
// AVX scalar double add: three-operand, dst = src1 + src2 ('vaddsd').
instruct vaddD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src1 src2));

  format %{ "vaddsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
161
|
|
// AVX scalar double add with folded memory load: dst = src1 + [src2].
instruct vaddD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src1 (LoadD src2)));

  format %{ "vaddsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
173
|
|
// AVX scalar double add with an immediate fetched from the constant table.
instruct vaddD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src con));

  format %{ "vaddsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
185
|
|
// Scalar float subtract, register form: dst = dst - src via SSE 'subss'.
instruct subF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst src));

  format %{ "subss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
197
|
|
// Scalar float subtract with folded memory load: dst = dst - [src].
instruct subF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst (LoadF src)));

  format %{ "subss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
209
|
|
// Scalar float subtract with an immediate loaded from the constant table.
instruct subF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst con));
  format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
220
|
|
// AVX scalar float subtract: three-operand, dst = src1 - src2 ('vsubss').
instruct vsubF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src1 src2));

  format %{ "vsubss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
232
|
|
// AVX scalar float subtract with folded memory load: dst = src1 - [src2].
instruct vsubF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src1 (LoadF src2)));

  format %{ "vsubss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
244
|
|
// AVX scalar float subtract with an immediate fetched from the constant table.
instruct vsubF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src con));

  format %{ "vsubss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
256
|
|
// Scalar double subtract, register form: dst = dst - src via SSE2 'subsd'.
instruct subD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst src));

  format %{ "subsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
268
|
|
// Scalar double subtract with folded memory load: dst = dst - [src].
instruct subD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst (LoadD src)));

  format %{ "subsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
280
|
|
// Scalar double subtract with an immediate loaded from the constant table.
instruct subD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst con));
  format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
291
|
|
// AVX scalar double subtract: three-operand, dst = src1 - src2 ('vsubsd').
instruct vsubD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src1 src2));

  format %{ "vsubsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
303
|
|
// AVX scalar double subtract with folded memory load: dst = src1 - [src2].
instruct vsubD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src1 (LoadD src2)));

  format %{ "vsubsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
315
|
|
// AVX scalar double subtract with an immediate fetched from the constant table.
instruct vsubD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src con));

  format %{ "vsubsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
327
|
|
// Scalar float multiply, register form: dst = dst * src via SSE 'mulss'.
instruct mulF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst src));

  format %{ "mulss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
339
|
|
// Scalar float multiply with folded memory load: dst = dst * [src].
instruct mulF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst (LoadF src)));

  format %{ "mulss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
351
|
|
// Scalar float multiply with an immediate loaded from the constant table.
instruct mulF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst con));
  format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
362
|
|
// AVX scalar float multiply: three-operand, dst = src1 * src2 ('vmulss').
instruct vmulF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src1 src2));

  format %{ "vmulss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
374
|
|
// AVX scalar float multiply with folded memory load: dst = src1 * [src2].
instruct vmulF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src1 (LoadF src2)));

  format %{ "vmulss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
386
|
|
// AVX scalar float multiply with an immediate fetched from the constant table.
instruct vmulF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src con));

  format %{ "vmulss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
398
|
|
// Scalar double multiply, register form: dst = dst * src via SSE2 'mulsd'.
instruct mulD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst src));

  format %{ "mulsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
410
|
|
// Scalar double multiply with folded memory load: dst = dst * [src].
instruct mulD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst (LoadD src)));

  format %{ "mulsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
422
|
|
// Scalar double multiply with an immediate loaded from the constant table.
instruct mulD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst con));
  format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
433
|
|
// AVX scalar double multiply: three-operand, dst = src1 * src2 ('vmulsd').
instruct vmulD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src1 src2));

  format %{ "vmulsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
445
|
|
// AVX scalar double multiply with folded memory load: dst = src1 * [src2].
instruct vmulD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src1 (LoadD src2)));

  format %{ "vmulsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
457
|
|
// AVX scalar double multiply with an immediate fetched from the constant table.
instruct vmulD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src con));

  format %{ "vmulsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
469
|
|
// Scalar float divide, register form: dst = dst / src via SSE 'divss'.
instruct divF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst src));

  format %{ "divss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
481
|
|
// Scalar float divide with folded memory load: dst = dst / [src].
instruct divF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst (LoadF src)));

  format %{ "divss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
493
|
|
// Scalar float divide with an immediate divisor loaded from the constant table.
instruct divF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst con));
  format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
504
|
|
// AVX scalar float divide: three-operand, dst = src1 / src2 ('vdivss').
instruct vdivF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src1 src2));

  format %{ "vdivss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
516
|
|
// AVX scalar float divide with folded memory load: dst = src1 / [src2].
instruct vdivF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src1 (LoadF src2)));

  format %{ "vdivss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
528
|
|
// AVX scalar float divide with an immediate divisor from the constant table.
instruct vdivF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src con));

  format %{ "vdivss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
540
|
|
// Scalar double divide, register form: dst = dst / src via SSE2 'divsd'.
instruct divD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst src));

  format %{ "divsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
552
|
|
// Scalar double divide with folded memory load: dst = dst / [src].
instruct divD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst (LoadD src)));

  format %{ "divsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
564
|
|
// Scalar double divide with an immediate divisor from the constant table.
instruct divD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst con));
  format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
575
|
|
// AVX scalar double divide: three-operand, dst = src1 / src2 ('vdivsd').
instruct vdivD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src1 src2));

  format %{ "vdivsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
587
|
|
// AVX scalar double divide with folded memory load: dst = src1 / [src2].
instruct vdivD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src1 (LoadD src2)));

  format %{ "vdivsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
599
|
|
// AVX scalar double divide with an immediate divisor from the constant table.
instruct vdivD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src con));

  format %{ "vdivsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
611
|
|
// Float absolute value: AND the sign bit away with the 0x7fffffff mask
// (address supplied by float_signmask() from the source block above).
instruct absF_reg(regF dst) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AbsF dst));
  ins_cost(150);
  format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode %{
    __ andps($dst$$XMMRegister, ExternalAddress(float_signmask()));
  %}
  ins_pipe(pipe_slow);
%}
|
|
622
|
|
// AVX float absolute value: non-destructive AND with the sign mask.
instruct vabsF_reg(regF dst, regF src) %{
  predicate(UseAVX > 0);
  match(Set dst (AbsF src));
  ins_cost(150);
  format %{ "vandps $dst, $src, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode %{
    __ vandps($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(float_signmask()));
  %}
  ins_pipe(pipe_slow);
%}
|
|
634
|
|
// Double absolute value: AND with the 0x7fffffffffffffff sign mask
// (address supplied by double_signmask()).
instruct absD_reg(regD dst) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AbsD dst));
  ins_cost(150);
  format %{ "andpd $dst, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode %{
    __ andpd($dst$$XMMRegister, ExternalAddress(double_signmask()));
  %}
  ins_pipe(pipe_slow);
%}
|
|
646
|
|
// AVX double absolute value: non-destructive AND with the sign mask.
instruct vabsD_reg(regD dst, regD src) %{
  predicate(UseAVX > 0);
  match(Set dst (AbsD src));
  ins_cost(150);
  format %{ "vandpd $dst, $src, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode %{
    __ vandpd($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(double_signmask()));
  %}
  ins_pipe(pipe_slow);
%}
|
|
659
|
|
// Float negation: XOR with the 0x80000000 sign-flip constant
// (address supplied by float_signflip()).
instruct negF_reg(regF dst) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (NegF dst));
  ins_cost(150);
  format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode %{
    __ xorps($dst$$XMMRegister, ExternalAddress(float_signflip()));
  %}
  ins_pipe(pipe_slow);
%}
|
|
670
|
|
// AVX float negation: non-destructive XOR with the sign-flip constant.
instruct vnegF_reg(regF dst, regF src) %{
  predicate(UseAVX > 0);
  match(Set dst (NegF src));
  ins_cost(150);
  format %{ "vxorps $dst, $src, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode %{
    __ vxorps($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(float_signflip()));
  %}
  ins_pipe(pipe_slow);
%}
|
|
682
|
|
// Double negation: XOR with the 0x8000000000000000 sign-flip constant
// (address supplied by double_signflip()).
instruct negD_reg(regD dst) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (NegD dst));
  ins_cost(150);
  format %{ "xorpd $dst, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode %{
    __ xorpd($dst$$XMMRegister, ExternalAddress(double_signflip()));
  %}
  ins_pipe(pipe_slow);
%}
|
|
694
|
|
// AVX double negation: non-destructive XOR with the sign-flip constant.
instruct vnegD_reg(regD dst, regD src) %{
  predicate(UseAVX > 0);
  match(Set dst (NegD src));
  ins_cost(150);
  format %{ "vxorpd $dst, $src, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode %{
    __ vxorpd($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(double_signflip()));
  %}
  ins_pipe(pipe_slow);
%}
|
|
707
|
|
// Float sqrt, register form. Matches the ConvD2F(SqrtD(ConvF2D x)) idiom the
// compiler produces (SqrtD is the only sqrt node) and collapses the whole
// convert-sqrt-convert chain into one single-precision 'sqrtss'.
instruct sqrtF_reg(regF dst, regF src) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
719
|
|
// Float sqrt with the float load folded into the instruction; same collapsed
// ConvD2F(SqrtD(ConvF2D ...)) pattern as sqrtF_reg.
instruct sqrtF_mem(regF dst, memory src) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
731
|
|
// Float sqrt of a float constant taken from the constant table; same collapsed
// convert-sqrt-convert pattern as sqrtF_reg.
instruct sqrtF_imm(regF dst, immF con) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
  format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
742
|
|
// Double sqrt, register form: dst = sqrt(src) via SSE2 'sqrtsd'.
instruct sqrtD_reg(regD dst, regD src) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD src));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
|
|
754
|
|
// Double sqrt with folded memory load: dst = sqrt([src]).
instruct sqrtD_mem(regD dst, memory src) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD (LoadD src)));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}
|
|
766
|
|
// Double sqrt of a constant loaded from the constant table.
instruct sqrtD_imm(regD dst, immD con) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD con));
  format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
|
|
777
|