comparison src/cpu/x86/vm/c1_LinearScan_x86.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children ff1a29907b6c
1 /*
2 * Copyright 2005 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 #include "incls/_precompiled.incl"
26 #include "incls/_c1_LinearScan_x86.cpp.incl"
27
28
29 //----------------------------------------------------------------------
30 // Allocation of FPU stack slots (Intel x86 only)
31 //----------------------------------------------------------------------
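// The linear-scan register allocator assigns virtual FPU register numbers without
// modeling the x87 register stack. This pass rewrites those operands into offsets
// relative to the current top-of-stack and inserts lir_fxch, lir_fld and
// lir_fpop_raw operations so that every value sits where the LIR assembler
// expects it.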
32
33 void LinearScan::allocate_fpu_stack() {
34 // First compute which FPU registers are live at the start of each basic block
35 // (To minimize the amount of work we have to do if we have to merge FPU stacks)
36 if (ComputeExactFPURegisterUsage) {
37 Interval* intervals_in_register, *intervals_in_memory;
38 create_unhandled_lists(&intervals_in_register, &intervals_in_memory, is_in_fpu_register, NULL);
39
40 // ignore memory intervals by overwriting intervals_in_memory
41 // the dummy interval is needed to force the walker to walk until the given id:
42 // without it, the walker stops when the unhandled-list is empty -> live information
43 // beyond this point would be incorrect.
44 Interval* dummy_interval = new Interval(any_reg);
45 dummy_interval->add_range(max_jint - 2, max_jint - 1);
46 dummy_interval->set_next(Interval::end());
47 intervals_in_memory = dummy_interval;
48
49 IntervalWalker iw(this, intervals_in_register, intervals_in_memory);
50
51 const int num_blocks = block_count();
52 for (int i = 0; i < num_blocks; i++) {
53 BlockBegin* b = block_at(i);
54
55 // register usage is only needed for merging stacks -> compute only
56 // when more than one predecessor.
57 // the block must not have any spill moves at the beginning (checked by assertions)
58 // spill moves would use intervals that are marked as handled and so the usage bit
59 // would have been set incorrectly
60
61 // NOTE: the check for number_of_preds > 1 is necessary. A block with only one
62 // predecessor may have spill moves at the beginning of the block.
63 // If an interval ends at the current instruction id, it is not possible
64 // to decide if the register is live or not at the block begin -> the
65 // register information would be incorrect.
66 if (b->number_of_preds() > 1) {
67 int id = b->first_lir_instruction_id();
68 BitMap regs(FrameMap::nof_fpu_regs);
69 regs.clear();
70
71 iw.walk_to(id); // walk after the first instruction (always a label) of the block
72 assert(iw.current_position() == id, "did not walk completely to id");
73
74 // Only consider FPU values in registers
75 Interval* interval = iw.active_first(fixedKind);
76 while (interval != Interval::end()) {
77 int reg = interval->assigned_reg();
78 assert(reg >= pd_first_fpu_reg && reg <= pd_last_fpu_reg, "no fpu register");
79 assert(interval->assigned_regHi() == -1, "must not have hi register (doubles stored in one register)");
80 assert(interval->from() <= id && id < interval->to(), "interval out of range");
81
82 #ifndef PRODUCT
83 if (TraceFPURegisterUsage) {
84 tty->print("fpu reg %d is live because of ", reg - pd_first_fpu_reg); interval->print();
85 }
86 #endif
87
88 regs.set_bit(reg - pd_first_fpu_reg);
89 interval = interval->next();
90 }
91
92 b->set_fpu_register_usage(regs);
93
94 #ifndef PRODUCT
95 if (TraceFPURegisterUsage) {
96 tty->print("FPU regs for block %d, LIR instr %d): ", b->block_id(), id); regs.print_on(tty); tty->print_cr("");
97 }
98 #endif
99 }
100 }
101 }
102
103 FpuStackAllocator alloc(ir()->compilation(), this);
104 _fpu_stack_allocator = &alloc;
105 alloc.allocate();
106 _fpu_stack_allocator = NULL;
107 }
108
109
110 FpuStackAllocator::FpuStackAllocator(Compilation* compilation, LinearScan* allocator)
111 : _compilation(compilation)
112 , _lir(NULL)
113 , _pos(-1)
114 , _allocator(allocator)
115 , _sim(compilation)
116 , _temp_sim(compilation)
117 {}
118
119 void FpuStackAllocator::allocate() {
120 int num_blocks = allocator()->block_count();
121 for (int i = 0; i < num_blocks; i++) {
122 // Set up to process block
123 BlockBegin* block = allocator()->block_at(i);
124 intArray* fpu_stack_state = block->fpu_stack_state();
125
126 #ifndef PRODUCT
127 if (TraceFPUStack) {
128 tty->cr();
129 tty->print_cr("------- Begin of new Block %d -------", block->block_id());
130 }
131 #endif
132
133 assert(fpu_stack_state != NULL ||
134 block->end()->as_Base() != NULL ||
135 block->is_set(BlockBegin::exception_entry_flag),
136 "FPU stack state must be present due to linear-scan order for FPU stack allocation");
137 // note: exception handler entries always start with an empty fpu stack
138 // because stack merging would be too complicated
139
140 if (fpu_stack_state != NULL) {
141 sim()->read_state(fpu_stack_state);
142 } else {
143 sim()->clear();
144 }
145
146 #ifndef PRODUCT
147 if (TraceFPUStack) {
148 tty->print("Reading FPU state for block %d:", block->block_id());
149 sim()->print();
150 tty->cr();
151 }
152 #endif
153
154 allocate_block(block);
155 CHECK_BAILOUT();
156 }
157 }
158
159 void FpuStackAllocator::allocate_block(BlockBegin* block) {
160 bool processed_merge = false;
161 LIR_OpList* insts = block->lir()->instructions_list();
162 set_lir(block->lir());
163 set_pos(0);
164
165
166 // Note: insts->length() may change during loop
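// (the allocator inserts fxch/fld/fpop operations in front of the current
// position, see insert_op() and merge_fpu_stack_with_successors())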
167 while (pos() < insts->length()) {
168 LIR_Op* op = insts->at(pos());
169 _debug_information_computed = false;
170
171 #ifndef PRODUCT
172 if (TraceFPUStack) {
173 op->print();
174 }
175 check_invalid_lir_op(op);
176 #endif
177
178 LIR_OpBranch* branch = op->as_OpBranch();
179 LIR_Op1* op1 = op->as_Op1();
180 LIR_Op2* op2 = op->as_Op2();
181 LIR_OpCall* opCall = op->as_OpCall();
182
183 if (branch != NULL && branch->block() != NULL) {
184 if (!processed_merge) {
185 // propagate stack at first branch to a successor
186 processed_merge = true;
187 bool required_merge = merge_fpu_stack_with_successors(block);
188
189 assert(!required_merge || branch->cond() == lir_cond_always, "splitting of critical edges should prevent FPU stack mismatches at cond branches");
190 }
191
192 } else if (op1 != NULL) {
193 handle_op1(op1);
194 } else if (op2 != NULL) {
195 handle_op2(op2);
196 } else if (opCall != NULL) {
197 handle_opCall(opCall);
198 }
199
200 compute_debug_information(op);
201
202 set_pos(1 + pos());
203 }
204
205 // Propagate stack when block does not end with branch
206 if (!processed_merge) {
207 merge_fpu_stack_with_successors(block);
208 }
209 }
210
211
212 void FpuStackAllocator::compute_debug_information(LIR_Op* op) {
213 if (!_debug_information_computed && op->id() != -1 && allocator()->has_info(op->id())) {
214 visitor.visit(op);
215
216 // exception handling
217 if (allocator()->compilation()->has_exception_handlers()) {
218 XHandlers* xhandlers = visitor.all_xhandler();
219 int n = xhandlers->length();
220 for (int k = 0; k < n; k++) {
221 allocate_exception_handler(xhandlers->handler_at(k));
222 }
223 } else {
224 assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
225 }
226
227 // compute debug information
228 int n = visitor.info_count();
229 assert(n > 0, "should not visit operation otherwise");
230
231 for (int j = 0; j < n; j++) {
232 CodeEmitInfo* info = visitor.info_at(j);
233 // Compute debug information
234 allocator()->compute_debug_info(info, op->id());
235 }
236 }
237 _debug_information_computed = true;
238 }
239
240 void FpuStackAllocator::allocate_exception_handler(XHandler* xhandler) {
241 if (!sim()->is_empty()) {
242 LIR_List* old_lir = lir();
243 int old_pos = pos();
244 intArray* old_state = sim()->write_state();
245
246 #ifndef PRODUCT
247 if (TraceFPUStack) {
248 tty->cr();
249 tty->print_cr("------- begin of exception handler -------");
250 }
251 #endif
252
253 if (xhandler->entry_code() == NULL) {
254 // need entry code to clear FPU stack
255 LIR_List* entry_code = new LIR_List(_compilation);
256 entry_code->jump(xhandler->entry_block());
257 xhandler->set_entry_code(entry_code);
258 }
259
260 LIR_OpList* insts = xhandler->entry_code()->instructions_list();
261 set_lir(xhandler->entry_code());
262 set_pos(0);
263
264 // Note: insts->length() may change during loop
265 while (pos() < insts->length()) {
266 LIR_Op* op = insts->at(pos());
267
268 #ifndef PRODUCT
269 if (TraceFPUStack) {
270 op->print();
271 }
272 check_invalid_lir_op(op);
273 #endif
274
275 switch (op->code()) {
276 case lir_move:
277 assert(op->as_Op1() != NULL, "must be LIR_Op1");
278 assert(pos() != insts->length() - 1, "must not be last operation");
279
280 handle_op1((LIR_Op1*)op);
281 break;
282
283 case lir_branch:
284 assert(op->as_OpBranch()->cond() == lir_cond_always, "must be unconditional branch");
285 assert(pos() == insts->length() - 1, "must be last operation");
286
287 // remove all remaining dead registers from FPU stack
288 clear_fpu_stack(LIR_OprFact::illegalOpr);
289 break;
290
291 default:
292 // other operations not allowed in exception entry code
293 ShouldNotReachHere();
294 }
295
296 set_pos(pos() + 1);
297 }
298
299 #ifndef PRODUCT
300 if (TraceFPUStack) {
301 tty->cr();
302 tty->print_cr("------- end of exception handler -------");
303 }
304 #endif
305
306 set_lir(old_lir);
307 set_pos(old_pos);
308 sim()->read_state(old_state);
309 }
310 }
311
312
313 int FpuStackAllocator::fpu_num(LIR_Opr opr) {
314 assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
315 return opr->is_single_fpu() ? opr->fpu_regnr() : opr->fpu_regnrLo();
316 }
317
318 int FpuStackAllocator::tos_offset(LIR_Opr opr) {
319 return sim()->offset_from_tos(fpu_num(opr));
320 }
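// Illustrative example (hypothetical stack contents): if the simulated stack holds
// the virtual registers [5, 2, 8] with register 5 on top, tos_offset() returns 0
// for register 5, 1 for register 2 and 2 for register 8.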
321
322
323 LIR_Opr FpuStackAllocator::to_fpu_stack(LIR_Opr opr) {
324 assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
325
326 int stack_offset = tos_offset(opr);
327 if (opr->is_single_fpu()) {
328 return LIR_OprFact::single_fpu(stack_offset)->make_fpu_stack_offset();
329 } else {
330 assert(opr->is_double_fpu(), "shouldn't call this otherwise");
331 return LIR_OprFact::double_fpu(stack_offset)->make_fpu_stack_offset();
332 }
333 }
334
335 LIR_Opr FpuStackAllocator::to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset) {
336 assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
337 assert(dont_check_offset || tos_offset(opr) == 0, "operand is not on stack top");
338
339 int stack_offset = 0;
340 if (opr->is_single_fpu()) {
341 return LIR_OprFact::single_fpu(stack_offset)->make_fpu_stack_offset();
342 } else {
343 assert(opr->is_double_fpu(), "shouldn't call this otherwise");
344 return LIR_OprFact::double_fpu(stack_offset)->make_fpu_stack_offset();
345 }
346 }
347
348
349
350 void FpuStackAllocator::insert_op(LIR_Op* op) {
351 lir()->insert_before(pos(), op);
352 set_pos(1 + pos());
353 }
354
355
356 void FpuStackAllocator::insert_exchange(int offset) {
357 if (offset > 0) {
358 LIR_Op1* fxch_op = new LIR_Op1(lir_fxch, LIR_OprFact::intConst(offset), LIR_OprFact::illegalOpr);
359 insert_op(fxch_op);
360 sim()->swap(offset);
361
362 #ifndef PRODUCT
363 if (TraceFPUStack) {
364 tty->print("Exchanged register: %d New state: ", sim()->get_slot(0)); sim()->print(); tty->cr();
365 }
366 #endif
367
368 }
369 }
370
371 void FpuStackAllocator::insert_exchange(LIR_Opr opr) {
372 insert_exchange(tos_offset(opr));
373 }
374
375
376 void FpuStackAllocator::insert_free(int offset) {
377 // move stack slot to the top of stack and then pop it
378 insert_exchange(offset);
379
380 LIR_Op* fpop = new LIR_Op0(lir_fpop_raw);
381 insert_op(fpop);
382 sim()->pop();
383
384 #ifndef PRODUCT
385 if (TraceFPUStack) {
386 tty->print("Inserted pop New state: "); sim()->print(); tty->cr();
387 }
388 #endif
389 }
390
391
392 void FpuStackAllocator::insert_free_if_dead(LIR_Opr opr) {
393 if (sim()->contains(fpu_num(opr))) {
394 int res_slot = tos_offset(opr);
395 insert_free(res_slot);
396 }
397 }
398
399 void FpuStackAllocator::insert_free_if_dead(LIR_Opr opr, LIR_Opr ignore) {
400 if (fpu_num(opr) != fpu_num(ignore) && sim()->contains(fpu_num(opr))) {
401 int res_slot = tos_offset(opr);
402 insert_free(res_slot);
403 }
404 }
405
406 void FpuStackAllocator::insert_copy(LIR_Opr from, LIR_Opr to) {
407 int offset = tos_offset(from);
408 LIR_Op1* fld = new LIR_Op1(lir_fld, LIR_OprFact::intConst(offset), LIR_OprFact::illegalOpr);
409 insert_op(fld);
410
411 sim()->push(fpu_num(to));
412
413 #ifndef PRODUCT
414 if (TraceFPUStack) {
415 tty->print("Inserted copy (%d -> %d) New state: ", fpu_num(from), fpu_num(to)); sim()->print(); tty->cr();
416 }
417 #endif
418 }
419
420 void FpuStackAllocator::do_rename(LIR_Opr from, LIR_Opr to) {
421 sim()->rename(fpu_num(from), fpu_num(to));
422 }
423
424 void FpuStackAllocator::do_push(LIR_Opr opr) {
425 sim()->push(fpu_num(opr));
426 }
427
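// The two helpers below do not emit a pop operation themselves: set_fpu_pop_count(1)
// leaves the actual pop to the LIR assembler, while sim()->pop() keeps the simulated
// stack in sync.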
428 void FpuStackAllocator::pop_if_last_use(LIR_Op* op, LIR_Opr opr) {
429 assert(op->fpu_pop_count() == 0, "fpu_pop_count already set");
430 assert(tos_offset(opr) == 0, "can only pop stack top");
431
432 if (opr->is_last_use()) {
433 op->set_fpu_pop_count(1);
434 sim()->pop();
435 }
436 }
437
438 void FpuStackAllocator::pop_always(LIR_Op* op, LIR_Opr opr) {
439 assert(op->fpu_pop_count() == 0, "fpu_pop_count already set");
440 assert(tos_offset(opr) == 0, "can only pop stack top");
441
442 op->set_fpu_pop_count(1);
443 sim()->pop();
444 }
445
446 void FpuStackAllocator::clear_fpu_stack(LIR_Opr preserve) {
447 int result_stack_size = (preserve->is_fpu_register() && !preserve->is_xmm_register() ? 1 : 0);
448 while (sim()->stack_size() > result_stack_size) {
449 assert(!sim()->slot_is_empty(0), "not allowed");
450
451 if (result_stack_size == 0 || sim()->get_slot(0) != fpu_num(preserve)) {
452 insert_free(0);
453 } else {
454 // move "preserve" to bottom of stack so that all other stack slots can be popped
455 insert_exchange(sim()->stack_size() - 1);
456 }
457 }
458 }
459
460
461 void FpuStackAllocator::handle_op1(LIR_Op1* op1) {
462 LIR_Opr in = op1->in_opr();
463 LIR_Opr res = op1->result_opr();
464
465 LIR_Opr new_in = in; // new operands relative to the actual fpu stack top
466 LIR_Opr new_res = res;
467
468 // Note: this switch is processed for all LIR_Op1, regardless of whether they have FPU arguments,
469 // so checks for is_float_kind() are necessary inside the cases
470 switch (op1->code()) {
471
472 case lir_return: {
473 // FPU-Stack must only contain the (optional) fpu return value.
474 // All remaining dead values are popped from the stack.
475 // If the input operand is an fpu register, it is exchanged to the bottom of the stack.
476
477 clear_fpu_stack(in);
478 if (in->is_fpu_register() && !in->is_xmm_register()) {
479 new_in = to_fpu_stack_top(in);
480 }
481
482 break;
483 }
484
485 case lir_move: {
486 if (in->is_fpu_register() && !in->is_xmm_register()) {
487 if (res->is_xmm_register()) {
488 // move from fpu register to xmm register (necessary for operations that
489 // are not available in the SSE instruction set)
490 insert_exchange(in);
491 new_in = to_fpu_stack_top(in);
492 pop_always(op1, in);
493
494 } else if (res->is_fpu_register() && !res->is_xmm_register()) {
495 // move from fpu-register to fpu-register:
496 // * input and result register equal:
497 // nothing to do
498 // * input register is last use:
499 // rename the input register to result register -> input register
500 // not present on fpu-stack afterwards
501 // * input register not last use:
502 // duplicate input register to result register to preserve input
503 //
504 // Note: The LIR-Assembler does not produce any code for fpu register moves,
505 // so input and result stack index must be equal
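// Illustrative example (hypothetical register numbers): for a move r3 -> r7 where
// r3 is at its last use, the slot currently holding r3 is simply renamed to r7 and
// no code is emitted; if r3 stays live, an fld duplicates it and the copy on the
// new stack top becomes r7.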
506
507 if (fpu_num(in) == fpu_num(res)) {
508 // nothing to do
509 } else if (in->is_last_use()) {
510 insert_free_if_dead(res);//, in);
511 do_rename(in, res);
512 } else {
513 insert_free_if_dead(res);
514 insert_copy(in, res);
515 }
516 new_in = to_fpu_stack(res);
517 new_res = new_in;
518
519 } else {
520 // move from fpu-register to memory
521 // input operand must be on top of stack
522
523 insert_exchange(in);
524
525 // create debug information here because afterwards the register may have been popped
526 compute_debug_information(op1);
527
528 new_in = to_fpu_stack_top(in);
529 pop_if_last_use(op1, in);
530 }
531
532 } else if (res->is_fpu_register() && !res->is_xmm_register()) {
533 // move from memory/constant to fpu register
534 // result is pushed on the stack
535
536 insert_free_if_dead(res);
537
538 // create debug information before register is pushed
539 compute_debug_information(op1);
540
541 do_push(res);
542 new_res = to_fpu_stack_top(res);
543 }
544 break;
545 }
546
547 case lir_neg: {
548 if (in->is_fpu_register() && !in->is_xmm_register()) {
549 assert(res->is_fpu_register() && !res->is_xmm_register(), "must be");
550 assert(in->is_last_use(), "old value gets destroyed");
551
552 insert_free_if_dead(res, in);
553 insert_exchange(in);
554 new_in = to_fpu_stack_top(in);
555
556 do_rename(in, res);
557 new_res = to_fpu_stack_top(res);
558 }
559 break;
560 }
561
562 case lir_convert: {
563 Bytecodes::Code bc = op1->as_OpConvert()->bytecode();
564 switch (bc) {
565 case Bytecodes::_d2f:
566 case Bytecodes::_f2d:
567 assert(res->is_fpu_register(), "must be");
568 assert(in->is_fpu_register(), "must be");
569
570 if (!in->is_xmm_register() && !res->is_xmm_register()) {
571 // this is quite the same as a move from fpu-register to fpu-register
572 // Note: input and result operands must have different types
573 if (fpu_num(in) == fpu_num(res)) {
574 // nothing to do
575 new_in = to_fpu_stack(in);
576 } else if (in->is_last_use()) {
577 insert_free_if_dead(res);//, in);
578 new_in = to_fpu_stack(in);
579 do_rename(in, res);
580 } else {
581 insert_free_if_dead(res);
582 insert_copy(in, res);
583 new_in = to_fpu_stack_top(in, true);
584 }
585 new_res = to_fpu_stack(res);
586 }
587
588 break;
589
590 case Bytecodes::_i2f:
591 case Bytecodes::_l2f:
592 case Bytecodes::_i2d:
593 case Bytecodes::_l2d:
594 assert(res->is_fpu_register(), "must be");
595 if (!res->is_xmm_register()) {
596 insert_free_if_dead(res);
597 do_push(res);
598 new_res = to_fpu_stack_top(res);
599 }
600 break;
601
602 case Bytecodes::_f2i:
603 case Bytecodes::_d2i:
604 assert(in->is_fpu_register(), "must be");
605 if (!in->is_xmm_register()) {
606 insert_exchange(in);
607 new_in = to_fpu_stack_top(in);
608
609 // TODO: update registers of stub
610 }
611 break;
612
613 case Bytecodes::_f2l:
614 case Bytecodes::_d2l:
615 assert(in->is_fpu_register(), "must be");
616 if (!in->is_xmm_register()) {
617 insert_exchange(in);
618 new_in = to_fpu_stack_top(in);
619 pop_always(op1, in);
620 }
621 break;
622
623 case Bytecodes::_i2l:
624 case Bytecodes::_l2i:
625 case Bytecodes::_i2b:
626 case Bytecodes::_i2c:
627 case Bytecodes::_i2s:
628 // no fpu operands
629 break;
630
631 default:
632 ShouldNotReachHere();
633 }
634 break;
635 }
636
637 case lir_roundfp: {
638 assert(in->is_fpu_register() && !in->is_xmm_register(), "input must be in register");
639 assert(res->is_stack(), "result must be on stack");
640
641 insert_exchange(in);
642 new_in = to_fpu_stack_top(in);
643 pop_if_last_use(op1, in);
644 break;
645 }
646
647 default: {
648 assert(!in->is_float_kind() && !res->is_float_kind(), "missed a fpu-operation");
649 }
650 }
651
652 op1->set_in_opr(new_in);
653 op1->set_result_opr(new_res);
654 }
655
656 void FpuStackAllocator::handle_op2(LIR_Op2* op2) {
657 LIR_Opr left = op2->in_opr1();
658 if (!left->is_float_kind()) {
659 return;
660 }
661 if (left->is_xmm_register()) {
662 return;
663 }
664
665 LIR_Opr right = op2->in_opr2();
666 LIR_Opr res = op2->result_opr();
667 LIR_Opr new_left = left; // new operands relative to the actual fpu stack top
668 LIR_Opr new_right = right;
669 LIR_Opr new_res = res;
670
671 assert(!left->is_xmm_register() && !right->is_xmm_register() && !res->is_xmm_register(), "not for xmm registers");
672
673 switch (op2->code()) {
674 case lir_cmp:
675 case lir_cmp_fd2i:
676 case lir_ucmp_fd2i: {
677 assert(left->is_fpu_register(), "invalid LIR");
678 assert(right->is_fpu_register(), "invalid LIR");
679
680 // the left-hand side must be on top of stack.
681 // the right-hand side is never popped, even if is_last_use is set
682 insert_exchange(left);
683 new_left = to_fpu_stack_top(left);
684 new_right = to_fpu_stack(right);
685 pop_if_last_use(op2, left);
686 break;
687 }
688
689 case lir_mul_strictfp:
690 case lir_div_strictfp: {
691 assert(op2->tmp_opr()->is_fpu_register(), "strict operations need temporary fpu stack slot");
692 insert_free_if_dead(op2->tmp_opr());
693 assert(sim()->stack_size() <= 7, "at least one stack slot must be free");
694 // fall-through: continue with the normal handling of lir_mul and lir_div
695 }
696 case lir_add:
697 case lir_sub:
698 case lir_mul:
699 case lir_div: {
700 assert(left->is_fpu_register(), "must be");
701 assert(res->is_fpu_register(), "must be");
702 assert(left->is_equal(res), "must be");
703
704 // either the left-hand or the right-hand side must be on top of stack
705 // (if right is not a register, left must be on top)
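// (x87 arithmetic instructions can combine ST(0) with any other ST(i), which is
// why it is sufficient to have either operand on the stack top.)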
706 if (!right->is_fpu_register()) {
707 insert_exchange(left);
708 new_left = to_fpu_stack_top(left);
709 } else {
710 // no exchange necessary if right is already on top of stack
711 if (tos_offset(right) == 0) {
712 new_left = to_fpu_stack(left);
713 new_right = to_fpu_stack_top(right);
714 } else {
715 insert_exchange(left);
716 new_left = to_fpu_stack_top(left);
717 new_right = to_fpu_stack(right);
718 }
719
720 if (right->is_last_use()) {
721 op2->set_fpu_pop_count(1);
722
723 if (tos_offset(right) == 0) {
724 sim()->pop();
725 } else {
726 // if left is on top of stack, the result is placed in the stack
727 // slot of right, so a renaming from right to res is necessary
728 assert(tos_offset(left) == 0, "must be");
729 sim()->pop();
730 do_rename(right, res);
731 }
732 }
733 }
734 new_res = to_fpu_stack(res);
735
736 break;
737 }
738
739 case lir_rem: {
740 assert(left->is_fpu_register(), "must be");
741 assert(right->is_fpu_register(), "must be");
742 assert(res->is_fpu_register(), "must be");
743 assert(left->is_equal(res), "must be");
744
745 // Must bring both operands to top of stack with following operand ordering:
746 // * fpu stack before rem: ... right left
747 // * fpu stack after rem: ... left
748 if (tos_offset(right) != 1) {
749 insert_exchange(right);
750 insert_exchange(1);
751 }
752 insert_exchange(left);
753 assert(tos_offset(right) == 1, "check");
754 assert(tos_offset(left) == 0, "check");
755
756 new_left = to_fpu_stack_top(left);
757 new_right = to_fpu_stack(right);
758
759 op2->set_fpu_pop_count(1);
760 sim()->pop();
761 do_rename(right, res);
762
763 new_res = to_fpu_stack_top(res);
764 break;
765 }
766
767 case lir_log:
768 case lir_log10:
769 case lir_abs:
770 case lir_sqrt: {
771 // Right argument appears to be unused
772 assert(right->is_illegal(), "must be");
773 assert(left->is_fpu_register(), "must be");
774 assert(res->is_fpu_register(), "must be");
775 assert(left->is_last_use(), "old value gets destroyed");
776
777 insert_free_if_dead(res, left);
778 insert_exchange(left);
779 do_rename(left, res);
780
781 new_left = to_fpu_stack_top(res);
782 new_res = new_left;
783
784 op2->set_fpu_stack_size(sim()->stack_size());
785 break;
786 }
787
788
789 case lir_tan:
790 case lir_sin:
791 case lir_cos: {
792 // tan, sin and cos need two temporary fpu stack slots, so there are two temporary
793 // registers (stored in right and temp of the operation).
794 // the stack allocator must guarantee that the stack slots are really free,
795 // otherwise there might be a stack overflow.
796 assert(left->is_fpu_register(), "must be");
797 assert(res->is_fpu_register(), "must be");
798 // assert(left->is_last_use(), "old value gets destroyed");
799 assert(right->is_fpu_register(), "right is used as the first temporary register");
800 assert(op2->tmp_opr()->is_fpu_register(), "temp is used as the second temporary register");
801 assert(fpu_num(left) != fpu_num(right) && fpu_num(right) != fpu_num(op2->tmp_opr()) && fpu_num(op2->tmp_opr()) != fpu_num(res), "need distinct temp registers");
802
803 insert_free_if_dead(right);
804 insert_free_if_dead(op2->tmp_opr());
805
806 insert_free_if_dead(res, left);
807 insert_exchange(left);
808 do_rename(left, res);
809
810 new_left = to_fpu_stack_top(res);
811 new_res = new_left;
812
813 op2->set_fpu_stack_size(sim()->stack_size());
814 assert(sim()->stack_size() <= 6, "at least two stack slots must be free");
815 break;
816 }
817
818 default: {
819 assert(false, "missed a fpu-operation");
820 }
821 }
822
823 op2->set_in_opr1(new_left);
824 op2->set_in_opr2(new_right);
825 op2->set_result_opr(new_res);
826 }
827
828 void FpuStackAllocator::handle_opCall(LIR_OpCall* opCall) {
829 LIR_Opr res = opCall->result_opr();
830
831 // clear fpu-stack before call
832 // it may contain dead values that could not have been removed by previous operations
833 clear_fpu_stack(LIR_OprFact::illegalOpr);
834 assert(sim()->is_empty(), "fpu stack must be empty now");
835
836 // compute debug information before (possible) fpu result is pushed
837 compute_debug_information(opCall);
838
839 if (res->is_fpu_register() && !res->is_xmm_register()) {
840 do_push(res);
841 opCall->set_result_opr(to_fpu_stack_top(res));
842 }
843 }
844
845 #ifndef PRODUCT
846 void FpuStackAllocator::check_invalid_lir_op(LIR_Op* op) {
847 switch (op->code()) {
848 case lir_24bit_FPU:
849 case lir_reset_FPU:
850 case lir_ffree:
851 assert(false, "operations not allowed in lir. If one of these operations is needed, check if they have fpu operands");
852 break;
853
854 case lir_fpop_raw:
855 case lir_fxch:
856 case lir_fld:
857 assert(false, "operations only inserted by FpuStackAllocator");
858 break;
859 }
860 }
861 #endif
862
863
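// Pushes a placeholder (the double constant 0.0) so that a register expected in the
// successor's stack layout also occupies a slot on this path; only the stack slot
// matters here, not the value.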
864 void FpuStackAllocator::merge_insert_add(LIR_List* instrs, FpuStackSim* cur_sim, int reg) {
865 LIR_Op1* move = new LIR_Op1(lir_move, LIR_OprFact::doubleConst(0), LIR_OprFact::double_fpu(reg)->make_fpu_stack_offset());
866
867 instrs->instructions_list()->push(move);
868
869 cur_sim->push(reg);
870 move->set_result_opr(to_fpu_stack(move->result_opr()));
871
872 #ifndef PRODUCT
873 if (TraceFPUStack) {
874 tty->print("Added new register: %d New state: ", reg); cur_sim->print(); tty->cr();
875 }
876 #endif
877 }
878
879 void FpuStackAllocator::merge_insert_xchg(LIR_List* instrs, FpuStackSim* cur_sim, int slot) {
880 assert(slot > 0, "no exchange necessary");
881
882 LIR_Op1* fxch = new LIR_Op1(lir_fxch, LIR_OprFact::intConst(slot));
883 instrs->instructions_list()->push(fxch);
884 cur_sim->swap(slot);
885
886 #ifndef PRODUCT
887 if (TraceFPUStack) {
888 tty->print("Exchanged register: %d New state: ", cur_sim->get_slot(slot)); cur_sim->print(); tty->cr();
889 }
890 #endif
891 }
892
893 void FpuStackAllocator::merge_insert_pop(LIR_List* instrs, FpuStackSim* cur_sim) {
894 int reg = cur_sim->get_slot(0);
895
896 LIR_Op* fpop = new LIR_Op0(lir_fpop_raw);
897 instrs->instructions_list()->push(fpop);
898 cur_sim->pop(reg);
899
900 #ifndef PRODUCT
901 if (TraceFPUStack) {
902 tty->print("Removed register: %d New state: ", reg); cur_sim->print(); tty->cr();
903 }
904 #endif
905 }
906
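// Tries to reinterpret the register currently sitting in change_slot as one of the
// registers the successor expects but that is not yet on the current stack; a
// successful rename avoids popping the slot and adding the other register separately.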
907 bool FpuStackAllocator::merge_rename(FpuStackSim* cur_sim, FpuStackSim* sux_sim, int start_slot, int change_slot) {
908 int reg = cur_sim->get_slot(change_slot);
909
910 for (int slot = start_slot; slot >= 0; slot--) {
911 int new_reg = sux_sim->get_slot(slot);
912
913 if (!cur_sim->contains(new_reg)) {
914 cur_sim->set_slot(change_slot, new_reg);
915
916 #ifndef PRODUCT
917 if (TraceFPUStack) {
918 tty->print("Renamed register %d to %d New state: ", reg, new_reg); cur_sim->print(); tty->cr();
919 }
920 #endif
921
922 return true;
923 }
924 }
925 return false;
926 }
927
928
929 void FpuStackAllocator::merge_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, FpuStackSim* sux_sim) {
930 #ifndef PRODUCT
931 if (TraceFPUStack) {
932 tty->cr();
933 tty->print("before merging: pred: "); cur_sim->print(); tty->cr();
934 tty->print(" sux: "); sux_sim->print(); tty->cr();
935 }
936
937 int slot;
938 for (slot = 0; slot < cur_sim->stack_size(); slot++) {
939 assert(!cur_sim->slot_is_empty(slot), "not handled by algorithm");
940 }
941 for (slot = 0; slot < sux_sim->stack_size(); slot++) {
942 assert(!sux_sim->slot_is_empty(slot), "not handled by algorithm");
943 }
944 #endif
945
946 // size difference between cur and sux that must be resolved by adding or removing values from the stack
947 int size_diff = cur_sim->stack_size() - sux_sim->stack_size();
948
949 if (!ComputeExactFPURegisterUsage) {
950 // add slots that are currently free, but used in the successor
951 // When the exact FPU register usage is computed, the stack does
952 // not contain dead values at merging -> no values need to be added
953
954 int sux_slot = sux_sim->stack_size() - 1;
955 while (size_diff < 0) {
956 assert(sux_slot >= 0, "slot out of bounds -> error in algorithm");
957
958 int reg = sux_sim->get_slot(sux_slot);
959 if (!cur_sim->contains(reg)) {
960 merge_insert_add(instrs, cur_sim, reg);
961 size_diff++;
962
963 if (sux_slot + size_diff != 0) {
964 merge_insert_xchg(instrs, cur_sim, sux_slot + size_diff);
965 }
966 }
967 sux_slot--;
968 }
969 }
970
971 assert(cur_sim->stack_size() >= sux_sim->stack_size(), "stack size must be equal or greater now");
972 assert(size_diff == cur_sim->stack_size() - sux_sim->stack_size(), "must be");
973
974 // stack merge algorithm:
975 // 1) as long as the current stack top is not in the right location (that means
976 // it should not be on the stack top), exchange it into the right location
977 // 2) if the stack top is right, but the remaining stack is not ordered correctly,
978 // the stack top is exchanged away to get another value on top ->
979 // now step 1) can be continued
980 // the stack can also contain unused items -> these items are removed from the stack
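// Illustrative example (hypothetical register numbers): with the current stack
// [4, 1, 7] (4 on top) and the successor expecting [1, 7, 4], register 4 is first
// exchanged down to the slot it occupies in the successor, giving [7, 1, 4]; the
// new top 7 is then exchanged the same way, giving [1, 7, 4], and the merge is done.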
981
982 int finished_slot = sux_sim->stack_size() - 1;
983 while (finished_slot >= 0 || size_diff > 0) {
984 while (size_diff > 0 || (cur_sim->stack_size() > 0 && cur_sim->get_slot(0) != sux_sim->get_slot(0))) {
985 int reg = cur_sim->get_slot(0);
986 if (sux_sim->contains(reg)) {
987 int sux_slot = sux_sim->offset_from_tos(reg);
988 merge_insert_xchg(instrs, cur_sim, sux_slot + size_diff);
989
990 } else if (!merge_rename(cur_sim, sux_sim, finished_slot, 0)) {
991 assert(size_diff > 0, "must be");
992
993 merge_insert_pop(instrs, cur_sim);
994 size_diff--;
995 }
996 assert(cur_sim->stack_size() == 0 || cur_sim->get_slot(0) != reg, "register must have been changed");
997 }
998
999 while (finished_slot >= 0 && cur_sim->get_slot(finished_slot) == sux_sim->get_slot(finished_slot)) {
1000 finished_slot--;
1001 }
1002
1003 if (finished_slot >= 0) {
1004 int reg = cur_sim->get_slot(finished_slot);
1005
1006 if (sux_sim->contains(reg) || !merge_rename(cur_sim, sux_sim, finished_slot, finished_slot)) {
1007 assert(sux_sim->contains(reg) || size_diff > 0, "must be");
1008 merge_insert_xchg(instrs, cur_sim, finished_slot);
1009 }
1010 assert(cur_sim->get_slot(finished_slot) != reg, "register must have been changed");
1011 }
1012 }
1013
1014 #ifndef PRODUCT
1015 if (TraceFPUStack) {
1016 tty->print("after merging: pred: "); cur_sim->print(); tty->cr();
1017 tty->print(" sux: "); sux_sim->print(); tty->cr();
1018 tty->cr();
1019 }
1020 #endif
1021 assert(cur_sim->stack_size() == sux_sim->stack_size(), "stack size must be equal now");
1022 }
1023
1024
1025 void FpuStackAllocator::merge_cleanup_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, BitMap& live_fpu_regs) {
1026 #ifndef PRODUCT
1027 if (TraceFPUStack) {
1028 tty->cr();
1029 tty->print("before cleanup: state: "); cur_sim->print(); tty->cr();
1030 tty->print(" live: "); live_fpu_regs.print_on(tty); tty->cr();
1031 }
1032 #endif
1033
1034 int slot = 0;
1035 while (slot < cur_sim->stack_size()) {
1036 int reg = cur_sim->get_slot(slot);
1037 if (!live_fpu_regs.at(reg)) {
1038 if (slot != 0) {
1039 merge_insert_xchg(instrs, cur_sim, slot);
1040 }
1041 merge_insert_pop(instrs, cur_sim);
1042 } else {
1043 slot++;
1044 }
1045 }
1046
1047 #ifndef PRODUCT
1048 if (TraceFPUStack) {
1049 tty->print("after cleanup: state: "); cur_sim->print(); tty->cr();
1050 tty->print(" live: "); live_fpu_regs.print_on(tty); tty->cr();
1051 tty->cr();
1052 }
1053
1054 // check if fpu stack only contains live registers
1055 for (unsigned int i = 0; i < live_fpu_regs.size(); i++) {
1056 if (live_fpu_regs.at(i) != cur_sim->contains(i)) {
1057 tty->print_cr("mismatch between required and actual stack content");
1058 break;
1059 }
1060 }
1061 #endif
1062 }
1063
1064
1065 bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
1066 #ifndef PRODUCT
1067 if (TraceFPUStack) {
1068 tty->print_cr("Propagating FPU stack state for B%d at LIR_Op position %d to successors:",
1069 block->block_id(), pos());
1070 sim()->print();
1071 tty->cr();
1072 }
1073 #endif
1074
1075 bool changed = false;
1076 int number_of_sux = block->number_of_sux();
1077
1078 if (number_of_sux == 1 && block->sux_at(0)->number_of_preds() > 1) {
1079 // The successor has at least two incoming edges, so a stack merge will be necessary
1080 // If this block is the first predecessor, clean up the current stack and propagate it
1081 // If this block is not the first predecessor, a stack merge will be necessary
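// (in all other cases the successors inherit the current stack state unchanged in
// the else-branch below; critical edge splitting guarantees that those successors
// have this block as their only predecessor, see the assertion there.)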
1082
1083 BlockBegin* sux = block->sux_at(0);
1084 intArray* state = sux->fpu_stack_state();
1085 LIR_List* instrs = new LIR_List(_compilation);
1086
1087 if (state != NULL) {
1088 // Merge with a successor that already has an FPU stack state
1089 // the block must have only one successor because critical edges must have been split
1090 FpuStackSim* cur_sim = sim();
1091 FpuStackSim* sux_sim = temp_sim();
1092 sux_sim->read_state(state);
1093
1094 merge_fpu_stack(instrs, cur_sim, sux_sim);
1095
1096 } else {
1097 // propagate current FPU stack state to successor without state
1098 // clean up stack first so that there are no dead values on the stack
1099 if (ComputeExactFPURegisterUsage) {
1100 FpuStackSim* cur_sim = sim();
1101 BitMap live_fpu_regs = block->sux_at(0)->fpu_register_usage();
1102 assert(live_fpu_regs.size() == FrameMap::nof_fpu_regs, "missing register usage");
1103
1104 merge_cleanup_fpu_stack(instrs, cur_sim, live_fpu_regs);
1105 }
1106
1107 intArray* state = sim()->write_state();
1108 if (TraceFPUStack) {
1109 tty->print_cr("Setting FPU stack state of B%d (merge path)", sux->block_id());
1110 sim()->print(); tty->cr();
1111 }
1112 sux->set_fpu_stack_state(state);
1113 }
1114
1115 if (instrs->instructions_list()->length() > 0) {
1116 lir()->insert_before(pos(), instrs);
1117 set_pos(instrs->instructions_list()->length() + pos());
1118 changed = true;
1119 }
1120
1121 } else {
1122 // Propagate the unmodified stack to successors where a stack merge is not necessary
1123 intArray* state = sim()->write_state();
1124 for (int i = 0; i < number_of_sux; i++) {
1125 BlockBegin* sux = block->sux_at(i);
1126
1127 #ifdef ASSERT
1128 for (int j = 0; j < sux->number_of_preds(); j++) {
1129 assert(block == sux->pred_at(j), "all critical edges must be broken");
1130 }
1131
1132 // check if new state is same
1133 if (sux->fpu_stack_state() != NULL) {
1134 intArray* sux_state = sux->fpu_stack_state();
1135 assert(state->length() == sux_state->length(), "overwriting existing stack state");
1136 for (int j = 0; j < state->length(); j++) {
1137 assert(state->at(j) == sux_state->at(j), "overwriting existing stack state");
1138 }
1139 }
1140 #endif
1141 #ifndef PRODUCT
1142 if (TraceFPUStack) {
1143 tty->print_cr("Setting FPU stack state of B%d", sux->block_id());
1144 sim()->print(); tty->cr();
1145 }
1146 #endif
1147
1148 sux->set_fpu_stack_state(state);
1149 }
1150 }
1151
1152 #ifndef PRODUCT
1153 // assertions that FPU stack state conforms to all successors' states
1154 intArray* cur_state = sim()->write_state();
1155 for (int i = 0; i < number_of_sux; i++) {
1156 BlockBegin* sux = block->sux_at(i);
1157 intArray* sux_state = sux->fpu_stack_state();
1158
1159 assert(sux_state != NULL, "no fpu state");
1160 assert(cur_state->length() == sux_state->length(), "incorrect length");
1161 for (int j = 0; j < cur_state->length(); j++) {
1162 assert(cur_state->at(j) == sux_state->at(j), "element not equal");
1163 }
1164 }
1165 #endif
1166
1167 return changed;
1168 }