Mercurial > hg > graal-compiler
comparison src/share/vm/opto/mulnode.cpp @ 824:18a08a7e16b5
5057225: Remove useless I2L conversions
Summary: The optimizer should be told to normalize (AndL (ConvI2L x) 0xFF) to (ConvI2L (AndI x 0xFF)), and then the existing matcher rule will work for free.
Reviewed-by: kvn
| field    | value                           |
|----------|---------------------------------|
| author   | twisti                          |
| date     | Fri, 26 Jun 2009 07:26:10 -0700 |
| parents  | 337400e7a5dd                    |
| children | 52898b0c43e9                    |
Side-by-side comparison (legend: equal / deleted / inserted / replaced)

| old: 823:14367225a853 | new: 824:18a08a7e16b5 |
|-----------------------|-----------------------|
428 Node *AndINode::Identity( PhaseTransform *phase ) { | 428 Node *AndINode::Identity( PhaseTransform *phase ) { |
429 | 429 |
430 // x & x => x | 430 // x & x => x |
431 if (phase->eqv(in(1), in(2))) return in(1); | 431 if (phase->eqv(in(1), in(2))) return in(1); |
432 | 432 |
433 Node *load = in(1); | 433 Node* in1 = in(1); |
434 const TypeInt *t2 = phase->type( in(2) )->isa_int(); | 434 uint op = in1->Opcode(); |
435 if( t2 && t2->is_con() ) { | 435 const TypeInt* t2 = phase->type(in(2))->isa_int(); |
436 if (t2 && t2->is_con()) { | |
436 int con = t2->get_con(); | 437 int con = t2->get_con(); |
437 // Masking off high bits which are always zero is useless. | 438 // Masking off high bits which are always zero is useless. |
438 const TypeInt* t1 = phase->type( in(1) )->isa_int(); | 439 const TypeInt* t1 = phase->type( in(1) )->isa_int(); |
439 if (t1 != NULL && t1->_lo >= 0) { | 440 if (t1 != NULL && t1->_lo >= 0) { |
440 jint t1_support = ((jint)1 << (1 + log2_intptr(t1->_hi))) - 1; | 441 jint t1_support = right_n_bits(1 + log2_intptr(t1->_hi)); |
441 if ((t1_support & con) == t1_support) | 442 if ((t1_support & con) == t1_support) |
442 return load; | 443 return in1; |
443 } | 444 } |
444 uint lop = load->Opcode(); | |
445 if( lop == Op_LoadUS && | |
446 con == 0x0000FFFF ) // Already zero-extended | |
447 return load; | |
448 // Masking off the high bits of a unsigned-shift-right is not | 445 // Masking off the high bits of a unsigned-shift-right is not |
449 // needed either. | 446 // needed either. |
450 if( lop == Op_URShiftI ) { | 447 if (op == Op_URShiftI) { |
451 const TypeInt *t12 = phase->type( load->in(2) )->isa_int(); | 448 const TypeInt* t12 = phase->type(in1->in(2))->isa_int(); |
452 if( t12 && t12->is_con() ) { // Shift is by a constant | 449 if (t12 && t12->is_con()) { // Shift is by a constant |
453 int shift = t12->get_con(); | 450 int shift = t12->get_con(); |
454 shift &= BitsPerJavaInteger - 1; // semantics of Java shifts | 451 shift &= BitsPerJavaInteger - 1; // semantics of Java shifts |
455 int mask = max_juint >> shift; | 452 int mask = max_juint >> shift; |
456 if( (mask&con) == mask ) // If AND is useless, skip it | 453 if ((mask & con) == mask) // If AND is useless, skip it |
457 return load; | 454 return in1; |
458 } | 455 } |
459 } | 456 } |
460 } | 457 } |
461 return MulNode::Identity(phase); | 458 return MulNode::Identity(phase); |
462 } | 459 } |
474 if( lop == Op_LoadUS && | 471 if( lop == Op_LoadUS && |
475 (mask & 0xFFFF0000) ) // Can we make a smaller mask? | 472 (mask & 0xFFFF0000) ) // Can we make a smaller mask? |
476 return new (phase->C, 3) AndINode(load,phase->intcon(mask&0xFFFF)); | 473 return new (phase->C, 3) AndINode(load,phase->intcon(mask&0xFFFF)); |
477 | 474 |
478 // Masking bits off of a Short? Loading a Character does some masking | 475 // Masking bits off of a Short? Loading a Character does some masking |
479 if( lop == Op_LoadS && | 476 if (lop == Op_LoadS && (mask & 0xFFFF0000) == 0 ) { |
480 (mask & 0xFFFF0000) == 0 ) { | |
481 Node *ldus = new (phase->C, 3) LoadUSNode(load->in(MemNode::Control), | 477 Node *ldus = new (phase->C, 3) LoadUSNode(load->in(MemNode::Control), |
482 load->in(MemNode::Memory), | 478 load->in(MemNode::Memory), |
483 load->in(MemNode::Address), | 479 load->in(MemNode::Address), |
484 load->adr_type()); | 480 load->adr_type()); |
485 ldus = phase->transform(ldus); | 481 ldus = phase->transform(ldus); |
486 return new (phase->C, 3) AndINode(ldus, phase->intcon(mask&0xFFFF)); | 482 return new (phase->C, 3) AndINode(ldus, phase->intcon(mask & 0xFFFF)); |
487 } | 483 } |
488 | 484 |
489 // Masking sign bits off of a Byte? Do an unsigned byte load. | 485 // Masking sign bits off of a Byte? Do an unsigned byte load plus |
490 if (lop == Op_LoadB && mask == 0x000000FF) { | 486 // an and. |
491 return new (phase->C, 3) LoadUBNode(load->in(MemNode::Control), | |
492 load->in(MemNode::Memory), | |
493 load->in(MemNode::Address), | |
494 load->adr_type()); | |
495 } | |
496 | |
497 // Masking sign bits off of a Byte plus additional lower bits? Do | |
498 // an unsigned byte load plus an and. | |
499 if (lop == Op_LoadB && (mask & 0xFFFFFF00) == 0) { | 487 if (lop == Op_LoadB && (mask & 0xFFFFFF00) == 0) { |
500 Node* ldub = new (phase->C, 3) LoadUBNode(load->in(MemNode::Control), | 488 Node* ldub = new (phase->C, 3) LoadUBNode(load->in(MemNode::Control), |
501 load->in(MemNode::Memory), | 489 load->in(MemNode::Memory), |
502 load->in(MemNode::Address), | 490 load->in(MemNode::Address), |
503 load->adr_type()); | 491 load->adr_type()); |
603 const jlong mask = t2->get_con(); | 591 const jlong mask = t2->get_con(); |
604 | 592 |
605 Node* in1 = in(1); | 593 Node* in1 = in(1); |
606 uint op = in1->Opcode(); | 594 uint op = in1->Opcode(); |
607 | 595 |
608 // Masking sign bits off of an integer? Do an unsigned integer to long load. | 596 // Masking sign bits off of an integer? Do an unsigned integer to |
609 if (op == Op_ConvI2L && in1->in(1)->Opcode() == Op_LoadI && mask == 0x00000000FFFFFFFFL) { | 597 // long load. |
598 // NOTE: This check must be *before* we try to convert the AndLNode | |
599 // to an AndINode and commute it with ConvI2LNode because | |
600 // 0xFFFFFFFFL masks the whole integer and we get a sign extension, | |
601 // which is wrong. | |
602 if (op == Op_ConvI2L && in1->in(1)->Opcode() == Op_LoadI && mask == CONST64(0x00000000FFFFFFFF)) { | |
610 Node* load = in1->in(1); | 603 Node* load = in1->in(1); |
611 return new (phase->C, 3) LoadUI2LNode(load->in(MemNode::Control), | 604 return new (phase->C, 3) LoadUI2LNode(load->in(MemNode::Control), |
612 load->in(MemNode::Memory), | 605 load->in(MemNode::Memory), |
613 load->in(MemNode::Address), | 606 load->in(MemNode::Address), |
614 load->adr_type()); | 607 load->adr_type()); |
615 } | 608 } |
616 | 609 |
610 // Are we masking a long that was converted from an int with a mask | |
611 // that fits in 32-bits? Commute them and use an AndINode. | |
612 if (op == Op_ConvI2L && (mask & CONST64(0xFFFFFFFF00000000)) == 0) { | |
613 // If we are doing an UI2L conversion (i.e. the mask is | |
614 // 0x00000000FFFFFFFF) we cannot convert the AndL to an AndI | |
615 // because the AndI would be optimized away later in Identity. | |
616 if (mask != CONST64(0x00000000FFFFFFFF)) { | |
617 Node* andi = new (phase->C, 3) AndINode(in1->in(1), phase->intcon(mask)); | |
618 andi = phase->transform(andi); | |
619 return new (phase->C, 2) ConvI2LNode(andi); | |
620 } | |
621 } | |
622 | |
617 // Masking off sign bits? Dont make them! | 623 // Masking off sign bits? Dont make them! |
618 if (op == Op_RShiftL) { | 624 if (op == Op_RShiftL) { |
619 const TypeInt *t12 = phase->type(in1->in(2))->isa_int(); | 625 const TypeInt* t12 = phase->type(in1->in(2))->isa_int(); |
620 if( t12 && t12->is_con() ) { // Shift is by a constant | 626 if( t12 && t12->is_con() ) { // Shift is by a constant |
621 int shift = t12->get_con(); | 627 int shift = t12->get_con(); |
622 shift &= BitsPerJavaLong - 1; // semantics of Java shifts | 628 shift &= BitsPerJavaLong - 1; // semantics of Java shifts |
623 const jlong sign_bits_mask = ~(((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - shift)) -1); | 629 const jlong sign_bits_mask = ~(((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - shift)) -1); |
624 // If the AND'ing of the 2 masks has no bits, then only original shifted | 630 // If the AND'ing of the 2 masks has no bits, then only original shifted |
625 // bits survive. NO sign-extension bits survive the maskings. | 631 // bits survive. NO sign-extension bits survive the maskings. |
626 if( (sign_bits_mask & mask) == 0 ) { | 632 if( (sign_bits_mask & mask) == 0 ) { |
627 // Use zero-fill shift instead | 633 // Use zero-fill shift instead |
628 Node *zshift = phase->transform(new (phase->C, 3) URShiftLNode(in1->in(1), in1->in(2))); | 634 Node *zshift = phase->transform(new (phase->C, 3) URShiftLNode(in1->in(1), in1->in(2))); |
629 return new (phase->C, 3) AndLNode( zshift, in(2) ); | 635 return new (phase->C, 3) AndLNode(zshift, in(2)); |
630 } | 636 } |
631 } | 637 } |
632 } | 638 } |
633 | 639 |
634 return MulNode::Ideal(phase, can_reshape); | 640 return MulNode::Ideal(phase, can_reshape); |