changeset 1489:cff162798819

6888953: some calls to function-like macros are missing semicolons
Reviewed-by: pbk, kvn
author jcoomes
date Sun, 11 Oct 2009 16:19:25 -0700
parents 615a9d95d265
children f03d0a26bf83
files src/cpu/sparc/vm/assembler_sparc.cpp src/cpu/x86/vm/assembler_x86.cpp src/cpu/x86/vm/c1_LIRAssembler_x86.cpp src/share/vm/adlc/output_c.cpp src/share/vm/classfile/dictionary.cpp src/share/vm/classfile/loaderConstraints.cpp src/share/vm/classfile/resolutionErrors.cpp src/share/vm/code/nmethod.cpp src/share/vm/compiler/compileBroker.hpp src/share/vm/compiler/compileLog.cpp src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp src/share/vm/interpreter/oopMapCache.cpp src/share/vm/interpreter/templateInterpreter.cpp src/share/vm/memory/blockOffsetTable.cpp src/share/vm/memory/heapInspection.cpp src/share/vm/oops/generateOopMap.cpp src/share/vm/oops/klassVtable.cpp src/share/vm/opto/node.cpp src/share/vm/opto/output.cpp src/share/vm/opto/phaseX.hpp src/share/vm/prims/forte.cpp src/share/vm/runtime/frame.cpp src/share/vm/runtime/vmThread.cpp src/share/vm/utilities/xmlstream.cpp
diffstat 31 files changed, 36 insertions(+), 35 deletions(-)
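
For context on why these omissions built cleanly in the first place: when an assert-style macro expands to a complete braced if-statement in debug builds and to nothing in product builds, a call site that drops the trailing semicolon still parses, so the mistake only surfaces if the macro definition ever changes shape. The sketch below illustrates the effect with a hypothetical my_assert macro and a hypothetical PRODUCT switch; it is modeled on that pattern, not the actual HotSpot debug.hpp definition.

#include <cstdio>
#include <cstdlib>

// Hypothetical assert-style macro, sketched after the braced-if pattern;
// NOT the real HotSpot definition from debug.hpp.
#ifdef PRODUCT
  #define my_assert(cond, msg)                          /* expands to nothing */
#else
  #define my_assert(cond, msg)                            \
    if (!(cond)) {                                        \
      std::fprintf(stderr, "assert failed: %s\n", msg);   \
      std::abort();                                       \
    }                                                     /* body already ends in '}' */
#endif

void example(int* p) {
  my_assert(p != NULL, "p must not be NULL")   // missing ';' still compiles:
                                               // the expansion is a complete
                                               // if-statement (or empty in
                                               // PRODUCT builds)
  *p = 0;
}

Because both expansions tolerate the omission, the semicolons below are added for consistency and to keep the call sites robust against future changes to the macros.
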
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -4082,7 +4082,7 @@
 // make it work.
 static void check_index(int ind) {
   assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
-         "Invariants.")
+         "Invariants.");
 }
 
 static void generate_satb_log_enqueue(bool with_frame) {
--- a/src/cpu/x86/vm/assembler_x86.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -7975,7 +7975,7 @@
       case 2: return "special";
       case 3: return "empty";
     }
-    ShouldNotReachHere()
+    ShouldNotReachHere();
     return NULL;
   }
 
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -2844,7 +2844,7 @@
   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
   __ jump(RuntimeAddress(__ pc()));
 
-  assert(__ offset() - start <= call_stub_size, "stub too big")
+  assert(__ offset() - start <= call_stub_size, "stub too big");
   __ end_a_stub();
 }
 
--- a/src/share/vm/adlc/output_c.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/adlc/output_c.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -721,8 +721,8 @@
   fprintf(fp_cpp, "  }\n");
   fprintf(fp_cpp, "#endif\n\n");
 #endif
-  fprintf(fp_cpp, "  assert(this, \"NULL pipeline info\")\n");
-  fprintf(fp_cpp, "  assert(pred, \"NULL predecessor pipline info\")\n\n");
+  fprintf(fp_cpp, "  assert(this, \"NULL pipeline info\");\n");
+  fprintf(fp_cpp, "  assert(pred, \"NULL predecessor pipline info\");\n\n");
   fprintf(fp_cpp, "  if (pred->hasFixedLatency())\n    return (pred->fixedLatency());\n\n");
   fprintf(fp_cpp, "  // If this is not an operand, then assume a dependence with 0 latency\n");
   fprintf(fp_cpp, "  if (opnd > _read_stage_count)\n    return (0);\n\n");
--- a/src/share/vm/classfile/dictionary.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/classfile/dictionary.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -127,7 +127,7 @@
 
 
 bool Dictionary::do_unloading(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   bool class_was_unloaded = false;
   int  index = 0; // Defined here for portability! Do not move
 
--- a/src/share/vm/classfile/loaderConstraints.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/classfile/loaderConstraints.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -103,7 +103,7 @@
 
 
 void LoaderConstraintTable::purge_loader_constraints(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   // Remove unloaded entries from constraint table
   for (int index = 0; index < table_size(); index++) {
     LoaderConstraintEntry** p = bucket_addr(index);
--- a/src/share/vm/classfile/resolutionErrors.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/classfile/resolutionErrors.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -102,7 +102,7 @@
 
 // Remove unloaded entries from the table
 void ResolutionErrorTable::purge_resolution_errors(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   for (int i = 0; i < table_size(); i++) {
     for (ResolutionErrorEntry** p = bucket_addr(i); *p != NULL; ) {
       ResolutionErrorEntry* entry = *p;
--- a/src/share/vm/code/nmethod.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/code/nmethod.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -1534,7 +1534,8 @@
             }
           }
           ic->set_to_clean();
-          assert(ic->cached_oop() == NULL, "cached oop in IC should be cleared")
+          assert(ic->cached_oop() == NULL,
+                 "cached oop in IC should be cleared");
         }
       }
     }
--- a/src/share/vm/compiler/compileBroker.hpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/compiler/compileBroker.hpp	Sun Oct 11 16:19:25 2009 -0700
@@ -310,7 +310,7 @@
 
   static AbstractCompiler* compiler(int level ) {
     if (level == CompLevel_fast_compile) return _compilers[0];
-    assert(level == CompLevel_highest_tier, "what level?")
+    assert(level == CompLevel_highest_tier, "what level?");
     return _compilers[1];
   }
 
--- a/src/share/vm/compiler/compileLog.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/compiler/compileLog.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -68,7 +68,7 @@
     return attrs;
   } else {
     // park it in the buffer, so we can put a null on the end
-    assert(!(kind >= buffer && kind < buffer+100), "not obviously in buffer")
+    assert(!(kind >= buffer && kind < buffer+100), "not obviously in buffer");
     int klen = attrs - kind;
     strncpy(buffer, kind, klen);
     buffer[klen] = 0;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -118,7 +118,7 @@
     // TreeList from the first chunk to the next chunk and update all
     // the TreeList pointers in the chunks in the list.
     if (nextTC == NULL) {
-      assert(prevFC == NULL, "Not last chunk in the list")
+      assert(prevFC == NULL, "Not last chunk in the list");
       set_tail(NULL);
       set_head(NULL);
     } else {
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -517,7 +517,7 @@
   assert(blk_start != NULL && blk_end > blk_start,
          "phantom block");
   assert(blk_end > threshold, "should be past threshold");
-  assert(blk_start <= threshold, "blk_start should be at or before threshold")
+  assert(blk_start <= threshold, "blk_start should be at or before threshold");
   assert(pointer_delta(threshold, blk_start) <= N_words,
          "offset should be <= BlockOffsetSharedArray::N");
   assert(Universe::heap()->is_in_reserved(blk_start),
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -4965,7 +4965,7 @@
   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
   while (pop_unclean_region_list_locked() != NULL) ;
   assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
-         "Postconditions of loop.")
+         "Postconditions of loop.");
   while (pop_free_region_list_locked() != NULL) ;
   assert(_free_region_list == NULL, "Postcondition of loop.");
   if (_free_region_list_size != 0) {
--- a/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -325,7 +325,7 @@
 
     eden_size = align_size_down(eden_size, alignment);
     eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
+    assert(eden_end >= eden_start, "addition overflowed");
 
     // To may resize into from space as long as it is clear of live data.
     // From space must remain page aligned, though, so we need to do some
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -326,7 +326,7 @@
     }
 
     eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
+    assert(eden_end >= eden_start, "addition overflowed");
 
     // To may resize into from space as long as it is clear of live data.
     // From space must remain page aligned, though, so we need to do some
@@ -413,7 +413,7 @@
                        pointer_delta(to_start, eden_start, sizeof(char)));
     }
     eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
+    assert(eden_end >= eden_start, "addition overflowed");
 
     // Don't let eden shrink down to 0 or less.
     eden_end = MAX2(eden_end, eden_start + alignment);
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Sun Oct 11 16:19:25 2009 -0700
@@ -65,7 +65,7 @@
   // and releasing the heap lock, which is held during gc's anyway. This method is not
   // safe for use at the same time as allocate_noexpand()!
   HeapWord* cas_allocate_noexpand(size_t word_size) {
-    assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint")
+    assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint");
     HeapWord* res = object_space()->cas_allocate(word_size);
     if (res != NULL) {
       _start_array.allocate_block(res);
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -3277,7 +3277,7 @@
     if (status == ParMarkBitMap::incomplete) {
       // The last obj that starts in the source region does not end in the
       // region.
-      assert(closure.source() < end_addr, "sanity")
+      assert(closure.source() < end_addr, "sanity");
       HeapWord* const obj_beg = closure.source();
       HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
                                        src_space_top);
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -521,7 +521,7 @@
     }
 
     eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
+    assert(eden_end >= eden_start, "addition overflowed");
 
     // To may resize into from space as long as it is clear of live data.
     // From space must remain page aligned, though, so we need to do some
@@ -605,7 +605,7 @@
                        pointer_delta(to_start, eden_start, sizeof(char)));
     }
     eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
+    assert(eden_end >= eden_start, "addition overflowed");
 
     // Could choose to not let eden shrink
     // to_start = MAX2(to_start, eden_end);
--- a/src/share/vm/interpreter/oopMapCache.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/interpreter/oopMapCache.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -224,8 +224,8 @@
   // If we are doing mark sweep _method may not have a valid header
   // $$$ This used to happen only for m/s collections; we might want to
   // think of an appropriate generalization of this distinction.
-  guarantee(Universe::heap()->is_gc_active() ||
-            _method->is_oop_or_null(), "invalid oop in oopMapCache")
+  guarantee(Universe::heap()->is_gc_active() || _method->is_oop_or_null(),
+            "invalid oop in oopMapCache");
 }
 
 #ifdef ENABLE_ZAP_DEAD_LOCALS
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -457,7 +457,7 @@
 
 void TemplateInterpreterGenerator::set_wide_entry_point(Template* t, address& wep) {
   assert(t->is_valid(), "template must exist");
-  assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions")
+  assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions");
   wep = __ pc(); generate_and_dispatch(t);
 }
 
--- a/src/share/vm/memory/blockOffsetTable.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/memory/blockOffsetTable.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -689,7 +689,7 @@
   assert(blk_end > _next_offset_threshold,
          "should be past threshold");
   assert(blk_start <= _next_offset_threshold,
-         "blk_start should be at or before threshold")
+         "blk_start should be at or before threshold");
   assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words,
          "offset should be <= BlockOffsetSharedArray::N");
   assert(Universe::heap()->is_in_reserved(blk_start),
--- a/src/share/vm/memory/heapInspection.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/memory/heapInspection.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -315,7 +315,7 @@
 
 void HeapInspection::find_instances_at_safepoint(klassOop k, GrowableArray<oop>* result) {
   assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
-  assert(Heap_lock->is_locked(), "should have the Heap_lock")
+  assert(Heap_lock->is_locked(), "should have the Heap_lock");
 
   // Ensure that the heap is parsable
   Universe::heap()->ensure_parsability(false);  // no need to retire TALBs
--- a/src/share/vm/oops/generateOopMap.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/oops/generateOopMap.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -807,7 +807,7 @@
 }
 
 CellTypeState GenerateOopMap::get_var(int localNo) {
-  assert(localNo < _max_locals + _nof_refval_conflicts, "variable read error")
+  assert(localNo < _max_locals + _nof_refval_conflicts, "variable read error");
   if (localNo < 0 || localNo > _max_locals) {
     verify_error("variable read error: r%d", localNo);
     return valCTS; // just to pick something;
--- a/src/share/vm/oops/klassVtable.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/oops/klassVtable.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -899,7 +899,7 @@
   int nof_methods = methods()->length();
   HandleMark hm;
   KlassHandle klass = _klass;
-  assert(nof_methods > 0, "at least one method must exist for interface to be in vtable")
+  assert(nof_methods > 0, "at least one method must exist for interface to be in vtable");
   Handle interface_loader (THREAD, instanceKlass::cast(interf_h())->class_loader());
   int ime_num = 0;
 
--- a/src/share/vm/opto/node.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/opto/node.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -1188,7 +1188,7 @@
         Node* use = dead->last_out(k);
         igvn->hash_delete(use);       // Yank from hash table prior to mod
         if (use->in(0) == dead) {     // Found another dead node
-          assert (!use->is_Con(), "Control for Con node should be Root node.")
+          assert (!use->is_Con(), "Control for Con node should be Root node.");
           use->set_req(0, top);       // Cut dead edge to prevent processing
           nstack.push(use);           // the dead node again.
         } else {                      // Else found a not-dead user
--- a/src/share/vm/opto/output.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/opto/output.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -1231,7 +1231,7 @@
         if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset ) {
           padding = nop_size;
         }
-        assert( labels_not_set || padding == 0, "instruction should already be aligned")
+        assert( labels_not_set || padding == 0, "instruction should already be aligned");
 
         if(padding > 0) {
           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
--- a/src/share/vm/opto/phaseX.hpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/opto/phaseX.hpp	Sun Oct 11 16:19:25 2009 -0700
@@ -310,7 +310,7 @@
   void dump_nodes_and_types_recur( const Node *n, uint depth, bool only_ctrl, VectorSet &visited);
 
   uint   _count_progress;       // For profiling, count transforms that make progress
-  void   set_progress()        { ++_count_progress; assert( allow_progress(),"No progress allowed during verification") }
+  void   set_progress()        { ++_count_progress; assert( allow_progress(),"No progress allowed during verification"); }
   void   clear_progress()      { _count_progress = 0; }
   uint   made_progress() const { return _count_progress; }
 
--- a/src/share/vm/prims/forte.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/prims/forte.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -647,7 +647,7 @@
 void Forte::register_stub(const char* name, address start, address end) {
 #if !defined(_WINDOWS) && !defined(IA64)
   assert(pointer_delta(end, start, sizeof(jbyte)) < INT_MAX,
-    "Code size exceeds maximum range")
+         "Code size exceeds maximum range");
 
   collector_func_load((char*)name, NULL, NULL, start,
     pointer_delta(end, start, sizeof(jbyte)), 0, NULL);
--- a/src/share/vm/runtime/frame.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/runtime/frame.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -844,7 +844,7 @@
   }
 
   void oop_at_offset_do(int offset) {
-    assert (offset >= 0, "illegal offset")
+    assert (offset >= 0, "illegal offset");
     oop* addr = (oop*) _fr->entry_frame_argument_at(offset);
     _f->do_oop(addr);
   }
--- a/src/share/vm/runtime/vmThread.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/runtime/vmThread.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -106,7 +106,7 @@
   // restore queue to empty state
   _queue[prio]->set_next(_queue[prio]);
   _queue[prio]->set_prev(_queue[prio]);
-  assert(queue_empty(prio), "drain corrupted queue")
+  assert(queue_empty(prio), "drain corrupted queue");
 #ifdef DEBUG
   int len = 0;
   VM_Operation* cur;
--- a/src/share/vm/utilities/xmlstream.cpp	Tue Apr 27 18:13:47 2010 -0700
+++ b/src/share/vm/utilities/xmlstream.cpp	Sun Oct 11 16:19:25 2009 -0700
@@ -328,7 +328,7 @@
 // ------------------------------------------------------------------
 void xmlStream::va_done(const char* format, va_list ap) {
   char buffer[200];
-  guarantee(strlen(format) + 10 < sizeof(buffer), "bigger format buffer")
+  guarantee(strlen(format) + 10 < sizeof(buffer), "bigger format buffer");
   const char* kind = format;
   const char* kind_end = strchr(kind, ' ');
   size_t kind_len = (kind_end != NULL) ? (kind_end - kind) : strlen(kind);
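
As a closing note on the design choice: if the same hypothetical macro were wrapped in the conventional do { ... } while (0) idiom, a call site that omits the semicolon would no longer compile (and the dangling-else hazard would disappear), which is one reason to keep every call site semicolon-terminated even while the current expansions happen to tolerate the omission. A minimal sketch, again assuming the hypothetical my_assert from above:

#include <cstdio>
#include <cstdlib>

// Same hypothetical macro, wrapped in do { ... } while (0);
// the call site must now supply the terminating ';'.
#define my_assert(cond, msg)                             \
  do {                                                   \
    if (!(cond)) {                                       \
      std::fprintf(stderr, "assert failed: %s\n", msg);  \
      std::abort();                                      \
    }                                                    \
  } while (0)

void example(int* p) {
  my_assert(p != NULL, "p must not be NULL");  // ';' required here; omitting it
                                               // is now a compile-time error
  *p = 0;
}
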