HBASE-17224 Fix lots of spelling errors in HBase logging and exception messages (Grant Sohn)
jmhsieh committed Dec 1, 2016
1 parent 540ede3 commit eeaea4a
Showing 29 changed files with 38 additions and 38 deletions.

@@ -1249,7 +1249,7 @@ public short getDFSReplication() {
   public HColumnDescriptor setDFSReplication(short replication) {
     if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) {
       throw new IllegalArgumentException(
-          "DFS replication factor cannot be less than 1 if explictly set.");
+          "DFS replication factor cannot be less than 1 if explicitly set.");
     }
     setValue(DFS_REPLICATION, Short.toString(replication));
     return this;
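
Aside: the guard in this hunk permits the sentinel DEFAULT_DFS_REPLICATION (0 in HBase, meaning "inherit the HDFS default") while rejecting any other value below 1. A minimal, self-contained sketch of that validation pattern, with the class reduced to an illustrative stand-in:

```java
// Illustrative stand-in for the HColumnDescriptor guard above; only the
// corrected exception message is taken verbatim from the hunk.
public class DfsReplicationCheck {
  static final short DEFAULT_DFS_REPLICATION = 0; // sentinel: use the HDFS default

  static short validate(short replication) {
    if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) {
      throw new IllegalArgumentException(
          "DFS replication factor cannot be less than 1 if explicitly set.");
    }
    return replication;
  }

  public static void main(String[] args) {
    System.out.println(validate((short) 3)); // accepted: explicit factor
    System.out.println(validate((short) 0)); // accepted: sentinel default
    try {
      validate((short) -1);                  // rejected with the message above
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}
```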

@@ -551,7 +551,7 @@ private void checkAndSyncTableDescToPeers(final TableName tableName, final byte[
       } else if (!peerHtd.equals(htd)) {
         throw new IllegalArgumentException("Table " + tableName.getNameAsString()
             + " exists in peer cluster " + repPeer.getId()
-            + ", but the table descriptors are not same when comapred with source cluster."
+            + ", but the table descriptors are not same when compared with source cluster."
             + " Thus can not enable the table's replication switch.");
       }
     }

@@ -182,7 +182,7 @@ public void trackPeerConfigChanges(ReplicationPeerConfigListener listener) {

   @Override
   public void abort(String why, Throwable e) {
-    LOG.fatal("The ReplicationPeer coresponding to peer " + peerConfig
+    LOG.fatal("The ReplicationPeer corresponding to peer " + peerConfig
         + " was aborted for the following reason(s):" + why, e);
   }


@@ -138,7 +138,7 @@ public ByteBuffer getBuffer() {

     if (LOG.isTraceEnabled()) {
       long allocations = allocationsRef.incrementAndGet();
-      LOG.trace("runningAverage=" + runningAverage + ", alloctions=" + allocations);
+      LOG.trace("runningAverage=" + runningAverage + ", allocations=" + allocations);
     }
     return bb;
   }

@@ -113,7 +113,7 @@ public HDFSBlocksDistribution() {
    */
   @Override
   public synchronized String toString() {
-    return "number of unique hosts in the disribution=" +
+    return "number of unique hosts in the distribution=" +
       this.hostAndWeights.size();
   }


@@ -138,7 +138,7 @@ public static String parseMasterServerName(String rsZnodePath) {
       String[] rsZnodeParts = rsZnodePath.split("/");
       masterServerName = rsZnodeParts[rsZnodeParts.length -1];
     } catch (IndexOutOfBoundsException e) {
-      LOG.warn("String " + rsZnodePath + " has wrong fromat", e);
+      LOG.warn("String " + rsZnodePath + " has wrong format", e);
     }
     return masterServerName;
   }

@@ -1598,7 +1598,7 @@ public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean
         blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread,
             doVerificationThruHBaseChecksum);
         if (blk != null) {
-          HFile.LOG.warn("HDFS checksum verification suceeded for file " +
+          HFile.LOG.warn("HDFS checksum verification succeeded for file " +
               pathName + " at offset " +
               offset + " filesize " + fileSize);
         }
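
Aside: this warning fires on a retry path, where a block whose HBase-level checksum verification failed is re-read relying on HDFS-level checksums; if that read succeeds, the data is fine and the HBase checksums are suspect. A hedged, self-contained sketch of that fallback shape (the reader interface and method names are assumptions for illustration, not the HFileBlock API):

```java
import java.io.IOException;

// Sketch of a two-tier checksum fallback: try the primary verification mode,
// and on failure retry with the alternative mode, warning when the retry
// succeeds. Simplified from my reading of the hunk above.
public class ChecksumFallbackSketch {
  interface BlockReader {
    byte[] read(long offset, boolean verifyThruHBaseChecksum) throws IOException;
  }

  static byte[] readBlock(BlockReader reader, long offset, String pathName)
      throws IOException {
    try {
      return reader.read(offset, true);        // primary: HBase-level checksums
    } catch (IOException primaryFailure) {
      byte[] blk = reader.read(offset, false); // fallback: HDFS-level checksums
      if (blk != null) {
        System.err.println("HDFS checksum verification succeeded for file "
            + pathName + " at offset " + offset);
      }
      return blk;
    }
  }
}
```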

@@ -1024,7 +1024,7 @@ private void checkIOErrorIsTolerated() {
     if (this.ioErrorStartTime > 0) {
       if (cacheEnabled && (now - ioErrorStartTime) > this.ioErrorsTolerationDuration) {
         LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration +
-          "ms, disabing cache, please check your IOEngine");
+          "ms, disabling cache, please check your IOEngine");
         disableCache();
       }
     } else {
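
Aside: the surrounding method implements a simple error-toleration window: the first IO error starts a timer, and only if errors persist past ioErrorsTolerationDuration does the cache get disabled. A self-contained sketch of that pattern (field names mirror the hunk; the class, the reset method, and the duration value are illustrative assumptions):

```java
// Sketch of the IO-error toleration window: a first error starts the clock,
// a later error past the window disables the cache, and any success resets it.
public class IoErrorTolerationSketch {
  private long ioErrorStartTime = -1;                     // -1: no ongoing error streak
  private final long ioErrorsTolerationDuration = 60_000; // ms, illustrative default
  private boolean cacheEnabled = true;

  void checkIOErrorIsTolerated(long now) {
    if (ioErrorStartTime > 0) {
      if (cacheEnabled && (now - ioErrorStartTime) > ioErrorsTolerationDuration) {
        System.err.println("IO errors duration time has exceeded "
            + ioErrorsTolerationDuration
            + "ms, disabling cache, please check your IOEngine");
        cacheEnabled = false;                             // stands in for disableCache()
      }
    } else {
      ioErrorStartTime = now;                             // first error of a new streak
    }
  }

  void onSuccessfulIO() {
    ioErrorStartTime = -1;                                // a clean operation resets the streak
  }
}
```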

@@ -473,7 +473,7 @@ Map<LoadQueueItem, ByteBuffer> performBulkLoad(final Admin admin, Table table,
       // need to reload split keys each iteration.
       final Pair<byte[][], byte[][]> startEndKeys = regionLocator.getStartEndKeys();
       if (count != 0) {
-        LOG.info("Split occured while grouping HFiles, retry attempt " +
+        LOG.info("Split occurred while grouping HFiles, retry attempt " +
            + count + " with " + queue.size() + " files remaining to group or split");
       }

@@ -903,12 +903,12 @@ protected Pair<List<LoadQueueItem>, String> groupOrSplit(
     if (indexForCallable < 0) {
       throw new IOException("The first region info for table "
           + table.getName()
-          + " cann't be found in hbase:meta.Please use hbck tool to fix it first.");
+          + " can't be found in hbase:meta.Please use hbck tool to fix it first.");
     } else if ((indexForCallable == startEndKeys.getFirst().length - 1)
         && !Bytes.equals(startEndKeys.getSecond()[indexForCallable], HConstants.EMPTY_BYTE_ARRAY)) {
       throw new IOException("The last region info for table "
           + table.getName()
-          + " cann't be found in hbase:meta.Please use hbck tool to fix it first.");
+          + " can't be found in hbase:meta.Please use hbck tool to fix it first.");
     } else if (indexForCallable + 1 < startEndKeys.getFirst().length
         && !(Bytes.compareTo(startEndKeys.getSecond()[indexForCallable],
           startEndKeys.getFirst()[indexForCallable + 1]) == 0)) {

@@ -298,7 +298,7 @@ private static Pair<ReplicationPeerConfig, Configuration> getPeerQuorumConfig(
       return pair;
     } catch (ReplicationException e) {
       throw new IOException(
-          "An error occured while trying to connect to the remove peer cluster", e);
+          "An error occurred while trying to connect to the remove peer cluster", e);
     } finally {
       if (peer != null) {
         peer.close();

@@ -460,7 +460,7 @@ public List<Float> getDispersionInformation() {

   public void print(boolean isDetailMode) {
     if (!isFilledUp) {
-      System.err.println("[Error] Region assignment verfication report" +
+      System.err.println("[Error] Region assignment verification report" +
         "hasn't been filled up");
     }
     DecimalFormat df = new java.text.DecimalFormat( "#.##");

@@ -89,7 +89,7 @@ public void requestMobCompaction(Configuration conf, FileSystem fs, TableName ta
       try {
         master.reportMobCompactionEnd(tableName);
       } catch (IOException e1) {
-        LOG.error("Failed to mark end of mob compation", e1);
+        LOG.error("Failed to mark end of mob compaction", e1);
       }
       throw e;
     }

@@ -131,7 +131,7 @@ public void run() {
       try {
         master.reportMobCompactionEnd(tableName);
       } catch (IOException e) {
-        LOG.error("Failed to mark end of mob compation", e);
+        LOG.error("Failed to mark end of mob compaction", e);
       }
     }
   }

@@ -976,8 +976,8 @@ public static void main(String args[]) throws IOException {
     opt.addOption("tables", true,
         "The list of table names splitted by ',' ;" +
         "For example: -tables: t1,t2,...,tn");
-    opt.addOption("l", "locality", true, "enforce the maxium locality");
-    opt.addOption("m", "min-move", true, "enforce minium assignment move");
+    opt.addOption("l", "locality", true, "enforce the maximum locality");
+    opt.addOption("m", "min-move", true, "enforce minimum assignment move");
    opt.addOption("diff", false, "calculate difference between assignment plans");
    opt.addOption("munkres", false,
        "use munkres to place secondaries and tertiaries");
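
Aside: the addOption calls above come from Apache Commons CLI. A hedged sketch of how such an option set is declared and then parsed (trimmed to a few options; the parsing half is an assumption about typical Commons CLI usage, not code from this file):

```java
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class OptionsSketch {
  public static void main(String[] args) throws ParseException {
    Options opt = new Options();
    // addOption(shortName, hasArg, description) and
    // addOption(shortName, longName, hasArg, description), as in the hunk
    opt.addOption("tables", true, "The list of table names split by ','");
    opt.addOption("l", "locality", true, "enforce the maximum locality");
    opt.addOption("m", "min-move", true, "enforce minimum assignment move");
    opt.addOption("diff", false, "calculate difference between assignment plans");

    CommandLine cmd = new DefaultParser().parse(opt, args);
    if (cmd.hasOption("l")) {
      System.out.println("locality threshold = "
          + Float.parseFloat(cmd.getOptionValue("l")));
    }
    if (cmd.hasOption("diff")) {
      System.out.println("will diff assignment plans");
    }
  }
}
```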

@@ -1091,7 +1091,7 @@ protected HRegionInfo getRegionInfo(final byte [] regionName) {
       }
       return hri;
     } catch (IOException e) {
-      server.abort("Aborting because error occoured while reading "
+      server.abort("Aborting because error occurred while reading "
          + Bytes.toStringBinary(regionName) + " from hbase:meta", e);
       return null;
     }

@@ -460,7 +460,7 @@ private ServerName[] singleRackCase(HRegionInfo regionInfo,
         getOneRandomServer(primaryRack, serverSkipSet);

     if (secondaryRS == null || tertiaryRS == null) {
-      LOG.error("Cannot place the secondary and terinary" +
+      LOG.error("Cannot place the secondary and ternary" +
          "region server for region " +
          regionInfo.getRegionNameAsString());
     }

@@ -498,7 +498,7 @@ private ServerName[] multiRackCase(HRegionInfo regionInfo,
     ServerName tertiaryRS = getOneRandomServer(secondaryRack, skipServerSet);

     if (secondaryRS == null || tertiaryRS == null) {
-      LOG.error("Cannot place the secondary and terinary" +
+      LOG.error("Cannot place the secondary and ternary" +
          "region server for region " +
          regionInfo.getRegionNameAsString());
     }

@@ -603,4 +603,4 @@ public static String getFavoredNodesAsString(List<ServerName> nodes) {
   }
   return strBuf.toString();
 }
-}
+}

@@ -349,7 +349,7 @@ private static void cleanAnyRemainingRows(final MasterProcedureEnv env,
       }
     }
     if (!deletes.isEmpty()) {
-      LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + tableName +
+      LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName +
        " from " + TableName.META_TABLE_NAME);
      metaTable.delete(deletes);
    }

@@ -416,14 +416,14 @@ private void doMergeInRS(final MasterProcedureEnv env) throws IOException {
         forcible,
         getUser());
       LOG.info("Sent merge to server " + getServerName(env) + " for region " +
-        getRegionsToMergeListEncodedNameString() + ", focible=" + forcible);
+        getRegionsToMergeListEncodedNameString() + ", forcible=" + forcible);
       return;
     } catch (RegionOpeningException roe) {
       // Do a retry since region should be online on RS immediately
       LOG.warn("Failed mergering regions in " + getServerName(env) + ", retrying...", roe);
     } catch (Exception ie) {
       LOG.warn("Failed sending merge to " + getServerName(env) + " for regions " +
-        getRegionsToMergeListEncodedNameString() + ", focible=" + forcible, ie);
+        getRegionsToMergeListEncodedNameString() + ", forcible=" + forcible, ie);
       return;
     }
   } while ((duration = EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));

@@ -697,7 +697,7 @@ private void preSplitRegionBeforePONR(final MasterProcedureEnv env)
         HRegionInfo.parseRegionName(p.getRow());
       }
     } catch (IOException e) {
-      LOG.error("Row key of mutation from coprossor is not parsable as region name."
+      LOG.error("Row key of mutation from coprocessor is not parsable as region name."
          + "Mutations from coprocessor should only for hbase:meta table.");
       throw e;
     }

@@ -199,7 +199,7 @@ private synchronized void startNewSubprocedure(String path) {
     try {
       byte[] data = ZKUtil.getData(zkController.getWatcher(), path);
       if (!ProtobufUtil.isPBMagicPrefix(data)) {
-        String msg = "Data in for starting procuedure " + opName +
+        String msg = "Data in for starting procedure " + opName +
          " is illegally formatted (no pb magic). " +
          "Killing the procedure: " + Bytes.toString(data);
        LOG.error(msg);

@@ -100,7 +100,7 @@ public void chore() {
             + region.getRegionInfo() + " under the store " + store.getColumnFamilyName());
       }
     } catch (Exception e) {
-      LOG.error("Exception while trying to close and archive the comapcted store "
+      LOG.error("Exception while trying to close and archive the compacted store "
          + "files of the store " + store.getColumnFamilyName() + " in the" + " region "
          + region.getRegionInfo(), e);
     }

@@ -206,7 +206,7 @@ public void start(ChoreService service) {

   public void stop() {
     // The thread is Daemon. Just interrupting the ongoing process.
-    LOG.info("Stoping HeapMemoryTuner chore.");
+    LOG.info("Stopping HeapMemoryTuner chore.");
     this.heapMemTunerChore.cancel(true);
   }


@@ -257,7 +257,7 @@ PairOfSameType<Region> createDaughters(final Server server,
         HRegionInfo.parseRegionName(p.getRow());
       }
     } catch (IOException e) {
-      LOG.error("Row key of mutation from coprossor is not parsable as region name."
+      LOG.error("Row key of mutation from coprocessor is not parsable as region name."
          + "Mutations from coprocessor should only for hbase:meta table.");
       throw e;
     }

@@ -77,7 +77,7 @@ public void process() throws IOException {
       break;
     case PREEMPTED:
       SplitLogCounters.tot_wkr_preempt_task.incrementAndGet();
-      LOG.warn("task execution prempted " + splitTaskDetails.getWALFile());
+      LOG.warn("task execution preempted " + splitTaskDetails.getWALFile());
       break;
     case ERR:
       if (server != null && !server.isStopped()) {

@@ -177,7 +177,7 @@ private void doBulkLoad(LoadIncrementalHFiles loadHFiles, Table table,
       // need to reload split keys each iteration.
       startEndKeys = locator.getStartEndKeys();
       if (count != 0) {
-        LOG.warn("Error occured while replicating HFiles, retry attempt " + count + " with "
+        LOG.warn("Error occurred while replicating HFiles, retry attempt " + count + " with "
            + queue.size() + " files still remaining to replicate.");
       }


@@ -225,7 +225,7 @@ private void updateGlobalCache(ListMultimap<String,TablePermission> userPerms) {
       mtime.incrementAndGet();
     } catch (IOException e) {
       // Never happens
-      LOG.error("Error occured while updating the global cache", e);
+      LOG.error("Error occurred while updating the global cache", e);
     }
   }


@@ -152,7 +152,7 @@ public byte[] retrievePassword(AuthenticationTokenIdentifier identifier)
     synchronized (this) {
       if (!leaderElector.isAlive() || leaderElector.isStopped()) {
         LOG.warn("Thread leaderElector[" + leaderElector.getName() + ":"
-            + leaderElector.getId() + "] is stoped or not alive");
+            + leaderElector.getId() + "] is stopped or not alive");
         leaderElector.start();
         LOG.info("Thread leaderElector [" + leaderElector.getName() + ":"
             + leaderElector.getId() + "] is started");

@@ -899,7 +899,7 @@ public void checkRegionBoundaries() {
       if (!valid) {
         errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
             tablesInfo.get(regionInfo.getTable()));
-        LOG.warn("Region's boundaries not alligned between stores and META for:");
+        LOG.warn("Region's boundaries not aligned between stores and META for:");
         LOG.warn(currentRegionBoundariesInformation);
       }
     }

@@ -933,7 +933,7 @@ private void adoptHdfsOrphan(HbckInfo hi) throws IOException {
     FileSystem fs = p.getFileSystem(getConf());
     FileStatus[] dirs = fs.listStatus(p);
     if (dirs == null) {
-      LOG.warn("Attempt to adopt ophan hdfs region skipped becuase no files present in " +
+      LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " +
        p + ". This dir could probably be deleted.");
      return ;
    }

@@ -1103,7 +1103,7 @@ private void offlineReferenceFileRepair() throws IOException, InterruptedExcepti
     Path rootDir = getSidelineDir();
     Path dst = new Path(rootDir, pathStr.substring(index + 1));
     fs.mkdirs(dst.getParent());
-    LOG.info("Trying to sildeline reference file "
+    LOG.info("Trying to sideline reference file "
      + path + " to " + dst);
    setShouldRerun();


@@ -368,7 +368,7 @@ void checkTableDir(Path tableDir) throws IOException {
       try {
         f.get();
       } catch (ExecutionException e) {
-        LOG.warn("Failed to quaratine an HFile in regiondir "
+        LOG.warn("Failed to quarantine an HFile in regiondir "
          + rdcs.get(i).regionDir, e.getCause());
         // rethrow IOExceptions
         if (e.getCause() instanceof IOException) {

@@ -1001,7 +1001,7 @@ public void waitUntilDrained() {
       try {
         controller.dataAvailable.wait(2000);
       } catch (InterruptedException e) {
-        LOG.warn("Got intrerrupted while waiting for EntryBuffers is drained");
+        LOG.warn("Got interrupted while waiting for EntryBuffers is drained");
         Thread.interrupted();
         break;
       }
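
Aside: this hunk sits in a classic guarded-wait loop: wait on a monitor with a timeout, re-check the drain condition, and stop waiting on interrupt. A self-contained sketch of the pattern (the drained flag and method names are illustrative; note the original clears the interrupt flag with Thread.interrupted(), whereas the sketch restores it, the more common idiom):

```java
public class GuardedWaitSketch {
  private final Object dataAvailable = new Object();
  private boolean drained = false;

  public void waitUntilDrained() {
    synchronized (dataAvailable) {
      while (!drained) {
        try {
          dataAvailable.wait(2000); // wake periodically to re-check the condition
        } catch (InterruptedException e) {
          System.err.println("Got interrupted while waiting for EntryBuffers to drain");
          Thread.currentThread().interrupt(); // restore the flag for callers
          break;
        }
      }
    }
  }

  public void markDrained() {
    synchronized (dataAvailable) {
      drained = true;
      dataAvailable.notifyAll(); // release any waiters
    }
  }
}
```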
