Skip to content

Commit

Permalink
HDDS-4581. Cleanup usage of volumeArgs in KeyRequests. (apache#1693)
Browse files Browse the repository at this point in the history
* HDDS-4581. Cleanup usage of volumeArgs in KeyRequests.
  • Loading branch information
bharatviswa504 committed Dec 14, 2020
1 parent e0c8556 commit e0ce757
Show file tree
Hide file tree
Showing 24 changed files with 47 additions and 154 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
import org.slf4j.Logger;
Expand Down Expand Up @@ -187,7 +186,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
boolean acquiredLock = false;

OmKeyInfo omKeyInfo = null;
OmVolumeArgs omVolumeArgs = null;
OmBucketInfo omBucketInfo = null;
final List<OmKeyLocationInfo> locations = new ArrayList<>();
List<OmKeyInfo> missingParentInfos;
Expand Down Expand Up @@ -279,7 +277,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
.collect(Collectors.toList());
omKeyInfo.appendNewBlocks(newLocationList, false);

omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
// check bucket and volume quota
long preAllocatedSpace = newLocationList.size()
Expand Down Expand Up @@ -310,8 +307,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
.setOpenVersion(openVersion).build())
.setCmdType(Type.CreateFile);
omClientResponse = new OMFileCreateResponse(omResponse.build(),
omKeyInfo, missingParentInfos, clientID, omVolumeArgs,
omBucketInfo.copyObject());
omKeyInfo, missingParentInfos, clientID, omBucketInfo.copyObject());

result = Result.SUCCESS;
} catch (IOException ex) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
Expand Down Expand Up @@ -167,7 +166,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

OmKeyInfo openKeyInfo = null;
IOException exception = null;
OmVolumeArgs omVolumeArgs = null;
OmBucketInfo omBucketInfo = null;
boolean acquiredLock = false;

Expand Down Expand Up @@ -196,7 +194,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

List<OmKeyLocationInfo> newLocationList = Collections.singletonList(
OmKeyLocationInfo.getFromProtobuf(blockLocation));
omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);

acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
volumeName, bucketName);
Expand All @@ -221,11 +218,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
new CacheValue<>(Optional.of(openKeyInfo), trxnLogIndex));

omBucketInfo.incrUsedBytes(preAllocatedSpace);

omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder()
.setKeyLocation(blockLocation).build());
omClientResponse = new OMAllocateBlockResponse(omResponse.build(),
openKeyInfo, clientID, omVolumeArgs, omBucketInfo.copyObject());
openKeyInfo, clientID, omBucketInfo.copyObject());

LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}",
volumeName, bucketName, openKeyName);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
Expand Down Expand Up @@ -123,7 +122,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

IOException exception = null;
OmKeyInfo omKeyInfo = null;
OmVolumeArgs omVolumeArgs = null;
OmBucketInfo omBucketInfo = null;
OMClientResponse omClientResponse = null;
boolean bucketLockAcquired = false;
Expand Down Expand Up @@ -193,7 +191,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

long scmBlockSize = ozoneManager.getScmBlockSize();
int factor = omKeyInfo.getFactor().getNumber();
omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
// Block was pre-requested and UsedBytes updated when createKey and
// AllocatedBlock. The space occupied by the Key shall be based on
Expand All @@ -204,8 +201,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
omBucketInfo.incrUsedBytes(correctedSpace);

omClientResponse = new OMKeyCommitResponse(omResponse.build(),
omKeyInfo, dbOzoneKey, dbOpenKey, omVolumeArgs,
omBucketInfo.copyObject());
omKeyInfo, dbOzoneKey, dbOpenKey, omBucketInfo.copyObject());

result = Result.SUCCESS;
} catch (IOException ex) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
Expand Down Expand Up @@ -197,7 +196,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
OmKeyInfo omKeyInfo = null;
OmVolumeArgs omVolumeArgs = null;
OmBucketInfo omBucketInfo = null;
final List< OmKeyLocationInfo > locations = new ArrayList<>();

Expand Down Expand Up @@ -288,7 +286,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
.collect(Collectors.toList());
omKeyInfo.appendNewBlocks(newLocationList, false);

omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
// Here we refer to the implementation of HDFS:
// If the key size is 600MB, when createKey, keyLocationInfo in
Expand Down Expand Up @@ -319,8 +316,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
.setOpenVersion(openVersion).build())
.setCmdType(Type.CreateKey);
omClientResponse = new OMKeyCreateResponse(omResponse.build(),
omKeyInfo, missingParentInfos, clientID, omVolumeArgs,
omBucketInfo.copyObject());
omKeyInfo, missingParentInfos, clientID, omBucketInfo.copyObject());

result = Result.SUCCESS;
} catch (IOException ex) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@

import com.google.common.base.Optional;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
Expand Down Expand Up @@ -109,7 +108,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
boolean acquiredLock = false;
OMClientResponse omClientResponse = null;
Result result = null;
OmVolumeArgs omVolumeArgs = null;
OmBucketInfo omBucketInfo = null;
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
Expand Down Expand Up @@ -143,7 +141,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
keyName)),
new CacheValue<>(Optional.absent(), trxnLogIndex));

omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);

long quotaReleased = sumBlockLengths(omKeyInfo);
Expand All @@ -156,7 +153,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

omClientResponse = new OMKeyDeleteResponse(omResponse
.setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
omKeyInfo, ozoneManager.isRatisEnabled(), omVolumeArgs,
omKeyInfo, ozoneManager.isRatisEnabled(),
omBucketInfo.copyObject());

result = Result.SUCCESS;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.PrefixManager;
Expand Down Expand Up @@ -609,30 +608,6 @@ protected boolean checkDirectoryAlreadyExists(String volumeName,
return false;
}

/**
 * Returns the volume info for the specified volume, consulting ONLY the
 * volume table's in-memory cache (no DB read is performed here). If the
 * volume has no cache entry, returns {@code null}.
 * @param omMetadataManager metadata manager that owns the volume table
 * @param volume name of the volume to look up
 * @return the cached {@link OmVolumeArgs}, or {@code null} when the volume
 *         is absent from the table cache
 */
protected OmVolumeArgs getVolumeInfo(OMMetadataManager omMetadataManager,
String volume) {

OmVolumeArgs volumeArgs = null;

CacheValue<OmVolumeArgs> value =
omMetadataManager.getVolumeTable().getCacheValue(
new CacheKey<>(omMetadataManager.getVolumeKey(volume)));

if (value != null) {
// NOTE(review): value.getCacheValue() may itself be null for a
// delete-marker (tombstone) entry — callers must handle a null return.
volumeArgs = value.getCacheValue();
}

return volumeArgs;
}

/**
* @return the number of bytes used by blocks pointed to by {@code omKeyInfo}.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@
import org.apache.hadoop.ozone.om.ResolvedBucket;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
Expand Down Expand Up @@ -156,7 +155,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
}

long quotaReleased = 0;
OmVolumeArgs omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
OmBucketInfo omBucketInfo =
getBucketInfo(omMetadataManager, volumeName, bucketName);

Expand All @@ -177,8 +175,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
.setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys))
.setStatus(deleteStatus ? OK : PARTIAL_DELETE)
.setSuccess(deleteStatus).build(), omKeyInfoList,
ozoneManager.isRatisEnabled(), omVolumeArgs,
omBucketInfo.copyObject());
ozoneManager.isRatisEnabled(), omBucketInfo.copyObject());

result = Result.SUCCESS;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@

import com.google.common.base.Optional;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.slf4j.Logger;
Expand Down Expand Up @@ -106,7 +105,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
getOmRequest());
OMClientResponse omClientResponse = null;
Result result = null;
OmVolumeArgs omVolumeArgs = null;
OmBucketInfo omBucketInfo = null;
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
Expand All @@ -125,7 +123,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

OmKeyInfo omKeyInfo =
omMetadataManager.getOpenKeyTable().get(multipartKey);
omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);

// If there is no entry in openKeyTable, then there is no multipart
Expand Down Expand Up @@ -168,7 +165,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
omResponse.setAbortMultiPartUploadResponse(
MultipartUploadAbortResponse.newBuilder()).build(),
multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
omVolumeArgs, omBucketInfo.copyObject());
omBucketInfo.copyObject());

result = Result.SUCCESS;
} catch (IOException ex) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
Expand Down Expand Up @@ -116,7 +115,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
String multipartKey = null;
OmMultipartKeyInfo multipartKeyInfo = null;
Result result = null;
OmVolumeArgs omVolumeArgs = null;
OmBucketInfo omBucketInfo = null;
OmBucketInfo copyBucketInfo = null;
try {
Expand Down Expand Up @@ -214,7 +212,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

long scmBlockSize = ozoneManager.getScmBlockSize();
int factor = omKeyInfo.getFactor().getNumber();
omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
// Block was pre-requested and UsedBytes updated when createKey and
// AllocatedBlock. The space occupied by the Key shall be based on
Expand All @@ -230,7 +227,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
omClientResponse = new S3MultipartUploadCommitPartResponse(
omResponse.build(), multipartKey, openKey,
multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
ozoneManager.isRatisEnabled(), omVolumeArgs,
ozoneManager.isRatisEnabled(),
omBucketInfo.copyObject());

result = Result.SUCCESS;
Expand All @@ -240,7 +237,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
omClientResponse = new S3MultipartUploadCommitPartResponse(
createErrorOMResponse(omResponse, exception), multipartKey, openKey,
multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
ozoneManager.isRatisEnabled(), omVolumeArgs, copyBucketInfo);
ozoneManager.isRatisEnabled(), copyBucketInfo);
} finally {
addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
omDoubleBufferHelper);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@

import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
Expand All @@ -41,10 +40,10 @@ public class OMFileCreateResponse extends OMKeyCreateResponse {

public OMFileCreateResponse(@Nonnull OMResponse omResponse,
@Nonnull OmKeyInfo omKeyInfo, @Nonnull List<OmKeyInfo> parentKeyInfos,
long openKeySessionID, @Nonnull OmVolumeArgs omVolumeArgs,
long openKeySessionID,
@Nonnull OmBucketInfo omBucketInfo) {
super(omResponse, omKeyInfo, parentKeyInfos, openKeySessionID,
omVolumeArgs, omBucketInfo);
omBucketInfo);
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
Expand All @@ -41,16 +40,14 @@ public class OMAllocateBlockResponse extends OMClientResponse {

private OmKeyInfo omKeyInfo;
private long clientID;
private OmVolumeArgs omVolumeArgs;
private OmBucketInfo omBucketInfo;

public OMAllocateBlockResponse(@Nonnull OMResponse omResponse,
@Nonnull OmKeyInfo omKeyInfo, long clientID,
@Nonnull OmVolumeArgs omVolumeArgs, @Nonnull OmBucketInfo omBucketInfo) {
@Nonnull OmBucketInfo omBucketInfo) {
super(omResponse);
this.omKeyInfo = omKeyInfo;
this.clientID = clientID;
this.omVolumeArgs = omVolumeArgs;
this.omBucketInfo = omBucketInfo;
}

Expand All @@ -74,7 +71,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,

// update bucket usedBytes.
omMetadataManager.getBucketTable().putWithBatch(batchOperation,
omMetadataManager.getBucketKey(omVolumeArgs.getVolume(),
omBucketInfo.getBucketName()), omBucketInfo);
omMetadataManager.getBucketKey(omKeyInfo.getVolumeName(),
omKeyInfo.getBucketName()), omBucketInfo);
}
}
Loading

0 comments on commit e0ce757

Please sign in to comment.