@@ -45,12 +45,11 @@
#include "dfuwu.hpp"
#include "fverror.hpp"
#include "nbcd.hpp"
+#include "thorcommon.hpp"

#include "jstring.hpp"
#include "exception_util.hpp"

-#include "ws_dfuService.hpp"
-
#include "hqlerror.hpp"
#include "hqlexpr.hpp"
#include "hqlutil.hpp"
@@ -61,6 +60,11 @@
#include "jflz.hpp"
#include "digisign.hpp"

+#include "wsdfuaccess/wsdfuaccess.hpp"
+
+#include "ws_dfuService.hpp"
+
+using namespace wsdfuaccess;
using namespace cryptohelper;


@@ -85,6 +89,7 @@ static const char *DFUFileIdSeparator = "|";
static const char *DFUFileCreate_FileNamePostfix = ".wsdfucreate.tmp";
static const char *DFUFileCreate_GroupNamePrefix = "wsdfucreate";
static const char *ConfigurationDirectoryForDataCategory = "data";
+static std::atomic<unsigned> dfuCreateUniqId{1};

const unsigned NODE_GROUP_CACHE_DEFAULT_TIMEOUT = 30*60*1000; //30 minutes

@@ -5887,7 +5892,7 @@ int CWsDfuEx::GetIndexData(IEspContext &context, bool bSchemaOnly, const char* i
    return iRet;
}

-void CWsDfuEx::getFilePartsInfo(IEspContext &context, IFileDescriptor *fdesc, unsigned numParts, bool forFileCreate, IEspDFUFileAccessInfo &accessInfo)
+void CWsDfuEx::getFilePartsInfo(IEspContext &context, IFileDescriptor &fileDesc, bool forFileCreate, IEspDFUFileAccessInfo &accessInfo)
{
    IArrayOf<IEspDFUFilePart> dfuParts;
    IArrayOf<IEspDFUPartLocation> dfuPartLocations;
@@ -5895,7 +5900,7 @@ void CWsDfuEx::getFilePartsInfo(IEspContext &context, IFileDescriptor *fdesc, un
    unsigned newLocationIndex = 0;
    MapStringTo<unsigned> partLocationMap;
    // NB: both CopyIndex and PartIndex are 1 based in response.
-    Owned<IPartDescriptorIterator> pi = fdesc->getIterator();
+    Owned<IPartDescriptorIterator> pi = fileDesc.getIterator();
    ForEach(*pi)
    {
        IPartDescriptor& part = pi->query();
@@ -5937,76 +5942,17 @@ void CWsDfuEx::getFilePartsInfo(IEspContext &context, IFileDescriptor *fdesc, un
        dfuParts.append(*filePart.getClear());
    }

-    accessInfo.setNumParts(numParts);
+    accessInfo.setNumParts(fileDesc.numParts());
    accessInfo.setFileParts(dfuParts);
    accessInfo.setFileLocations(dfuPartLocations);
}

-static const char *securityInfoVersion="1";
-void CWsDfuEx::getFileMeta(StringBuffer &metaInfoStr, StringBuffer &expiryTime, const char *fileName,
-    IFileDescriptor *fDesc, IUserDescriptor *user, const char *jobId, const char *keyPairName, IConstDFUFileAccessRequestBase &req)
-{
-    // setup "expiryTime"
-    unsigned expirySecs = req.getExpirySeconds();
-    if (expirySecs > maxFileAccessExpirySeconds)
-        expirySecs = maxFileAccessExpirySeconds;
-    time_t now;
-    time(&now);
-    CDateTime expiryDt;
-    expiryDt.set(now + expirySecs);
-    expiryDt.getString(expiryTime);
-
-    Owned<IPropertyTree> metaInfoEnvelope = createPTree();
-    Owned<IPropertyTree> metaInfo = createPTree();
-    extractFilePartInfo(*metaInfo, *fDesc);
-
-    MemoryBuffer metaInfoMb;
-
-    /* NB: If file access security is disabled in the environment, or on a per cluster basis
-     * keyPairName will be blank. In that case the meta data is returned in plain format.
-     * NB2: Dafilesrv's would also require file access security to be disabled in that case,
-     * otherwise they will be denied access.
-     * Should be part of the same configuration setup.
-     */
-#ifdef _USE_OPENSSL
-    if (!isEmptyString(keyPairName)) // without it, meta data is not encrypted
-    {
-        metaInfo->setProp("version", securityInfoVersion);
-        metaInfo->setProp("logicalFilename", fileName);
-        metaInfo->setProp("jobId", jobId);
-        metaInfo->setProp("accessType", req.getAccessTypeAsString());
-        StringBuffer userStr;
-        if (user)
-            metaInfo->setProp("user", user->getUserName(userStr).str());
-        metaInfo->setProp("keyPairName", keyPairName);
-        metaInfo->setProp("expiryTime", expiryTime);
-
-        MemoryBuffer metaInfoBlob;
-        metaInfo->serialize(metaInfoBlob);
-
-        const char *privateKeyFName = env->getPrivateKeyPath(keyPairName);
-        Owned<CLoadedKey> privateKey = loadPrivateKeyFromFile(privateKeyFName, nullptr);
-        StringBuffer metaInfoSignature;
-        digiSign(metaInfoSignature, metaInfoBlob.length(), metaInfoBlob.bytes(), *privateKey);
-        metaInfoEnvelope->setProp("signature", metaInfoSignature);
-        metaInfoEnvelope->setPropBin("metaInfoBlob", metaInfoBlob.length(), metaInfoBlob.bytes());
-        metaInfoEnvelope->serialize(metaInfoMb.clear());
-    }
-    else
-#endif
-        metaInfo->serialize(metaInfoMb);
-
-    MemoryBuffer compressedMetaInfoMb;
-    fastLZCompressToBuffer(compressedMetaInfoMb, metaInfoMb.length(), metaInfoMb.bytes());
-    JBASE64_Encode(compressedMetaInfoMb.bytes(), compressedMetaInfoMb.length(), metaInfoStr, false);
-}
-
-void CWsDfuEx::getFileDafilesrvConfiguration(StringBuffer &keyPairName, unsigned &port, bool &secure, const char *cluster)
+void CWsDfuEx::getFileDafilesrvConfiguration(StringBuffer &keyPairName, unsigned &port, bool &secure, const char *group)
{
    port = DEFAULT_ROWSERVICE_PORT;
    secure = false;
-    keyPairName.set(env->getClusterKeyPairName(cluster));
-    Owned<IConstDaFileSrvInfo> daFileSrvInfo = env->getDaFileSrvGroupInfo(cluster);
+    keyPairName.set(env->getClusterGroupKeyPairName(group));
+    Owned<IConstDaFileSrvInfo> daFileSrvInfo = env->getDaFileSrvGroupInfo(group);
    if (daFileSrvInfo)
    {
        port = daFileSrvInfo->getPort();
@@ -6014,22 +5960,20 @@ void CWsDfuEx::getFileDafilesrvConfiguration(StringBuffer &keyPairName, unsigned
    }
}

-void CWsDfuEx::getFileDafilesrvConfiguration(StringBuffer &keyPairName, unsigned &retPort, bool &retSecure, IDistributedFile &file)
+void CWsDfuEx::getFileDafilesrvConfiguration(StringBuffer &keyPairName, unsigned &retPort, bool &retSecure, const char *fileName, std::vector<std::string> &groups)
{
    retPort = DEFAULT_ROWSERVICE_PORT;
    retSecure = false;
-    unsigned numClusters = file.numClusters();
-    for (unsigned c=0; c<numClusters; c++)
+    bool firstGroup = true;
+    for (auto &group: groups)
    {
-        StringBuffer clusterName;
-        const char *cluster = file.getClusterName(c, clusterName.clear()).str();
-
        StringBuffer _keyPairName;
        unsigned port;
        bool secure;
-        getFileDafilesrvConfiguration(_keyPairName, port, secure, cluster);
-        if (0 == c)
+        getFileDafilesrvConfiguration(_keyPairName, port, secure, group.c_str());
+        if (firstGroup)
        {
+            firstGroup = false;
            keyPairName.set(_keyPairName);
            retPort = port;
            retSecure = secure;
@@ -6037,104 +5981,25 @@ void CWsDfuEx::getFileDafilesrvConfiguration(StringBuffer &keyPairName, unsigned
        else
        {
            if (!strsame(keyPairName, _keyPairName))
-                throwStringExceptionV(0, "Configuration issue - file '%s' is on multiple clusters, keys for file access must match", file.queryLogicalName());
+                throwStringExceptionV(0, "Configuration issue - file '%s' is on multiple clusters, keys for file access must match", fileName);
            if (retPort != port)
-                throwStringExceptionV(0, "Configuration issue - file '%s' is on multiple clusters, dafilesrv's port for file access must match", file.queryLogicalName());
+                throwStringExceptionV(0, "Configuration issue - file '%s' is on multiple clusters, dafilesrv's port for file access must match", fileName);
            if (retSecure != secure)
-                throwStringExceptionV(0, "Configuration issue - file '%s' is on multiple clusters, dafilesrv's security setting for file access must match", file.queryLogicalName());
+                throwStringExceptionV(0, "Configuration issue - file '%s' is on multiple clusters, dafilesrv's security setting for file access must match", fileName);
        }
    }
}

-void CWsDfuEx::getFileAccess(IEspContext &context, IUserDescriptor *udesc, SecAccessFlags accessType, IConstDFUFileAccessRequestBase &req, IEspDFUFileAccessInfo &accessInfo)
+static void getJsonTypeInfo(IFileDescriptor &fileDesc, IEspDFUFileAccessInfo &accessInfo)
{
-    bool writePermissions = (accessType == SecAccess_Write) || (accessType == SecAccess_Full);
-    bool readPermissions = true; // by implication
-
-    CDfsLogicalFileName lfn;
-    lfn.set(req.getName());
-    StringBuffer fileName = lfn.get();
-    if (!isEmptyString(req.getCluster()))
-        fileName.append("@").append(req.getCluster());
-
-    checkLogicalName(fileName, udesc, readPermissions, writePermissions, false, nullptr);
-
-    switch (accessType)
-    {
-        case SecAccess_Access:
-        case SecAccess_Read:
-            break;
-        default:
-        {
-            // NB - no handling for write/full at moment
-            return;
-        }
-    }
-    Owned<IDistributedFile> df = queryDistributedFileDirectory().lookup(fileName, udesc, false, false, true); // lock super-owners
-    if (!df)
-        throw MakeStringException(ECLWATCH_FILE_NOT_EXIST,"Cannot find file '%s'.", fileName.str());
-
-    Owned<IFileDescriptor> fileDesc = df->getFileDescriptor(req.getCluster());
-    CFileAccessRole role = req.getAccessRole();
-    switch (role)
-    {
-        case CFileAccessRole_Token:
-        {
-            break;
-        }
-        case CFileAccessRole_Engine:
-        {
-            /* JCSMORE - for now do nothing
-             * Ideally, would get the file tree here and add it to 'metaInfo' tree, i.e. outside of uncrypted secureInfo blob
-             * Then client could construct a IDistributeFile from it etc.
-             * However, the way the engines and IDistributedFile work at the moment, means that using this info
-             * at the client side would require a significant amount of refactoring of the IDistributedFile implementation.
-             * Not least because IDF allows updates via IPT -> Dali.
-             *
-             * So for now don't send anything, and rely on engine fetching the legacy way, i.e. direct from Dali, via lazy fetching etc.
-             */
-            break;
-        }
-        case CFileAccessRole_External:
-        {
-            getFilePartsInfo(context, fileDesc, df->numParts(), false, accessInfo);
-            if (req.getReturnJsonTypeInfo() || req.getReturnBinTypeInfo())
-            {
-                MemoryBuffer binLayout;
-                StringBuffer jsonLayout;
-                if (!getRecordFormatFromRtlType(binLayout, jsonLayout, df->queryAttributes(), req.getReturnBinTypeInfo(), req.getReturnJsonTypeInfo()))
-                    getRecordFormatFromECL(binLayout, jsonLayout, df->queryAttributes(), req.getReturnBinTypeInfo(), req.getReturnJsonTypeInfo());
-                if (req.getReturnJsonTypeInfo() && jsonLayout.length())
-                    accessInfo.setRecordTypeInfoJson(jsonLayout.str());
-                if (req.getReturnBinTypeInfo() && binLayout.length())
-                    accessInfo.setRecordTypeInfoBin(binLayout);
-            }
-            break;
-        }
-        default:
-            throwUnexpected();
-    }
-
-    StringBuffer keyPairName;
-    unsigned port;
-    bool secure;
-    getFileDafilesrvConfiguration(keyPairName, port, secure, *df);
-
-    StringBuffer metaInfo, expiryTime;
-    getFileMeta(metaInfo, expiryTime, fileName, fileDesc, udesc, req.getJobId(), keyPairName, req);
-    accessInfo.setMetaInfoBlob(metaInfo);
-    accessInfo.setExpiryTime(expiryTime);
-    accessInfo.setFileAccessPort(port);
-    accessInfo.setFileAccessSSL(secure);
-
-    StringBuffer userName;
-    if (udesc)
-        udesc->getUserName(userName);
-    LOG(daliAuditLogCat,",FileAccess,EspProcess,READ,%s,%s,%s,jobid=%s,expirySecs=%d", req.getCluster(),
-        userName.str(), fileName.str(), req.getJobId(), req.getExpirySeconds());
+    MemoryBuffer binLayout;
+    StringBuffer jsonLayout;
+    if (!getRecordFormatFromRtlType(binLayout, jsonLayout, fileDesc.queryProperties(), false, true))
+        getRecordFormatFromECL(binLayout, jsonLayout, fileDesc.queryProperties(), false, true);
+    if (jsonLayout.length())
+        accessInfo.setRecordTypeInfoJson(jsonLayout.str());
}

-
SecAccessFlags translateToSecAccessFlags(CSecAccessType from)
{
    switch (from)
@@ -6153,32 +6018,100 @@ SecAccessFlags translateToSecAccessFlags(CSecAccessType from)
    }
}

+void CWsDfuEx::dFUFileAccessCommon(IEspContext &context, const CDfsLogicalFileName &lfn, const char *requestId, unsigned expirySecs, bool returnTextResponse, IEspDFUFileAccessInfo &accessInfo)
+{
+    StringBuffer fileName;
+    lfn.get(fileName, false, true);
+    if (0 == fileName.length())
+        throw MakeStringException(ECLWATCH_INVALID_INPUT, "DFU File lookup: No Name defined. (requestId=%s, expirySecs=%u)", requestId, expirySecs);
+
+    StringBuffer userID;
+    context.getUserID(userID);
+
+    Owned<IUserDescriptor> userDesc;
+    if (!userID.isEmpty())
+    {
+        userDesc.setown(createUserDescriptor());
+        userDesc->set(userID.str(), context.queryPassword(), context.querySignature());
+    }
+
+    checkLogicalName(fileName, userDesc, true, false, false, nullptr); // check for read permissions
+
+    Owned<IDistributedFile> df = queryDistributedFileDirectory().lookup(fileName, userDesc, false, false, true); // lock super-owners
+    if (!df)
+        throw MakeStringException(ECLWATCH_FILE_NOT_EXIST,"Cannot find file '%s'.", fileName.str());
+
+    StringBuffer cluster;
+    lfn.getCluster(cluster);
+    Owned<IFileDescriptor> fileDesc = df->getFileDescriptor(cluster);
+
+    // NB: if file has copies on >1 cluster, they must share the same key
+
+    std::vector<std::string> groups;
+    unsigned numClusters = df->numClusters();
+    for (unsigned c=0; c<numClusters; c++)
+    {
+        StringBuffer clusterName;
+        const char *clusterGroup = df->getClusterName(c, clusterName.clear());
+        groups.push_back(clusterGroup);
+    }
+
+    StringBuffer keyPairName;
+    unsigned port;
+    bool secure;
+    getFileDafilesrvConfiguration(keyPairName, port, secure, fileName, groups);
+
+    Owned<IPropertyTree> metaInfo = createDFUFileMetaInfo(fileName, fileDesc, requestId, "READ", expirySecs, userDesc, keyPairName, port, secure, maxFileAccessExpirySeconds);
+    StringBuffer metaInfoBlob;
+    encodeDFUFileMeta(metaInfoBlob, metaInfo, env);
+    accessInfo.setMetaInfoBlob(metaInfoBlob);
+
+    if (returnTextResponse)
+    {
+        getFilePartsInfo(context, *fileDesc, false, accessInfo);
+        getJsonTypeInfo(*fileDesc, accessInfo);
+
+        accessInfo.setExpiryTime(metaInfo->queryProp("expiryTime"));
+        accessInfo.setFileAccessPort(metaInfo->getPropInt("port"));
+        accessInfo.setFileAccessSSL(metaInfo->getPropBool("secure"));
+    }
+
+    LOG(daliAuditLogCat,",FileAccess,EspProcess,READ,%s,%s,%s,jobid=%s,expirySecs=%d", cluster.str(), userID.str(), fileName.str(), requestId, expirySecs);
+}
+
+// NB: deprecated from ver >= 1.50
bool CWsDfuEx::onDFUFileAccess(IEspContext &context, IEspDFUFileAccessRequest &req, IEspDFUFileAccessResponse &resp)
{
    try
    {
        IConstDFUFileAccessRequestBase &requestBase = req.getRequestBase();
-        SecAccessFlags accessType = translateToSecAccessFlags(requestBase.getAccessType());
-        if (SecAccess_None == accessType)
-        {
-            context.setAuthStatus(AUTH_STATUS_NOACCESS);
-            throw MakeStringException(ECLWATCH_DFU_ACCESS_DENIED, "WsDfu::DFUFileAccess - Permission denied.");
-        }
-        context.ensureFeatureAccess(FEATURE_URL, accessType, ECLWATCH_DFU_ACCESS_DENIED, "WsDfu::DFUFileAccess: Permission denied.");

-        if (isEmptyString(requestBase.getName()))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "No Name defined.");
+        bool returnTextResponse = CFileAccessRole_External == requestBase.getAccessRole();

-        StringBuffer userID;
-        context.getUserID(userID);
+        CDfsLogicalFileName lfn;
+        lfn.set(requestBase.getName());
+        lfn.setCluster(requestBase.getCluster());

-        Owned<IUserDescriptor> userDesc;
-        if (!userID.isEmpty())
-        {
-            userDesc.setown(createUserDescriptor());
-            userDesc->set(userID.str(), context.queryPassword(), context.querySignature());
-        }
-        getFileAccess(context, userDesc, accessType, requestBase, resp.updateAccessInfo());
+        IEspDFUFileAccessInfo &accessInfo = resp.updateAccessInfo();
+        dFUFileAccessCommon(context, lfn, requestBase.getJobId(), requestBase.getExpirySeconds(), returnTextResponse, accessInfo);
+    }
+    catch (IException *e)
+    {
+        FORWARDEXCEPTION(context, e, ECLWATCH_INTERNAL_ERROR);
+    }
+    return true;
+}
+
+bool CWsDfuEx::onDFUFileAccessV2(IEspContext &context, IEspDFUFileAccessV2Request &req, IEspDFUFileAccessResponse &resp)
+{
+    try
+    {
+        CDfsLogicalFileName lfn;
+        lfn.set(req.getName());
+        lfn.setCluster(req.getCluster());
+
+        IEspDFUFileAccessInfo &accessInfo = resp.updateAccessInfo();
+        dFUFileAccessCommon(context, lfn, req.getRequestId(), req.getExpirySeconds(), req.getReturnTextResponse(), accessInfo);
    }
    catch (IException *e)
    {
@@ -6187,14 +6120,15 @@ bool CWsDfuEx::onDFUFileAccess(IEspContext &context, IEspDFUFileAccessRequest &r
    return true;
}

-IGroup *CWsDfuEx::getDFUFileIGroup(const char *clusterName, ClusterType clusterType, const char *clusterTypeEx, StringArray &locations, StringBuffer &groupName)
+// NB: deprecated from ver >= 1.50
+static IGroup *getDFUFileIGroup(const char *clusterName, ClusterType clusterType, const char *clusterTypeEx, StringArray &locations, StringBuffer &groupName)
{
    GroupType groupType;
    StringBuffer basedir;
    getClusterGroupName(groupName, clusterName);
    Owned<IGroup> groupFound = queryNamedGroupStore().lookup(groupName.str(), basedir, groupType);
    if (!groupFound)
-        throw MakeStringException(ECLWATCH_INVALID_INPUT, "Failed to get Group for Group %s.", groupName.str());
+        throw MakeStringException(ECLWATCH_INVALID_INPUT, "getDFUFileIGroup: Failed to get Group for Group %s.", groupName.str());

    if (!locations.ordinality())
        return groupFound.getClear();
@@ -6205,7 +6139,7 @@ IGroup *CWsDfuEx::getDFUFileIGroup(const char *clusterName, ClusterType clusterT
    {
        SocketEndpoint ep(locations.item(i));
        if (ep.isNull())
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Invalid location '%s'.", locations.item(i));
+            throw MakeStringException(ECLWATCH_INVALID_INPUT, "getDFUFileIGroup: Invalid location '%s'.", locations.item(i));
        epa.append(ep);
        locationStr.append(locations.item(i)).append(";");
    }
@@ -6243,7 +6177,7 @@ IGroup *CWsDfuEx::getDFUFileIGroup(const char *clusterName, ClusterType clusterT
    {
        StringBuffer defaultDir;
        if (!getConfigurationDirectory(nullptr, ConfigurationDirectoryForDataCategory, clusterTypeEx, groupName.str(), defaultDir))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Failed to get ConfigurationDirectory: %s.", groupName.str());
+            throw MakeStringException(ECLWATCH_INVALID_INPUT, "getDFUFileIGroup: Failed to get ConfigurationDirectory: %s.", groupName.str());

        GroupType grpType = (clusterType == ThorLCRCluster) ? grp_thor : ((clusterType == HThorCluster) ? grp_hthor : grp_roxie);
        queryNamedGroupStore().add(groupName.str(), group, false, defaultDir.str(), grpType);
@@ -6259,128 +6193,210 @@ void CWsDfuEx::exportRecordDefinitionBinaryType(const char *recordDefinition, Me
    if (errs.errCount() > 0)
    {
        StringBuffer errorMsg;
-        throw MakeStringException(ECLWATCH_INVALID_INPUT, "Failed in parsing ECL %s: %s.", recordDefinition, errs.toString(errorMsg).str());
+        throw MakeStringException(ECLWATCH_INVALID_INPUT, "exportRecordDefinitionBinaryType: Failed in parsing ECL %s: %s.", recordDefinition, errs.toString(errorMsg).str());
    }
    if (!expr)
-        throw MakeStringException(ECLWATCH_INVALID_INPUT, "Failed in parsing ECL: %s.", recordDefinition);
+        throw MakeStringException(ECLWATCH_INVALID_INPUT, "exportRecordDefinitionBinaryType: Failed in parsing ECL: %s.", recordDefinition);

    if (!exportBinaryType(layoutBin, expr, false))
-        throw MakeStringException(ECLWATCH_INVALID_INPUT, "Failed in exportBinaryType.");
-}
-
-void CWsDfuEx::getFileAccessBeforePublish(IEspContext &context, const char *fileName, const char *cluster,
-    const char *jobId, MemoryBuffer& layoutBin, IFileDescriptor *fileDesc, IUserDescriptor *udesc,
-    IConstDFUFileAccessRequestBase &req, IEspDFUFileCreateResponse &resp)
-{
-    IEspDFUFileAccessInfo &accessInfo = resp.updateAccessInfo();
-    getFilePartsInfo(context, fileDesc, fileDesc->numParts(), true, accessInfo);
-
-    StringBuffer keyPairName;
-    unsigned port;
-    bool secure;
-    getFileDafilesrvConfiguration(keyPairName, port, secure, cluster);
-
-    StringBuffer metaInfo, expiryTime;
-    getFileMeta(metaInfo, expiryTime, fileName, fileDesc, udesc, jobId, keyPairName, req);
-    accessInfo.setMetaInfoBlob(metaInfo);
-    accessInfo.setExpiryTime(expiryTime);
-
-    if (layoutBin.length() == 0)
-    {
-        if (!req.getReturnJsonTypeInfo() && !req.getReturnBinTypeInfo())
-            return;
-        resp.setWarning("RecordTypeInfoBin or RecordTypeInfoJson requested, but no type info found from ECLRecordDefinition");
-    }
-
-    if (req.getReturnJsonTypeInfo())
-    {
-        StringBuffer jsonLayout;
-        Owned<IRtlFieldTypeDeserializer> deserializer(createRtlFieldTypeDeserializer());
-        const RtlTypeInfo *typeInfo = deserializer->deserialize(layoutBin);
-        dumpTypeInfo(jsonLayout, typeInfo);
-        if (jsonLayout.length())
-            accessInfo.setRecordTypeInfoJson(jsonLayout.str());
-    }
-    if (req.getReturnBinTypeInfo())
-    {
-        MemoryBuffer binLayout;
-        layoutBin.swapWith(binLayout);
-        if (binLayout.length())
-            accessInfo.setRecordTypeInfoBin(binLayout);
-    }
+        throw MakeStringException(ECLWATCH_INVALID_INPUT, "exportRecordDefinitionBinaryType: Failed in exportBinaryType.");
}

+// NB: deprecated from ver >= 1.50
bool CWsDfuEx::onDFUFileCreate(IEspContext &context, IEspDFUFileCreateRequest &req, IEspDFUFileCreateResponse &resp)
{
    try
    {
        IConstDFUFileAccessRequestBase &requestBase = req.getRequestBase();
-        SecAccessFlags accessType = translateToSecAccessFlags(requestBase.getAccessType());
-        if (SecAccess_None == accessType)
-        {
-            context.setAuthStatus(AUTH_STATUS_NOACCESS);
-            throw MakeStringException(ECLWATCH_DFU_ACCESS_DENIED, "WsDfu::DFUFileCreate - Permission denied.");
-        }
-        context.ensureFeatureAccess(FEATURE_URL, accessType, ECLWATCH_DFU_ACCESS_DENIED, "WsDfu::DFUFileCreate: Permission denied.");
-
        const char *fileName = requestBase.getName();
        const char *clusterName = requestBase.getCluster();
        const char *recordDefinition = req.getECLRecordDefinition();
+        StringBuffer requestId = requestBase.getJobId();
+        unsigned expirySecs = requestBase.getExpirySeconds();
+        bool returnTextResponse = true;
+
        if (isEmptyString(fileName))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "No Name defined.");
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreate: No Name defined.");
        if (isEmptyString(clusterName))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "No Cluster defined.");
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreate: No Cluster defined.");
        if (isEmptyString(recordDefinition))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "No ECLRecordDefinition defined.");
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreate: No ECLRecordDefinition defined.");
+
+        StringBuffer userId;
+        context.getUserID(userId);
+        Owned<IUserDescriptor> userDesc;
+        if (!userId.isEmpty())
+        {
+            userDesc.setown(createUserDescriptor());
+            userDesc->set(userId.str(), context.queryPassword(), context.querySignature());
+        }

        ClusterType clusterType = getClusterTypeByClusterName(clusterName);
        if (clusterType == NoCluster)
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Cluster %s not found.", clusterName);
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreate: Cluster %s not found.", clusterName);

-        const char *clusterTypeEx = clusterTypeString(clusterType, false);
+        CDfsLogicalFileName lfn;
+        lfn.set(fileName);
+        StringBuffer normalizedFileName = lfn.get();

+        checkLogicalName(normalizedFileName, userDesc, false, true, false, nullptr);
+
+        const char *clusterTypeEx = clusterTypeString(clusterType, false);
        StringBuffer groupName;
        Owned<IGroup> group = getDFUFileIGroup(clusterName, clusterType, clusterTypeEx, req.getPartLocations(), groupName);

+        StringBuffer tempFileName = normalizedFileName;
+        tempFileName.append(DFUFileCreate_FileNamePostfix);
+
+        //create FileId
+        StringBuffer fileId;
+        fileId.set(groupName.str()).append(DFUFileIdSeparator).append(clusterName).append(DFUFileIdSeparator).append(tempFileName.str());
+        resp.setFileId(fileId.str());
+
+        if (requestId.isEmpty())
+            requestId.appendf("Create %s on %s", normalizedFileName.str(), clusterName);
+
+        Owned<IFileDescriptor> fileDesc = createFileDescriptor(tempFileName, clusterTypeString(clusterType, false), groupName, group);
+        fileDesc->queryProperties().setProp("@job", requestId);
+        if (!userId.isEmpty())
+            fileDesc->queryProperties().setProp("@owner", userId);
+
        MemoryBuffer layoutBin;
        exportRecordDefinitionBinaryType(recordDefinition, layoutBin);
+        if (0 == layoutBin.length())
+            throw makeStringExceptionV(ECLWATCH_INVALID_ECLRECDEF, "DFUFileCreate: Failed to parse ECL record definition: %s.", recordDefinition);
+        fileDesc->queryProperties().setPropBin("_rtlType", layoutBin.length(), layoutBin.toByteArray());
+        fileDesc->queryProperties().setProp("ECL", recordDefinition);
+
+        // NB: if file has copies on >1 cluster, they must share the same key
+        StringBuffer keyPairName;
+        unsigned port;
+        bool secure;
+
+        std::vector<std::string> groups;
+        groups.push_back(groupName.str());
+        getFileDafilesrvConfiguration(keyPairName, port, secure, normalizedFileName, groups);
+
+        Owned<IPropertyTree> metaInfo = createDFUFileMetaInfo(tempFileName, fileDesc, requestId, "WRITE", expirySecs, userDesc, keyPairName, port, secure, maxFileAccessExpirySeconds);
+        metaInfo->setProp("clusterName", clusterName);
+
+        StringBuffer metaInfoBlob;
+        encodeDFUFileMeta(metaInfoBlob, metaInfo, env);
+
+        IEspDFUFileAccessInfo &accessInfo = resp.updateAccessInfo();
+        accessInfo.setMetaInfoBlob(metaInfoBlob);
+
+        getFilePartsInfo(context, *fileDesc, true, accessInfo);
+        getJsonTypeInfo(*fileDesc, accessInfo);
+
+        accessInfo.setExpiryTime(metaInfo->queryProp("expiryTime"));
+        accessInfo.setFileAccessPort(metaInfo->getPropInt("port"));
+        accessInfo.setFileAccessSSL(metaInfo->getPropBool("secure"));
+    }
+    catch (IException *e)
+    {
+        FORWARDEXCEPTION(context, e, ECLWATCH_INTERNAL_ERROR);
+    }
+    return true;
+}
+
+bool CWsDfuEx::onDFUFileCreateV2(IEspContext &context, IEspDFUFileCreateV2Request &req, IEspDFUFileCreateResponse &resp)
+{
+    try
+    {
+        const char *fileName = req.getName();
+        const char *clusterName = req.getCluster();
+        const char *recordDefinition = req.getECLRecordDefinition();
+        unsigned expirySecs = req.getExpirySeconds();
+        StringBuffer requestId = req.getRequestId();
+        bool returnTextResponse = req.getReturnTextResponse();
+
+        if (isEmptyString(fileName))
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreateV2: No Name defined.");
+        if (isEmptyString(clusterName))
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreateV2: No Cluster defined.");
+        if (isEmptyString(recordDefinition))
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreateV2: No ECLRecordDefinition defined.");

        StringBuffer userId;
-        Owned<IUserDescriptor> userDesc;
        context.getUserID(userId);
+        Owned<IUserDescriptor> userDesc;
        if (!userId.isEmpty())
        {
            userDesc.setown(createUserDescriptor());
            userDesc->set(userId.str(), context.queryPassword(), context.querySignature());
        }

+        ClusterType clusterType = getClusterTypeByClusterName(clusterName);
+        if (clusterType == NoCluster)
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreateV2: Cluster %s not found.", clusterName);
+
+        StringBuffer groupName;
+        getClusterGroupName(groupName, clusterName);
+        if (isEmptyString(groupName))
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreateV2: Group for cluster %s not found.", clusterName);
+        Owned<IGroup> group = queryNamedGroupStore().lookup(groupName.str());
+        if (!group)
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFileCreateV2: Failed to get Group %s.", groupName.str());
+
        CDfsLogicalFileName lfn;
        lfn.set(fileName);
-        StringBuffer tempFileName = lfn.get();
+        StringBuffer normalizedFileName = lfn.get();
+
+        checkLogicalName(normalizedFileName, userDesc, false, true, false, nullptr);
+
+        StringBuffer tempFileName = normalizedFileName;
+        tempFileName.append(".").append(dfuCreateUniqId++); // avoid potential clash if >1 creating file. One will succeed at publish time.
        tempFileName.append(DFUFileCreate_FileNamePostfix);

-        StringBuffer jobId = requestBase.getJobId();
-        if (jobId.isEmpty())
-            jobId.appendf("Create %s on %s", tempFileName.str(), clusterName);
+        //create FileId
+        StringBuffer fileId;
+        fileId.set(groupName).append(DFUFileIdSeparator).append(clusterName).append(DFUFileIdSeparator).append(tempFileName);
+        resp.setFileId(fileId.str());

-        Owned<IFileDescriptor> fileDesc = createFileDescriptor(tempFileName, clusterTypeEx, groupName, group);
-        fileDesc->queryProperties().setProp("@job", jobId);
+        if (requestId.isEmpty())
+            requestId.appendf("Create %s on %s", normalizedFileName.str(), clusterName);
+
+        Owned<IFileDescriptor> fileDesc = createFileDescriptor(tempFileName, clusterTypeString(clusterType, false), groupName, group);
+        fileDesc->queryProperties().setProp("@job", requestId);
        if (!userId.isEmpty())
            fileDesc->queryProperties().setProp("@owner", userId);
+        fileDesc->queryProperties().setProp("ECL", recordDefinition);

-        Owned<IDistributedFile> file = queryDistributedFileDirectory().createNew(fileDesc);
-        file->setAccessed();
-        file->setECL(recordDefinition);
-        file->queryAttributes().setPropBin("_rtlType", layoutBin.length(), layoutBin.toByteArray());
-        file->queryAttributes().setPropInt64("@recordCount", 0);
-        file->queryAttributes().setPropInt64("@size", 0);
+        MemoryBuffer layoutBin;
+        exportRecordDefinitionBinaryType(recordDefinition, layoutBin);
+        if (0 == layoutBin.length())
+            throw makeStringExceptionV(ECLWATCH_INVALID_ECLRECDEF, "DFUFileCreateV2: Failed to parse ECL record definition: %s.", recordDefinition);
+        fileDesc->queryProperties().setPropBin("_rtlType", layoutBin.length(), layoutBin.toByteArray());
+        if (req.getCompressed())
+            fileDesc->queryProperties().setPropBool("@blockCompressed", true);
+
+        // NB: if file has copies on >1 cluster, they must share the same key
+        StringBuffer keyPairName;
+        unsigned port;
+        bool secure;

-        getFileAccessBeforePublish(context, tempFileName, clusterName, jobId, layoutBin, fileDesc, userDesc, requestBase, resp);
+        std::vector<std::string> groups;
+        groups.push_back(groupName.str());
+        getFileDafilesrvConfiguration(keyPairName, port, secure, normalizedFileName, groups);

-        //create FileId
-        StringBuffer fileID;
-        fileID.set(groupName.str()).append(DFUFileIdSeparator).append(clusterName).append(DFUFileIdSeparator).append(tempFileName.str());
-        resp.setFileId(fileID.str());
+        Owned<IPropertyTree> metaInfo = createDFUFileMetaInfo(tempFileName, fileDesc, requestId, "WRITE", expirySecs, userDesc, keyPairName, port, secure, maxFileAccessExpirySeconds);
+        metaInfo->setProp("clusterName", clusterName);
+
+        StringBuffer metaInfoBlob;
+        encodeDFUFileMeta(metaInfoBlob, metaInfo, env);
+
+        IEspDFUFileAccessInfo &accessInfo = resp.updateAccessInfo();
+        accessInfo.setMetaInfoBlob(metaInfoBlob);
+        if (returnTextResponse)
+        {
+            getFilePartsInfo(context, *fileDesc, true, accessInfo);
+            getJsonTypeInfo(*fileDesc, accessInfo);
+
+            accessInfo.setExpiryTime(metaInfo->queryProp("expiryTime"));
+            accessInfo.setFileAccessPort(metaInfo->getPropInt("port"));
+            accessInfo.setFileAccessSSL(metaInfo->getPropBool("secure"));
+        }
    }
    catch (IException *e)
    {
@@ -6391,55 +6407,97 @@ bool CWsDfuEx::onDFUFileCreate(IEspContext &context, IEspDFUFileCreateRequest &r

bool CWsDfuEx::onDFUFilePublish(IEspContext &context, IEspDFUFilePublishRequest &req, IEspDFUFilePublishResponse &resp)
{
+    Owned<IException> exception;
+    Owned<IDistributedFile> newFile;
+    Owned<IFileDescriptor> fileDesc;
+    StringBuffer normalizeTempFileName;
    try
    {
-        context.ensureFeatureAccess(FEATURE_URL, SecAccess_Write, ECLWATCH_DFU_ACCESS_DENIED, "WsDfu::DFUFilePublish: Permission denied.");
-
        const char *fileId = req.getFileId();
-        const char *recordDefinition = req.getECLRecordDefinition();
        if (isEmptyString(fileId))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "No FileId defined.");
-        if (isEmptyString(recordDefinition))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "No ECLRecordDefinition defined.");
+            throw makeStringException(ECLWATCH_INVALID_INPUT, "DFUFilePublish: No FileId defined.");
+
+        const char *recordDefinition = req.getECLRecordDefinition();

        StringArray fileIdItems;
        fileIdItems.appendList(fileId, DFUFileIdSeparator);
-        if (fileIdItems.ordinality() < 3)
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Invalid FileId '%s'.", fileId);
-
        const char *groupName = fileIdItems.item(0);
        const char *clusterName = fileIdItems.item(1);
        const char *tempFileName = fileIdItems.item(2);
        if (isEmptyString(groupName))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Invalid FileId: empty groupName.");
+            throw makeStringException(ECLWATCH_INVALID_INPUT, "DFUFilePublish: Invalid FileId: empty groupName.");
        if (isEmptyString(clusterName))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Invalid FileId: empty clusterName.");
+            throw makeStringException(ECLWATCH_INVALID_INPUT, "DFUFilePublish: Invalid FileId: empty clusterName.");
        if (isEmptyString(tempFileName))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Invalid FileId: empty FileName.");
+            throw makeStringException(ECLWATCH_INVALID_INPUT, "DFUFilePublish: Invalid FileId: empty FileName.");

-        StringBuffer userId, newFileName;
        CDfsLogicalFileName lfn;
        lfn.set(tempFileName);
-        newFileName.set(lfn.get());
-        if (newFileName.length() <= strlen(DFUFileCreate_FileNamePostfix))
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Invalid FileId: cannot read FileName from %s.", tempFileName);
-        newFileName.setLength(newFileName.length() - strlen(DFUFileCreate_FileNamePostfix)); //remove DFUFileCreate_FileNamePostfix
+        normalizeTempFileName.set(lfn.get());

-        ClusterType clusterType = getClusterTypeByClusterName(clusterName);
-        if (clusterType == NoCluster)
-            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Cluster %s not found.", clusterName);
+        size32_t postFixLen = strlen(DFUFileCreate_FileNamePostfix);
+        if (normalizeTempFileName.length() <= postFixLen)
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFilePublish: Invalid temporary filename: %s.", fileId);

-        const char *clusterTypeEx = clusterTypeString(clusterType, false);
+        const MemoryBuffer &fDescBlob = req.getFileDescriptorBlob();
+        if (fDescBlob.length())
+        {
+            MemoryBuffer mb;
+            mb.setBuffer(fDescBlob.length(), (void *)fDescBlob.toByteArray());
+            fileDesc.setown(deserializeFileDescriptor(mb));
+        }
+        else
+        {
+            if (isEmptyString(recordDefinition))
+                throw makeStringException(ECLWATCH_INVALID_INPUT, "DFUFilePublish: No ECLRecordDefinition defined.");

-        GroupType groupType;
-        StringBuffer basedir;
-        Owned<IGroup> group = queryNamedGroupStore().lookup(groupName, basedir, groupType);
-        if (!group)
-            throw MakeStringException(ECLWATCH_FILE_NOT_EXIST, "Failed to find group %s.", groupName);
+            ClusterType clusterType = getClusterTypeByClusterName(clusterName);
+            const char *clusterTypeEx = clusterTypeString(clusterType, false);
+            GroupType groupType;
+            StringBuffer basedir;
+            Owned<IGroup> group = queryNamedGroupStore().lookup(groupName, basedir, groupType);
+            if (!group)
+                throw makeStringExceptionV(ECLWATCH_FILE_NOT_EXIST, "DFUFilePublish: Failed to find group %s.", groupName);

-        MemoryBuffer layoutBin;
-        exportRecordDefinitionBinaryType(recordDefinition, layoutBin);
+            fileDesc.setown(createFileDescriptor(normalizeTempFileName, clusterTypeEx, groupName, group));
+        }

+        StringBuffer newFileName = normalizeTempFileName;
+        const char *start = newFileName;
+        const char *pos = start + (newFileName.length() - postFixLen);
+        const char *startPos = pos;
+        if (!streq(pos, DFUFileCreate_FileNamePostfix))
+            throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DFUFilePublish: Invalid temporary filename: %s.", fileId);
+        while (true)
+        {
+            --pos;
+            if (pos == start)
+            {
+                // old style ( <= 1.39 ), without unique id
+                newFileName.setLength(startPos-start);
+                break;
+            }
+            else if ('.' == *pos)
+            {
+                newFileName.setLength(pos-start);
+                break;
+            }
+        }
+
+        if (!isEmptyString(recordDefinition))
+        {
+            MemoryBuffer layoutBin;
+            exportRecordDefinitionBinaryType(recordDefinition, layoutBin);
+            fileDesc->queryProperties().setPropBin("_rtlType", layoutBin.length(), layoutBin.toByteArray());
+            fileDesc->queryProperties().setProp("ECL", recordDefinition);
+        }
+        if (!req.getRecordCount_isNull())
+            fileDesc->queryProperties().setPropInt64("@recordCount", req.getRecordCount());
+        if (!req.getFileSize_isNull())
+            fileDesc->queryProperties().setPropInt64("@size", req.getFileSize());
+
+        StringBuffer userId;
        Owned<IUserDescriptor> userDesc;
        context.getUserID(userId);
        if (!userId.isEmpty())
@@ -6447,37 +6505,49 @@ bool CWsDfuEx::onDFUFilePublish(IEspContext &context, IEspDFUFilePublishRequest
            userDesc.setown(createUserDescriptor());
            userDesc->set(userId.str(), context.queryPassword(), context.querySignature());
        }
+        Owned<IDistributedFile> df = queryDistributedFileDirectory().lookup(newFileName, userDesc, false, false, true);
+        if (df)
+        {
+            if (!req.getOverwrite())
+                throw makeStringExceptionV(ECLWATCH_FILE_ALREADY_EXISTS, "DFUFilePublish: File already exists (%s) and overwrite not specified.", newFileName.str());
+            df->detach(30000);
+        }

-        VStringBuffer jobId("Publish %s on %s", newFileName.str(), clusterName);
-        Owned<IFileDescriptor> fileDesc = createFileDescriptor(tempFileName, clusterTypeEx, groupName, group);
-        Owned<IDistributedFile> oldFile = queryDistributedFileDirectory().createNew(fileDesc);
-        oldFile->validate();
-
-        if (!oldFile->renamePhysicalPartFiles(newFileName.str(), nullptr, nullptr, nullptr))
-            throw MakeStringException(ECLWATCH_FILE_NOT_EXIST, "Failed in renamePhysicalPartFiles %s.", newFileName.str());
+        newFile.setown(queryDistributedFileDirectory().createNew(fileDesc));
+        newFile->validate();
+        newFile->setAccessed();
+        newFile->attach(normalizeTempFileName, userDesc);

-        Owned<IFileDescriptor> newFileDesc = createFileDescriptor(newFileName, clusterTypeEx, groupName, group);
-        newFileDesc->queryProperties().setProp("@job", jobId);
-        if (!userId.isEmpty())
-            newFileDesc->queryProperties().setProp("@owner", userId);
+        if (!newFile->renamePhysicalPartFiles(newFileName, nullptr, nullptr))//, fileDesc->queryDefaultDir()))
+            throw makeStringExceptionV(ECLWATCH_FILE_NOT_EXIST, "DFUFilePublish: Failed in renamePhysicalPartFiles %s.", newFileName.str());

-        Owned<IDistributedFile> newFile = queryDistributedFileDirectory().createNew(newFileDesc);
-        newFile->setAccessed();
-        newFile->setECL(recordDefinition);
-        newFile->queryAttributes().setPropBin("_rtlType", layoutBin.length(), layoutBin.toByteArray());
-        if (!req.getRecordCount_isNull())
-            newFile->queryAttributes().setPropInt64("@recordCount", req.getRecordCount());
-        if (!req.getFileSize_isNull())
-            newFile->queryAttributes().setPropInt64("@size", req.getFileSize());
-        newFile->attach(newFileName.str(), userDesc);
+        newFile->rename(newFileName, userDesc);

        LOG(daliAuditLogCat,",FileAccess,EspProcess,CREATED,%s,%s,%s", groupName, userId.str(), newFileName.str());
    }
    catch (IException *e)
    {
-        FORWARDEXCEPTION(context, e, ECLWATCH_INTERNAL_ERROR);
+        exception.setown(e);
+    }
+
+    if (exception)
+    {
+        if (fileDesc)
+        {
+            Owned<IMultiException> exceptions = MakeMultiException("CWsDfuEx::onDFUFilePublish");
+            queryDistributedFileDirectory().removePhysicalPartFiles(normalizeTempFileName, fileDesc, exceptions);
+            if (exceptions->ordinality())
+            {
+                StringBuffer errMsg("Error whilst clearing up temporary file: ");
+                EXCLOG(exceptions, errMsg.append(normalizeTempFileName).str());
+            }
+        }
+        if (newFile)
+            newFile->detach(30000);
+        FORWARDEXCEPTION(context, exception.getClear(), ECLWATCH_INTERNAL_ERROR);
    }
    return true;
}

+
//////////////////////HPCC Browser//////////////////////////