diff --git a/s3/libirods_s3.cpp b/s3/libirods_s3.cpp index 2f1c0802..ac715f75 100644 --- a/s3/libirods_s3.cpp +++ b/s3/libirods_s3.cpp @@ -137,6 +137,18 @@ int get_booleans_from_host_mode(const std::string& host_mode_str, return 0; } +// gets the resource name from the property map +std::string get_resource_name(irods::plugin_property_map& _prop_map) { + + std::string resc_property_name_str; + irods::error ret = _prop_map.get(irods::RESOURCE_NAME, resc_property_name_str); + if (ret.ok()) { + return resc_property_name_str; + } else { + return ""; + } +} + void get_modes_from_properties(irods::plugin_property_map& _prop_map, bool& attached_mode, bool& cacheless_mode) { @@ -149,9 +161,7 @@ void get_modes_from_properties(irods::plugin_property_map& _prop_map, if( ret.ok() ) { if ( get_booleans_from_host_mode(host_mode_str, attached_mode, cacheless_mode) < 0 ) { - - rodsLog(LOG_ERROR, "Invalid HOST_MODE for S3 plugin [%s]. Setting to default - archive_attached.", - host_mode_str.c_str()); + rodsLog(LOG_ERROR, "[resource_name=%s] Invalid HOST_MODE for S3 plugin [%s]. Setting to default - archive_attached.", get_resource_name(_prop_map).c_str(), host_mode_str.c_str()); _prop_map.set(host_mode, "archive_attached"); } @@ -191,7 +201,7 @@ static unsigned long long usNow() { // Return a malloc()'d C string containing the ASCII MD5 signature of the file // from start through length bytes, using pread to not affect file pointers. // The returned string needs to be free()d by the caller -static char *s3CalcMD5( int fd, off_t start, off_t length ) +static char *s3CalcMD5( int fd, off_t start, off_t length, const std::string& resource_name ) { char *buff; // Temp buff to do MD5 calc on unsigned char md5_bin[MD5_DIGEST_LENGTH]; @@ -200,7 +210,7 @@ static char *s3CalcMD5( int fd, off_t start, off_t length ) buff = (char *)malloc( 1024*1024 ); // 1MB chunk reads if ( buff == NULL ) { - rodsLog( LOG_ERROR, "Out of memory in S3 MD5 calculation, MD5 checksum will NOT be used for upload." ); + rodsLog( LOG_ERROR, "[resource_name=%s] Out of memory in S3 MD5 calculation, MD5 checksum will NOT be used for upload.", resource_name.c_str() ); return NULL; } @@ -208,7 +218,7 @@ static char *s3CalcMD5( int fd, off_t start, off_t length ) for ( read=0; (read + 1024*1024) < length; read += 1024*1024 ) { long ret = pread( fd, buff, 1024*1024, start ); if ( ret != 1024*1024 ) { - rodsLog( LOG_ERROR, "Error during MD5 pread of file, checksum will NOT be used for upload." ); + rodsLog( LOG_ERROR, "[resource_name=%s] Error during MD5 pread of file, checksum will NOT be used for upload.", resource_name.c_str() ); free( buff ); return NULL; } @@ -218,7 +228,7 @@ static char *s3CalcMD5( int fd, off_t start, off_t length ) // Partial read for the last bit long ret = pread( fd, buff, length-read, start ); if ( ret != length-read ) { - rodsLog( LOG_ERROR, "Error during MD5 pread of file, checksum will NOT be used for upload." ); + rodsLog( LOG_ERROR, "[resource_name=%s] Error during MD5 pread of file, checksum will NOT be used for upload.", resource_name.c_str() ); free( buff ); return NULL; } @@ -233,21 +243,21 @@ static char *s3CalcMD5( int fd, off_t start, off_t length ) b64 = BIO_new(BIO_f_base64()); bmem = BIO_new(BIO_s_mem()); if ( (b64 == NULL) || (bmem == NULL) ) { - rodsLog( LOG_ERROR, "Error during Base64 allocation, checksum will NOT be used for upload." 
); + rodsLog( LOG_ERROR, "[resource_name=%s] Error during Base64 allocation, checksum will NOT be used for upload.", resource_name.c_str() ); return NULL; } b64 = BIO_push(b64, bmem); BIO_write(b64, md5_bin, MD5_DIGEST_LENGTH); if (BIO_flush(b64) != 1) { - rodsLog( LOG_ERROR, "Error during Base64 computation, checksum will NOT be used for upload." ); + rodsLog( LOG_ERROR, "[resource_name=%s] Error during Base64 computation, checksum will NOT be used for upload.", resource_name.c_str() ); return NULL; } BIO_get_mem_ptr(b64, &bptr); char *md5_b64 = (char*)malloc( bptr->length ); if ( md5_b64 == NULL ) { - rodsLog( LOG_ERROR, "Error during MD5 allocation, checksum will NOT be used for upload." ); + rodsLog( LOG_ERROR, "[resource_name=%s] Error during MD5 allocation, checksum will NOT be used for upload.", resource_name.c_str() ); return NULL; } memcpy( md5_b64, bptr->data, bptr->length-1 ); @@ -334,9 +344,11 @@ static S3Status getObjectDataCallback( void *callbackData) { callback_data_t *cb = (callback_data_t *)callbackData; + irods::plugin_property_map *prop_map_ptr = cb ? cb->prop_map_ptr : nullptr; + std::string resource_name = prop_map_ptr != nullptr ? get_resource_name(*prop_map_ptr) : ""; irods::error result = ASSERT_ERROR(bufferSize != 0 && buffer != NULL && callbackData != NULL, - SYS_INVALID_INPUT_PARAM, "Invalid input parameter."); + SYS_INVALID_INPUT_PARAM, "[resource_name=%s] Invalid input parameter.", resource_name.c_str() ); if(!result.ok()) { irods::log(result); } @@ -348,7 +360,7 @@ static S3Status getObjectDataCallback( g_error_mutex.lock(); g_werr++; if (g_werr == g_werr_idx) { - rodsLog(LOG_ERROR, "Injecting a PWRITE error during S3 callback"); + rodsLog(LOG_ERROR, "[resource_name=%s] Injecting a PWRITE error during S3 callback", resource_name.c_str() ); g_error_mutex.unlock(); return S3StatusAbortedByCallback; } @@ -379,7 +391,7 @@ static int putObjectDataCallback( g_error_mutex.lock(); g_rerr++; if (g_rerr == g_rerr_idx) { - rodsLog(LOG_ERROR, "Injecting pread error in S3 callback"); + rodsLog(LOG_ERROR, "[resource_name=%s] Injecting pread error in S3 callback", get_resource_name(_prop_map).c_str()); ret = -1; } g_error_mutex.unlock(); @@ -400,12 +412,15 @@ S3Status listBucketCallback( callback_data_t *data = (callback_data_t *) callbackData; + irods::plugin_property_map *prop_map_ptr = data ? data->prop_map_ptr : nullptr; + std::string resource_name = prop_map_ptr != nullptr ? 
get_resource_name(*prop_map_ptr) : ""; + if (contentsCount <= 0) { data->keyCount = 0; return S3StatusOK; } else if (contentsCount > 1) { rodsLog (LOG_ERROR, - "listBucketCallback: contentsCount %d > 1 for %s", + "[resource_name=%s] listBucketCallback: contentsCount %d > 1 for %s", resource_name.c_str(), contentsCount, contents->key); } data->keyCount = contentsCount; @@ -420,7 +435,9 @@ S3Status listBucketCallback( irods::error parseS3Path ( const std::string& _s3ObjName, std::string& _bucket, - std::string& _key) { + std::string& _key, + irods::plugin_property_map& _prop_map ) { + irods::error result = SUCCESS(); size_t start_pos = 0; size_t slash_pos = 0; @@ -431,7 +448,7 @@ irods::error parseS3Path ( slash_pos = _s3ObjName.find_first_of("/", 1); } // have to have at least one slash to separate bucket from key - if((result = ASSERT_ERROR(slash_pos != std::string::npos, SYS_INVALID_FILE_PATH, "Problem parsing \"%s\".", + if((result = ASSERT_ERROR(slash_pos != std::string::npos, SYS_INVALID_FILE_PATH, "[resource_name=%s] Problem parsing \"%s\".", get_resource_name(_prop_map).c_str(), _s3ObjName.c_str())).ok()) { _bucket = _s3ObjName.substr(start_pos, slash_pos - start_pos); _key = _s3ObjName.substr(slash_pos + 1); @@ -457,7 +474,8 @@ S3SignatureVersion s3GetSignatureVersion (irods::plugin_property_map& _prop_map) irods::error readS3AuthInfo ( const std::string& _filename, std::string& _rtn_key_id, - std::string& _rtn_access_key) + std::string& _rtn_access_key, + irods::plugin_property_map& _prop_map ) { irods::error result = SUCCESS(); irods::error ret; @@ -470,7 +488,9 @@ irods::error readS3AuthInfo ( fptr = fopen (_filename.c_str(), "r"); - if ((result = ASSERT_ERROR(fptr != NULL, SYS_CONFIG_FILE_ERR, "Failed to open S3 auth file: \"%s\", errno = \"%s\".", + std::string resource_name = get_resource_name(_prop_map); + + if ((result = ASSERT_ERROR(fptr != NULL, SYS_CONFIG_FILE_ERR, "[resource_name=%s] Failed to open S3 auth file: \"%s\", errno = \"%s\".", resource_name.c_str(), _filename.c_str(), strerror(errno))).ok()) { while ((lineLen = getLine (fptr, inbuf, MAX_NAME_LEN)) > 0) { char *inPtr = inbuf; @@ -486,7 +506,7 @@ irods::error readS3AuthInfo ( } } } - if ((result = ASSERT_ERROR(linecnt == 2, SYS_CONFIG_FILE_ERR, "Read %d lines in the auth file. Expected 2.", + if ((result = ASSERT_ERROR(linecnt == 2, SYS_CONFIG_FILE_ERR, "[resource_name=%s] Read %d lines in the auth file. Expected 2.", resource_name.c_str(), linecnt)).ok()) { _rtn_key_id = access_key_id; _rtn_access_key = secret_access_key; @@ -494,7 +514,9 @@ irods::error readS3AuthInfo ( return result; } - result = ERROR( SYS_CONFIG_FILE_ERR, "Unknown error in authorization file." 
); + std::string error_str = boost::str(boost::format("[resource_name=%s] Unknown error in authorization file.") % resource_name.c_str()); + + result = ERROR( SYS_CONFIG_FILE_ERR, error_str.c_str()); return result; } @@ -509,6 +531,8 @@ irods::error s3ReadAuthInfo( std::string key_id; std::string access_key; + std::string resource_name = get_resource_name(_prop_map); + if ((tmpPtr = getenv(s3_key_id.c_str())) != NULL) { key_id = tmpPtr; if ((tmpPtr = getenv(s3_access_key.c_str())) != NULL) { @@ -517,13 +541,13 @@ irods::error s3ReadAuthInfo( } else { std::string auth_file; ret = _prop_map.get(s3_auth_file, auth_file); - if((result = ASSERT_PASS(ret, "Failed to retrieve S3 auth filename property.")).ok()) { - ret = readS3AuthInfo(auth_file, key_id, access_key); - if ((result = ASSERT_PASS(ret, "Failed reading the authorization credentials file.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to retrieve S3 auth filename property.", resource_name.c_str())).ok()) { + ret = readS3AuthInfo(auth_file, key_id, access_key, _prop_map); + if ((result = ASSERT_PASS(ret, "[resource_name=%s] Failed reading the authorization credentials file.", resource_name.c_str())).ok()) { ret = _prop_map.set(s3_key_id, key_id); - if((result = ASSERT_PASS(ret, "Failed to set the \"%s\" property.", s3_key_id.c_str())).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to set the \"%s\" property.", resource_name.c_str(), s3_key_id.c_str())).ok()) { ret = _prop_map.set(s3_access_key, access_key); - result = ASSERT_PASS(ret, "Failed to set the \"%s\" property.", s3_access_key.c_str()); + result = ASSERT_PASS(ret, "[resource_name=%s] Failed to set the \"%s\" property.", resource_name.c_str(), s3_access_key.c_str()); } } } @@ -540,6 +564,8 @@ irods::error s3Init ( std::vector hostname_vector; size_t hostname_index = 0; + std::string resource_name = get_resource_name(_prop_map); + g_hostnameIdxLock.lock(); // First, parse the default hostname (if present) into a list of @@ -582,7 +608,7 @@ irods::error s3Init ( } catch ( const boost::bad_lexical_cast& ) { rodsLog( LOG_ERROR, - "failed to cast retry count [%s] to an int", + "[resource_name=%s] failed to cast retry count [%s] to an int", resource_name.c_str(), retry_count_str.c_str() ); } } @@ -599,7 +625,7 @@ irods::error s3Init ( } catch ( const boost::bad_lexical_cast& ) { rodsLog( LOG_ERROR, - "failed to cast wait time [%s] to an int", + "[resource_name=%s] failed to cast wait time [%s] to an int", resource_name.c_str(), wait_time_str.c_str() ); } } @@ -613,6 +639,8 @@ irods::error s3InitPerOperation ( irods::plugin_property_map& _prop_map ) { irods::error result = SUCCESS(); + + std::string resource_name = get_resource_name(_prop_map); size_t retry_count = 10; std::string retry_count_str; @@ -644,7 +672,7 @@ irods::error s3InitPerOperation ( msg << "\""; } - result = ASSERT_ERROR(status == S3StatusOK, status, "Error initializing the S3 library. Status = %d.", + result = ASSERT_ERROR(status == S3StatusOK, status, "[resource_name=%s] Error initializing the S3 library. 
Status = %d.", resource_name.c_str(), status, msg.str().c_str()); if( result.ok() ) { @@ -654,13 +682,15 @@ irods::error s3InitPerOperation ( // Get S3 region name from plugin property map if (!_prop_map.get< std::string >(s3_region_name, region_name ).ok()) { - rodsLog( LOG_ERROR, "Failed to retrieve S3 region name from resource plugin properties, using 'us-east-1'"); + rodsLog( LOG_ERROR, "[resource_name=%s] Failed to retrieve S3 region name from resource plugin properties, using 'us-east-1'", resource_name.c_str()); } S3Status status = S3_set_region_name(region_name.c_str()); if (status != S3StatusOK) { - rodsLog(LOG_ERROR, "failed to set region name to %s: %s", region_name.c_str(), S3_get_status_name(status)); - return ERROR(S3_INIT_ERROR, "S3_set_region_name() failed."); + std::string error_str = boost::str(boost::format("[resource_name=%s] failed to set region name to %s: %s") % resource_name.c_str() % + region_name.c_str() % S3_get_status_name(status)); + rodsLog(LOG_ERROR, error_str.c_str()); + return ERROR(S3_INIT_ERROR, error_str.c_str()); } } @@ -857,6 +887,8 @@ static void mrdWorkerThread ( S3BucketContext bucketContext = *((S3BucketContext*)bucketContextParam); irods::plugin_property_map _prop_map = *((irods::plugin_property_map*)pluginPropertyMapParam); + std::string resource_name = get_resource_name(_prop_map); + irods::error result; std::stringstream msg; S3GetObjectHandler getObjectHandler = { {mrdRangeRespPropCB, mrdRangeRespCompCB }, mrdRangeGetDataCB }; @@ -893,6 +925,7 @@ static void mrdWorkerThread ( // at the wrong offset and length. rangeData = g_mrdData[seq-1]; rangeData.pCtx = &bucketContext; + rangeData.prop_map_ptr = &_prop_map; msg.str( std::string() ); // Clear msg << "Multirange: Start range " << (int)seq << ", key \"" << g_mrdKey << "\", offset " @@ -912,7 +945,8 @@ static void mrdWorkerThread ( } while ((rangeData.status != S3StatusOK) && S3_status_is_retryable(rangeData.status) && (++retry_cnt < retry_count_limit)); if (rangeData.status != S3StatusOK) { msg.str( std::string() ); // Clear - msg << __FUNCTION__ << " - Error getting the S3 object: \"" << g_mrdKey << "\" range " << seq; + msg << "[resource_name=" << resource_name << "] " << __FUNCTION__ + << " - Error getting the S3 object: \"" << g_mrdKey << "\" range " << seq; if (rangeData.status >= 0) { msg << " - \"" << S3_get_status_name( rangeData.status ) << "\""; } @@ -957,6 +991,8 @@ irods::error s3GetFile( irods::error result = SUCCESS(); irods::error ret; + std::string resource_name = get_resource_name(_prop_map); + size_t retry_count_limit = S3_DEFAULT_RETRY_COUNT; _prop_map.get(s3_retry_count_size_t, retry_count_limit); @@ -966,15 +1002,15 @@ irods::error s3GetFile( int cache_fd = -1; std::string bucket; std::string key; - ret = parseS3Path(_s3ObjName, bucket, key); - if((result = ASSERT_PASS(ret, "Failed parsing the S3 bucket and key from the physical path: \"%s\".", + ret = parseS3Path(_s3ObjName, bucket, key, _prop_map); + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed parsing the S3 bucket and key from the physical path: \"%s\".", resource_name.c_str(), _s3ObjName.c_str())).ok()) { ret = s3InitPerOperation( _prop_map ); - if((result = ASSERT_PASS(ret, "Failed to initialize the S3 system.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to initialize the S3 system.", resource_name.c_str())).ok()) { cache_fd = open(_filename.c_str(), O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR); - if((result = ASSERT_ERROR(cache_fd != -1, UNIX_FILE_OPEN_ERR, "Failed to open the cache 
file: \"%s\".", + if((result = ASSERT_ERROR(cache_fd != -1, UNIX_FILE_OPEN_ERR, "[resource_name=%s] Failed to open the cache file: \"%s\".", resource_name.c_str(), _filename.c_str())).ok()) { callback_data_t data; @@ -999,6 +1035,7 @@ irods::error s3GetFile( size_t retry_cnt = 0; do { bzero (&data, sizeof (data)); + data.prop_map_ptr = &_prop_map; data.fd = cache_fd; data.contentLength = data.originalContentLength = _fileSize; unsigned long long usStart = usNow(); @@ -1013,7 +1050,7 @@ irods::error s3GetFile( } while ( (data.status != S3StatusOK) && S3_status_is_retryable(data.status) && (++retry_cnt < retry_count_limit) ); if (data.status != S3StatusOK) { std::stringstream msg; - msg << __FUNCTION__ << " - Error fetching the S3 object: \"" << _s3ObjName << "\""; + msg << "[resource_name=" << resource_name << "] " << __FUNCTION__ << " - Error fetching the S3 object: \"" << _s3ObjName << "\""; if (data.status >= 0) { msg << " - \"" << S3_get_status_name((S3Status)data.status) << "\""; } @@ -1024,6 +1061,7 @@ irods::error s3GetFile( // Only the FD part of this will be constant bzero (&data, sizeof (data)); + data.prop_map_ptr = &_prop_map; data.fd = cache_fd; data.contentLength = data.originalContentLength = _fileSize; @@ -1038,9 +1076,9 @@ irods::error s3GetFile( g_mrdData = (multirange_data_t*)calloc(totalSeq, sizeof(multirange_data_t)); if (!g_mrdData) { - const char *msg = "Out of memory error in S3 multirange g_mrdData allocation."; - rodsLog( LOG_ERROR, msg ); - result = ERROR( SYS_MALLOC_ERR, msg ); + std::string msg = boost::str(boost::format("[resource_name=%s] Out of memory error in S3 multirange g_mrdData allocation.") % resource_name.c_str()); + rodsLog( LOG_ERROR, msg.c_str() ); + result = ERROR( SYS_MALLOC_ERR, msg.c_str() ); return result; } @@ -1049,6 +1087,7 @@ irods::error s3GetFile( g_mrdKey = key.c_str(); for(seq = 0; seq < totalSeq ; seq ++) { memset(&rangeData, 0, sizeof(rangeData)); + rangeData.prop_map_ptr = &_prop_map; rangeData.seq = seq; rangeData.get_object_data = data; rangeLength = (data.contentLength > chunksize)?chunksize:data.contentLength; @@ -1081,10 +1120,10 @@ irods::error s3GetFile( if (!g_mrdResult.ok()) { // Someone aborted after we started, delete the partial object on S3 - rodsLog(LOG_ERROR, "Cancelling multipart download"); + rodsLog(LOG_ERROR, "[resource_name=%s] Cancelling multipart download", resource_name.c_str()); // 0-length the file, it's garbage if (ftruncate( cache_fd, 0 )) - rodsLog(LOG_ERROR, "Unable to 0-length the result file"); + rodsLog(LOG_ERROR, "[resource_name=%s] Unable to 0-length the result file", resource_name.c_str()); result = g_mrdResult; } // Clean up memory @@ -1193,7 +1232,7 @@ static int mpuCommitXmlCB ( g_error_mutex.lock(); g_merr++; if (g_merr == g_merr_idx) { - rodsLog(LOG_ERROR, "Injecting a XML upload error during S3 callback"); + rodsLog(LOG_ERROR, "[resource_name=%s] Injecting a XML upload error during S3 callback", get_resource_name(_prop_map).c_str()); g_error_mutex.unlock(); ret = -1; } @@ -1243,13 +1282,16 @@ static void mpuCancelRespCompCB ( // The WorkerThread will note that status!=OK and act appropriately (retry or fail) } -static void mpuCancel( S3BucketContext *bucketContext, const char *key, const char *upload_id ) +static void mpuCancel( S3BucketContext *bucketContext, const char *key, const char *upload_id, + irods::plugin_property_map& _prop_map ) { S3AbortMultipartUploadHandler abortHandler = { { mpuCancelRespPropCB, mpuCancelRespCompCB } }; std::stringstream msg; S3Status status; - msg << 
"Cancelling multipart upload: key=\"" << key << "\", upload_id=\"" << upload_id << "\""; + std::string resource_name = get_resource_name(_prop_map); + + msg << "[resource_name=" << resource_name << "] " << "Cancelling multipart upload: key=\"" << key << "\", upload_id=\"" << upload_id << "\""; rodsLog( LOG_ERROR, msg.str().c_str() ); g_mpuCancelRespCompCB_status = S3StatusOK; g_mpuCancelRespCompCB_pCtx = bucketContext; @@ -1257,7 +1299,7 @@ static void mpuCancel( S3BucketContext *bucketContext, const char *key, const ch status = g_mpuCancelRespCompCB_status; if (status != S3StatusOK) { msg.str( std::string() ); // Clear - msg << __FUNCTION__ << " - Error cancelling the multipart upload of S3 object: \"" << key << "\""; + msg << "[resource_name=" << resource_name << "] " << __FUNCTION__ << " - Error cancelling the multipart upload of S3 object: \"" << key << "\""; if (status >= 0) { msg << " - \"" << S3_get_status_name(status) << "\""; } @@ -1273,6 +1315,8 @@ static void mpuWorkerThread ( S3BucketContext bucketContext = *((S3BucketContext*)bucketContextParam); irods::plugin_property_map _prop_map = *((irods::plugin_property_map*)pluginPropertyMapParam); + std::string resource_name = get_resource_name(_prop_map); + irods::error result; std::stringstream msg; S3PutObjectHandler putObjectHandler = { {mpuPartRespPropCB, mpuPartRespCompCB }, &mpuPartPutDataCB }; @@ -1320,7 +1364,7 @@ static void mpuWorkerThread ( S3PutProperties *putProps = NULL; putProps = (S3PutProperties*)calloc( sizeof(S3PutProperties), 1 ); if ( putProps && partData.enable_md5 ) - putProps->md5 = s3CalcMD5( partData.put_object_data.fd, partData.put_object_data.offset, partData.put_object_data.contentLength ); + putProps->md5 = s3CalcMD5( partData.put_object_data.fd, partData.put_object_data.offset, partData.put_object_data.contentLength, resource_name ); putProps->expires = -1; unsigned long long usStart = usNow(); std::string&& hostname = s3GetHostname(_prop_map); @@ -1354,7 +1398,7 @@ static void mpuWorkerThread ( } while ((partData.status != S3StatusOK) && S3_status_is_retryable(partData.status) && (++retry_cnt < retry_count_limit)); if (partData.status != S3StatusOK) { msg.str( std::string() ); // Clear - msg << __FUNCTION__ << " - Error putting the S3 object: \"" << g_mpuKey << "\"" << " part " << seq; + msg << "[resource_name=" << resource_name << "] " << __FUNCTION__ << " - Error putting the S3 object: \"" << g_mpuKey << "\"" << " part " << seq; if(partData.status >= 0) { msg << " - \"" << S3_get_status_name(partData.status) << "\""; } @@ -1389,18 +1433,20 @@ irods::error s3PutCopyFile( bool server_encrypt = s3GetServerEncrypt ( _prop_map ); std::stringstream msg; + std::string resource_name = get_resource_name(_prop_map); + size_t retry_count_limit = S3_DEFAULT_RETRY_COUNT; _prop_map.get(s3_retry_count_size_t, retry_count_limit); size_t retry_wait = S3_DEFAULT_RETRY_WAIT_SEC; _prop_map.get(s3_wait_time_sec_size_t, retry_wait); - ret = parseS3Path(_s3ObjName, bucket, key); - if((result = ASSERT_PASS(ret, "Failed parsing the S3 bucket and key from the physical path: \"%s\".", + ret = parseS3Path(_s3ObjName, bucket, key, _prop_map); + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed parsing the S3 bucket and key from the physical path: \"%s\".", resource_name.c_str(), _s3ObjName.c_str())).ok()) { ret = s3InitPerOperation( _prop_map ); - if((result = ASSERT_PASS(ret, "Failed to initialize the S3 system.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to initialize the S3 system.", 
resource_name.c_str())).ok()) { if (_mode == S3_PUTFILE) { cache_fd = open(_filename.c_str(), O_RDONLY); @@ -1414,7 +1460,7 @@ irods::error s3PutCopyFile( cache_fd = -1; err_status = UNIX_FILE_OPEN_ERR; } - if((result = ASSERT_ERROR(cache_fd != -1, err_status, "Failed to open the cache file: \"%s\".", + if((result = ASSERT_ERROR(cache_fd != -1, err_status, "[resource_name=%s] Failed to open the cache file: \"%s\".", resource_name.c_str(), _filename.c_str())).ok()) { callback_data_t data; @@ -1432,7 +1478,7 @@ irods::error s3PutCopyFile( S3PutProperties *putProps = NULL; putProps = (S3PutProperties*)calloc( sizeof(S3PutProperties), 1 ); if ( putProps && enable_md5 ) - putProps->md5 = s3CalcMD5( cache_fd, 0, _fileSize ); + putProps->md5 = s3CalcMD5( cache_fd, 0, _fileSize, get_resource_name(_prop_map) ); if ( putProps && server_encrypt ) putProps->useServerSideEncryption = true; putProps->expires = -1; @@ -1447,10 +1493,11 @@ irods::error s3PutCopyFile( do { bzero (&data, sizeof (data)); + data.prop_map_ptr = &_prop_map; data.fd = cache_fd; data.contentLength = data.originalContentLength = _fileSize; data.pCtx = &bucketContext; - + unsigned long long usStart = usNow(); std::string&& hostname = s3GetHostname(_prop_map); bucketContext.hostName = hostname.c_str(); // Safe to do, this is a local copy of the data structure @@ -1462,7 +1509,7 @@ irods::error s3PutCopyFile( } while ( (data.status != S3StatusOK) && S3_status_is_retryable(data.status) && (++retry_cnt < retry_count_limit) ); if (data.status != S3StatusOK) { std::stringstream msg; - msg << __FUNCTION__ << " - Error putting the S3 object: \"" << _s3ObjName << "\""; + msg << "[resource_name=" << resource_name << "] " << __FUNCTION__ << " - Error putting the S3 object: \"" << _s3ObjName << "\""; if ( data.status >= 0 ) { msg << " - \"" << S3_get_status_name((S3Status)data.status) << "\""; } @@ -1495,6 +1542,7 @@ irods::error s3PutCopyFile( int partContentLength = 0; bzero (&data, sizeof (data)); + data.prop_map_ptr = &_prop_map; data.fd = cache_fd; data.contentLength = data.originalContentLength = _fileSize; @@ -1506,9 +1554,9 @@ irods::error s3PutCopyFile( if (putProps->md5) free( (char*)putProps->md5 ); free( putProps ); } - const char *msg = "Out of memory error in S3 multipart ETags allocation."; - rodsLog( LOG_ERROR, msg ); - result = ERROR( SYS_MALLOC_ERR, msg ); + std::string msg = boost::str(boost::format("[resource_name=%s] Out of memory error in S3 multipart ETags allocation.") % resource_name.c_str()); + rodsLog( LOG_ERROR, msg.c_str() ); + result = ERROR( SYS_MALLOC_ERR, msg.c_str() ); return result; } g_mpuData = (multipart_data_t*)calloc(totalSeq, sizeof(multipart_data_t)); if (!g_mpuData) { @@ -1519,9 +1567,9 @@ irods::error s3PutCopyFile( free( putProps ); } free(manager.etags); - const char *msg = "Out of memory error in S3 multipart g_mpuData allocation."; - rodsLog( LOG_ERROR, msg ); - result = ERROR( SYS_MALLOC_ERR, msg ); + std::string msg = boost::str(boost::format("[resource_name=%s] Out of memory error in S3 multipart g_mpuData allocation.") % resource_name.c_str()); + rodsLog( LOG_ERROR, msg.c_str() ); + result = ERROR( SYS_MALLOC_ERR, msg.c_str() ); return result; } // Maximum XML completion length with extra space for the tag @@ -1534,9 +1582,9 @@ irods::error s3PutCopyFile( } free(g_mpuData); free(manager.etags); - const char *msg = "Out of memory error in S3 multipart XML allocation."; - rodsLog( LOG_ERROR, msg ); - result = ERROR( SYS_MALLOC_ERR, msg ); + std::string msg = boost::str(boost::format("[resource_name=%s] Out of memory error in S3
multiparts XML allocation.") % resource_name.c_str()); + rodsLog( LOG_ERROR, msg.c_str() ); + result = ERROR( SYS_MALLOC_ERR, msg.c_str() ); return result; } @@ -1557,7 +1605,7 @@ irods::error s3PutCopyFile( free( putProps ); } msg.str( std::string() ); // Clear - msg << __FUNCTION__ << " - Error initiating multipart upload of the S3 object: \"" << _s3ObjName << "\""; + msg << "[resource_name=" << resource_name << "] " << __FUNCTION__ << " - Error initiating multipart upload of the S3 object: \"" << _s3ObjName << "\""; if(manager.status >= 0) { msg << " - \"" << S3_get_status_name(manager.status) << "\""; } @@ -1569,8 +1617,8 @@ irods::error s3PutCopyFile( // Following used by S3_COPYOBJECT only S3BucketContext srcBucketContext; if (_mode == S3_COPYOBJECT) { - ret = parseS3Path(_filename, srcBucket, srcKey); - if(!(result = ASSERT_PASS(ret, "Failed parsing the S3 bucket and key from the physical path: \"%s\".", + ret = parseS3Path(_filename, srcBucket, srcKey, _prop_map); + if(!(result = ASSERT_PASS(ret, "[resource_name=%s] Failed parsing the S3 bucket and key from the physical path: \"%s\".", resource_name.c_str(), _filename.c_str())).ok()) { return result; // Abort early } @@ -1665,6 +1713,7 @@ irods::error s3PutCopyFile( } while ((manager.status != S3StatusOK) && S3_status_is_retryable(manager.status) && ( ++retry_cnt < retry_count_limit)); if (manager.status != S3StatusOK) { msg.str( std::string() ); // Clear + msg << "[resource_name=" << resource_name << "] "; msg << __FUNCTION__ << " - Error putting the S3 object: \"" << _s3ObjName << "\""; if(manager.status >= 0) { msg << " - \"" << S3_get_status_name( manager.status ) << "\""; @@ -1674,8 +1723,8 @@ irods::error s3PutCopyFile( } if ( !g_mpuResult.ok() && manager.upload_id ) { // Someone aborted after we started, delete the partial object on S3 - rodsLog(LOG_ERROR, "Cancelling multipart upload"); - mpuCancel( &bucketContext, key.c_str(), manager.upload_id ); + rodsLog(LOG_ERROR, "[resource_name=%s] Cancelling multipart upload", resource_name.c_str()); + mpuCancel( &bucketContext, key.c_str(), manager.upload_id, _prop_map ); // Return the error result = g_mpuResult; } @@ -1725,10 +1774,12 @@ irods::error s3CopyFile( size_t retry_wait = S3_DEFAULT_RETRY_WAIT_SEC; _src_ctx.prop_map().get(s3_wait_time_sec_size_t, retry_wait); + std::string resource_name = get_resource_name(_src_ctx.prop_map()); + // Check the size, and if too large punt to the multipart copy/put routine struct stat statbuf; ret = irods_s3_archive::s3FileStatPlugin( _src_ctx, &statbuf ); - if (( result = ASSERT_PASS(ret, "Unable to get original object size for source file name: \"%s\".", + if (( result = ASSERT_PASS(ret, "[resource_name=%s] Unable to get original object size for source file name: \"%s\".", resource_name.c_str(), _src_file.c_str())).ok()) { if ( statbuf.st_size > s3GetMPUChunksize(_src_ctx.prop_map()) ) { // Early return for cleaner code... 
@@ -1736,16 +1787,17 @@ irods::error s3CopyFile( } // Parse the src file - ret = parseS3Path(_src_file, src_bucket, src_key); - if((result = ASSERT_PASS(ret, "Failed to parse the source file name: \"%s\".", + ret = parseS3Path(_src_file, src_bucket, src_key, _src_ctx.prop_map()); + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to parse the source file name: \"%s\".", resource_name.c_str(), _src_file.c_str())).ok()) { // Parse the dest file - ret = parseS3Path(_dest_file, dest_bucket, dest_key); - if((result = ASSERT_PASS(ret, "Failed to parse the destination file name: \"%s\".", + ret = parseS3Path(_dest_file, dest_bucket, dest_key, _src_ctx.prop_map()); + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to parse the destination file name: \"%s\".", resource_name.c_str(), _dest_file.c_str())).ok()) { callback_data_t data; + data.prop_map_ptr = &_src_ctx.prop_map(); S3BucketContext bucketContext; int64_t lastModified; char eTag[256]; @@ -1771,6 +1823,7 @@ irods::error s3CopyFile( size_t retry_cnt = 0; do { bzero (&data, sizeof (data)); + data.prop_map_ptr = &_src_ctx.prop_map(); std::string&& hostname = s3GetHostname(_src_ctx.prop_map()); bucketContext.hostName = hostname.c_str(); // Safe to do, this is a local copy of the data structure data.pCtx = &bucketContext; @@ -1781,6 +1834,7 @@ irods::error s3CopyFile( if (data.status != S3StatusOK) { std::stringstream msg; msg << __FUNCTION__; + msg << "[resource_name=" << resource_name << "] "; msg << " - Error copying the S3 object: \"" << _src_file << "\" to S3 object: \"" << _dest_file << "\""; if (data.status >= 0) { msg << " - \"" << S3_get_status_name((S3Status)data.status) << "\""; @@ -1803,11 +1857,13 @@ irods::error s3GetAuthCredentials( std::string key_id; std::string access_key; + std::string resource_name = get_resource_name(_prop_map); + ret = _prop_map.get(s3_key_id, key_id); - if((result = ASSERT_PASS(ret, "Failed to get the S3 access key id property.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get the S3 access key id property.", resource_name.c_str())).ok()) { ret = _prop_map.get(s3_access_key, access_key); - if((result = ASSERT_PASS(ret, "Failed to get the S3 secret access key property.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get the S3 secret access key property.", resource_name.c_str())).ok()) { _rtn_key_id = key_id; _rtn_access_key = access_key; @@ -1826,10 +1882,12 @@ irods::error s3CheckParams(irods::plugin_context& _ctx ) { irods::error result = SUCCESS(); irods::error ret; + std::string resource_name = get_resource_name(_ctx.prop_map()); + // =-=-=-=-=-=-=- // verify that the resc context is valid ret = _ctx.valid(); - result = ASSERT_PASS(ret, "Resource context is invalid"); + result = ASSERT_PASS(ret, "[resource_name=%s] Resource context is invalid", resource_name.c_str()); return result; @@ -1841,11 +1899,13 @@ irods:: error s3StartOperation(irods::plugin_property_map& _prop_map) irods::error result = SUCCESS(); irods::error ret; + std::string resource_name = get_resource_name(_prop_map); + ret = s3Init( _prop_map ); - if((result = ASSERT_PASS(ret, "Failed to initialize the S3 library.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to initialize the S3 library.", resource_name.c_str())).ok()) { // Retrieve the auth info and set the appropriate fields in the property map ret = s3ReadAuthInfo(_prop_map); - result = ASSERT_PASS(ret, "Failed to read S3 auth info."); + result = ASSERT_PASS(ret, "[resource_name=%s] Failed 
to read S3 auth info.", resource_name.c_str()); } bool attached_mode = true, cacheless_mode = false; @@ -1863,14 +1923,14 @@ irods:: error s3StartOperation(irods::plugin_property_map& _prop_map) // Load SSE environment if(!S3fsCurl::LoadEnvSse()) { - std::string error_str = "something wrong about SSE environment."; + std::string error_str = boost::str(boost::format("[resource_name=%s] something wrong about SSE environment.") % resource_name.c_str()); rodsLog(LOG_ERROR, error_str.c_str()); return ERROR(S3_INIT_ERROR, error_str.c_str()); } // ssl init else if(!s3fs_init_global_ssl()){ - std::string error_str = "could not initialize for ssl libraries."; + std::string error_str = boost::str(boost::format("[resource_name=%s] could not initialize for ssl libraries.") % resource_name.c_str()); rodsLog(LOG_ERROR, error_str.c_str()); return ERROR(S3_INIT_ERROR, error_str.c_str()); } @@ -1906,8 +1966,8 @@ bool determine_unlink_for_repl_policy( if(std::string::npos == pos) { THROW( SYS_INVALID_INPUT_PARAM, - boost::format("[%s] is not a logical path") % - _logical_path); + boost::str(boost::format("[%s] is not a logical path") % + _logical_path)); } std::string data_name{_logical_path.substr(pos+1, std::string::npos)}; @@ -1962,15 +2022,17 @@ irods::error s3RedirectCreate( std::string host_name; + std::string resource_name = get_resource_name(_prop_map); + // =-=-=-=-=-=-=- // determine if the resource is down ret = _prop_map.get< int >( irods::RESOURCE_STATUS, resc_status ); - if((result = ASSERT_PASS(ret, "Failed to retrieve status property.")).ok() ) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to retrieve status property.", resource_name.c_str())).ok() ) { // =-=-=-=-=-=-=- // get the resource host for comparison to curr host ret = _prop_map.get< std::string >( irods::RESOURCE_LOCATION, host_name ); - if((result = ASSERT_PASS(ret, "Failed to get location property.")).ok() ) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get location property.", resource_name.c_str())).ok() ) { // =-=-=-=-=-=-=- // if the status is down, vote no. 
@@ -2018,7 +2080,9 @@ irods::error register_archive_object( irods::RESOURCE_NAME, resc_name ); if( !ret.ok() ) { - return PASS( ret ); + std::stringstream msg; + msg << "[resource_name=" << get_resource_name(_prop_map) << "] " << ret.result(); + return PASSMSG(msg.str(), ret); } // scan for a repl with this resource in the @@ -2059,10 +2123,10 @@ irods::error register_archive_object( if(phy_path.empty()) { return ERROR( INVALID_OBJECT_NAME, - boost::format("no matching phy path for [%s], [%s], [%s]") % + boost::str(boost::format("[resource_name=%s] no matching phy path for [%s], [%s], [%s]") % resc_name.c_str() % _file_obj->logical_path() % vault_path % - resc_name); + resc_name)); } // =-=-=-=-=-=-=- @@ -2070,13 +2134,17 @@ irods::error register_archive_object( rodsLong_t resc_id = 0; ret = _prop_map.get( irods::RESOURCE_ID, resc_id ); if( !ret.ok() ) { - return PASS( ret ); + std::stringstream msg; + msg << "[resource_name=" << resc_name << "] " << ret.result(); + return PASSMSG(msg.str(), ret); } std::string resc_hier; ret = resc_mgr.leaf_id_to_hier(resc_id, resc_hier); if( !ret.ok() ) { - return PASS( ret ); + std::stringstream msg; + msg << "[resource_name=" << resc_name << "] " << ret.result(); + return PASSMSG(msg.str(), ret); } // =-=-=-=-=-=-=- @@ -2147,7 +2215,8 @@ irods::error register_archive_object( reg_inp.destDataObjInfo = &dst_data_obj; int reg_status = rsRegReplica( _comm, &reg_inp ); if( reg_status < 0 ) { - return ERROR( reg_status, "failed to register data object" ); + std::string error_str = boost::str(boost::format("[resource_name=%s] failed to register data object") % resc_name.c_str()); + return ERROR( reg_status, error_str.c_str() ); } // =-=-=-=-=-=-=- @@ -2179,6 +2248,10 @@ irods::error s3RedirectOpen( const std::string& _resc_name, const std::string& _curr_host, float& _out_vote ) { + + + std::string resource_name = get_resource_name(_prop_map); + irods::error result = SUCCESS(); irods::error ret; int resc_status = 0; @@ -2186,11 +2259,11 @@ irods::error s3RedirectOpen( // =-=-=-=-=-=-=- // determine if the resource is down ret = _prop_map.get< int >( irods::RESOURCE_STATUS, resc_status ); - if((result = ASSERT_PASS(ret, "Failed to get status property for resource.")).ok() ) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get status property for resource.", resource_name.c_str())).ok() ) { // =-=-=-=-=-=-=- // get the resource host for comparison to curr host ret = _prop_map.get< std::string >( irods::RESOURCE_LOCATION, host_name ); - if((result = ASSERT_PASS(ret, "Failed to get the location property.")).ok() ) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get the location property.", resource_name.c_str())).ok() ) { // =-=-=-=-=-=-=- // if the status is down, vote no.
@@ -2206,7 +2279,9 @@ irods::error s3RedirectOpen( _file_obj); if(!get_ret.ok()) { irods::log(get_ret); - return PASS(get_ret); + std::stringstream msg; + msg << "[resource_name=" << resource_name << "] " << get_ret.result(); + return PASSMSG(msg.str(), get_ret); } _out_vote = 1.0; diff --git a/s3/libirods_s3.hpp b/s3/libirods_s3.hpp index ae9a9e2b..945f397f 100644 --- a/s3/libirods_s3.hpp +++ b/s3/libirods_s3.hpp @@ -79,6 +79,7 @@ typedef struct callback_data s3Stat_t s3Stat; /* should be a pointer if keyCount > 1 */ S3BucketContext *pCtx; /* To enable more detailed error messages */ + irods::plugin_property_map *prop_map_ptr; } callback_data_t; typedef struct upload_manager @@ -117,6 +118,7 @@ typedef struct multirange_data S3Status status; S3BucketContext *pCtx; /* To enable more detailed error messages */ + irods::plugin_property_map *prop_map_ptr; } multirange_data_t; // Sleep for *at least* the given time, plus some up to 1s additional @@ -139,7 +141,8 @@ S3Status responsePropertiesCallback( irods::error parseS3Path ( const std::string& _s3ObjName, std::string& _bucket, - std::string& _key); + std::string& _key, + irods::plugin_property_map& _prop_map ); irods::error s3Init ( irods::plugin_property_map& _prop_map ); irods::error s3InitPerOperation ( irods::plugin_property_map& _prop_map ); @@ -183,6 +186,8 @@ irods::error s3CheckParams(irods::plugin_context& _ctx ); void get_modes_from_properties(irods::plugin_property_map& _prop_map, bool& attached_mode, bool& cacheless_mode); +std::string get_resource_name(irods::plugin_property_map& _prop_map); + bool determine_unlink_for_repl_policy( rsComm_t* _comm, const std::string& _logical_path, diff --git a/s3/s3_archive_operations.cpp b/s3/s3_archive_operations.cpp index 76fd4d05..3de4340d 100644 --- a/s3/s3_archive_operations.cpp +++ b/s3/s3_archive_operations.cpp @@ -33,35 +33,35 @@ namespace irods_s3_archive { // interface for file registration irods::error s3RegisteredPlugin( irods::plugin_context& _ctx) { - return ERROR( SYS_NOT_SUPPORTED, __FUNCTION__ ); + return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__)); } // =-=-=-=-=-=-=- // interface for file unregistration irods::error s3UnregisteredPlugin( irods::plugin_context& _ctx) { - return ERROR( SYS_NOT_SUPPORTED, __FUNCTION__ ); + return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__)); } // =-=-=-=-=-=-=- // interface for file modification irods::error s3ModifiedPlugin( irods::plugin_context& _ctx) { - return ERROR( SYS_NOT_SUPPORTED, __FUNCTION__ ); + return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__)); } // =-=-=-=-=-=-=- // interface for POSIX create irods::error s3FileCreatePlugin( irods::plugin_context& _ctx) { - return ERROR( SYS_NOT_SUPPORTED, __FUNCTION__ ); + return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__)); } // =-=-=-=-=-=-=- // interface for POSIX Open irods::error s3FileOpenPlugin( irods::plugin_context& _ctx) { - return ERROR( SYS_NOT_SUPPORTED, __FUNCTION__ ); + return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__)); } // =-=-=-=-=-=-=- @@ -70,8 +70,7 @@ namespace irods_s3_archive { void* _buf, int _len ) { - return ERROR( SYS_NOT_SUPPORTED, __FUNCTION__ ); - 
+ return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__)); } // =-=-=-=-=-=-=- @@ -79,16 +78,15 @@ namespace irods_s3_archive { irods::error s3FileWritePlugin( irods::plugin_context& _ctx, void* _buf, int _len ) { - return ERROR( SYS_NOT_SUPPORTED, __FUNCTION__ ); + return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__)); } // =-=-=-=-=-=-=- // interface for POSIX Close irods::error s3FileClosePlugin( irods::plugin_context& _ctx ) { - return ERROR( SYS_NOT_SUPPORTED, __FUNCTION__ ); - + return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__)); } @@ -100,9 +98,7 @@ namespace irods_s3_archive { // check incoming parameters irods::error ret = s3CheckParams( _ctx ); if(!ret.ok()) { - std::stringstream msg; - msg << __FUNCTION__ << " - Invalid parameters or physical path."; - return PASSMSG(msg.str(), ret); + return PASS(ret); } size_t retry_count_limit = S3_DEFAULT_RETRY_COUNT; @@ -129,7 +125,9 @@ namespace irods_s3_archive { irods::RESOURCE_PATH, vault_path); if(!ret.ok()) { - return PASS(ret); + std::stringstream msg; + msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] " << ret.result(); + return PASSMSG(msg.str(), ret); } if(!determine_unlink_for_repl_policy( @@ -148,7 +146,7 @@ namespace irods_s3_archive { std::string bucket; std::string key; - ret = parseS3Path(file_obj->physical_path(), bucket, key); + ret = parseS3Path(file_obj->physical_path(), bucket, key, _ctx.prop_map()); if(!ret.ok()) { return PASS(ret); } @@ -197,7 +195,7 @@ namespace irods_s3_archive { if(data.status != S3StatusOK) { std::stringstream msg; - msg << __FUNCTION__; + msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] "; msg << " - Error unlinking the S3 object: \""; msg << file_obj->physical_path(); msg << "\""; @@ -230,7 +228,7 @@ namespace irods_s3_archive { // =-=-=-=-=-=-=- // check incoming parameters irods::error ret = s3CheckParams( _ctx ); - if((result = ASSERT_PASS(ret, "Invalid parameters or physical path.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Invalid parameters or physical path.", get_resource_name(_ctx.prop_map()).c_str())).ok()) { // =-=-=-=-=-=-=- // get ref to fco @@ -249,15 +247,15 @@ namespace irods_s3_archive { std::string key_id; std::string access_key; - ret = parseS3Path(_object->physical_path(), bucket, key); - if((result = ASSERT_PASS(ret, "Failed parsing the S3 bucket and key from the physical path: \"%s\".", + ret = parseS3Path(_object->physical_path(), bucket, key, _ctx.prop_map()); + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed parsing the S3 bucket and key from the physical path: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(), _object->physical_path().c_str())).ok()) { ret = s3InitPerOperation( _ctx.prop_map() ); - if((result = ASSERT_PASS(ret, "Failed to initialize the S3 system.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to initialize the S3 system.", get_resource_name(_ctx.prop_map()).c_str())).ok()) { ret = s3GetAuthCredentials(_ctx.prop_map(), key_id, access_key); - if((result = ASSERT_PASS(ret, "Failed to get the S3 credentials properties.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get the S3 credentials properties.", get_resource_name(_ctx.prop_map()).c_str())).ok()) { callback_data_t data; S3BucketContext 
bucketContext; @@ -283,7 +281,8 @@ namespace irods_s3_archive { if (data.status != S3StatusOK) { std::stringstream msg; - msg << __FUNCTION__ << " - Error stat'ing the S3 object: \"" << _object->physical_path() << "\""; + msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] "; + msg << " - Error stat'ing the S3 object: \"" << _object->physical_path() << "\""; if (data.status >= 0) { msg << " - \"" << S3_get_status_name((S3Status)data.status) << "\""; } @@ -304,7 +303,10 @@ namespace irods_s3_archive { } } if( !result.ok() ) { - irods::log( result ); + std::stringstream msg; + msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] " + << result.result(); + rodsLog(LOG_ERROR, msg.str().c_str()); } return result; } @@ -313,7 +315,7 @@ namespace irods_s3_archive { // interface for POSIX Fstat irods::error s3FileFstatPlugin( irods::plugin_context& _ctx, struct stat* _statbuf ) { - return ERROR( SYS_NOT_SUPPORTED, "s3FileFstatPlugin" ); + return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) ); } // s3FileFstatPlugin @@ -323,7 +325,7 @@ namespace irods_s3_archive { size_t _offset, int _whence ) { - return ERROR( SYS_NOT_SUPPORTED, "s3FileLseekPlugin" ); + return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) ); } // wosFileLseekPlugin @@ -331,7 +333,7 @@ namespace irods_s3_archive { // interface for POSIX mkdir irods::error s3FileMkdirPlugin( irods::plugin_context& _ctx ) { - return ERROR( SYS_NOT_SUPPORTED, "s3FileMkdirPlugin" ); + return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) ); } // s3FileMkdirPlugin @@ -339,21 +341,21 @@ namespace irods_s3_archive { // interface for POSIX mkdir irods::error s3FileRmdirPlugin( irods::plugin_context& _ctx ) { - return ERROR( SYS_NOT_SUPPORTED, "s3FileRmdirPlugin" ); + return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) ); } // s3FileRmdirPlugin // =-=-=-=-=-=-=- // interface for POSIX opendir irods::error s3FileOpendirPlugin( irods::plugin_context& _ctx ) { - return ERROR( SYS_NOT_SUPPORTED, "s3FileOpendirPlugin" ); + return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) ); } // s3FileOpendirPlugin // =-=-=-=-=-=-=- // interface for POSIX closedir irods::error s3FileClosedirPlugin( irods::plugin_context& _ctx) { - return ERROR( SYS_NOT_SUPPORTED, "s3FileClosedirPlugin" ); + return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) ); } // s3FileClosedirPlugin // =-=-=-=-=-=-=- @@ -361,7 +363,7 @@ namespace irods_s3_archive { irods::error s3FileReaddirPlugin( irods::plugin_context& _ctx, struct rodsDirent** _dirent_ptr ) { - return ERROR( SYS_NOT_SUPPORTED, "s3FileReaddirPlugin" ); + return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) ); } // s3FileReaddirPlugin // =-=-=-=-=-=-=- @@ -378,7 +380,11 @@ namespace irods_s3_archive { std::string archive_naming_policy = CONSISTENT_NAMING; // default ret = _ctx.prop_map().get(ARCHIVE_NAMING_POLICY_KW, archive_naming_policy); // get plugin context property if(!ret.ok()) { - irods::log(PASS(ret)); + + std::stringstream 
msg; + msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] " + << ret.result(); + rodsLog(LOG_ERROR, msg.str().c_str()); } boost::to_lower(archive_naming_policy); @@ -391,16 +397,16 @@ namespace irods_s3_archive { } ret = s3GetAuthCredentials(_ctx.prop_map(), key_id, access_key); - if((result = ASSERT_PASS(ret, "Failed to get S3 credential properties.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get S3 credential properties.", get_resource_name(_ctx.prop_map()).c_str())).ok()) { // copy the file to the new location ret = s3CopyFile(_ctx, object->physical_path(), _new_file_name, key_id, access_key, s3GetProto(_ctx.prop_map()), s3GetSTSDate(_ctx.prop_map())); - if((result = ASSERT_PASS(ret, "Failed to copy file from: \"%s\" to \"%s\".", + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to copy file from: \"%s\" to \"%s\".", get_resource_name(_ctx.prop_map()).c_str(), object->physical_path().c_str(), _new_file_name)).ok()) { // delete the old file ret = s3FileUnlinkPlugin(_ctx); - result = ASSERT_PASS(ret, "Failed to unlink old S3 file: \"%s\".", + result = ASSERT_PASS(ret, "[resource_name=%s] Failed to unlink old S3 file: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(), object->physical_path().c_str()); } } @@ -416,7 +422,7 @@ namespace irods_s3_archive { irods::error s3FileTruncatePlugin( irods::plugin_context& _ctx ) { - return ERROR( SYS_NOT_SUPPORTED, "s3FileTruncatePlugin" ); + return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__)); } // s3FileTruncatePlugin @@ -424,14 +430,14 @@ namespace irods_s3_archive { irods::error s3FileGetFsFreeSpacePlugin( irods::plugin_context& _ctx ) { - return ERROR(SYS_NOT_SUPPORTED, "s3FileGetFsFreeSpacePlugin"); + return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__)); } // s3FileGetFsFreeSpacePlugin irods::error s3FileCopyPlugin( int mode, const char *srcFileName, const char *destFileName) { - return ERROR( SYS_NOT_SUPPORTED, "s3FileCopyPlugin" ); + return ERROR(SYS_NOT_SUPPORTED, __FUNCTION__); } @@ -448,7 +454,7 @@ namespace irods_s3_archive { // =-=-=-=-=-=-=- // check incoming parameters irods::error ret = s3CheckParams( _ctx ); - if((result = ASSERT_PASS(ret, "Invalid parameters or physical path.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Invalid parameters or physical path.", get_resource_name(_ctx.prop_map()).c_str())).ok()) { struct stat statbuf; std::string key_id; @@ -457,21 +463,21 @@ namespace irods_s3_archive { irods::file_object_ptr object = boost::dynamic_pointer_cast(_ctx.fco()); ret = s3FileStatPlugin(_ctx, &statbuf); - if((result = ASSERT_PASS(ret, "Failed stating the file: \"%s\".", + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed stating the file: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(), object->physical_path().c_str())).ok()) { - if((result = ASSERT_ERROR((statbuf.st_mode & S_IFREG) != 0, S3_FILE_STAT_ERR, "Error stating the file: \"%s\".", + if((result = ASSERT_ERROR((statbuf.st_mode & S_IFREG) != 0, S3_FILE_STAT_ERR, "[resource_name=%s] Error stating the file: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(), object->physical_path().c_str())).ok()) { if((result = ASSERT_ERROR(object->size() <= 0 || object->size() == static_cast(statbuf.st_size), SYS_COPY_LEN_ERR, - "Error for file: \"%s\" inp data size: %ld does not match stat size: %ld.", + "[resource_name=%s] Error 
for file: \"%s\" inp data size: %ld does not match stat size: %ld.", get_resource_name(_ctx.prop_map()).c_str(), object->physical_path().c_str(), object->size(), statbuf.st_size)).ok()) { ret = s3GetAuthCredentials(_ctx.prop_map(), key_id, access_key); - if((result = ASSERT_PASS(ret, "Failed to get S3 credential properties.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get S3 credential properties.", get_resource_name(_ctx.prop_map()).c_str())).ok()) { ret = s3GetFile( _cache_file_name, object->physical_path(), statbuf.st_size, key_id, access_key, _ctx.prop_map()); - result = ASSERT_PASS(ret, "Failed to copy the S3 object: \"%s\" to the cache: \"%s\".", + result = ASSERT_PASS(ret, "[resource_name=%s] Failed to copy the S3 object: \"%s\" to the cache: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(), object->physical_path().c_str(), _cache_file_name); } } @@ -493,7 +499,7 @@ namespace irods_s3_archive { // =-=-=-=-=-=-=- // check incoming parameters irods::error ret = s3CheckParams( _ctx ); - if((result = ASSERT_PASS(ret, "Invalid parameters or physical path.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Invalid parameters or physical path.", get_resource_name(_ctx.prop_map()).c_str())).ok()) { struct stat statbuf; int status; @@ -503,20 +509,23 @@ namespace irods_s3_archive { irods::file_object_ptr object = boost::dynamic_pointer_cast(_ctx.fco()); status = stat(_cache_file_name, &statbuf); int err_status = UNIX_FILE_STAT_ERR - errno; - if((result = ASSERT_ERROR(status >= 0, err_status, "Failed to stat cache file: \"%s\".", + if((result = ASSERT_ERROR(status >= 0, err_status, "[resource_name=%s] Failed to stat cache file: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(), _cache_file_name)).ok()) { - if((result = ASSERT_ERROR((statbuf.st_mode & S_IFREG) != 0, UNIX_FILE_STAT_ERR, "Cache file: \"%s\" is not a file.", + if((result = ASSERT_ERROR((statbuf.st_mode & S_IFREG) != 0, UNIX_FILE_STAT_ERR, "[resource_name=%s] Cache file: \"%s\" is not a file.", get_resource_name(_ctx.prop_map()).c_str(), _cache_file_name)).ok()) { ret = s3GetAuthCredentials(_ctx.prop_map(), key_id, access_key); - if((result = ASSERT_PASS(ret, "Failed to get S3 credential properties.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get S3 credential properties.", get_resource_name(_ctx.prop_map()).c_str())).ok()) { // retrieve archive naming policy from resource plugin context std::string archive_naming_policy = CONSISTENT_NAMING; // default ret = _ctx.prop_map().get(ARCHIVE_NAMING_POLICY_KW, archive_naming_policy); // get plugin context property if(!ret.ok()) { - irods::log(ret); + std::stringstream msg; + msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] " + << ret.result(); + rodsLog(LOG_ERROR, msg.str().c_str()); } boost::to_lower(archive_naming_policy); @@ -542,7 +551,7 @@ namespace irods_s3_archive { } ret = s3PutCopyFile(S3_PUTFILE, _cache_file_name, object->physical_path(), statbuf.st_size, key_id, access_key, _ctx.prop_map()); - result = ASSERT_PASS(ret, "Failed to copy the cache file: \"%s\" to the S3 object: \"%s\".", + result = ASSERT_PASS(ret, "[resource_name=%s] Failed to copy the cache file: \"%s\" to the S3 object: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(), _cache_file_name, object->physical_path().c_str()); } @@ -550,7 +559,10 @@ namespace irods_s3_archive { } } if( !result.ok() ) { - irods::log( result ); + std::stringstream msg; + msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << 
"] " + << result.result(); + rodsLog(LOG_ERROR, msg.str().c_str()); } return result; } // s3SyncToArchPlugin @@ -572,12 +584,12 @@ namespace irods_s3_archive { // =-=-=-=-=-=-=- // check the context validity ret = _ctx.valid< irods::file_object >(); - if((result = ASSERT_PASS(ret, "Invalid resource context.")).ok()) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Invalid resource context.", get_resource_name(_ctx.prop_map()).c_str())).ok()) { // =-=-=-=-=-=-=- // check incoming parameters if((result = ASSERT_ERROR(_opr && _curr_host && _out_parser && _out_vote, SYS_INVALID_INPUT_PARAM, - "One or more NULL pointer arguments.")).ok()) { + "[resource_name=%s] One or more NULL pointer arguments.", get_resource_name(_ctx.prop_map()).c_str())).ok()) { std::string resc_name; @@ -588,7 +600,7 @@ namespace irods_s3_archive { // =-=-=-=-=-=-=- // get the name of this resource ret = _ctx.prop_map().get< std::string >( irods::RESOURCE_NAME, resc_name ); - if((result = ASSERT_PASS(ret, "Failed to get resource name property.")).ok() ) { + if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get resource name property.", get_resource_name(_ctx.prop_map()).c_str())).ok() ) { // =-=-=-=-=-=-=- // add ourselves to the hierarchy parser by default @@ -612,7 +624,7 @@ namespace irods_s3_archive { result = s3RedirectCreate( _ctx.prop_map(), *file_obj, resc_name, (*_curr_host), (*_out_vote) ); } else { - result = ASSERT_ERROR(false, SYS_INVALID_INPUT_PARAM, "Unknown redirect operation: \"%s\".", + result = ASSERT_ERROR(false, SYS_INVALID_INPUT_PARAM, "[resource_name=%s] Unknown redirect operation: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(), _opr->c_str()); } } diff --git a/s3/s3_cacheless_operations.cpp b/s3/s3_cacheless_operations.cpp index f8b16833..3ba25b4c 100644 --- a/s3/s3_cacheless_operations.cpp +++ b/s3/s3_cacheless_operations.cpp @@ -88,7 +88,10 @@ namespace irods_s3_cacheless { S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); } - return ret; + std::string error_str = boost::str(boost::format("[resource_name=%s] failed to read S3_ACCESS_KEY_ID.") + % get_resource_name(_prop_map).c_str()); + rodsLog(LOG_ERROR, error_str.c_str()); + return ERROR(S3_INIT_ERROR, error_str.c_str()); } ret = _prop_map.get< std::string >(s3_access_key, access_key); @@ -98,7 +101,10 @@ namespace irods_s3_cacheless { S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); } - return ret; + std::string error_str = boost::str(boost::format("[resource_name=%s] failed to read S3_SECRET_ACCESS_KEY.") + % get_resource_name(_prop_map).c_str()); + rodsLog(LOG_ERROR, error_str.c_str()); + return ERROR(S3_INIT_ERROR, error_str.c_str()); } // save keys @@ -109,6 +115,8 @@ namespace irods_s3_cacheless { s3fs_destroy_global_ssl(); } + std::string error_str = boost::str(boost::format("[resource_name=%s] failed to set internal data for access key/secret key.") + % get_resource_name(_prop_map).c_str()); rodsLog(LOG_ERROR, error_str.c_str()); return ERROR(S3_INIT_ERROR, error_str.c_str()); } @@ -121,6 +129,9 @@ namespace irods_s3_cacheless { S3fsCurl::DestroyS3fsCurl(); s3fs_destroy_global_ssl(); } + + std::string error_str = boost::str(boost::format("[resource_name=%s] S3_PROTO is not defined for resource.") + % get_resource_name(_prop_map).c_str()); rodsLog(LOG_ERROR, error_str.c_str()); return ERROR(S3_INIT_ERROR, error_str.c_str()); } @@ -226,15 +237,12 @@ namespace irods_s3_cacheless { // check incoming parameters irods::error ret = s3CheckParams( _ctx ); if(!ret.ok()) { - std::stringstream msg; - msg << 
__FUNCTION__ << " - Invalid parameters or physical path."; - return PASSMSG(msg.str(), ret); + return PASS(ret); } ret = set_s3_configuration_from_context(_ctx.prop_map()); if (!ret.ok()) { - return ERROR(S3_INIT_ERROR, (boost::format("%s:%d (%s) init cacheless mode returned error %s") - % __FILE__ % __LINE__ % __FUNCTION__ % ret.result().c_str())); + return PASS(ret); } irods::file_object_ptr fco = boost::dynamic_pointer_cast< irods::file_object >( _ctx.fco() ); @@ -242,7 +250,7 @@ namespace irods_s3_cacheless { std::string bucket; std::string key; - ret = parseS3Path(path, bucket, key); + ret = parseS3Path(path, bucket, key, _ctx.prop_map()); if(!ret.ok()) { return PASS(ret); } @@ -254,8 +262,8 @@ namespace irods_s3_cacheless { result = create_file_object(key); StatCache::getStatCacheData()->DelStat(key.c_str()); if(result != 0){ - return ERROR(S3_PUT_ERROR, (boost::format("%s:%d (%s) Code is %d") - % __FILE__ % __LINE__ % __FUNCTION__ % result).str()); + return ERROR(S3_PUT_ERROR, boost::str(boost::format("[resource_name=%s] Code is %d") + % get_resource_name(_ctx.prop_map()).c_str() % result)); } @@ -263,9 +271,9 @@ namespace irods_s3_cacheless { headers_t meta; get_object_attribute(key.c_str(), NULL, &meta, true, NULL, true); // no truncate cache if(NULL == (ent = FdManager::get()->Open(key.c_str(), &meta, 0, -1, false, true))){ - StatCache::getStatCacheData()->DelStat(key.c_str()); - return ERROR(S3_PUT_ERROR, (boost::format("%s:%d (%s) Code is EIO") - % __FILE__ % __LINE__ % __FUNCTION__)); + StatCache::getStatCacheData()->DelStat(key.c_str()); + return ERROR(S3_PUT_ERROR, boost::str(boost::format("[resource_name=%s] code is EIO") + % get_resource_name(_ctx.prop_map()).c_str())); } // create an iRODS file descriptor @@ -285,17 +293,14 @@ namespace irods_s3_cacheless { irods::error ret = set_s3_configuration_from_context(_ctx.prop_map()); if (!ret.ok()) { - return ERROR(S3_INIT_ERROR, (boost::format("%s:%d (%s) init cacheless mode returned error %s") - % __FILE__ % __LINE__ % __FUNCTION__ % ret.result().c_str())); + return PASS(ret); } // =-=-=-=-=-=-=- // check incoming parameters ret = s3CheckParams( _ctx ); if(!ret.ok()) { - std::stringstream msg; - msg << __FUNCTION__ << " - Invalid parameters or physical path."; - return PASSMSG(msg.str(), ret); + return PASS(ret); } bool needs_flush = false; @@ -305,7 +310,7 @@ namespace irods_s3_cacheless { std::string bucket; std::string key; - ret = parseS3Path(path, bucket, key); + ret = parseS3Path(path, bucket, key, _ctx.prop_map()); if(!ret.ok()) { return PASS(ret); } @@ -326,8 +331,8 @@ namespace irods_s3_cacheless { int returnVal = get_object_attribute(key.c_str(), &st, &meta, true, NULL, true); // no truncate cache if (0 != returnVal) { StatCache::getStatCacheData()->DelStat(key.c_str()); - return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) Failed to perform a stat of %s") - % __FILE__ % __LINE__ % __FUNCTION__ % key.c_str())); + return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] Failed to perform a stat of %s") + % get_resource_name(_ctx.prop_map()).c_str() % key.c_str())); } if((unsigned int)flags & O_TRUNC){ @@ -342,8 +347,8 @@ namespace irods_s3_cacheless { StatCache::getStatCacheData()->DelStat(key.c_str()); // TODO create S3_OPEN_ERROR - return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) Error opening %s.") - % __FILE__ % __LINE__ % __FUNCTION__ % key.c_str())); + return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] Error opening %s.") + % 
get_resource_name(_ctx.prop_map()).c_str() % key.c_str())); } if (needs_flush){ @@ -354,8 +359,8 @@ namespace irods_s3_cacheless { StatCache::getStatCacheData()->DelStat(key.c_str()); // TODO create S3_OPEN_ERROR - return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) Error opening %s.") - % __FILE__ % __LINE__ % __FUNCTION__ % key.c_str())); + return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] Error opening %s.") + % get_resource_name(_ctx.prop_map()).c_str() % key.c_str())); } } @@ -381,15 +386,12 @@ namespace irods_s3_cacheless { // check incoming parameters irods::error ret = s3CheckParams( _ctx ); if(!ret.ok()) { - std::stringstream msg; - msg << __FUNCTION__ << " - Invalid parameters or physical path."; - return PASSMSG(msg.str(), ret); + return PASS(ret); } ret = set_s3_configuration_from_context(_ctx.prop_map()); if (!ret.ok()) { - return ERROR(S3_INIT_ERROR, (boost::format("%s:%d (%s) init cacheless mode returned error %s") - % __FILE__ % __LINE__ % __FUNCTION__ % ret.result().c_str())); + return PASS(ret); } irods::error result = SUCCESS(); @@ -399,7 +401,7 @@ namespace irods_s3_cacheless { std::string bucket; std::string key; - ret = parseS3Path(path, bucket, key); + ret = parseS3Path(path, bucket, key, _ctx.prop_map()); if(!ret.ok()) { return PASS(ret); } @@ -409,8 +411,8 @@ namespace irods_s3_cacheless { int irods_fd = fco->file_descriptor(); int fd; if (!(FileOffsetManager::get()->getFd(irods_fd, fd))) { - return ERROR(S3_PUT_ERROR, (boost::format("%s:%d (%s) Could not look up file descriptor [irods_fd=%d]") - % __FILE__ % __LINE__ % __FUNCTION__ % irods_fd)); + return ERROR(S3_PUT_ERROR, boost::str(boost::format("[resource_name=%s] Could not look up file descriptor [irods_fd=%d]") + % get_resource_name(_ctx.prop_map()).c_str() % irods_fd)); } ssize_t readReturnVal; @@ -418,8 +420,8 @@ namespace irods_s3_cacheless { FdEntity* ent; if(NULL == (ent = FdManager::get()->ExistOpen(key.c_str(), fd))) { S3FS_PRN_ERR("could not find opened fd(%d) for %s", fd, key.c_str()); - return ERROR(S3_GET_ERROR, (boost::format("%s:%d (%s) Could not find opened fd(%s) for %s") - % __FILE__ % __LINE__ % __FUNCTION__ % fd % key.c_str())); + return ERROR(S3_GET_ERROR, boost::str(boost::format("[resource_name=%s] Could not find opened fd(%d) for %s") + % get_resource_name(_ctx.prop_map()).c_str() % fd % key.c_str())); } if(ent->GetFd() != fd){ S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fd)); @@ -428,8 +430,8 @@ namespace irods_s3_cacheless { // read the offset from the cache off_t offset = 0; if (!(FileOffsetManager::get()->getOffset(irods_fd, offset))) { - return ERROR(S3_PUT_ERROR, (boost::format("%s:%d (%s) Could not read offset for read (%llu)") - % __FILE__ % __LINE__ % __FUNCTION__ % offset)); + return ERROR(S3_PUT_ERROR, boost::str(boost::format("[resource_name=%s] Could not read offset for read (%llu)") + % get_resource_name(_ctx.prop_map()).c_str() % offset)); } S3FS_PRN_DBG("[path=%s][size=%zu][offset=%jd][fd=%llu]", key.c_str(), _len, (intmax_t)offset, (unsigned long long)(fd)); @@ -446,8 +448,8 @@ namespace irods_s3_cacheless { headers_t meta; int returnVal = get_object_attribute(key.c_str(), &st, &meta); if (0 != returnVal) { - return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) Failed to perform a stat of %s") - % __FILE__ % __LINE__ % __FUNCTION__ % key.c_str())); + return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] Failed to perform a stat of %s") + % 
        }
        if (offset >= st.st_size) {
@@ -477,8 +479,8 @@ namespace irods_s3_cacheless {
        readReturnVal = ent->Read(static_cast<char*>(_buf), offset, _len, false);
        if(0 > readReturnVal){
            S3FS_PRN_WARN("failed to read file(%s). result=%jd", key.c_str(), (intmax_t)readReturnVal);
-            return ERROR(S3_GET_ERROR, (boost::format("%s:%d (%s) failed to read file(%s)")
-                % __FILE__ % __LINE__ % __FUNCTION__ % key.c_str()));
+            return ERROR(S3_GET_ERROR, boost::str(boost::format("[resource_name=%s] failed to read file(%s)")
+                % get_resource_name(_ctx.prop_map()).c_str() % key.c_str()));
        }
        {
@@ -514,15 +516,12 @@ namespace irods_s3_cacheless {
        // check incoming parameters
        irods::error ret = s3CheckParams( _ctx );
        if(!ret.ok()) {
-            std::stringstream msg;
-            msg << __FUNCTION__ << " - Invalid parameters or physical path.";
-            return PASSMSG(msg.str(), ret);
+            return PASS(ret);
        }
        ret = set_s3_configuration_from_context(_ctx.prop_map());
        if (!ret.ok()) {
-            return ERROR(S3_INIT_ERROR, (boost::format("%s:%d (%s) init cacheless mode returned error %s")
-                % __FILE__ % __LINE__ % __FUNCTION__ % ret.result().c_str()));
+            return PASS(ret);
        }
        irods::error result = SUCCESS();
@@ -532,7 +531,7 @@ namespace irods_s3_cacheless {
        std::string bucket;
        std::string key;
-        ret = parseS3Path(path, bucket, key);
+        ret = parseS3Path(path, bucket, key, _ctx.prop_map());
        if(!ret.ok()) {
            return PASS(ret);
        }
@@ -542,8 +541,8 @@ namespace irods_s3_cacheless {
        int irods_fd = fco->file_descriptor();
        int fd;
        if (!(FileOffsetManager::get()->getFd(irods_fd, fd))) {
-            return ERROR(S3_PUT_ERROR, (boost::format("%s:%d (%s) Could not look up file descriptor")
-                % __FILE__ % __LINE__ % __FUNCTION__));
+            return ERROR(S3_PUT_ERROR, boost::str(boost::format("[resource_name=%s] Could not look up file descriptor")
+                % get_resource_name(_ctx.prop_map()).c_str()));
        }
        ssize_t retVal;
@@ -553,8 +552,8 @@ namespace irods_s3_cacheless {
        FdEntity* ent;
        if(NULL == (ent = FdManager::get()->ExistOpen(key.c_str(), static_cast<int>(fd)))){
            S3FS_PRN_ERR("could not find opened fd(%s)", key.c_str());
-            return ERROR(S3_PUT_ERROR, (boost::format("%s:%d (%s) Could not find opened fd(%s)")
-                % __FILE__ % __LINE__ % __FUNCTION__ % fd));
+            return ERROR(S3_PUT_ERROR, boost::str(boost::format("[resource_name=%s] Could not find opened fd(%d)")
+                % get_resource_name(_ctx.prop_map()).c_str() % fd));
        }
        if(ent->GetFd() != fd) {
            S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), (unsigned long long)(fd));
@@ -563,8 +562,8 @@ namespace irods_s3_cacheless {
        // read the offset from the cache
        off_t offset = 0;
        if (!(FileOffsetManager::get()->getOffset(irods_fd, offset))) {
-            return ERROR(S3_PUT_ERROR, (boost::format("%s:%d (%s) Could not read offset for write (%llu)")
-                % __FILE__ % __LINE__ % __FUNCTION__ % offset));
+            return ERROR(S3_PUT_ERROR, boost::str(boost::format("[resource_name=%s] Could not read offset for write (%llu)")
+                % get_resource_name(_ctx.prop_map()).c_str() % offset));
        }
        S3FS_PRN_DBG("[offset=%llu]", offset);
@@ -587,8 +586,7 @@ namespace irods_s3_cacheless {
        result = set_s3_configuration_from_context(_ctx.prop_map());
        if (!result.ok()) {
-            return ERROR(S3_INIT_ERROR, (boost::format("%s:%d (%s) init cacheless mode returned error %s")
-                % __FILE__ % __LINE__ % __FUNCTION__ % result.result().c_str()));
+            return PASS(result);
        }
        irods::file_object_ptr fco = boost::dynamic_pointer_cast< irods::file_object >( _ctx.fco() );
@@ -597,7 +595,7 @@ namespace irods_s3_cacheless {
        std::string bucket;
        std::string key;
-        result = parseS3Path(path, bucket, key);
+        result = parseS3Path(path, bucket, key, _ctx.prop_map());
        if(!result.ok()) {
            return PASS(result);
        }
@@ -633,15 +631,12 @@ namespace irods_s3_cacheless {
        // check incoming parameters
        irods::error ret = s3CheckParams( _ctx );
        if(!ret.ok()) {
-            std::stringstream msg;
-            msg << __FUNCTION__ << " - Invalid parameters or physical path.";
-            return PASSMSG(msg.str(), ret);
+            return PASS(ret);
        }
        ret = set_s3_configuration_from_context(_ctx.prop_map());
        if (!ret.ok()) {
-            return ERROR(S3_INIT_ERROR, (boost::format("%s:%d (%s) init cacheless mode returned error %s")
-                % __FILE__ % __LINE__ % __FUNCTION__ % ret.result().c_str()));
+            return PASS(ret);
        }
        irods::file_object_ptr fco = boost::dynamic_pointer_cast< irods::file_object >( _ctx.fco() );
@@ -649,10 +644,11 @@ namespace irods_s3_cacheless {
        std::string bucket;
        std::string key;
-        ret = parseS3Path(path, bucket, key);
+        ret = parseS3Path(path, bucket, key, _ctx.prop_map());
        if(!ret.ok()) {
            return PASS(ret);
        }
+        strncpy(::bucket, bucket.c_str(), MAX_NAME_LEN-1);
        key = "/" + key;
@@ -665,8 +661,8 @@ namespace irods_s3_cacheless {
        S3FS_MALLOCTRIM(0);
        if (result < 0) {
-            return ERROR(S3_FILE_UNLINK_ERR, (boost::format("%s:%d (%s) Could not unlink file %s")
-                % __FILE__ % __LINE__ % __FUNCTION__ % key.c_str()));
+            return ERROR(S3_FILE_UNLINK_ERR, boost::str(boost::format("[resource_name=%s] Could not unlink file %s")
+                % get_resource_name(_ctx.prop_map()).c_str() % key.c_str()));
        }
        return SUCCESS();
@@ -683,15 +679,12 @@ namespace irods_s3_cacheless {
        // check incoming parameters
        irods::error ret = s3CheckParams( _ctx );
        if(!ret.ok()) {
-            std::stringstream msg;
-            msg << __FUNCTION__ << " - Invalid parameters or physical path.";
-            return PASSMSG(msg.str(), ret);
+            return PASS(ret);
        }
        ret = set_s3_configuration_from_context(_ctx.prop_map());
        if (!ret.ok()) {
-            return ERROR(S3_INIT_ERROR, (boost::format("%s:%d (%s) init cacheless mode returned error %s")
-                % __FILE__ % __LINE__ % __FUNCTION__ % ret.result().c_str()));
+            return PASS(ret);
        }
        irods::file_object_ptr fco = boost::dynamic_pointer_cast< irods::file_object >( _ctx.fco() );
@@ -699,7 +692,7 @@ namespace irods_s3_cacheless {
        std::string bucket;
        std::string key;
-        ret = parseS3Path(path, bucket, key);
+        ret = parseS3Path(path, bucket, key, _ctx.prop_map());
        if(!ret.ok()) {
            return PASS(ret);
        }
@@ -709,8 +702,8 @@ namespace irods_s3_cacheless {
        int returnVal;
        returnVal = get_object_attribute(key.c_str(), _statbuf);
        if (0 != returnVal) {
-            return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) Failed to perform a stat of %s")
-                % __FILE__ % __LINE__ % __FUNCTION__ % key.c_str()));
+            return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] Failed to perform a stat of %s")
+                % get_resource_name(_ctx.prop_map()).c_str() % key.c_str()));
        }
        // If has already opened fd, the st_size should be instead.
@@ -752,9 +745,7 @@ namespace irods_s3_cacheless {
        // check incoming parameters
        irods::error ret = s3CheckParams( _ctx );
        if(!ret.ok()) {
-            std::stringstream msg;
-            msg << __FUNCTION__ << " - Invalid parameters or physical path.";
-            return PASSMSG(msg.str(), ret);
+            return PASS(ret);
        }
        // TODO create S3_FILE_SEEK_ERR
@@ -764,7 +755,7 @@ namespace irods_s3_cacheless {
        std::string bucket;
        std::string key;
-        ret = parseS3Path(path, bucket, key);
+        ret = parseS3Path(path, bucket, key, _ctx.prop_map());
        if(!ret.ok()) {
            return PASS(ret);
        }
@@ -779,15 +770,15 @@ namespace irods_s3_cacheless {
        int irods_fd = fco->file_descriptor();
        int fd;
        if (!(FileOffsetManager::get()->getFd(irods_fd, fd))) {
-            return ERROR(S3_PUT_ERROR, (boost::format("%s:%d (%s) Could not look up file descriptor")
-                % __FILE__ % __LINE__ % __FUNCTION__));
+            return ERROR(S3_PUT_ERROR, boost::str(boost::format("[resource_name=%s] Could not look up file descriptor")
+                % get_resource_name(_ctx.prop_map()).c_str()));
        }
        FdEntity* ent;
        if(NULL == (ent = FdManager::get()->ExistOpen(key.c_str(), static_cast<int>(fd)))){
            S3FS_PRN_ERR("could not find opened fd(%s)", key.c_str());
-            return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) Could not find opened fd(%d)")
-                % __FILE__ % __LINE__ % __FUNCTION__ % fd));
+            return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] Could not find opened fd(%d)")
+                % get_resource_name(_ctx.prop_map()).c_str() % fd));
        }
        if(ent->GetFd() != fd) {
            S3FS_PRN_WARN("different fd(%d - %llu)", ent->GetFd(), fd);
@@ -812,8 +803,8 @@ namespace irods_s3_cacheless {
            headers_t meta;
            int returnVal = get_object_attribute(key.c_str(), &st, &meta, true, NULL, true);  // no truncate cache
            if (0 != returnVal) {
-                return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) Failed to perform a stat of %s")
-                    % __FILE__ % __LINE__ % __FUNCTION__ % key.c_str()));
+                return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] Failed to perform a stat of %s")
+                    % get_resource_name(_ctx.prop_map()).c_str() % key.c_str()));
            }
            FileOffsetManager::get()->setOffset(irods_fd, st.st_size + _offset);
@@ -822,8 +813,8 @@ namespace irods_s3_cacheless {
        }
        default:
            S3FS_PRN_ERR("invalid whence argument (%d) on lseek for object (%s)", _whence, key.c_str());
-            return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) Setting seek failed (%lld)")
-                % __FILE__ % __LINE__ % __FUNCTION__ % _offset));
+            return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] Setting seek failed (%lld)")
+                % get_resource_name(_ctx.prop_map()).c_str() % _offset));
        }
        // read the new offset and set in ret.code
@@ -870,15 +861,12 @@ namespace irods_s3_cacheless {
        // check incoming parameters
        irods::error ret = s3CheckParams( _ctx );
        if (!ret.ok()) {
-            std::stringstream msg;
-            msg << __FUNCTION__ << " - Invalid parameters or physical path.";
-            return PASSMSG(msg.str(), ret);
+            return PASS(ret);
        }
        ret = set_s3_configuration_from_context(_ctx.prop_map());
        if (!ret.ok()) {
-            return ERROR(S3_INIT_ERROR, (boost::format("%s:%d (%s) init cacheless mode returned error %s")
-                % __FILE__ % __LINE__ % __FUNCTION__ % ret.result().c_str()));
+            return PASS(ret);
        }
        irods::collection_object_ptr fco = boost::dynamic_pointer_cast< irods::collection_object >( _ctx.fco() );
@@ -886,10 +874,11 @@ namespace irods_s3_cacheless {
        std::string bucket;
        std::string key;
-        ret = parseS3Path(path, bucket, key);
+        ret = parseS3Path(path, bucket, key, _ctx.prop_map());
        if(!ret.ok()) {
            return PASS(ret);
        }
+        strncpy(::bucket, bucket.c_str(), MAX_NAME_LEN-1);
        key = "/" + key;
@@ -904,8 +893,8 @@ namespace irods_s3_cacheless {
        // get a list of all the objects
        if ((result = list_bucket(key.c_str(), head, "/")) != 0) {
-            return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) list_bucket returns error(%d).")
-                % __FILE__ % __LINE__ % __FUNCTION__ % result));
+            return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] list_bucket returns error(%d).")
+                % get_resource_name(_ctx.prop_map()).c_str() % result));
        }
        if (head.IsEmpty()) {
@@ -934,8 +923,8 @@ namespace irods_s3_cacheless {
        struct stat st;
        headers_t meta;
        if (0 != (result = get_object_attribute(object_key.c_str(), &st, &meta, true, NULL, true))) {
-            return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) get_object_attribute on %s returns error(%d).")
-                % __FILE__ % __LINE__ % __FUNCTION__ % object_key.c_str() % result));
+            return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] get_object_attribute on %s returns error(%d).")
+                % get_resource_name(_ctx.prop_map()).c_str() % object_key.c_str() % result));
        }
        *_dirent_ptr = ( rodsDirent_t* ) malloc( sizeof( rodsDirent_t ) );
        boost::filesystem::path p(object_key.c_str());
@@ -957,15 +946,12 @@ namespace irods_s3_cacheless {
        // check incoming parameters
        irods::error ret = s3CheckParams( _ctx );
        if(!ret.ok()) {
-            std::stringstream msg;
-            msg << __FUNCTION__ << " - Invalid parameters or physical path.";
-            return PASSMSG(msg.str(), ret);
+            return PASS(ret);
        }
        ret = set_s3_configuration_from_context(_ctx.prop_map());
        if (!ret.ok()) {
-            return ERROR(S3_INIT_ERROR, (boost::format("%s:%d (%s) init cacheless mode returned error %s")
-                % __FILE__ % __LINE__ % __FUNCTION__ % ret.result().c_str()));
+            return PASS(ret);
        }
        irods::file_object_ptr fco = boost::dynamic_pointer_cast< irods::file_object >( _ctx.fco() );
@@ -973,15 +959,16 @@ namespace irods_s3_cacheless {
        std::string bucket;
        std::string from_key;
-        ret = parseS3Path(from, bucket, from_key);
+        ret = parseS3Path(from, bucket, from_key, _ctx.prop_map());
        if(!ret.ok()) {
            return PASS(ret);
        }
+        strncpy(::bucket, bucket.c_str(), MAX_NAME_LEN-1);
        from_key = "/" + from_key;
        std::string new_file_key;
-        ret = parseS3Path(_new_file_name, bucket, new_file_key);
+        ret = parseS3Path(_new_file_name, bucket, new_file_key, _ctx.prop_map());
        if(!ret.ok()) {
            return PASS(ret);
        }
@@ -996,8 +983,8 @@ namespace irods_s3_cacheless {
        ret = s3FileStatPlugin(_ctx, &buf);
        if(!ret.ok()) {
-            return ERROR(S3_FILE_STAT_ERR, (boost::format("%s:%d (%s) Failed to stat file (%s) during move to (%s)")
-                % __FILE__ % __LINE__ % __FUNCTION__ % from.c_str(), _new_file_name));
+            return ERROR(S3_FILE_STAT_ERR, boost::str(boost::format("[resource_name=%s] Failed to stat file (%s) during move to (%s)")
+                % get_resource_name(_ctx.prop_map()).c_str() % from.c_str() % _new_file_name));
        }
        // files larger than 5GB must be modified via the multipart interface
@@ -1013,8 +1000,8 @@ namespace irods_s3_cacheless {
        S3FS_MALLOCTRIM(0);
        if (result != 0) {
-            return ERROR(S3_FILE_COPY_ERR, (boost::format("%s:%d (%s) Failed to rename file from (%s) to (%s) result = %d")
-                % __FILE__ % __LINE__ % __FUNCTION__ % from.c_str() % _new_file_name % result));
+            return ERROR(S3_FILE_COPY_ERR, boost::str(boost::format("[resource_name=%s] Failed to rename file from (%s) to (%s) result = %d")
+                % get_resource_name(_ctx.prop_map()).c_str() % from.c_str() % _new_file_name % result));
        }
        // issue 1855 (irods issue 4326) - resources must now set physical path
@@ -1048,8 +1035,7 @@ namespace irods_s3_cacheless {
        irods::plugin_context& _ctx,
        const char* _cache_file_name ) {
-        std::cerr << irods::stacktrace().dump();
-        return ERROR(SYS_NOT_SUPPORTED, "s3StageToCachePlugin");
+        return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
    }
    // =-=-=-=-=-=-=-
@@ -1060,8 +1046,7 @@ namespace irods_s3_cacheless {
        irods::plugin_context& _ctx,
        const char* _cache_file_name ) {
-        std::cerr << irods::stacktrace().dump();
-        return ERROR(SYS_NOT_SUPPORTED, "s3StageToCachePlugin");
+        return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
    }
    // =-=-=-=-=-=-=-
@@ -1083,7 +1068,7 @@ namespace irods_s3_cacheless {
        // determine if the resource is down
        int resc_status = 0;
        irods::error get_ret = _prop_map.get< int >( irods::RESOURCE_STATUS, resc_status );
-        if ( ( result = ASSERT_PASS( get_ret, "Failed to get \"status\" property." ) ).ok() ) {
+        if ( ( result = ASSERT_PASS( get_ret, boost::str(boost::format("[resource_name=%s] Failed to get \"status\" property.") % _resc_name.c_str() ) ) ).ok() ) {
            // =-=-=-=-=-=-=-
            // if the status is down, vote no.
@@ -1093,7 +1078,7 @@ namespace irods_s3_cacheless {
            // get the resource host for comparison to curr host
            std::string host_name;
            get_ret = _prop_map.get< std::string >( irods::RESOURCE_LOCATION, host_name );
-            if ( ( result = ASSERT_PASS( get_ret, "Failed to get \"location\" property." ) ).ok() ) {
+            if ( ( result = ASSERT_PASS( get_ret, boost::str(boost::format("[resource_name=%s] Failed to get \"location\" property.") % _resc_name.c_str() ) ) ).ok() ) {
                // =-=-=-=-=-=-=-
                // set a flag to test if were at the curr host, if so we vote higher
@@ -1182,7 +1167,9 @@ namespace irods_s3_cacheless {
            }
            else {
                result.code( SYS_RESC_IS_DOWN );
-                result = PASS( result );
+                std::stringstream msg;
+                msg << "[resource_name=" << get_resource_name(_prop_map) << "] resource is down";
+                return PASSMSG(msg.str(), result);
            }
        }
@@ -1206,12 +1193,12 @@ namespace irods_s3_cacheless {
        // =-=-=-=-=-=-=-
        // check the context validity
        ret = _ctx.valid< irods::file_object >();
-        if((result = ASSERT_PASS(ret, "Invalid resource context.")).ok()) {
+        if ( ( result = ASSERT_PASS( ret, "[resource_name=%s] Invalid resource context.", get_resource_name(_ctx.prop_map()).c_str() ) ).ok() ) {
            // =-=-=-=-=-=-=-
            // check incoming parameters
-            if((result = ASSERT_ERROR(_opr && _curr_host && _out_parser && _out_vote, SYS_INVALID_INPUT_PARAM,
-                                      "One or more NULL pointer arguments.")).ok()) {
+            if( ( result = ASSERT_ERROR( _opr && _curr_host && _out_parser && _out_vote, SYS_INVALID_INPUT_PARAM,
+                                         "[resource_name=%s] One or more NULL pointer arguments.", get_resource_name(_ctx.prop_map()).c_str() ) ).ok() ) {
                std::string resc_name;
@@ -1238,13 +1225,13 @@ namespace irods_s3_cacheless {
                ret = _ctx.prop_map().get( irods::RESOURCE_ID, resc_id );
                if ( !ret.ok() ) {
-                    std::string msg("get_property in s3RedirectPlugin failed to get irods::RESOURCE_ID");
+                    std::string msg = boost::str(boost::format("[resource_name=%s] get_property in s3RedirectPlugin failed to get irods::RESOURCE_ID") % resc_name.c_str() );
                    return PASSMSG( msg, ret );
                }
                ret = irods::get_resource_property< rodsServerHost_t* >( resc_id, irods::RESOURCE_HOST, host );
                if ( !ret.ok() ) {
-                    std::string msg("get_resource_property in s3RedirectPlugin for detached mode failed");
+                    std::string msg = boost::str(boost::format("[resource_name=%s] get_resource_property (irods::RESOURCE_HOST) in s3RedirectPlugin for detached mode failed") % resc_name.c_str() );
                    return PASSMSG( msg, ret );
                }
@@ -1256,7 +1243,7 @@ namespace irods_s3_cacheless {
                ret = irods::set_resource_property< rodsServerHost_t* >( resc_name, irods::RESOURCE_HOST, host );
                if ( !ret.ok() ) {
-                    std::string msg("set_resource_property in s3RedirectPlugin for detached mode failed");
+                    std::string msg = boost::str(boost::format("[resource_name=%s] set_resource_property (irods::RESOURCE_HOST) in s3RedirectPlugin for detached mode failed") % resc_name.c_str() );
                    return PASSMSG( msg, ret );
                }
@@ -1286,8 +1273,8 @@ namespace irods_s3_cacheless {
                        result = s3RedirectCreate( _ctx.prop_map(), *file_obj, resc_name, (*_curr_host), (*_out_vote) );
                    }
                    else {
-                        result = ASSERT_ERROR(false, SYS_INVALID_INPUT_PARAM, "Unknown redirect operation: \"%s\".",
-                                              _opr->c_str());
+                        result = ASSERT_ERROR(false, SYS_INVALID_INPUT_PARAM,
+                                              "[resource_name=%s] Unknown redirect operation: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(), _opr->c_str() );
                    }
                }
            }
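
The patch repeats the same sequence at every call site: build a "[resource_name=...]"-prefixed message from the plugin property map, then hand it to rodsLog or wrap it in an iRODS error object. A minimal sketch of how that prefixing could be centralized follows. It assumes only get_resource_name() and rodsLog() as used in the diff, plus the iRODS plugin headers already included by these source files; the helper name log_error_with_resource_name is illustrative and is not part of the change.

// Illustrative sketch only (not part of the patch above): one place to build
// and log the "[resource_name=...]" prefix instead of repeating it per call site.
// The helper name log_error_with_resource_name is hypothetical.
#include <sstream>
#include <string>

void log_error_with_resource_name( irods::plugin_property_map& _prop_map,
                                   const std::string&          _message ) {
    std::stringstream msg;
    msg << "[resource_name=" << get_resource_name(_prop_map) << "] " << _message;
    // Pass the text as an argument rather than as the format string, so a stray
    // '%' in _message cannot be interpreted by rodsLog's printf-style formatting.
    rodsLog( LOG_ERROR, "%s", msg.str().c_str() );
}

A call site such as the sync-to-archive failure branch could then reduce to log_error_with_resource_name(_ctx.prop_map(), result.result()); before returning the error.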