You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Describe the bug
When a bunch (north of 5000) queries have run inside the job, the job appears to run out of memory and refuses to create new connections until restarted. Temporary storage of the job also keeps increasing throughout the day. I can only suspect a memory leak.
To Reproduce
Steps to reproduce the behavior:
I have a back-end server implemented with Express.js that runs the entire day and is restarted at night. Normally it works fine, but a couple of days a month, everybody in the company uses the tool it powers at the same time, and it can receive 10 hits per second. During those busy days, it looks like the job eventually runs out of temporary storage. This appears to be caused by the statements (or the connections?) allocating memory and never deallocating? If this is caused by poor coding from my side, I would like to apologize for wasting your time, I usually stick to RPGLE :-).
I have also included a function that uses idb-pconnector, as there the problem seems to be much worse. This could provide useful clues when figuring out what happens I guess.
const{ dbconn, dbstmt,NUMERIC,CHAR,IN,NULL,SQL_ATTR_DBC_SYS_NAMING,SQL_TRUE}=require('idb-connector');const{ DBPool, Statement }=require('idb-pconnector');constconfig=require('./config.json');constpool=newDBPool({url: '*LOCAL',});asyncfunctionexecuteStatement(sqlStatement,bindParams,skipFetch=false,skipSetLibList=false){constconnection=newdbconn();connection.conn('*LOCAL');if(!skipSetLibList)awaitsetLibraryList(connection,config.db2LibList);conststatement=newdbstmt(connection);returnnewPromise((resolve,reject)=>{try{statement.exec('SET PATH = *LIBL',(out,error)=>{if(error){reject(error);return;}if(bindParams===null){statement.exec(sqlStatement,(out,error)=>{if(error){reject(error);return;}resolve(out);statement.close();connection.disconn();connection.close();});}else{letbindings=[];for(variinbindParams){switch(typeofbindParams[i]){case'number':
bindings.push([bindParams[i],IN,NUMERIC]);break;case'string':
bindings.push([bindParams[i],IN,CHAR]);break;case'object':
if(bindParams[i]===null)bindings.push([null,IN,NULL]);elsebindings.push([bindParams[i],IN,CHAR]);break;}}statement.prepare(sqlStatement,(error)=>{if(error){reject(error);return;}statement.bindParameters(bindings,(error)=>{if(error){reject(error);return;}statement.execute((out,error)=>{if(error){reject(error);return;}if(!skipFetch){statement.fetchAll((out,error)=>{if(error){reject(error);return;}resolve(out);statement.close();connection.disconn();connection.close();});}else{resolve(out);statement.close();connection.disconn();connection.close();}});});});}});}catch(error){reject(error);}});}asyncfunctionsetLibraryList(conn,list){awaitsetConnAttr(conn,SQL_ATTR_DBC_SYS_NAMING,SQL_TRUE);returnnewPromise((resolve,reject)=>{try{letchangeLibStmt=newdbstmt(conn),qcmdexc='CALL QSYS2.QCMDEXC(?)',// you can set multiple libschangeLibParam=`CHGLIBL LIBL(${list.join(' ')})`;changeLibStmt.prepare(qcmdexc,(error)=>{if(error){reject(error);return;}changeLibStmt.bindParam([[changeLibParam,IN,CHAR]],(error)=>{if(error){reject(error);return;}changeLibStmt.execute((out,error)=>{if(error){reject(error);return;}changeLibStmt.close();resolve(out);});});});}catch(error){reject(error);}});}asyncfunctionsetConnAttr(conn,attribute,value){returnnewPromise((resolve,reject)=>{try{resolve(conn.setConnAttr(attribute,value));}catch(error){reject(error);}});}asyncfunctionexecuteStatementConnectorP(sqlStatement,bindParams,skipFetch=false,skipSetLibList=false){constconn=pool.attach();if(!skipSetLibList)awaitconn.connection.setLibraryList(config.db2LibList);varresults;letstatement=newStatement(conn.connection);statement.stmt.asNumber(true);awaitstatement.exec('SET PATH = *LIBL');//TIMESTAMP_ISO8601 UDF in SYSACCPGMif(bindParams===null){results=awaitstatement.exec(sqlStatement);}else{awaitstatement.prepare(sqlStatement);letbindings=[];for(variinbindParams){switch(typeofbindParams[i]){case'number':
bindings.push([bindParams[i],IN,NUMERIC]);break;case'string':
bindings.push([bindParams[i],IN,CHAR]);break;case'object':
if(bindParams[i]===null)bindings.push([null,IN,NULL]);elsebindings.push([bindParams[i],IN,CHAR]);break;}}awaitstatement.bindParam(bindings);awaitstatement.execute();if(!skipFetch)results=awaitstatement.fetchAll();}awaitstatement.close();pool.detach(conn);returnresults;}asyncfunctiongetStories(){constsSql=` SELECT rTrim(JIRAISSUE) "jiraIssue", rTrim(JIRAISSUEID) "jiraIssueId", rTrim(JIRAASSIGNEEUSER) "jiraAssigneeUser", JIRAISSUEDESCRIPTION "jiraIssueDescription", JIRAISSUESYSTEM "jiraIssueSystem", rTrim(JIRAISSUEVERSION) "jiraIssueVersion", DEPLOYINSTRUCTIONS "deployInstructions", rTrim(DEPLOYINSTRUCTIONSRESPONSIBLE) "deployInstructionsResponsible", rTrim(DEPLOYINSTRUCTIONSRESPONSIBLEBACKUP) "deployInstructionsResponsibleBackup", DEPLOYINSTRUCTIONSDONE "deployInstructionsDone", STATUS "status", TIMESTAMP_ISO8601(timestamp(UPDATEDATE, UPDATETIME)) "updateTimestamp", rTrim(UPDATEUSER) "updateUser" FROM sysjse order by JIRAISSUEID`;constresultSet=awaitexecuteStatement(sSql);//const resultSet = await executeStatementConnectorP(sSql);returnresultSet;}for(letstep=0;step<1000;step++){getStories().then((a)=>{console.log(step+' OK ');}).catch((e)=>{console.error(step+' ERROR '+e);});}//enter to exitprocess.stdin.setRawMode(true);process.stdin.resume();process.stdin.on('data',process.exit.bind(process,0));
When ran using idb-connector [await executeStatement(sSql)]:
When ran using idb-pconnector [await executeStatementConnectorP(sSql)]:
My implementation with pconnector uses about 30% more temp storage after completing 1000 statements and runs slightly slower.
When a node job reaches 1200 - 1400 MB of temp storage, it tends to crash on our machine.
Is this caused by poor coding on my side, or is this some sort of memory leak?
Expected behavior
All memory cleaned up after completing a query.
Revisiting this, I notice I didn't make very clear when "SQLSTATE=PAERR SQLCODE=8012" occurs. When the temp storage reaches 1200 - 1400 MB, usually the job becomes unresponsive, but other times when a query (usually 'SET PATH = *LIBL'") is executed on the first statement after new dbconn() "SQLSTATE=PAERR SQLCODE=8012" occurs. After that point, every query will fail with this error until the job is restarted.
Describe the bug
When a bunch (north of 5000) queries have run inside the job, the job appears to run out of memory and refuses to create new connections until restarted. Temporary storage of the job also keeps increasing throughout the day. I can only suspect a memory leak.
To Reproduce
Steps to reproduce the behavior:
I have a back-end server implemented with Express.js that runs the entire day and is restarted at night. Normally it works fine, but a couple of days a month, everybody in the company uses the tool it powers at the same time, and it can receive 10 hits per second. During those busy days, it looks like the job eventually runs out of temporary storage. This appears to be caused by the statements (or the connections?) allocating memory and never deallocating? If this is caused by poor coding from my side, I would like to apologize for wasting your time, I usually stick to RPGLE :-).
I have also included a function that uses idb-pconnector, as there the problem seems to be much worse. This could provide useful clues when figuring out what happens I guess.
When ran using idb-connector [await executeStatement(sSql)]:
When ran using idb-pconnector [await executeStatementConnectorP(sSql)]:
My implementation with pconnector uses about 30% more temp storage after completing 1000 statements and runs slightly slower.
When a node job reaches 1200 - 1400 MB of temp storage, it tends to crash on our machine.
Is this caused by poor coding on my side, or is this some sort of memory leak?
Expected behavior
All memory cleaned up after completing a query.
Screenshots
See above.
+-- idb-pconnector@1.0.8
|
-- idb-connector@1.2.10 deduped
-- itoolkit@1.0.0`-- idb-connector@1.2.10 deduped
Thank you for your time 👍
The text was updated successfully, but these errors were encountered: