Added timeout on chunked uploads

The service will now automatically clean up leftover data from inactive,
unfinished chunked upload attempts.

Updated config.sample.js with a revised description and the new "timeout"
sub-option for the chunkSize option.
Bobby Wibowo 2020-06-15 23:48:43 +07:00
parent b4c8b1d90e
commit 585331c6e5
2 changed files with 45 additions and 10 deletions
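
For quick reference, the resulting chunkSize block with the new sub-option
looks like this (the values are the sample defaults from the diff below):

    chunkSize: {
      max: '95MB',              // largest chunk a client may send
      default: '25MB',          // chunk size the homepage uploader starts with
      timeout: 30 * 60 * 1000   // reap inactive attempts after 30 minutes
    },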

config.sample.js

@@ -208,23 +208,32 @@ module.exports = {
   /*
     Chunk size for chunked uploads. Needs to be in MB.
     If this is enabled, every file uploaded from the homepage uploader
     will forcibly be chunked by the size specified in "default".
     Users can configure the chunk size they want from the homepage uploader,
     but you can force the allowed max size of each chunk with "max".
     Min size will always be 1MB.
     Users will still be able to upload bigger files with the API
     as long as they don't surpass the limit specified in the "maxSize" option above.
     Once all chunks have been uploaded, their total size
     will be tested against the "maxSize" option again.
+    With "timeout", you can specify how long a particular chunked upload attempt
+    can remain inactive before its temporary data gets cleared out
+    (partially uploaded files or other internal data).
     This option is mainly useful for hosters that use Cloudflare,
     since Cloudflare limits upload size to 100MB on their Free plan.
     https://support.cloudflare.com/hc/en-us/articles/200172516#h_51422705-42d0-450d-8eb1-5321dcadb5bc
-    NOTE: Set to falsy value to disable chunked uploads.
+    NOTE: Set "default" or the option itself to falsy value to disable chunked uploads.
   */
   chunkSize: {
     max: '95MB',
-    default: '25MB'
+    default: '25MB',
+    timeout: 30 * 60 * 1000 // 30 minutes
   },
   /*
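
Per the NOTE above, chunked uploads can now be disabled in either of two
ways; a short sketch of both forms, with illustrative values:

    // Disable chunked uploads entirely:
    chunkSize: false,

    // ...or keep the object but set "default" to a falsy value,
    // e.g. to keep "max" around for later:
    chunkSize: {
      max: '95MB',
      default: false
    },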

controllers/uploadController.js

@@ -29,6 +29,7 @@ const maxFilesPerUpload = 20
 const chunkedUploads = config.uploads.chunkSize &&
   typeof config.uploads.chunkSize === 'object' &&
   config.uploads.chunkSize.default
+// Fall back to 1800000 ms (30 minutes) when no timeout is configured;
+// short-circuit on chunkedUploads so a falsy chunkSize can't throw here
+const chunkedUploadsTimeout = (chunkedUploads && config.uploads.chunkSize.timeout) || 1800000
 const chunksData = {}
 // Hard-coded min chunk size of 1 MB (e.g. 50 MB = max 50 chunks)
 const maxChunksCount = maxSize
@@ -40,6 +41,35 @@ const urlExtensionsFilter = Array.isArray(config.uploads.urlExtensionsFilter) &&
 const temporaryUploads = Array.isArray(config.uploads.temporaryUploadAges) &&
   config.uploads.temporaryUploadAges.length
+// Tracks the state of a single chunked upload attempt,
+// plus an inactivity timer that triggers automatic cleanup
+class ChunksData {
+  constructor (uuid, root) {
+    this.uuid = uuid
+    this.root = root
+    this.filename = 'tmp'
+    this.chunks = 0
+    this.stream = null
+    this.hasher = null
+  }
+
+  onTimeout () {
+    // Close any open write stream and hasher before purging the data
+    if (this.stream)
+      this.stream.end()
+    if (this.hasher)
+      this.hasher.dispose()
+    self.cleanUpChunks(this.uuid, true)
+  }
+
+  setTimeout (delay) {
+    // Restart the inactivity timer
+    this.clearTimeout()
+    this._timeout = setTimeout(this.onTimeout.bind(this), delay)
+  }
+
+  clearTimeout () {
+    if (this._timeout)
+      clearTimeout(this._timeout)
+  }
+}
 const initChunks = async uuid => {
   if (chunksData[uuid] === undefined) {
     const root = path.join(paths.chunks, uuid)
@@ -51,14 +81,9 @@ const initChunks = async uuid => {
         throw err
       await paths.mkdir(root)
     }
-    chunksData[uuid] = {
-      root,
-      filename: 'tmp',
-      chunks: 0,
-      stream: null,
-      hasher: null
-    }
+    chunksData[uuid] = new ChunksData(uuid, root)
   }
+  // Re-arm the inactivity timer on every call, i.e. on every received chunk
+  chunksData[uuid].setTimeout(chunkedUploadsTimeout)
   return chunksData[uuid]
 }
@@ -539,7 +564,7 @@ self.actuallyFinishChunks = async (req, res, user) => {
   }
 }

-self.cleanUpChunks = async (uuid) => {
+self.cleanUpChunks = async (uuid, onTimeout) => {
   // Remove tmp file
   await paths.unlink(path.join(chunksData[uuid].root, chunksData[uuid].filename))
     .catch(error => {
@@ -549,6 +574,7 @@ self.cleanUpChunks = async (uuid) => {
   // Remove UUID dir
   await paths.rmdir(chunksData[uuid].root)

   // Delete cached chunks data
+  // Skip clearTimeout() when invoked by the timer itself, since it has already fired
+  if (!onTimeout) chunksData[uuid].clearTimeout()
   delete chunksData[uuid]
 }
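
Taken together, these changes implement a refresh-on-activity timer:
initChunks re-arms the timer on every incoming chunk, so onTimeout only
fires after a full timeout window with no activity. A minimal
self-contained sketch of the same pattern, with illustrative names that
are not part of lolisafe's API:

    const timers = new Map()

    // Re-arm the inactivity timer for an upload attempt;
    // if nothing touches it for `delay` ms, run `onExpire`
    const touch = (uuid, delay, onExpire) => {
      clearTimeout(timers.get(uuid))
      timers.set(uuid, setTimeout(() => {
        timers.delete(uuid)
        onExpire(uuid)
      }, delay))
    }

    // Called once per received chunk:
    touch('d6ac9d4b', 30 * 60 * 1000, uuid => {
      console.log(`Reaping stale chunked upload ${uuid}`)
    })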