filesafe/controllers/multerStorageController.js
Bobby Wibowo 62a977542e
Switched to BLAKE3 for file hashing [MORE]
UPDATE YOUR CONFIG FILE IF YOU USE CHUNKED UPLOADS!
Read more about this at the end.

Added new dependency: blake3

Hashes will be created as the uploads are being written to disk.
With the exception of chunked uploads!
For them specifically, their hashes will be created as they're being
rebuilt into a single file.
Should still be a lot better than the previous case where it had to
re-read the already written files.

To support that feature, added a new file
controllers/multerStorageController.js.
It's just a custom storage engine for Multer.

chunkSize option now allows setting max chunk size from config file.
Previously it was hardcoded to 95MB, but assuming you have paid
Cloudflare plans, you can actually have up to 500MB.

Also moved the option to be after maxSize and before urlMaxSize.
Made a lot more sense to me this way, as chunked uploads only work on
regular uploads.

Updated v1 version string and rebuilt client assets.
2020-05-29 02:52:58 +07:00

80 lines
2.0 KiB
JavaScript

const fs = require('fs')
const os = require('os')
const path = require('path')
const crypto = require('crypto')
const blake3 = require('blake3')
const mkdirp = require('mkdirp')
// Default filename generator: 16 random bytes rendered as 32 hex chars.
// Unused in practice — the uploads controller supplies its own filename
// function — but kept so the engine works standalone, like multer's default.
function getFilename (req, file, cb) {
  crypto.randomBytes(16, (err, raw) => {
    if (err) return cb(err)
    cb(null, raw.toString('hex'))
  })
}
// Default destination resolver: the OS temporary directory.
function getDestination (req, file, cb) {
  return cb(null, os.tmpdir())
}
// Storage-engine constructor.
// opts.filename: optional (req, file, cb) name resolver; falls back to the
//   random-hex default above.
// opts.destination: either a fixed directory path (created eagerly via
//   mkdirp) or a (req, file, cb) resolver; falls back to os.tmpdir().
function DiskStorage (opts) {
  this.getFilename = opts.filename || getFilename

  const dest = opts.destination
  if (typeof dest === 'string') {
    // Fixed directory: ensure it exists up-front, then always resolve to it.
    mkdirp.sync(dest)
    this.getDestination = function (req, file, cb) { cb(null, dest) }
  } else {
    this.getDestination = dest || getDestination
  }
}
// Writes the incoming file stream to disk, hashing it with BLAKE3 as the
// bytes flow through — except for chunked uploads (file._ischunk), which
// are hashed later when the chunks are rebuilt into one file.
//
// Calls back with { destination, filename, path, size, hash } on success,
// or with the stream error on failure.
//
// Fixes over the previous revision:
// - the blake3 hash is now disposed on write-stream errors too (it was
//   only disposed on read-stream errors, leaking WASM memory);
// - cb can no longer fire twice when both streams error;
// - the write stream is destroyed on a read-stream error instead of being
//   left dangling over a partial file.
DiskStorage.prototype._handleFile = function _handleFile (req, file, cb) {
  const that = this
  that.getDestination(req, file, function (err, destination) {
    if (err) return cb(err)
    that.getFilename(req, file, function (err, filename) {
      if (err) return cb(err)
      const finalPath = path.join(destination, filename)
      const outStream = fs.createWriteStream(finalPath)

      let hash = null
      if (!file._ischunk) {
        hash = blake3.createHash()
      }

      // Single guarded exit so cb is invoked exactly once, the hash's
      // WASM memory is always freed, and the write stream is torn down.
      let settled = false
      const onError = function (err) {
        if (settled) return
        settled = true
        if (hash) hash.dispose()
        outStream.destroy()
        return cb(err)
      }

      if (hash) file.stream.on('data', d => hash.update(d))
      file.stream.on('error', onError)
      outStream.on('error', onError)
      outStream.on('finish', function () {
        if (settled) return
        settled = true
        cb(null, {
          destination,
          filename,
          path: finalPath,
          size: outStream.bytesWritten,
          hash: hash && hash.digest('hex')
        })
      })
      file.stream.pipe(outStream)
    })
  })
}
// Deletes a previously written upload from disk and scrubs its location
// metadata off the file object so later middleware cannot reference a
// now-deleted path. cb receives fs.unlink's error, if any.
DiskStorage.prototype._removeFile = function _removeFile (req, file, cb) {
  // Renamed from `path` so the local no longer shadows the path module.
  const filePath = file.path
  delete file.destination
  delete file.filename
  delete file.path
  fs.unlink(filePath, cb)
}
// Factory export, mirroring multer's own diskStorage() API shape.
module.exports = (opts) => new DiskStorage(opts)