I had the same requirement ... stream files from Amazon S3, zip them on the fly (in memory) and deliver them to the browser via node.js. My solution was to use the knox and archiver packages and pipe the archive's bytes into the response stream.
Since this happens on the fly, you won't know the size of the archive in advance, and therefore you won't be able to use the "Content-Length" HTTP header. Instead, you have to use the "Transfer-Encoding: chunked" header.
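Incidentally, Node's http server does this for you: with HTTP/1.1, if you never set a Content-Length it falls back to chunked encoding on its own, so setting the header explicitly is mostly documentation. A minimal sketch (the port and payload are just illustrations):

    var http = require('http');

    http.createServer(function (req, res) {
        // No Content-Length here, so Node emits
        // "Transfer-Encoding: chunked" automatically.
        res.writeHead(200, { 'Content-Type': 'application/octet-stream' });
        res.write('first chunk');
        res.end('second chunk');
    }).listen(8080);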
The downside of "chunked" is that the browser cannot show a download progress bar. I tried setting the Content-Length header to an approximate value, but that only works in Chrome and Firefox; IE corrupts the file, and I haven't tested Safari.
    var http = require("http");
    var knox = require("knox");
    var archiver = require('archiver');

    // Placeholder credentials and bucket -- substitute your own.
    var client = knox.createClient({
        key: "YOUR_AWS_KEY",
        secret: "YOUR_AWS_SECRET",
        bucket: "your-bucket"
    });

    http.createServer(function (req, res) {
        var zippedFilename = 'test.zip';
        var archive = archiver('zip');
        var header = {
            "Content-Type": "application/x-zip",
            "Pragma": "public",
            "Expires": "0",
            "Cache-Control": "private, must-revalidate, post-check=0, pre-check=0",
            "Content-disposition": 'attachment; filename="' + zippedFilename + '"',
            "Transfer-Encoding": "chunked",
            "Content-Transfer-Encoding": "binary"
        };
        res.writeHead(200, header);
        archive.store = true;   // store entries uncompressed
        archive.pipe(res);      // zip bytes flow straight into the response

        // Stream one S3 object into the archive, then close it.
        // "/path/to/file.txt" is a placeholder key.
        client.get('/path/to/file.txt').on('response', function (s3res) {
            archive.append(s3res, { name: 'file.txt' });
            archive.finalize();
        }).end();
    }).listen(8080);
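One thing the sketch above glosses over is error handling. Since archiver is a stream, the archive object emits 'error' events; by the time one fires, the 200 header is already on the wire, so about the best you can do inside the request handler is log and tear the connection down (a sketch, not part of the original answer):

    archive.on('error', function (err) {
        // We can't send a proper error status anymore; destroy the
        // socket so the client sees a truncated transfer instead of
        // a seemingly complete but corrupt zip.
        console.error('archiving failed:', err);
        res.destroy(); // on very old Node: res.connection.destroy()
    });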
Michael Gorham