Add support for chunked transfer encoding on request uploads.

This commit is contained in:
ali.ibrahim 2016-09-27 12:33:51 +02:00
parent 9498a5738a
commit cbbc4376ca
5 changed files with 95 additions and 24 deletions

View file

@ -105,10 +105,10 @@ mate::WrappableBase* URLRequest::New(mate::Arguments* args) {
v8::Local<v8::Object> options;
args->GetNext(&options);
mate::Dictionary dict(args->isolate(), options);
std::string url;
dict.Get("url", &url);
std::string method;
dict.Get("method", &method);
std::string url;
dict.Get("url", &url);
std::string session_name;
dict.Get("session", &session_name);
@ -135,7 +135,7 @@ void URLRequest::BuildPrototype(v8::Isolate* isolate,
mate::ObjectTemplateBuilder(isolate, prototype->PrototypeTemplate())
// Request API
.MakeDestroyable()
.SetMethod("writeBuffer", &URLRequest::WriteBuffer)
.SetMethod("writeBuffer", &URLRequest::Write)
.SetMethod("abort", &URLRequest::Abort)
.SetMethod("setExtraHeader", &URLRequest::SetExtraHeader)
.SetMethod("removeExtraHeader", &URLRequest::RemoveExtraHeader)
@ -150,10 +150,10 @@ void URLRequest::BuildPrototype(v8::Isolate* isolate,
}
bool URLRequest::WriteBuffer(
bool URLRequest::Write(
scoped_refptr<const net::IOBufferWithSize> buffer,
bool is_last) {
return atom_request_->WriteBuffer(buffer, is_last);
return atom_request_->Write(buffer, is_last);
}

View file

@ -32,7 +32,7 @@ class URLRequest : public mate::EventEmitter<URLRequest> {
private:
bool WriteBuffer(scoped_refptr<const net::IOBufferWithSize> buffer,
bool Write(scoped_refptr<const net::IOBufferWithSize> buffer,
bool is_last);
void Abort();
bool SetExtraHeader(const std::string& name, const std::string& value);

View file

@ -86,7 +86,7 @@ scoped_refptr<AtomURLRequest> AtomURLRequest::Create(
}
bool AtomURLRequest::WriteBuffer(
bool AtomURLRequest::Write(
scoped_refptr<const net::IOBufferWithSize> buffer,
bool is_last) {
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);

View file

@ -34,7 +34,7 @@ public:
const std::string& url,
base::WeakPtr<api::URLRequest> delegate);
bool WriteBuffer(scoped_refptr<const net::IOBufferWithSize> buffer,
bool Write(scoped_refptr<const net::IOBufferWithSize> buffer,
bool is_last);
void SetChunkedUpload(bool is_chunked_upload);
void Abort() const;

View file

@ -1,6 +1,7 @@
'use strict'
const {EventEmitter} = require('events')
const util = require('util')
const binding = process.atomBinding('net')
const {net, Net} = binding
const {URLRequest} = net
@ -8,6 +9,10 @@ const {URLRequest} = net
Object.setPrototypeOf(Net.prototype, EventEmitter.prototype)
Object.setPrototypeOf(URLRequest.prototype, EventEmitter.prototype)
let kSupportedProtocols = new Set();
kSupportedProtocols.add('http:');
kSupportedProtocols.add('https:');
class IncomingMessage extends EventEmitter {
constructor(url_request) {
super();
@ -57,6 +62,74 @@ class ClientRequest extends EventEmitter {
constructor(options, callback) {
super();
if (typeof options === 'string') {
options = url.parse(options);
} else {
options = util._extend({}, options);
}
const method = (options.method || 'GET').toUpperCase();
let url_str = options.url;
if (!url_str) {
let url_obj = {};
const protocol = options.protocol || 'http';
if (!kSupportedProtocols.has(protocol)) {
throw new Error('Protocol "' + protocol + '" not supported. ');
}
url_obj.protocol = protocol;
if (options.host) {
url_obj.host = options.host;
} else {
if (options.hostname) {
url_obj.hostname = options.hostname;
} else {
url_obj.hostname = 'localhost';
}
if (options.port) {
url_obj.port = options.port;
}
}
const path = options.path || '/';
if (options.path && / /.test(options.path)) {
// The actual regex is more like /[^A-Za-z0-9\-._~!$&'()*+,;=/:@]/
// with an additional rule for ignoring percentage-escaped characters
// but that's a) hard to capture in a regular expression that performs
// well, and b) possibly too restrictive for real-world usage. That's
// why it only scans for spaces because those are guaranteed to create
// an invalid request.
throw new TypeError('Request path contains unescaped characters.');
}
url_obj.path = path;
url_str = url.format(url_obj);
}
const session_name = options.session || '';
let url_request = new URLRequest({
method: method,
url: url_str,
session: session_name
});
// Set back and forward links.
this._url_request = url_request;
url_request._request = this;
if (options.headers) {
let keys = Object.keys(options.headers);
for (let i = 0, l = keys.length; i < l; i++) {
let key = keys[i];
this.setHeader(key, options.headers[key]);
}
}
// Flag to prevent writings after end.
this._finished = false;
@ -73,19 +146,6 @@ class ClientRequest extends EventEmitter {
// after the request starts.
this._extra_headers = {};
let url = options.url;
let method = options.method;
let session = options.session;
let url_request = new URLRequest({
url: url,
method: method,
session: session
});
this._url_request = url_request;
url_request._request = this;
url_request.on('response', ()=> {
let response = new IncomingMessage(url_request);
url_request._response = response;
@ -97,6 +157,17 @@ class ClientRequest extends EventEmitter {
}
}
get chunkedEncoding() {
return this._chunkedEncoding;
}
set chunkedEncoding(value) {
if (this._headersSent) {
throw new Error('Can\'t set the transfer encoding, headers have been sent.');
}
this._chunkedEncoding = value;
}
setHeader(name, value) {
if (typeof name !== 'string')
@ -121,7 +192,7 @@ class ClientRequest extends EventEmitter {
return;
}
var key = name.toLowerCase();
let key = name.toLowerCase();
return this._extra_headers[key];
}
@ -135,7 +206,7 @@ class ClientRequest extends EventEmitter {
throw new Error('Can\'t remove headers after they are sent.');
}
var key = name.toLowerCase();
let key = name.toLowerCase();
delete this._extra_headers[key];
this._url_request.removeExtraHeader(name);
}
@ -162,7 +233,7 @@ class ClientRequest extends EventEmitter {
// assume that request headers are written after delivering the first
// buffer to the network IO thread.
if (!this._headersSent) {
this._url_request.setChunkedUpload(this._chunkedEncoding);
this._url_request.setChunkedUpload(this.chunkedEncoding);
this._headersSent = true;
}