Merge branch 'master' into windows-min-max-animation
commit 070d26e68e
14 changed files with 294 additions and 51 deletions

@@ -1,18 +1,21 @@
-{spawn} = require 'child_process'
 binding = process.atomBinding 'crash_reporter'
+fs = require 'fs'
+os = require 'os'
+path = require 'path'
+{spawn} = require 'child_process'
 
 class CrashReporter
   start: (options={}) ->
-    {productName, companyName, submitUrl, autoSubmit, ignoreSystemCrashHandler, extra} = options
+    {@productName, companyName, submitUrl, autoSubmit, ignoreSystemCrashHandler, extra} = options
 
-    productName ?= 'Atom-Shell'
+    @productName ?= 'Atom-Shell'
     companyName ?= 'GitHub, Inc'
     submitUrl ?= 'http://54.249.141.255:1127/post'
     autoSubmit ?= true
     ignoreSystemCrashHandler ?= false
     extra ?= {}
 
-    extra._productName ?= productName
+    extra._productName ?= @productName
     extra._companyName ?= companyName
     extra._version ?=
       if process.type is 'browser'

@@ -20,12 +23,12 @@ class CrashReporter
       else
         require('remote').require('app').getVersion()
 
-    start = -> binding.start productName, companyName, submitUrl, autoSubmit, ignoreSystemCrashHandler, extra
+    start = => binding.start @productName, companyName, submitUrl, autoSubmit, ignoreSystemCrashHandler, extra
 
     if process.platform is 'win32'
       args = [
         "--reporter-url=#{submitUrl}"
-        "--application-name=#{productName}"
+        "--application-name=#{@productName}"
         "--v=1"
       ]
       env = ATOM_SHELL_INTERNAL_CRASH_SERVICE: 1

@@ -35,4 +38,20 @@ class CrashReporter
     else
       start()
 
-module.exports = new CrashReporter
+  getLastCrashReport: ->
+    tmpdir =
+      if process.platform is 'win32'
+        os.tmpdir()
+      else
+        '/tmp'
+    log = path.join tmpdir, "#{@productName} Crashes", 'uploads.log'
+    try
+      reports = String(fs.readFileSync(log)).split('\n')
+      return null unless reports.length > 1
+      [time, id] = reports[reports.length - 2].split ','
+      return {date: new Date(parseInt(time) * 1000), id}
+    catch e
+      return null
+
+crashRepoter = new CrashReporter
+module.exports = crashRepoter

@@ -12,11 +12,12 @@
 
 #include "base/debug/crash_logging.h"
 #include "base/files/file_path.h"
+#include "base/files/file_util.h"
 #include "base/linux_util.h"
 #include "base/logging.h"
-#include "base/path_service.h"
 #include "base/process/memory.h"
 #include "base/memory/singleton.h"
+#include "base/strings/stringprintf.h"
 #include "vendor/breakpad/src/client/linux/handler/exception_handler.h"
 #include "vendor/breakpad/src/common/linux/linux_libc_support.h"
 
@@ -61,7 +62,7 @@ void CrashReporterLinux::InitBreakpad(const std::string& product_name,
                                       const std::string& submit_url,
                                       bool auto_submit,
                                       bool skip_system_crash_handler) {
-  EnableCrashDumping();
+  EnableCrashDumping(product_name);
 
   crash_keys_.SetKeyValue("prod", "Atom-Shell");
   crash_keys_.SetKeyValue("ver", version.c_str());

@@ -76,11 +77,15 @@ void CrashReporterLinux::SetUploadParameters() {
   upload_parameters_["platform"] = "linux";
 }
 
-void CrashReporterLinux::EnableCrashDumping() {
-  base::FilePath tmp_path("/tmp");
-  PathService::Get(base::DIR_TEMP, &tmp_path);
+void CrashReporterLinux::EnableCrashDumping(const std::string& product_name) {
+  std::string dump_dir = "/tmp/" + product_name + " Crashes";
+  base::FilePath dumps_path(dump_dir);
+  base::CreateDirectory(dumps_path);
+
+  std::string log_file = base::StringPrintf(
+      "%s/%s", dump_dir.c_str(), "uploads.log");
+  strncpy(g_crash_log_path, log_file.c_str(), sizeof(g_crash_log_path));
 
-  base::FilePath dumps_path(tmp_path);
   MinidumpDescriptor minidump_descriptor(dumps_path.value());
   minidump_descriptor.set_size_limit(kMaxMinidumpFileSize);
 
@@ -39,7 +39,7 @@ class CrashReporterLinux : public CrashReporter {
   CrashReporterLinux();
   virtual ~CrashReporterLinux();
 
-  void EnableCrashDumping();
+  void EnableCrashDumping(const std::string& product_name);
 
   static bool CrashDone(const google_breakpad::MinidumpDescriptor& minidump,
                         void* context,

@@ -75,6 +75,10 @@ uint64_t kernel_timeval_to_ms(struct kernel_timeval *tv) {
   return ret;
 }
 
+bool my_isxdigit(char c) {
+  return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f');
+}
+
 size_t LengthWithoutTrailingSpaces(const char* str, size_t len) {
   while (len > 0 && str[len - 1] == ' ') {
     len--;

@@ -345,13 +349,12 @@ void ExecUploadProcessOrTerminate(const BreakpadInfo& info,
     kWgetBinary,
     header,
     post_file,
-    // TODO(zcbenz): Enabling custom upload url.
     info.upload_url,
     "--timeout=60",  // Set a timeout so we don't hang forever.
     "--tries=1",  // Don't retry if the upload fails.
     "--quiet",  // Be silent.
     "-O",  // output reply to /dev/null.
-    "/dev/null",
+    "/dev/fd/3",
     NULL,
   };
   static const char msg[] = "Cannot upload crash dump: cannot exec "

@@ -361,8 +364,98 @@ void ExecUploadProcessOrTerminate(const BreakpadInfo& info,
   sys__exit(1);
 }
 
+// Runs in the helper process to wait for the upload process running
+// ExecUploadProcessOrTerminate() to finish. Returns the number of bytes written
+// to |fd| and save the written contents to |buf|.
+// |buf| needs to be big enough to hold |bytes_to_read| + 1 characters.
+size_t WaitForCrashReportUploadProcess(int fd, size_t bytes_to_read,
+                                       char* buf) {
+  size_t bytes_read = 0;
+
+  // Upload should finish in about 10 seconds. Add a few more 500 ms
+  // internals to account for process startup time.
+  for (size_t wait_count = 0; wait_count < 24; ++wait_count) {
+    struct kernel_pollfd poll_fd;
+    poll_fd.fd = fd;
+    poll_fd.events = POLLIN | POLLPRI | POLLERR;
+    int ret = sys_poll(&poll_fd, 1, 500);
+    if (ret < 0) {
+      // Error
+      break;
+    } else if (ret > 0) {
+      // There is data to read.
+      ssize_t len = HANDLE_EINTR(
+          sys_read(fd, buf + bytes_read, bytes_to_read - bytes_read));
+      if (len < 0)
+        break;
+      bytes_read += len;
+      if (bytes_read == bytes_to_read)
+        break;
+    }
+    // |ret| == 0 -> timed out, continue waiting.
+    // or |bytes_read| < |bytes_to_read| still, keep reading.
+  }
+  buf[bytes_to_read] = 0;  // Always NUL terminate the buffer.
+  return bytes_read;
+}
+
+// |buf| should be |expected_len| + 1 characters in size and NULL terminated.
+bool IsValidCrashReportId(const char* buf, size_t bytes_read,
+                          size_t expected_len) {
+  if (bytes_read != expected_len)
+    return false;
+  for (size_t i = 0; i < bytes_read; ++i) {
+    if (!my_isxdigit(buf[i]) && buf[i] != '-')
+      return false;
+  }
+  return true;
+}
+
+// |buf| should be |expected_len| + 1 characters in size and NULL terminated.
+void HandleCrashReportId(const char* buf, size_t bytes_read,
+                         size_t expected_len) {
+  if (!IsValidCrashReportId(buf, bytes_read, expected_len)) {
+    static const char msg[] = "Failed to get crash dump id.";
+    WriteLog(msg, sizeof(msg) - 1);
+    WriteNewline();
+
+    static const char id_msg[] = "Report Id: ";
+    WriteLog(id_msg, sizeof(id_msg) - 1);
+    WriteLog(buf, bytes_read);
+    WriteNewline();
+    return;
+  }
+
+  // Write crash dump id to stderr.
+  static const char msg[] = "Crash dump id: ";
+  WriteLog(msg, sizeof(msg) - 1);
+  WriteLog(buf, my_strlen(buf));
+  WriteNewline();
+
+  // Write crash dump id to crash log as: seconds_since_epoch,crash_id
+  struct kernel_timeval tv;
+  if (!sys_gettimeofday(&tv, NULL)) {
+    uint64_t time = kernel_timeval_to_ms(&tv) / 1000;
+    char time_str[kUint64StringSize];
+    const unsigned time_len = my_uint64_len(time);
+    my_uint64tos(time_str, time, time_len);
+
+    const int kLogOpenFlags = O_CREAT | O_WRONLY | O_APPEND | O_CLOEXEC;
+    int log_fd = sys_open(g_crash_log_path, kLogOpenFlags, 0600);
+    if (log_fd > 0) {
+      sys_write(log_fd, time_str, time_len);
+      sys_write(log_fd, ",", 1);
+      sys_write(log_fd, buf, my_strlen(buf));
+      sys_write(log_fd, "\n", 1);
+      IGNORE_RET(sys_close(log_fd));
+    }
+  }
+}
+
 }  // namespace
 
+char g_crash_log_path[256];
+
 void HandleCrashDump(const BreakpadInfo& info) {
   int dumpfd;
   bool keep_fd = false;

@@ -616,33 +709,13 @@ void HandleCrashDump(const BreakpadInfo& info) {
   // Helper process.
   if (upload_child > 0) {
     IGNORE_RET(sys_close(fds[1]));
-    char id_buf[17];  // Crash report IDs are expected to be 16 chars.
-    ssize_t len = -1;
-    // Upload should finish in about 10 seconds. Add a few more 500 ms
-    // internals to account for process startup time.
-    for (size_t wait_count = 0; wait_count < 24; ++wait_count) {
-      struct kernel_pollfd poll_fd;
-      poll_fd.fd = fds[0];
-      poll_fd.events = POLLIN | POLLPRI | POLLERR;
-      int ret = sys_poll(&poll_fd, 1, 500);
-      if (ret < 0) {
-        // Error
-        break;
-      } else if (ret > 0) {
-        // There is data to read.
-        len = HANDLE_EINTR(sys_read(fds[0], id_buf, sizeof(id_buf) - 1));
-        break;
-      }
-      // ret == 0 -> timed out, continue waiting.
-    }
-    if (len > 0) {
-      // Write crash dump id to stderr.
-      id_buf[len] = 0;
-      static const char msg[] = "\nCrash dump id: ";
-      WriteLog(msg, sizeof(msg) - 1);
-      WriteLog(id_buf, my_strlen(id_buf));
-      WriteLog("\n", 1);
-    }
+
+    const size_t kCrashIdLength = 36;
+    char id_buf[kCrashIdLength + 1];
+    size_t bytes_read =
+        WaitForCrashReportUploadProcess(fds[0], kCrashIdLength, id_buf);
+    HandleCrashReportId(id_buf, bytes_read, kCrashIdLength);
+
     if (sys_waitpid(upload_child, NULL, WNOHANG) == 0) {
       // Upload process is still around, kill it.
       sys_kill(upload_child, SIGKILL);

@@ -666,4 +739,8 @@ size_t WriteLog(const char* buf, size_t nbytes) {
   return sys_write(2, buf, nbytes);
 }
 
+size_t WriteNewline() {
+  return WriteLog("\n", 1);
+}
+
 }  // namespace crash_reporter

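For reference, the `uploads.log` written by the code above holds one `seconds_since_epoch,crash_id` pair per successful upload, which is the same format the new CoffeeScript `getLastCrashReport` reads back and the Windows `WriteReportIDToFile` below writes. The timestamps and ids here are made-up examples, not real reports:

```
1407312000,8a2b94f0-4a5e-4c21-9d2e-0f1a2b3c4d5e
1407315600,0c1d2e3f-4a5b-6c7d-8e9f-0a1b2c3d4e5f
```
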
@@ -32,6 +32,10 @@ struct BreakpadInfo {
 void HandleCrashDump(const BreakpadInfo& info);
 
 size_t WriteLog(const char* buf, size_t nbytes);
+size_t WriteNewline();
+
+// Global variable storing the path of upload log.
+extern char g_crash_log_path[256];
 
 }  // namespace crash_reporter
 
@@ -13,6 +13,8 @@
 #include "base/command_line.h"
 #include "base/file_util.h"
 #include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/time/time.h"
 #include "base/win/windows_version.h"
 #include "vendor/breakpad/src/client/windows/crash_generation/client_info.h"
 #include "vendor/breakpad/src/client/windows/crash_generation/crash_generation_server.h"

@@ -66,6 +68,30 @@ bool WriteCustomInfoToFile(const std::wstring& dump_path, const CrashMap& map) {
   return true;
 }
 
+bool WriteReportIDToFile(const std::wstring& dump_path,
+                         const std::wstring& report_id) {
+  std::wstring file_path(dump_path);
+  size_t last_slash = file_path.rfind(L'\\');
+  if (last_slash == std::wstring::npos)
+    return false;
+  file_path.resize(last_slash);
+  file_path += L"\\uploads.log";
+
+  std::wofstream file(file_path.c_str(),
+      std::ios_base::out | std::ios_base::app | std::ios::binary);
+  if (!file.is_open())
+    return false;
+
+  int64 seconds_since_epoch =
+      (base::Time::Now() - base::Time::UnixEpoch()).InSeconds();
+  std::wstring line = base::Int64ToString16(seconds_since_epoch);
+  line += L',';
+  line += report_id;
+  line += L'\n';
+  file.write(line.c_str(), static_cast<std::streamsize>(line.length()));
+  return true;
+}
+
 // The window procedure task is to handle when a) the user logs off.
 // b) the system shuts down or c) when the user closes the window.
 LRESULT __stdcall CrashSvcWndProc(HWND hwnd, UINT message,

@@ -422,6 +448,7 @@ DWORD CrashService::AsyncSendDump(void* context) {
         ++info->self->requests_sent_;
         ++info->self->requests_handled_;
         retry_round = 0;
+        WriteReportIDToFile(info->dump_path, report_id);
         break;
       case google_breakpad::RESULT_THROTTLED:
         report_id = L"<throttled>";

@@ -56,3 +56,4 @@ Modules for both sides:
 * [Build instructions (Mac)](development/build-instructions-mac.md)
 * [Build instructions (Windows)](development/build-instructions-windows.md)
 * [Build instructions (Linux)](development/build-instructions-linux.md)
+* [Setting up symbol server in debugger](development/setting-up-symbol-server.md)

@@ -7,7 +7,7 @@ module is emitted:
 
 ```javascript
 var app = require('app');
-app.commandLine.appendSwitch('remote-debugging-port', '88315');
+app.commandLine.appendSwitch('remote-debugging-port', '8315');
 app.commandLine.appendSwitch('host-rules', 'MAP * 127.0.0.1');
 
 app.on('ready', function() {

@@ -27,6 +27,11 @@ crashReporter.start({
 * Only string properties are send correctly.
 * Nested objects are not supported.
 
+## crashReporter.getLastCrashReport()
+
+Returns the date and ID of last crash report, when there was no crash report
+sent or the crash reporter is not started, `null` will be returned.
+
 # crash-reporter payload
 
 The crash reporter will send the following data to the `submitUrl` as `POST`:

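A minimal usage sketch of the API documented above, assuming it is called from the browser process of an atom-shell app; the option values and the logging are illustrative placeholders, not project defaults:

```javascript
var crashReporter = require('crash-reporter');

// Placeholder values for illustration only.
crashReporter.start({
  productName: 'MyApp',
  companyName: 'My Company',
  submitUrl: 'http://my-crash-server.example.com/post'
});

// Per the docs above: {date, id} of the last uploaded report, or null if
// nothing has been sent or the reporter was never started.
var report = crashReporter.getLastCrashReport();
if (report) {
  console.log('Last crash report ' + report.id + ' was sent on ' + report.date);
}
```
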
docs/development/setting-up-symbol-server.md (new file, 56 lines)
@@ -0,0 +1,56 @@
+# Setting up symbol server in debugger
+
+Debug symbols allow you to have better debugging sessions. They have information
+about the functions contained in executables and dynamic libraries and provide
+you with information to get clean call stacks. A Symbol Server allows the
+debugger to load the correct symbols, binaries and sources automatically without
+forcing users to download large debugging files. The server functions like
+[Microsoft's symbol server](http://support.microsoft.com/kb/311503) so the
+documentation there can be useful.
+
+Note that because released atom-shell builds are heavily optimized, debugging is
+not always easy. The debugger will not be able to show you the content of all
+variables and the execution path can seem strange because of inlining, tail
+calls, and other compiler optimizations. The only workaround is to build an
+unoptimized local build.
+
+The official symbol server URL for atom-shell is
+http://54.249.141.255:8086/atom-shell/symbols.
+You cannot visit this URL directly: you must add it to the symbol path of your
+debugging tool. In the examples below, a local cache directory is used to avoid
+repeatedly fetching the PDB from the server. Replace `c:\code\symbols` with an
+appropriate cache directory on your machine.
+
+## Using the symbol server in Windbg
+
+The Windbg symbol path is configured with a string value delimited with asterisk
+characters. To use only the atom-shell symbol server, add the following entry to
+your symbol path (__note:__ you can replace `c:\code\symbols` with any writable
+directory on your computer, if you'd prefer a different location for downloaded
+symbols):
+
+```
+SRV*c:\code\symbols\*http://54.249.141.255:8086/atom-shell/symbols
+```
+
+Set this string as `_NT_SYMBOL_PATH` in the environment, using the Windbg menus,
+or by typing the `.sympath` command. If you would like to get symbols from
+Microsoft's symbol server as well, you should list that first:
+
+```
+SRV*c:\code\symbols\*http://msdl.microsoft.com/download/symbols;SRV*c:\code\symbols\*http://54.249.141.255:8086/atom-shell/symbols
+```
+
+## Using the symbol server in Visual Studio
+
+<img src='http://mdn.mozillademos.org/files/733/symbol-server-vc8express-menu.jpg'>
+<img src='http://mdn.mozillademos.org/files/2497/2005_options.gif'>
+
+## Troubleshooting: Symbols will not load
+
+Type the following commands in Windbg to print why symbols are not loading:
+
+```
+> !sym noisy
+> .reload /f chromiumcontent.dll
+```

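As a concrete illustration of the Windbg instructions in the new doc above (assuming the same cache directory the doc uses), the server string can also be applied for the current session with the `.sympath` command the doc mentions:

```
> .sympath SRV*c:\code\symbols\*http://54.249.141.255:8086/atom-shell/symbols
> .reload
```
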
@@ -183,6 +183,7 @@ def s3put(bucket, access_key, secret_key, prefix, key_prefix, files):
     '--secret_key', secret_key,
     '--prefix', prefix,
     '--key_prefix', key_prefix,
+    '--no_overwrite',
     '--grant', 'public-read'
   ] + files
 
script/upload-windows-pdb.py (new executable file, 43 lines)
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+import os
+import glob
+
+from lib.util import execute, rm_rf, safe_mkdir, s3put, s3_config
+
+
+SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
+SYMBOLS_DIR = 'dist\\symbols'
+DOWNLOAD_DIR = 'vendor\\brightray\\vendor\\download\\libchromiumcontent'
+PDB_LIST = [
+  'out\\Release\\atom.exe.pdb',
+  DOWNLOAD_DIR + '\\Release\\chromiumcontent.dll.pdb',
+]
+
+
+def main():
+  os.chdir(SOURCE_ROOT)
+
+  rm_rf(SYMBOLS_DIR)
+  safe_mkdir(SYMBOLS_DIR)
+  for pdb in PDB_LIST:
+    run_symstore(pdb, SYMBOLS_DIR, 'AtomShell')
+
+  bucket, access_key, secret_key = s3_config()
+  files = glob.glob(SYMBOLS_DIR + '/*.pdb/*/*.pdb')
+  files = [f.lower() for f in files]
+  upload_symbols(bucket, access_key, secret_key, files)
+
+
+def run_symstore(pdb, dest, product):
+  execute(['symstore', 'add', '/r', '/f', pdb, '/s', dest, '/t', product])
+
+
+def upload_symbols(bucket, access_key, secret_key, files):
+  s3put(bucket, access_key, secret_key, SYMBOLS_DIR, 'atom-shell/symbols',
+        files)
+
+
+if __name__ == '__main__':
+  import sys
+  sys.exit(main())

@@ -59,19 +59,24 @@ def main():
   upload_atom_shell(github, release_id,
                     os.path.join(DIST_DIR, CHROMEDRIVER_NAME))
 
-  # Upload node's headers to S3.
-  bucket, access_key, secret_key = s3_config()
-  upload_node(bucket, access_key, secret_key, ATOM_SHELL_VERSION)
-
-  if args.publish_release:
-    # Press the publish button.
-    publish_release(github, release_id)
-
-  # Upload the SHASUMS.txt.
-  execute([sys.executable,
-           os.path.join(SOURCE_ROOT, 'script', 'upload-checksums.py'),
-           '-v', ATOM_SHELL_VERSION])
+  if args.publish_release:
+    # Upload node's headers to S3.
+    bucket, access_key, secret_key = s3_config()
+    upload_node(bucket, access_key, secret_key, ATOM_SHELL_VERSION)
+
+    # Upload the SHASUMS.txt.
+    execute([sys.executable,
+             os.path.join(SOURCE_ROOT, 'script', 'upload-checksums.py'),
+             '-v', ATOM_SHELL_VERSION])
+
+    # Upload PDBs to Windows symbol server.
+    if TARGET_PLATFORM == 'win32':
+      execute([sys.executable,
+               os.path.join(SOURCE_ROOT, 'script', 'upload-windows-pdb.py')])
+
+    # Press the publish button.
+    publish_release(github, release_id)
 
 
 def parse_args():
   parser = argparse.ArgumentParser(description='upload distribution file')

@@ -32,7 +32,7 @@ describe 'crash-reporter module', ->
         assert.equal fields['_version'], require('remote').require('app').getVersion()
         assert files['upload_file_minidump']['name']?
 
-        res.end()
+        res.end('abc-123-def')
         server.close()
         done()
     server.listen 0, '127.0.0.1', ->