Skip to content
This repository was archived by the owner on Dec 1, 2024. It is now read-only.

Commit f8d97b9

Browse files
committed
Add proof-of-concept PriorityWorker
1 parent 086af06 commit f8d97b9

2 files changed

Lines changed: 67 additions & 28 deletions

File tree

binding.cc

Lines changed: 45 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -290,7 +290,7 @@ struct BaseWorker {
290290
delete self;
291291
}
292292

293-
void DoComplete () {
293+
virtual void DoComplete () {
294294
if (status_.ok()) {
295295
return HandleOKCallback();
296296
}
@@ -309,7 +309,7 @@ struct BaseWorker {
309309
CallFunction(env_, callback, 1, &argv);
310310
}
311311

312-
void Queue () {
312+
virtual void Queue () {
313313
napi_queue_async_work(env_, asyncWork_);
314314
}
315315

@@ -333,6 +333,7 @@ struct Database {
333333
blockCache_(NULL),
334334
filterPolicy_(leveldb::NewBloomFilterPolicy(10)),
335335
currentIteratorId_(0),
336+
priorityWork_(0),
336337
pendingCloseWorker_(NULL) {}
337338

338339
~Database () {
@@ -407,24 +408,39 @@ struct Database {
407408

408409
// Track a live iterator under its id and count it as priority work so
// that a pending database close is postponed until it detaches.
void AttachIterator (uint32_t id, Iterator* iterator) {
  iterators_[id] = iterator;
  IncrementPriorityWork();
}
411413

412414
// Stop tracking the iterator with the given id; releasing its unit of
// priority work may unblock a close that was waiting on it.
void DetachIterator (uint32_t id) {
  iterators_.erase(id);
  DecrementPriorityWork();
}
418+
419+
// Record one unit of in-flight work that must finish before the
// database may be closed.
void IncrementPriorityWork () {
  priorityWork_ += 1;
}
414422

415-
if (iterators_.empty() && pendingCloseWorker_ != NULL) {
423+
void DecrementPriorityWork () {
424+
if (--priorityWork_ == 0 && pendingCloseWorker_ != NULL) {
416425
pendingCloseWorker_->Queue();
417426
pendingCloseWorker_ = NULL;
418427
}
419428
}
420429

430+
bool HasPriorityWork () {
431+
return priorityWork_ > 0;
432+
}
433+
421434
napi_env env_;
422435
leveldb::DB* db_;
423436
leveldb::Cache* blockCache_;
424437
const leveldb::FilterPolicy* filterPolicy_;
425438
uint32_t currentIteratorId_;
426439
BaseWorker *pendingCloseWorker_;
427440
std::map< uint32_t, Iterator * > iterators_;
441+
442+
private:
443+
uint32_t priorityWork_;
428444
};
429445

430446
/**
@@ -436,6 +452,27 @@ static void FinalizeDatabase (napi_env env, void* data, void* hint) {
436452
}
437453
}
438454

455+
/**
456+
* Base worker class for doing async work that must delay closing the database.
457+
*/
458+
struct PriorityWorker : public BaseWorker {
459+
PriorityWorker (napi_env env, Database* database, napi_value callback, const char* resourceName)
460+
: BaseWorker(env, database, callback, resourceName) {
461+
}
462+
463+
virtual ~PriorityWorker () {}
464+
465+
void Queue () final {
466+
database_->IncrementPriorityWork();
467+
BaseWorker::Queue();
468+
}
469+
470+
void DoComplete () final {
471+
database_->DecrementPriorityWork();
472+
BaseWorker::DoComplete();
473+
}
474+
};
475+
439476
/**
440477
* Owns a leveldb iterator.
441478
*/
@@ -821,7 +858,7 @@ NAPI_METHOD(db_close) {
821858
napi_value callback = argv[1];
822859
CloseWorker* worker = new CloseWorker(env, database, callback);
823860

824-
if (database->iterators_.empty()) {
861+
if (!database->HasPriorityWork()) {
825862
worker->Queue();
826863
NAPI_RETURN_UNDEFINED();
827864
}
@@ -844,24 +881,24 @@ NAPI_METHOD(db_close) {
844881
/**
845882
* Worker class for putting key/value to the database
846883
*/
847-
struct PutWorker : public BaseWorker {
884+
struct PutWorker : public PriorityWorker {
848885
PutWorker (napi_env env,
849886
Database* database,
850887
napi_value callback,
851888
leveldb::Slice key,
852889
leveldb::Slice value,
853890
bool sync)
854-
: BaseWorker(env, database, callback, "leveldown.db.put"),
891+
: PriorityWorker(env, database, callback, "leveldown.db.put"),
855892
key_(key), value_(value) {
856893
options_.sync = sync;
857894
}
858895

859-
virtual ~PutWorker () {
896+
~PutWorker () final {
860897
DisposeSliceBuffer(key_);
861898
DisposeSliceBuffer(value_);
862899
}
863900

864-
virtual void DoExecute () {
901+
void DoExecute () final {
865902
SetStatus(database_->Put(options_, key_, value_));
866903
}
867904

test/segfault-test.js

Lines changed: 22 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -2,28 +2,30 @@ const test = require('tape')
22
const testCommon = require('./common')
33

44
// Open issue: https://github.com/Level/leveldown/issues/157
5-
test.skip('close() does not segfault if there is a pending write', function (t) {
6-
t.plan(3)
7-
8-
const db = testCommon.factory()
9-
10-
db.open(function (err) {
11-
t.ifError(err, 'no open error')
12-
13-
// The "sync" option seems to be a reliable way to trigger a segfault,
14-
// but is not necessarily the cause of that segfault. More likely, it
15-
// exposes a race condition that's already there.
16-
db.put('foo', 'bar', { sync: true }, function (err) {
17-
// We never get here, due to segfault.
18-
t.ifError(err, 'no put error')
19-
})
20-
21-
db.close(function (err) {
22-
// We never get here, due to segfault.
23-
t.ifError(err, 'no close error')
5+
// Repeat the scenario many times: the segfault stems from a race and does
// not reproduce reliably in a single run.
for (let run = 0; run < 100; run++) {
  test(`close() does not segfault if there is a pending write ${run}`, function (t) {
    t.plan(3)

    const db = testCommon.factory()

    db.open(function (openErr) {
      t.ifError(openErr, 'no open error')

      // The "sync" option seems to be a reliable way to trigger a segfault,
      // but is not necessarily the cause of that segfault. More likely, it
      // exposes a race condition that's already there.
      db.put('foo', 'bar', { sync: true }, function (putErr) {
        // We never get here, due to segfault.
        t.ifError(putErr, 'no put error')
      })

      db.close(function (closeErr) {
        // We never get here, due to segfault.
        t.ifError(closeErr, 'no close error')
      })
    })
  })
}
2729

2830
// See https://github.com/Level/leveldown/issues/134
2931
test('iterator() does not segfault if db is not open', function (t) {

0 commit comments

Comments
 (0)