RethinkDB native connector work, minor fixes.

Adam Ierymenko
2017-11-02 07:05:11 -07:00
parent a6203ed038
commit 4e88c80a22
219 changed files with 33295 additions and 0 deletions

@@ -0,0 +1,20 @@
desc: 1001 (null + between + sindexes)
table_variable_name: tbl
tests:
- cd: tbl.insert({'a':null})
rb: tbl.insert({:a => null})
- cd: tbl.index_create('a')
- cd: tbl.index_create('b')
- cd: tbl.index_wait().pluck('index', 'ready')
- cd: tbl.between(r.minval, r.maxval).count()
ot: 1
- py: tbl.between(r.minval, r.maxval, index='a').count()
js: tbl.between(r.minval, r.maxval, {index:'a'}).count()
rb: tbl.between(r.minval, r.maxval, :index => 'a').count()
ot: 0
- py: tbl.between(r.minval, r.maxval, index='b').count()
js: tbl.between(r.minval, r.maxval, {index:'b'}).count()
rb: tbl.between(r.minval, r.maxval, :index => 'b').count()
ot: 0
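
For context on the expected counts: secondary indexes skip rows whose indexed value is null or whose indexed field is absent, while the primary index still returns the row. A minimal sketch with the Python driver (the connection details and literal table name are assumptions, not part of the test harness):

import rethinkdb as r

conn = r.connect('localhost', 28015)   # assumed host/port
tbl = r.table('tbl')                   # assumed table name
tbl.index_create('a').run(conn)
tbl.index_wait('a').run(conn)
tbl.insert({'a': None}).run(conn)
# The primary index sees the row...
print(tbl.between(r.minval, r.maxval).count().run(conn))             # 1
# ...but the secondary index skips it, because its value is null.
print(tbl.between(r.minval, r.maxval, index='a').count().run(conn))  # 0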

@@ -0,0 +1,19 @@
desc: Regression test for issue #1005.
tests:
- py: r.expr(str(r.table_list()))
ot: "r.table_list()"
- py: r.expr(str(r.table_create('a')))
ot: "r.table_create('a')"
- py: r.expr(str(r.table_drop('a')))
ot: "r.table_drop('a')"
- py: r.expr(str(r.db('a').table_list()))
ot: "r.db('a').table_list()"
- py: r.expr(str(r.db('a').table_create('a')))
ot: "r.db('a').table_create('a')"
- py: r.expr(str(r.db('a').table_drop('a')))
ot: "r.db('a').table_drop('a')"

@@ -0,0 +1,65 @@
desc: Tests key sorting of all usable types in primary indexes
table_variable_name: tbl
tests:
# Test key sorting
- def:
py: binary_a = r.binary(b'')
rb: binary_a = r.binary('')
js: binary_a = Buffer('')
- def:
py: binary_b = r.binary(b'5aurhbviunr')
rb: binary_b = r.binary('5aurhbviunr')
js: binary_b = Buffer('5aurhbviunr')
# Define a set of rows in order of increasing primary keys
- def:
cd: trows = [{'num':0,'id':[0]},
{'num':1,'id':[1, 2, 3, 4, 0]},
{'num':2,'id':[1, 2, 3, 4, 4]},
{'num':3,'id':[1, 2, 3, 4, 4, 5]},
{'num':4,'id':[1, 2, 3, 4, 8, 1]},
{'num':5,'id':[1, 3, r.epoch_time(0)]},
{'num':6,'id':[1, 3, r.epoch_time(0), r.epoch_time(0)]},
{'num':7,'id':[1, 3, r.epoch_time(0), r.epoch_time(1)]},
{'num':8,'id':[1, 4, 3, 4, 8, 2]},
{'num':9,'id':False},
{'num':10,'id':True},
{'num':11,'id':-500},
{'num':12,'id':500},
{'num':13,'id':binary_a},
{'num':14,'id':binary_b},
{'num':15,'id':r.epoch_time(0)},
{'num':16,'id':''},
{'num':17,'id':' str'}]
- def:
cd: expected = r.range(tbl.count()).coerce_to('array')
- cd: tbl.insert(trows)['inserted']
js: tbl.insert(trows)('inserted')
ot: 18
- rb: tbl.order_by({:index => 'id'}).map{|row| row['num']}.coerce_to('array').eq(expected)
js: tbl.order_by({index:'id'}).map(r.row('num')).coerce_to('array').eq(expected)
py: tbl.order_by(index='id').map(r.row['num']).coerce_to('array').eq(expected)
ot: true
# Test minval and maxval
- rb: tbl.order_by(:index => 'id').between(r.minval, r.maxval).map{|x| x['num']}.coerce_to('array').eq(expected)
js: tbl.order_by({index:'id'}).between(r.minval, r.maxval).map(r.row('num')).coerce_to('array').eq(expected)
py: tbl.order_by(index='id').between(r.minval, r.maxval).map(r.row['num']).coerce_to('array').eq(expected)
ot: true
- py: tbl.order_by(index='id').between([1,2,3,4,4],[1,2,3,5]).map(r.row['num']).coerce_to('array')
js: tbl.order_by({index:'id'}).between([1,2,3,4,4],[1,2,3,5]).map(r.row('num')).coerce_to('array')
rb: tbl.order_by(:index => 'id').between([1,2,3,4,4],[1,2,3,5]).map{|x| x['num']}.coerce_to('array')
ot: [2,3,4]
- py: tbl.order_by(index='id').between([1,2,3,4,4,r.minval],[1,2,3,4,4,r.maxval]).map(r.row['num']).coerce_to('array')
js: tbl.order_by({index:'id'}).between([1,2,3,4,4,r.minval],[1,2,3,4,4,r.maxval]).map(r.row('num')).coerce_to('array')
rb: tbl.order_by(:index => 'id').between([1,2,3,4,4,r.minval],[1,2,3,4,4,r.maxval]).map{|x| x['num']}.coerce_to('array')
ot: [3]

@@ -0,0 +1,39 @@
desc: 1081 union two streams
tests:
- rb: r.db('test').table_create('t1081')
def: t = r.db('test').table('t1081')
- rb: t.insert([{'id':0}, {'id':1}])
- rb: r([]).union([]).typeof
ot: ("ARRAY")
- rb: t.union(t).typeof
ot: ("STREAM")
- rb: t.union([]).typeof
ot: ("STREAM")
- rb: r.db('test').table_drop('t1081')
- rb: r.table_create('1081')
ot: partial({'tables_created':1})
- rb: r.table('1081').insert({:password => 0})[:inserted]
ot: 1
- rb: r.table('1081').index_create('password')
ot: ({'created':1})
- rb: r.table('1081').index_wait('password').pluck('index', 'ready')
ot: ([{'ready':True, 'index':'password'}])
- rb: r.table('1081').get_all(0, :index => 'password').typeof
ot: ("SELECTION<STREAM>")
- rb: r.table('1081').get_all(0, :index => 'password').without('id').typeof
ot: ("STREAM")
- rb: r.table('1081').get_all(0, 0, :index => 'password').typeof
ot: ("SELECTION<STREAM>")
- rb: r.table('1081').get_all(0, 0, :index => 'password').without('id').typeof
ot: ("STREAM")
- rb: r.table_drop('1081')
ot: partial({'tables_dropped':1})

@@ -0,0 +1,4 @@
desc: 1132 JSON duplicate key
tests:
- cd: r.json('{"a":1,"a":2}')
ot: err("ReqlQueryLogicError", "Duplicate key \"a\" in JSON.", [])

@@ -0,0 +1,19 @@
desc: Regression tests for issue #1133, which concerns circular references in the drivers.
tests:
- def: a = {}
- def: b = {'a':a}
- def: a['b'] = b
- cd: r.expr(a)
ot:
cd: err('ReqlDriverCompileError', 'Nesting depth limit exceeded.', [])
rb: err('ReqlDriverCompileError', 'Maximum expression depth exceeded (you can override this with `r.expr(X, MAX_DEPTH)`).', [])
- cd: r.expr({'a':{'a':{'a':{'a':{'a':{'a':{'a':{}}}}}}}}, 7)
ot:
cd: err('ReqlDriverCompileError', 'Nesting depth limit exceeded.', [])
rb: err('ReqlDriverCompileError', 'Maximum expression depth exceeded (you can override this with `r.expr(X, MAX_DEPTH)`).', [])
- cd: r.expr({'a':{'a':{'a':{'a':{'a':{'a':{'a':{}}}}}}}}, 10)
ot: ({'a':{'a':{'a':{'a':{'a':{'a':{'a':{}}}}}}}})
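
For context, the depth cap these cases exercise is enforced client-side, before anything is sent to the server, and it can be raised per call. A sketch with the Python driver, where the optional second argument to r.expr is the nesting depth (the default of 20 is my reading of the driver, so treat it as an assumption):

import rethinkdb as r
from rethinkdb.errors import ReqlDriverCompileError

deep = {'a': {'a': {'a': {'a': {'a': {'a': {'a': {}}}}}}}}  # 8 levels deep
try:
    r.expr(deep, 7)           # a limit of 7 is too shallow for 8 levels
except ReqlDriverCompileError as e:
    print(e)                  # "Nesting depth limit exceeded."
r.expr(deep, 10)              # a limit of 10 covers the whole document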

@@ -0,0 +1,5 @@
desc: 1155 -- Empty batched_replaces_t constructed
table_variable_name: tbl
tests:
- rb: tbl.insert([{:id => '2'}, {:id => '4'}])['inserted']
ot: 2

@@ -0,0 +1,26 @@
desc: 1179 -- BRACKET term
table_variable_name: tbl
tests:
- js: r.expr([1])(r.expr(0))
py: r.expr([1])[r.expr(0)]
rb: r.expr([1])[r.expr(0)]
ot: 1
- js: r.expr({"foo":1})('foo')
ot: 1
- js: r.expr([1])(0)
ot: 1
- js: tbl.insert([{'id':42},{'id':4},{'id':89},{'id':6},{'id':43}]).pluck('inserted','first_error')
ot: ({'inserted':5})
# test [] grouped data semantics
- js: tbl.group('id')(0)
ot: ([{"group":4,"reduction":{"id":4}},{"group":6,"reduction":{"id":6}},{"group":42,"reduction":{"id":42}},{"group":43,"reduction":{"id":43}},{"group":89,"reduction":{"id":89}}] )
- js: tbl.coerce_to('array').group('id')(0)
ot: ([{"group":4,"reduction":{"id":4}},{"group":6,"reduction":{"id":6}},{"group":42,"reduction":{"id":42}},{"group":43,"reduction":{"id":43}},{"group":89,"reduction":{"id":89}}] )
# test nth grouped data semantics
- js: tbl.group('id').nth(0)
ot: ([{"group":4,"reduction":{"id":4}},{"group":6,"reduction":{"id":6}},{"group":42,"reduction":{"id":42}},{"group":43,"reduction":{"id":43}},{"group":89,"reduction":{"id":89}}] )
- js: tbl.coerce_to('array').group('id').nth(0)
ot: ([{"group":4,"reduction":{"id":4}},{"group":6,"reduction":{"id":6}},{"group":42,"reduction":{"id":42}},{"group":43,"reduction":{"id":43}},{"group":89,"reduction":{"id":89}}] )

@@ -0,0 +1,7 @@
desc: 1468 -- Empty batched_replaces_t constructed
table_variable_name: tbl
tests:
- rb: tbl.insert([{}, {}, {}])['inserted']
ot: (3)
- rb: tbl.replace(non_atomic:'true'){|row| r.js("{}")}
ot: ({"unchanged"=>0,"skipped"=>0,"replaced"=>0,"inserted"=>0,"first_error"=>"Cannot convert javascript `undefined` to ql::datum_t.","errors"=>3,"deleted"=>0})

@@ -0,0 +1,22 @@
desc: 1789 -- deleting a secondary index on a table that contains non-inline stored documents corrupts db
table_variable_name: tbl
tests:
- rb: tbl.insert({:foo => 'a', :data => "AAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"}).pluck('inserted')
ot: ({'inserted':1})
- rb: tbl.index_create('foo')
ot: ({'created':1})
- rb: tbl.index_wait('foo').pluck('index', 'ready')
ot: ([{'index':'foo', 'ready':true}])
- rb: tbl.index_drop('foo')
ot: ({'dropped':1})
- rb: tbl.coerce_to('ARRAY').count()
ot: (1)

@@ -0,0 +1,10 @@
desc: 2052 -- Verify that the server rejects bogus global options.
tests:
- cd: r.expr(1)
runopts:
array_limit: 16
ot: 1
- cd: r.expr(1)
runopts:
obviously_bogus: 16
ot: err("ReqlCompileError", "Unrecognized global optional argument `obviously_bogus`.", [])

@@ -0,0 +1,45 @@
desc: 2399 literal terms not removed under certain circumstances
table_variable_name: t
tests:
- rb: t.insert({})
- rb: t.update({:a => {:b => r.literal({})}})
- rb: t.without('id').coerce_to("ARRAY")
ot: [{'a':{'b':{}}}]
- rb: t.delete()
- rb: t.insert({})
- rb: t.update({:a => {:b => r.literal()}})
- rb: t.without('id').coerce_to("ARRAY")
ot: [{'a': {}}]
- rb: t.delete()
- rb: t.insert({})
- rb: t.update({:a => {:b => {:c => {:d => r.literal({})}}}})
- rb: t.without('id').coerce_to("ARRAY")
ot: [{'a':{'b':{'c':{'d':{}}}}}]
- rb: t.delete()
- rb: t.insert({})
- rb: t.update({:a => {:b => [[[{:c => r.literal({})}]]]}})
- rb: t.without('id').coerce_to("ARRAY")
ot: [{'a':{'b':[[[{'c':{}}]]]}}]
- rb: t.delete()
- rb: t.insert({})
- rb: t.update({:a => {:b => [r.literal()]}})
- rb: t.without('id').coerce_to("ARRAY")
ot: [{'a':{'b':[]}}]
- rb: t.delete()
- rb: t.insert({})
- rb: t.update({:a => {:b => {:a => 'A', :b => 'B', :c => 'C', :cc => r.literal(), :d => 'D'}}})
- rb: t.without('id').coerce_to("ARRAY")
ot: [{'a':{'b':{'a':'A', 'b':'B', 'c':'C', 'd':'D'}}}]
- rb: t.delete()
- rb: t.insert({})
- rb: t.update({:a => {:b => {:a => 'A', :b => 'B', :c => 'C', :cc => r.literal('CC'), :d => 'D'}}})
- rb: t.without('id').coerce_to("ARRAY")
ot: [{'a':{'b':{'a':'A', 'b':'B', 'c':'C', 'cc':'CC', 'd':'D'}}}]
- rb: t.delete()
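
A plain-driver illustration of the semantics pinned down above (a sketch, not part of the suite; the table name and connection are assumptions): r.literal(value) replaces the old value outright instead of merging into it, and an argument-less r.literal() deletes the field.

import rethinkdb as r

conn = r.connect('localhost', 28015)  # assumed connection
t = r.table('t')                      # assumed table
t.insert({'id': 1, 'a': {'b': {'old': True}}}).run(conn)
# A normal update merges subdocuments; r.literal replaces them wholesale,
# so 'old' disappears rather than being kept alongside 'new'.
t.get(1).update({'a': {'b': r.literal({'new': True})}}).run(conn)
# r.literal() with no argument removes the field entirely.
t.get(1).update({'a': {'b': r.literal()}}).run(conn)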

@@ -0,0 +1,8 @@
desc: 2639 -- Coroutine stacks should not overflow during the query compilation phase.
tests:
- rb: r.expr({id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:{id:1}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, 1000)
ot: partial({})
- rb: r.expr([[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]],1000).and(nil)
ot: nil

@@ -0,0 +1,6 @@
desc: Regression test for issue 2696, delete_at with end bounds.
tests:
- cd: r.expr([1,2,3,4]).delete_at(4,4)
ot: [1,2,3,4]
- cd: r.expr([]).delete_at(0,0)
ot: []

@@ -0,0 +1,31 @@
desc: 2697 -- Array insert and splice operations don't check array size limit.
table_variable_name: tbl
tests:
# make enormous > 100,000 element array
- def: ten_l = r.expr([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
- js: tbl.insert({'id':1, 'a':r.expr(ten_l).concatMap(function(l) { return ten_l }).concatMap(function(l) { return ten_l }).concatMap(function(l) { return ten_l }).concatMap(function(l) { return ten_l })}).pluck('first_error', 'inserted')
py: tbl.insert({'id':1, 'a':r.expr(ten_l).concat_map(lambda l:list(range(1,11))).concat_map(lambda l:list(range(1,11))).concat_map(lambda l:list(range(1,11))).concat_map(lambda l:list(range(1,11)))}).pluck('first_error', 'inserted')
rb: tbl.insert({'id':1, 'a':r.expr(ten_l).concat_map {|l| ten_l}.concat_map {|l| ten_l}.concat_map {|l| ten_l}.concat_map {|l| ten_l}}).pluck('first_error', 'inserted')
ot: ({'inserted':1})
- cd: tbl.get(1).replace({'id':1, 'a':r.row['a'].splice_at(0, [2])}).pluck('first_error')
js: tbl.get(1).replace({'id':1, 'a':r.row('a').spliceAt(0, [2])}).pluck('first_error')
rb: tbl.get(1).replace{|old| {:id => 1, :a => old['a'].splice_at(0, [2])}}.pluck('first_error')
ot: ({'first_error':'Array over size limit `100000`.'})
- cd: tbl.get(1)['a'].count()
js: tbl.get(1)('a').count()
ot: 100000
- cd: tbl.get(1).replace({'id':1, 'a':r.row['a'].insert_at(0, [2])}).pluck('first_error')
js: tbl.get(1).replace({'id':1, 'a':r.row('a').insertAt(0, [2])}).pluck('first_error')
rb: tbl.get(1).replace{|old| {:id => 1, :a => old['a'].insert_at(0, [2])}}.pluck('first_error')
ot: ({'first_error':'Array over size limit `100000`.'})
- cd: tbl.get(1)['a'].count()
js: tbl.get(1)('a').count()
ot: 100000
- js: r.expr([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).concatMap(function(l) { return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] }).concatMap(function(l) { return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] }).concatMap(function(l) { return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] }).concatMap(function(l) { return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] }).spliceAt(0, [1]).count()
py: r.expr(ten_l).concat_map(lambda l:list(range(1,11))).concat_map(lambda l:list(range(1,11))).concat_map(lambda l:list(range(1,11))).concat_map(lambda l:list(range(1,11))).splice_at(0, [1]).count()
rb: r.expr(ten_l).concat_map {|l| ten_l}.concat_map {|l| ten_l}.concat_map {|l| ten_l}.concat_map {|l| ten_l}.splice_at(0, [1]).count()
ot: err("ReqlResourceLimitError", "Array over size limit `100000`.", [])
- js: r.expr([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).concatMap(function(l) { return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] }).concatMap(function(l) { return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] }).concatMap(function(l) { return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] }).concatMap(function(l) { return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] }).insertAt(0, [1]).count()
py: r.expr(ten_l).concat_map(lambda l:list(range(1,11))).concat_map(lambda l:list(range(1,11))).concat_map(lambda l:list(range(1,11))).concat_map(lambda l:list(range(1,11))).insert_at(0, [1]).count()
rb: r.expr(ten_l).concat_map {|l| ten_l}.concat_map {|l| ten_l}.concat_map {|l| ten_l}.concat_map {|l| ten_l}.insert_at(0, [1]).count()
ot: err("ReqlResourceLimitError", "Array over size limit `100000`.", [])

@@ -0,0 +1,21 @@
desc: 2709 -- Guarantee failed with [max_els >= min_els]
table_variable_name: tbl
tests:
- py: tbl.insert([{'result':i} for i in range(1,1000)]).pluck('first_error', 'inserted')
runopts:
min_batch_rows: 10
max_batch_rows: 13
ot: ({'inserted':999})
- py: tbl.map(lambda thing:'key').count()
runopts:
min_batch_rows: 10
max_batch_rows: 13
ot: (999)
- py: tbl.map(lambda thing:'key').count()
runopts:
min_batch_rows: 10
max_batch_rows: 13
ot: (999)

@@ -0,0 +1,6 @@
desc: Test pseudo literal strings in JSON.
tests:
- js: r.expr({"a":{"b":1, "c":2}}).merge(r.json('{"a":{"$reql_'+'type$":"LITERAL", "value":{"b":2}}}'))
py: r.expr({"a":{"b":1, "c":2}}).merge(r.json('{"a":{"$reql_type$":"LITERAL", "value":{"b":2}}}'))
rb: r.expr({:a => {:b => 1, :c => 2}}).merge(r.json('{"a":{"$reql_type$":"LITERAL", "value":{"b":2}}}'))
ot: ({'a':{'b':2}})

@@ -0,0 +1,25 @@
desc: Stop people treating ptypes as objects
tests:
- cd: r.now()['epoch_time']
js: r.now()('epoch_time')
ot: err("ReqlQueryLogicError", "Cannot call `bracket` on objects of type `PTYPE<TIME>`.")
- cd: r.now().get_field('epoch_time')
ot: err("ReqlQueryLogicError", "Cannot call `get_field` on objects of type `PTYPE<TIME>`.")
- cd: r.now().keys()
ot: err("ReqlQueryLogicError", "Cannot call `keys` on objects of type `PTYPE<TIME>`.")
- cd: r.now().pluck('epoch_time')
ot: err("ReqlQueryLogicError", "Cannot call `pluck` on objects of type `PTYPE<TIME>`.")
- cd: r.now().without('epoch_time')
ot: err("ReqlQueryLogicError", "Cannot call `without` on objects of type `PTYPE<TIME>`.")
- cd: r.now().merge({"foo":4})
rb: r.now().merge({"foo"=>4})
ot: err("ReqlQueryLogicError", "Cannot call `merge` on objects of type `PTYPE<TIME>`.")
- cd: r.expr({"foo":4}).merge(r.now())
rb: r.expr({"foo"=>4}).merge(r.now())
ot: err("ReqlQueryLogicError", "Cannot merge objects of type `PTYPE<TIME>`.")
- cd: r.now().has_fields('epoch_time')
ot: err("ReqlQueryLogicError", "Cannot call `has_fields` on objects of type `PTYPE<TIME>`.")
- cd: r.object().has_fields(r.time(2014, 7, 7, 'Z'))
ot: err("ReqlQueryLogicError", "Invalid path argument `1404691200`.")
- cd: r.expr(1).keys()
ot: err("ReqlQueryLogicError", "Cannot call `keys` on objects of type `NUMBER`.")

@@ -0,0 +1,20 @@
desc: 2767 -- Evaluate secondary index function with pristine env.
table_variable_name: tbl
tests:
- py: tbl.index_create('foo', lambda x:(x['a']+[1,2,3,4,5]+[6,7,8,9,10]).count())
runopts:
array_limit: 6
ot: {'created':1}
- py: tbl.index_wait()
- py: tbl.insert({'id':1,'a':[1,2,3,4,5]})
runopts:
array_limit: 6
ot: {'deleted':0,'replaced':0,'unchanged':0,'errors':0,'skipped':0,'inserted':1}
- py: tbl.coerce_to('array')
ot: [{'id':1,'a':[1,2,3,4,5]}]
- py: tbl.get_all(15, index='foo').coerce_to('array')
ot: [{'id':1,'a':[1,2,3,4,5]}]
- py: tbl.get_all(15, index='foo').coerce_to('array')
runopts:
array_limit: 6
ot: [{'id':1,'a':[1,2,3,4,5]}]

@@ -0,0 +1,99 @@
desc: Tests key sorting of all usable types in secondary indexes
table_variable_name: tbl
tests:
# Test key sorting
- def:
py: binary_a = r.binary(b'')
rb: binary_a = r.binary('')
js: binary_a = Buffer('')
- def:
py: binary_trunc1 = r.binary(b'123456789012345678901234567890123456789012345678901234567890' +
b'123456789012345678901234567890123456789012345678901234567890')
rb: binary_trunc1 = r.binary('123456789012345678901234567890123456789012345678901234567890' +
'123456789012345678901234567890123456789012345678901234567890')
js: binary_trunc1 = Buffer('123456789012345678901234567890123456789012345678901234567890' +
'123456789012345678901234567890123456789012345678901234567890')
- def:
py: binary_trunc2 = r.binary(b'123456789012345678901234567890123456789012345678901234567890' +
b'123456789012345678901234567890123456789012345678901234567891')
rb: binary_trunc2 = r.binary('123456789012345678901234567890123456789012345678901234567890' +
'123456789012345678901234567890123456789012345678901234567891')
js: binary_trunc2 = Buffer('123456789012345678901234567890123456789012345678901234567890' +
'123456789012345678901234567890123456789012345678901234567891')
- def:
py: binary_b = r.binary(b'5aurhbviunr')
rb: binary_b = r.binary('5aurhbviunr')
js: binary_b = Buffer('5aurhbviunr')
- def: str_trunc1 = '123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890'
- def: str_trunc2 = '123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567891'
# Define a set of rows in order of increasing sindex keys
- def: trows = [ {'id':0,'idx':[0]},
{'id':1,'idx':[1, 2, 3, 4, 5, 6, 7, 0]},
{'id':2,'idx':[1, 2, 3, 4, 5, 6, 7, 4]},
{'id':3,'idx':[1, 2, 3, 4, 5, 6, 7, 4, 5]},
{'id':4,'idx':[1, 2, 3, 4, 5, 6, 7, 8, 1]},
{'id':5,'idx':[1, 3, binary_trunc1]},
{'id':6,'idx':[1, 3, binary_trunc2]},
{'id':7,'idx':[1, 3, r.epoch_time(0), r.epoch_time(0), r.epoch_time(0)]},
{'id':8,'idx':[1, 3, r.epoch_time(0), r.epoch_time(0), r.epoch_time(0), r.epoch_time(0)]},
{'id':9,'idx':[1, 3, r.epoch_time(0), r.epoch_time(0), r.epoch_time(0), r.epoch_time(1)]},
{'id':10,'idx':[1, 3, str_trunc1, str_trunc1]},
{'id':11,'idx':[1, 3, str_trunc1, str_trunc2]},
{'id':12,'idx':[1, 4, 3, 4, 5, 6, 7, 8, 2]},
{'id':13,'idx':[binary_trunc1]},
{'id':14,'idx':[binary_trunc2]},
{'id':15,'idx':False},
{'id':16,'idx':True},
{'id':17,'idx':-500},
{'id':18,'idx':500},
{'id':19,'idx':binary_a},
{'id':20,'idx':binary_trunc1},
{'id':21,'idx':binary_trunc2},
{'id':22,'idx':binary_b},
{'id':23,'idx':r.epoch_time(0)},
{'id':24,'idx':''},
{'id':25,'idx':' str'},
{'id':26,'idx':str_trunc1},
{'id':27,'idx':str_trunc2}]
- def:
cd: expected = r.range(tbl.count()).coerce_to('array')
- cd: tbl.insert(trows)['inserted']
js: tbl.insert(trows)('inserted')
ot: 28
- cd: tbl.index_create('idx')
ot: ({'created':1})
- cd: tbl.index_wait('idx').pluck('index', 'ready')
ot: [{'index':'idx','ready':true}]
- rb: tbl.order_by({:index => 'idx'}).map{|row| row['id']}.coerce_to('array').eq(expected)
js: tbl.order_by({index:'idx'}).map(r.row('id')).coerce_to('array').eq(expected)
py: tbl.order_by(index='idx').map(r.row['id']).coerce_to('array').eq(expected)
ot: true
# Test minval and maxval
- rb: tbl.order_by(:index => 'idx').between(r.minval, r.maxval).map{|x| x['id']}.coerce_to('array').eq(expected)
js: tbl.order_by({index:'idx'}).between(r.minval, r.maxval).map(r.row('id')).coerce_to('array').eq(expected)
py: tbl.order_by(index='idx').between(r.minval, r.maxval).map(r.row['id']).coerce_to('array').eq(expected)
ot: true
- py: tbl.order_by(index='idx').between([1,2,3,4,5,6,7,4],[1,2,3,4,5,6,8]).map(r.row['id']).coerce_to('array')
js: tbl.order_by({index:'idx'}).between([1,2,3,4,5,6,7,4],[1,2,3,4,5,6,8]).map(r.row('id')).coerce_to('array')
rb: tbl.order_by(:index => 'idx').between([1,2,3,4,5,6,7,4],[1,2,3,4,5,6,8]).map{|x| x['id']}.coerce_to('array')
ot: [2,3,4]
- py: tbl.order_by(index='idx').between([1,2,3,4,5,6,7,4,r.minval],[1,2,3,4,5,6,7,4,r.maxval]).map(r.row['id']).coerce_to('array')
js: tbl.order_by({index:'idx'}).between([1,2,3,4,5,6,7,4,r.minval],[1,2,3,4,5,6,7,4,r.maxval]).map(r.row('id')).coerce_to('array')
rb: tbl.order_by(:index => 'idx').between([1,2,3,4,5,6,7,4,r.minval],[1,2,3,4,5,6,7,4,r.maxval]).map{|x| x['id']}.coerce_to('array')
ot: [3]

@@ -0,0 +1,16 @@
desc: Test that return_changes fails gracefully.
table_variable_name: tbl
tests:
- py: tbl.insert([{'result':i} for i in range(1,100)]).pluck('first_error', 'inserted')
ot: {'inserted':99}
- py: tbl.update({'foo':'bar'}, return_changes=True)['changes'].count()
runopts:
array_limit: 40
ot: 40
- py: tbl.update({'foo':'quux'}, return_changes=True)['warnings']
runopts:
array_limit: 40
ot: ['Too many changes, array truncated to 40.']

@@ -0,0 +1,17 @@
desc: Avoid misleading array limit error message
table_variable_name: tbl
tests:
- py: tbl.insert([{'id':i,'mod':i%5,'foo':5} for i in range(1,1000)]).pluck('first_error', 'inserted')
ot: ({'inserted':999})
- py: tbl.coerce_to('array')
runopts:
array_limit: 500
ot: err("ReqlResourceLimitError", "Array over size limit `500`.", [0])
- py: tbl.group('mod').coerce_to('array')
runopts:
array_limit: 500
ot: err("ReqlResourceLimitError", "Grouped data over size limit `500`. Try putting a reduction (like `.reduce` or `.count`) on the end.", [0])
- py: tbl.group('foo').coerce_to('array')
runopts:
array_limit: 500
ot: err("ReqlResourceLimitError", "Grouped data over size limit `500`. Try putting a reduction (like `.reduce` or `.count`) on the end.", [0])

@@ -0,0 +1,10 @@
desc: Test empty polygon special cases
tests:
- cd: r.polygon([0,0], [0,10], [10, 10], [10, 0]).polygon_sub(r.polygon([0,0], [0,10], [10, 10], [10, 0])).intersects(r.point(0,0))
ot: (false)
- cd: r.polygon([0,0], [0,10], [10, 10], [10, 0]).polygon_sub(r.polygon([0,0], [0,10], [10, 10], [10, 0])).intersects(r.polygon([0,0], [0,10], [10, 10], [10, 0]))
ot: (false)
- cd: r.polygon([0,0], [0,10], [10, 10], [10, 0]).polygon_sub(r.polygon([0,0], [0,10], [10, 10], [10, 0])).intersects(r.line([0,0], [0,10]))
ot: (false)
- cd: r.polygon([0,0], [0,10], [10, 10], [10, 0]).intersects(r.polygon([0,0], [0,10], [10, 10], [10, 0]).polygon_sub(r.polygon([0,0], [0,10], [10, 10], [10, 0])))
ot: (false)

@@ -0,0 +1,7 @@
desc: Use pseudotype name properly in `info`
tests:
- cd: r.point(0, 1).type_of()
ot: ("PTYPE<GEOMETRY>")
- cd: r.point(0, 1).info()['type']
js: r.point(0, 1).info()('type')
ot: ("PTYPE<GEOMETRY>")

@@ -0,0 +1,15 @@
desc: Regression tests for issue #309, using 'union' on an array and a stream doesn't seem to work
table_variable_name: t
tests:
# Set up a stream
- cd: t.insert([{'id':0}, {'id':1}])
# Try to union to an array
- cd: t.union([2,3,4])
ot: bag([{'id':0}, {'id':1}, 2, 3, 4])
- cd: r.expr([2,3,4]).union(t)
ot: bag([{'id':0}, {'id':1}, 2, 3, 4])

@@ -0,0 +1,38 @@
desc: Test truncated secondary key ordering under variably sized primary keys
table_variable_name: tbl
tests:
- rb: tbl.index_create("3444_A") {|rec| rec['A']}
ot: ({'created':1})
- rb: tbl.index_create("3444_Z") {|rec| rec['Z']}
ot: ({'created':1})
- rb: tbl.index_wait("3444_A", "3444_Z").pluck('index', 'ready')
ot: (bag([{'ready':True, 'index':'3444_A'}, {'ready':True, 'index':'3444_Z'}]))
# Insert two documents with very long keys A*1 and A*0 / Z*0 and Z*1
# Note that the primary keys "a" and "aa" have different lengths, so
# the secondary index key will be truncated at different places.
# We insert a key A*[01] and a key Z*[01] because the former sorts before the appended
# primary key (Sa / Saa), and the latter sorts after it, which are two distinct cases
# that we have to test here.
- rb: tbl.insert({id:'a', A:'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1', Z:'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ0'})['inserted']
ot: (1)
- rb: tbl.insert({id:'aa', A:'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA0', Z:'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ1'})['inserted']
ot: (1)
- rb: tbl.order_by(:index => r.asc('3444_A')).limit(1)['id'].coerce_to('ARRAY')
ot: (['aa'])
- rb: tbl.order_by(:index => r.desc('3444_A')).limit(1)['id'].coerce_to('ARRAY')
ot: (['a'])
- rb: tbl.order_by(:index => r.asc('3444_Z')).limit(1)['id'].coerce_to('ARRAY')
ot: (['a'])
- rb: tbl.order_by(:index => r.desc('3444_Z')).limit(1)['id'].coerce_to('ARRAY')
ot: (['aa'])
- rb: tbl.index_drop("3444_A")
ot: ({'dropped':1})
- rb: tbl.index_drop("3444_Z")
ot: ({'dropped':1})

@@ -0,0 +1,21 @@
desc: 3449 -- test openness and closedness of range limits under descending ordering
table_variable_name: tbl
tests:
- js: tbl.insert([{id: 0}, {id: 1}, {id: 2}, {id: 3}])
ot: {'skipped':0, 'deleted':0, 'unchanged':0, 'errors':0, 'replaced':0, 'inserted':4}
# Test ascending ordering as well for completeness
- js: tbl.between(1, 3).orderBy({index: r.asc('id')})
ot: [{id:1}, {id:2}]
- js: tbl.between(1, 3).orderBy({index: r.desc('id')})
ot: [{id:2}, {id:1}]
- js: tbl.between(1, 3, {left_bound: 'open'}).orderBy({index: r.asc('id')})
ot: [{id:2}]
- js: tbl.between(1, 3, {left_bound: 'open'}).orderBy({index: r.desc('id')})
ot: [{id:2}]
- js: tbl.between(1, 3, {left_bound: 'open', right_bound: 'closed'}).orderBy({index: r.asc('id')})
ot: [{id:2}, {id:3}]
- js: tbl.between(1, 3, {left_bound: 'open', right_bound: 'closed'}).orderBy({index: r.desc('id')})
ot: [{id:3}, {id:2}]
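
For reference, the defaults these cases rely on: left_bound is 'closed' and right_bound is 'open', so between(1, 3) is the half-open interval [1, 3). The fully explicit form, as a sketch (assumed table and connection):

import rethinkdb as r

conn = r.connect('localhost', 28015)  # assumed connection
tbl = r.table('tbl')                  # assumed table
# Equivalent to tbl.between(1, 3): ids 1 and 2, never 3.
tbl.between(1, 3, left_bound='closed', right_bound='open').run(conn)
# (1, 3]: excludes 1, includes 3 -- the combination tested last above.
tbl.between(1, 3, left_bound='open', right_bound='closed').run(conn)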

@@ -0,0 +1,20 @@
desc: Regression tests for issue #354, skip and limit should throw an error
tests:
- def: arr = r.expr([1,2,3,4,5])
# Correct behavior
- cd: arr.skip(2)
ot: [3,4,5]
- cd: arr.skip('a')
ot: err("ReqlQueryLogicError", "Expected type NUMBER but found STRING.", [1])
- cd: arr.skip([1,2,3])
ot: err("ReqlQueryLogicError", "Expected type NUMBER but found ARRAY.", [1])
- cd: arr.skip({}).count()
ot: err("ReqlQueryLogicError", "Expected type NUMBER but found OBJECT.", [0, 1])
- cd: arr.skip(null)
ot: err("ReqlNonExistenceError", "Expected type NUMBER but found NULL.", [1])

@@ -0,0 +1,51 @@
desc: Test that negative zero and positive zero refer to the same row
table_variable_name: tbl
tests:
# In order to send a `-0` from JS we need to provide raw JSON
- cd: tbl.insert([{'id':0.0, 'value':'abc'}, {'id':[1, -0.0], 'value':'def'}])
js: tbl.insert([{'id':0.0, 'value':'abc'}, {'id':[1, r.json('-0.0')], 'value':'def'}])
ot: partial({'inserted':2})
# Test getting the rows by their original and opposite id
- cd: tbl.get(0.0)
ot: {'id':0.0, 'value':'abc'}
- cd: tbl.get(-0.0)
js: tbl.get(r.json('-0.0'))
ot: {'id':0.0, 'value':'abc'}
- cd: tbl.get([1, 0.0])
ot: {'id':[1, -0.0], 'value':'def'}
- cd: tbl.get([1, -0.0])
js: tbl.get([1, r.json('-0.0')])
ot: {'id':[1, -0.0], 'value':'def'}
# Because I don't trust our test framework, test against a JSON string
- cd: tbl.get(0.0).pluck('id').to_json_string()
ot: '{"id":0}'
- cd: tbl.get(-0.0).pluck('id').to_json_string()
js: tbl.get(r.json('-0.0')).pluck('id').to_json_string()
ot: '{"id":0}'
- cd: tbl.get([1, 0.0]).pluck('id').to_json_string()
ot: '{"id":[1,-0.0]}'
- cd: tbl.get([1, -0.0]).pluck('id').to_json_string()
js: tbl.get([1, r.json('-0.0')]).pluck('id').to_json_string()
ot: '{"id":[1,-0.0]}'
# Test inserting a duplicate
- cd:
- tbl.insert({'id':0.0})
- tbl.insert({'id':[1,0.0]})
ot: partial({'errors':1})
- cd:
- tbl.insert({'id':-0.0})
- tbl.insert({'id':[1,-0.0]})
js:
- tbl.insert({'id':r.json('-0.0')})
- tbl.insert({'id':[1,r.json('-0.0')]})
ot: partial({'errors':1})
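
The underlying fact: IEEE 754 defines negative zero as comparing equal to positive zero, so the server has to treat them as the same key even though to_json_string may still surface the sign. A quick check (the ReQL line assumes a connection; the assert needs no server):

import rethinkdb as r

conn = r.connect('localhost', 28015)  # assumed connection
assert float('-0.0') == 0.0           # IEEE 754: -0 compares equal to +0
r.expr(-0.0).eq(0.0).run(conn)        # True on the server as well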

@@ -0,0 +1,19 @@
desc: Regression tests for issue #370, calling `map` after `db_list` or `table_list`
tests:
# Set up a stream
- cd: r.db('test').table_create('t370')
def: d = r.db('test')
# Map after db_list
- cd: r.db_list().map(r.row)
rb: r.db_list.map{|row| row}
ot: (['rethinkdb', 'test'])
# Map after table_list
- cd: d.table_list().map(r.row)
rb: d.table_list.map{|row| row}
ot: (['t370'])
# clean up
- cd: r.db('test').table_drop('t370')

@@ -0,0 +1,17 @@
desc: Test that we do not crash on an error during a function called at unsharding
table_variable_name: tbl
tests:
- cd: tbl.insert([ {'id':0, 'a':5}, {'id':1, 'a':6} ])
ot: partial({'inserted':2})
# Test bare reduce
- rb: tbl.reduce{|x,y| r.object('a', r.add(x['a'], y['a']))}
py: tbl.reduce(lambda x,y:r.object('a', r.add(x['a'], y['a'])))
js: tbl.reduce(function(x,y){return r.object('a', r.add(x('a'), y('a')));})
ot: ({'a':11})
# Test reduce with a function that errors
- rb: tbl.reduce{|x,y| r.expr(0)[0]}
py: tbl.reduce(lambda x,y:r.expr(0)[0])
js: tbl.reduce(function(x,y){return r.expr(0)(0);})
ot: err('ReqlQueryLogicError','Cannot convert NUMBER to SEQUENCE')

@@ -0,0 +1,26 @@
desc: Test cleanup of cursors on the server
table_variable_name: tbl
tests:
# The JS test driver doesn't support noreply wait, so only test with Python and Ruby
- py: r.db('rethinkdb').table('jobs').map(lambda x:1)
rb: r.db('rethinkdb').table('jobs').map{|x| 1}
ot: [1]
- py: r.db('rethinkdb').table('jobs').map(lambda x:1)
rb: r.db('rethinkdb').table('jobs').map{|x| 1}
runopts:
noreply: true
ot: null
- py: r.db('rethinkdb').table('jobs').map(lambda x:1)
rb: r.db('rethinkdb').table('jobs').map{|x| 1}
runopts:
noreply: true
testopts:
noreply_wait: true
ot: null
- py: r.db('rethinkdb').table('jobs').map(lambda x:1)
rb: r.db('rethinkdb').table('jobs').map{|x| 1}
ot: [1]
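
For context on the runopts above: noreply=True makes run() return immediately without waiting for a server reply, and noreply_wait() blocks until every outstanding noreply query on the connection has finished. A sketch with the Python driver (assumed table and connection):

import rethinkdb as r

conn = r.connect('localhost', 28015)                     # assumed host/port
# Fire-and-forget: run() returns None without a round-trip.
r.table('tbl').insert({'x': 1}).run(conn, noreply=True)
# Block until the insert above (and any other noreply work) is done.
conn.noreply_wait()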

@@ -0,0 +1,48 @@
# note: this should be converted to a full test when #4030 is completed
desc: test that r.union is a top level function
table_variable_name: tbl
tests:
# == setup
- def: data = [{'id':1}, {'id':2}, {'id':3}, {'id':4}, {'id':5}, {'id':6}]
- def: changes = [{'id':7}, {'id':8}, {'id':9}, {'id':10}]
- cd: tbl.insert(data)
ot: partial({'errors':0, 'inserted':6})
- cd: tbl.count()
ot: (6)
# == tests
# - two streams
- cd: tbl.union(tbl)
ot:
cd: bag(data * 2)
js: bag(data.concat(data))
# - top level object
- cd: r.union(tbl, tbl)
ot:
cd: bag(data * 2)
js: bag(data.concat(data))
# # - two changefeeds
#
# - cd: unionFeed = tbl.changes().union(tbl.changes())['new_val']
# js: unionFeed = tbl.changes().union(tbl.changes())('new_val')
# - cd: tbl.insert(changes)
# - cd: fetch(unionFeed, 4 * 2)
# ot:
# cd: bag(changes * 2)
# js: bag(changes.concat(changes))
#
# # == errors
#
# # - order by
#
# - cd: tbl.union(tbl).changes().order_by('id')
# ot: err('ReqlRuntimeError', "Cannot call a terminal (`reduce`, `count`, etc.) on an infinite stream (such as a changefeed).")
#

@@ -0,0 +1,9 @@
desc: Changefeeds on geo intersection filter
table_variable_name: tbl
tests:
- js: changefeed = tbl.filter(function(d){ return d("l").intersects(r.polygon([1,2],[2,2],[2,1],[1,1])) }).changes()
- js: tbl.insert([{"l":r.point(1.5,1.5), "id":1}])
ot: partial({'errors':0, 'inserted':1})
- js: fetch(changefeed, 1)
ot: [{"new_val":{"l":{"$reql_type$":"GEOMETRY","coordinates":[1.5,1.5],"type":"Point"}, "id":1}, "old_val":null}]

@@ -0,0 +1,12 @@
desc: Regression tests for issue 4132
table_variable_name: tbl
tests:
- cd: r.and()
py: r.and_()
ot: true
- cd: r.or()
py: r.or_()
ot: false
- cd: r.expr(false).or(null)
py: r.expr(False).or_(null)
ot: null

@@ -0,0 +1,14 @@
desc: Test that multi indexes ignore values that cannot be indexed, still indexing the remaining values
table_variable_name: tbl
tests:
# This is testing a property of the server, so a single language (in this case JS)
# is enough.
- js: tbl.indexCreate("multi_idx", function(x) { return [x("a"), x("b")] }, {multi:true})
ot: {created: 1}
- js: tbl.indexWait("multi_idx")
- js: tbl.insert([{a:"a", b:null}, {a:"a", b:r.point(0,0)}])("inserted")
ot: 2
- js: tbl.getAll("a", {index:"multi_idx"}).count()
ot: 2

@@ -0,0 +1,10 @@
desc: 4431 -- detect `use_outdated` optarg
tests:
- cd: r.table('test')
runopts:
use_outdated: true
ot: err('ReqlQueryLogicError', 'The `use_outdated` optarg is no longer supported. Use the `read_mode` optarg instead.')
- py: r.table('test', use_outdated=True)
cd: r.table('test', {use_outdated:true})
ot: err('ReqlQueryLogicError', 'The `use_outdated` optarg is no longer supported. Use the `read_mode` optarg instead.')
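
The replacement the error message points to, sketched with the Python driver (assumed connection; the three values are the documented read modes):

import rethinkdb as r

conn = r.connect('localhost', 28015)              # assumed host/port
r.table('test', read_mode='single').run(conn)     # default
r.table('test', read_mode='majority').run(conn)   # strongest consistency
r.table('test', read_mode='outdated').run(conn)   # fastest, may be stale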

@@ -0,0 +1,24 @@
desc: 4462 -- Coroutine stacks should not overflow during datum serialization
table_variable_name: tbl
tests:
# Create a deeply nested array
- js: tbl.insert({id:1,arr:[]})('inserted')
ot: 1
- js: r.range(1000).forEach(function(i) { return tbl.get(1).update(function(x) { return {arr:[x('arr')]} }, {durability:'soft'})})('replaced')
ot: 1000
# We don't care about the actual contents here. We just want to make sure the server
# doesn't crash when sending the document over.
- js: tbl.get(1)
ot: partial({})
- js: tbl.get(1).delete()('deleted')
ot: 1
# A similar test with a nested object
- js: tbl.insert({id:1,obj:{}})('inserted')
ot: 1
- js: r.range(1000).forEach(function(i) { return tbl.get(1).update(function(x) { return {obj:{a:x('obj')}} }, {durability:'soft'})})('replaced')
ot: 1000
- js: tbl.get(1)
ot: partial({})
- js: tbl.get(1).delete()('deleted')
ot: 1

@@ -0,0 +1,8 @@
desc: 4465 (Delete tables from table_config)
table_variable_name: blah
tests:
- py: r.db("rethinkdb").table("db_config").delete()
ot: {"deleted":1,"errors":0,"inserted":0,"replaced":0,"skipped":0,"unchanged":0}
- py: r.db("rethinkdb").table("table_status")
ot: []

@@ -0,0 +1,5 @@
desc: index_wait should throw on missing indexes.
table_variable_name: tbl
tests:
- cd: tbl.index_wait("missing")
ot: err_regex('ReqlOpFailedError', 'Index `missing` was not found on table `[a-zA-Z0-9_]+.[a-zA-Z0-9_]+`[.]', [0])

@@ -0,0 +1,16 @@
desc: Sanity Check Fails, with r.table() expression inside a map (#453)
table_variable_name: tbl
tests:
- cd: tbl.insert([{'a':1},{'a':2}])
ot: partial({'inserted':2})
- js: tbl.map(function(x) { return tbl; })
py: tbl.map(lambda x: tbl)
rb: tbl.map{ |x| tbl }
ot: err("ReqlQueryLogicError", 'Expected type DATUM but found TABLE:', [0])
- js: tbl.map(function(x) { return tbl.coerceTo('array'); }).count()
py: tbl.map(lambda x: tbl.coerce_to('array')).count()
rb: tbl.map{ |x| tbl.coerce_to('array') }.count
ot: 2

@@ -0,0 +1,9 @@
desc: 4582 -- non-deterministic arguments to `replace` and `update`.
table_variable_name: tbl
tests:
- cd: tbl.get(0).replace(tbl.get(0))
ot: err('ReqlQueryLogicError','Could not prove argument deterministic. Maybe you want to use the non_atomic flag?')
- cd: tbl.get(0).update(tbl.get(0))
ot: err('ReqlQueryLogicError','Could not prove argument deterministic. Maybe you want to use the non_atomic flag?')
- cd: tbl.replace(r.args([tbl.get(0)]))
ot: err('ReqlQueryLogicError','Could not prove argument deterministic. Maybe you want to use the non_atomic flag?')
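
And the flag the error suggests, sketched in Python (assumed table and connection): non_atomic=True acknowledges that the argument requires its own read and cannot be applied atomically.

import rethinkdb as r

conn = r.connect('localhost', 28015)  # assumed connection
tbl = r.table('tbl')                  # assumed table
# Rejected: the argument embeds a read, which can't be done atomically.
#   tbl.get(0).replace(tbl.get(0)).run(conn)
# Accepted once the write is explicitly marked non-atomic.
tbl.get(0).replace(tbl.get(0), non_atomic=True).run(conn)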

@@ -0,0 +1,4 @@
desc: Test that using r.args with order_by doesn't crash the server.
tests:
- cd: r.expr([{"x":2},{"x":1}]).order_by(r.args(["x","y"]))
ot: err('ReqlQueryLogicError','r.args is not supported in an order_by or union command yet.')

@@ -0,0 +1,11 @@
desc: Issue #46 -- bare table_create and table_drop
tests:
- cd: r.table_create('46')
ot: partial({'tables_created':1})
- cd: r.table_list()
ot: ['46']
- cd: r.table_drop('46')
ot: partial({'tables_dropped':1})

@@ -0,0 +1,139 @@
desc: Issue #469: add pkey term
tests:
- cd: r.db_create('d469')
ot: partial({'dbs_created':1})
- cd: r.db('d469').table_create('t469')
ot: partial({'tables_created':1})
- cd: r.db('d469').table('t469').index_create('x')
ot: {'created':1}
- cd: r.db('d469').table('t469').index_wait('x').pluck('index', 'ready')
ot: [{'ready':True, 'index':'x'}]
- cd: r.minval.info()
ot: {'type':'MINVAL'}
- cd: r.maxval.info()
ot: {'type':'MAXVAL'}
- cd: r(null).info()
py: r.expr(null).info()
ot: {'type':'NULL'}
- rb: r(true).info
py: r.expr(True).info()
js: r(true).info()
ot: {'type':'BOOL','value':'true'}
- rb: r(1).info
py: r.expr(1).info()
js: r(1).info()
ot: {'type':'NUMBER','value':'1'}
- rb: r('1').info
py: r.expr('1').info()
js: r('1').info()
ot: {'type':'STRING','value':('"1"')}
- rb: r([1]).info
py: r.expr([1]).info()
js: r([1]).info()
ot: {'type':'ARRAY','value':"[\n\t1\n]"}
- rb: r({:a => 1}).info
py: r.expr({'a':1}).info()
js: r({a:1}).info()
ot: {'type':'OBJECT','value':"{\n\t\"a\":\t1\n}"}
- cd: r.db('d469').info()
ot: partial({'type':'DB','name':'d469'})
- cd: r.db('d469').table('t469').info()
ot: {'type':'TABLE','name':'t469','id':uuid(),
'db':{'type':'DB','name':'d469','id':uuid()},
'primary_key':'id', 'indexes':['x'], 'doc_count_estimates':[0]}
- rb: r.db('d469').table('t469').filter{true}.info
py: r.db('d469').table('t469').filter(lambda x:True).info()
js: r.db('d469').table('t469').filter(function(x) { return true; }).info()
ot: {'type':'SELECTION<STREAM>',
'table':{'type':'TABLE','name':'t469','id':uuid(),
'db':{'type':'DB','name':'d469','id':uuid()},
'primary_key':'id', 'indexes':['x'], 'doc_count_estimates':[0]}}
- rb: r.db('d469').table('t469').map{|x| 1}.info
py: r.db('d469').table('t469').map(lambda x:1).info()
js: r.db('d469').table('t469').map(function(x) { return 1; }).info()
ot: {'type':'STREAM'}
- cd: r.db('d469').table('t469').between(0, 1).info()
ot: {'index':'id',
'left_bound':0,
'left_bound_type':'closed',
'right_bound':1,
'right_bound_type':'open',
'sorting':'UNORDERED',
'table':{'db':{'id':uuid(), 'name':'d469', 'type':'DB'},
'doc_count_estimates':[0],
'id':uuid(),
'indexes':['x'],
'name':'t469',
'primary_key':'id',
'type':'TABLE'},
'type':'TABLE_SLICE'}
- cd: r.db('d469').table('t469').between(0, 1, {index:'a'}).info()
py: r.db('d469').table('t469').between(0, 1, index='a').info()
ot: {'index':'a',
'left_bound':0,
'left_bound_type':'closed',
'right_bound':1,
'right_bound_type':'open',
'sorting':'UNORDERED',
'table':{'db':{'id':uuid(), 'name':'d469', 'type':'DB'},
'doc_count_estimates':[0],
'id':uuid(),
'indexes':['x'],
'name':'t469',
'primary_key':'id',
'type':'TABLE'},
'type':'TABLE_SLICE'}
- cd: r.db('d469').table('t469').order_by({index:'a'}).between(0, 1, {index:'a'}).info()
py: r.db('d469').table('t469').order_by(index='a').between(0, 1, index='a').info()
ot: {'index':'a',
'left_bound':0,
'left_bound_type':'closed',
'right_bound':1,
'right_bound_type':'open',
'sorting':'ASCENDING',
'table':{'db':{'id':uuid(), 'name':'d469', 'type':'DB'},
'doc_count_estimates':[0],
'id':uuid(),
'indexes':['x'],
'name':'t469',
'primary_key':'id',
'type':'TABLE'},
'type':'TABLE_SLICE'}
- cd: r.db('d469').table('t469').between(r.minval, r.maxval).info()
ot: {'index':'id',
'left_bound_type':'unbounded',
'right_bound_type':'unbounded',
'sorting':'UNORDERED',
'table':{'db':{'id':uuid(), 'name':'d469', 'type':'DB'},
'doc_count_estimates':[0],
'id':uuid(),
'indexes':['x'],
'name':'t469',
'primary_key':'id',
'type':'TABLE'},
'type':'TABLE_SLICE'}
- cd: r.db('d469').table('t469').between(r.maxval, r.minval).info()
ot: {'index':'id',
'left_bound_type':'unachievable',
'right_bound_type':'unachievable',
'sorting':'UNORDERED',
'table':{'db':{'id':uuid(), 'name':'d469', 'type':'DB'},
'doc_count_estimates':[0],
'id':uuid(),
'indexes':['x'],
'name':'t469',
'primary_key':'id',
'type':'TABLE'},
'type':'TABLE_SLICE'}
- cd: r.db_drop('d469')
ot: partial({'dbs_dropped':1})

@@ -0,0 +1,13 @@
desc: 4729 read mode for changefeeds
table_variable_name: tbl
tests:
- rb: r.table_create('test_4729')['tables_created']
ot: 1
- rb: r.table('test_4729', read_mode:'outdated').get_all('').changes(include_initial:true, include_states:true).limit(1)
ot: [{"state"=>"initializing"}]
- rb: r.table('test_4729', read_mode:'majority').get_all('').changes(include_initial:true, include_states:true).limit(1)
ot: [{"state"=>"initializing"}]
- rb: r.table('test_4729', read_mode:'single').get_all('').changes(include_initial:true, include_states:true).limit(1)
ot: [{"state"=>"initializing"}]
- rb: r.table('test_4729', read_mode:'_debug_direct').get_all('').changes(include_initial:true, include_states:true).limit(1)
ot: err('ReqlQueryLogicError','DEBUG_DIRECT is not a legal read mode for this operation (an up-to-date read mode is required).')

@@ -0,0 +1,14 @@
desc: 5092 -- r.uuid with a string argument is deterministic
table_variable_name: tbl
tests:
- js: tbl.get(0).update({a:r.uuid()})
ot: err('ReqlQueryLogicError','Could not prove argument deterministic. Maybe you want to use the non_atomic flag?')
- js: tbl.get(0).update({a:r.uuid(r.args([]))})
ot: err('ReqlQueryLogicError','Could not prove argument deterministic. Maybe you want to use the non_atomic flag?')
# We are more cautious than necessary when it comes to `r.args`:
- js: tbl.get(0).update({a:r.uuid(r.args(["test"]))})
ot: err('ReqlQueryLogicError','Could not prove argument deterministic. Maybe you want to use the non_atomic flag?')
- js: tbl.get(0).update({a:r.uuid("test")})
ot: partial({skipped:1})
- js: tbl.get(0).update({a:r.uuid(r.uuid())})
ot: err('ReqlQueryLogicError','Could not prove argument deterministic. Maybe you want to use the non_atomic flag?')

@@ -0,0 +1,5 @@
desc: 5130 -- Incorrect determinism calculation of r.point
table_variable_name: tbl
tests:
- js: tbl.indexCreate("a", function (o) { return r.point(tbl.get(0)('x'), 1); })
ot: err("ReqlQueryLogicError", "Could not prove function deterministic. Index functions must be deterministic.")

@@ -0,0 +1,7 @@
desc: Skip after orderby causes use-after-free (#522)
table_variable_name: tbl
tests:
- cd: tbl.insert([{'id':0}, {'id':1}, {'id':2}])
- py: tbl.order_by('id').skip(1)
ot: [{'id':1}, {'id':2}]

@@ -0,0 +1,23 @@
desc: Can't store r.minval or r.maxval in a secondary index, even when they're in an array.
table_variable_name: tbl
tests:
- py: tbl.index_create("min", lambda x: r.minval)
ot: {"created": 1}
- py: tbl.index_create("max", lambda x: r.maxval)
ot: {"created": 1}
- py: tbl.index_create("min_arr", lambda x: [r.minval])
ot: {"created": 1}
- py: tbl.index_create("max_arr", lambda x: [r.maxval])
ot: {"created": 1}
- py: tbl.index_wait("min", "max", "min_arr", "max_arr")
- py: tbl.insert({"id": 1})
ot: partial({"inserted": 1})
- py: tbl.order_by(index="min").count()
ot: 0
- py: tbl.order_by(index="max").count()
ot: 0
- py: tbl.order_by(index="min_arr").count()
ot: 0
- py: tbl.order_by(index="max_arr").count()
ot: 0

@@ -0,0 +1,28 @@
desc: Fix transformations and states on artificial tables.
table_variable_name: tbl
tests:
- def: dtbl = r.db('rethinkdb').table('_debug_scratch')
- rb: dtbl.changes(include_states: true).limit(1)
ot: [{'state':'ready'}]
- rb: dtbl.changes(include_initial: true, include_states: true).limit(2)
ot: [{'state':'initializing'},{'state':'ready'}]
- rb: dtbl.insert({})['inserted']
ot: 1
- rb: dtbl.filter{false}.changes(include_states: true).limit(1)
ot: [{'state':'ready'}]
- rb: dtbl.filter{false}.changes(include_initial: true, include_states: true).limit(2)
ot: [{'state':'initializing'},{'state':'ready'}]
- rb: dtbl.map{1}.changes(include_states: true).limit(1)
ot: [{'state':'ready'}]
- rb: dtbl.map{1}.changes(include_initial: true).limit(1)
ot: [{'new_val':1}]
- rb: dtbl.map{1}.changes(include_initial: true, include_states: true).limit(3)
ot: [{'state':'initializing'},{'new_val':1},{'state':'ready'}]

@@ -0,0 +1,20 @@
desc: 5383 include_offsets problems
table_variable_name: tbl
tests:
- rb: tbl.index_create('datetime')
ot: partial({'created':1})
- rb: tbl.index_wait('datetime')
ot: [partial({'index':'datetime', 'ready':True})]
- rb: tbl.insert([{id:1, datetime:0}, {id:2, datetime:0}, {id:3, datetime:0}])['inserted']
ot: 3
- rb: c = tbl.order_by(index: 'datetime').limit(3).changes(include_initial: true)
- rb: c.next
ot: {'new_val':{'id':1, 'datetime':0}}
- rb: tbl.get(1).update({datetime: 1})['replaced']
ot: 1
- rb: c.next
ot: {'new_val':{'id':2, 'datetime':0}}
- rb: c.next
ot: {'new_val':{'id':3, 'datetime':0}}
- rb: c.next
ot: {'new_val':{'id':1, 'datetime':1}, 'old_val':{'id':1, 'datetime':0}}

@@ -0,0 +1,16 @@
desc: Regression test for issue 5438
table_variable_name: tbl
tests:
- rb: tbl.index_create('a')['created']
ot: 1
- rb: tbl.index_wait('a')['ready']
ot: [true]
- rb: ch = tbl.get_all(1, index: 'a').filter({b: 1}).changes
- rb: tbl.insert({id: 0, a: 0, b: 0})['inserted']
ot: 1
- rb: tbl.get(0).update({a: 1})['replaced']
ot: 1
- rb: tbl.get(0).update({b: 1})['replaced']
ot: 1
- rb: fetch(ch, 1)
ot: [{"new_val"=>{"a"=>1, "b"=>1, "id"=>0}, "old_val"=>nil}]

@@ -0,0 +1,15 @@
desc: r.js inside reduce crashes server (#545)
table_variable_name: tbl
tests:
- cd: tbl.insert([{'id':0}, {'id':1}, {'id':2}])
- js: tbl.reduce(r.js("(function(x,y){return 1;})"))
py: tbl.reduce(r.js("(function(x,y){return 1;})"))
rb: tbl.reduce(r.js("(function(x,y){return 1;})"))
ot: 1
- js: tbl.reduce(r.js("(function(x,y){return {id:x[\"id\"] + y[\"id\"]};})"))
py: tbl.reduce(r.js("(function(x,y){return {id:x[\"id\"] + y[\"id\"]};})"))
rb: tbl.reduce(r.js("(function(x,y){return {id:x[\"id\"] + y[\"id\"]};})"))
ot: ({'id':3})

@@ -0,0 +1,27 @@
desc: Regression tests for issue #546, variable shadowing
tests:
# Just a single nesting level, for sanity
- js: r(1).do(function(a) { return a; })
py: r.expr(1).do(lambda a:a)
ot: 1
# Nested but returning the inner var
- js: |-
r(1).do(function(a) {
return r(2).do(function(b) {
return b;
});
})
py: r.expr(1).do(lambda a:r.expr(2).do(lambda b:b))
ot: 2
# Nested but returning the outer var (this was the problem in 546)
- js: |-
r(1).do(function(a) {
return r(2).do(function(b) {
return a;
});
})
py: r.expr(1).do(lambda a:r.expr(2).do(lambda b:a))
ot: 1

@@ -0,0 +1,5 @@
desc: multi-stream map works even if one of the streams is empty from the beginning
table_variable_name: tbl
tests:
- py: r.range(100).map(tbl, lambda x, y: null)
ot: []

@@ -0,0 +1,12 @@
desc: Regression test for issue 5535
table_variable_name: tbl
tests:
- rb: tbl.insert([{id: 0, a: nil}, {id: 1, a: 1}])['inserted']
ot: 2
- rb: ch = tbl['a'].changes(include_initial: true, include_states: true)
- rb: fetch(ch, 3)
ot: [{"state"=>"initializing"},{"new_val"=>1},{"state"=>"ready"}]
- rb: tbl.get(0).update({a: 2})['replaced']
ot: 1
- rb: fetch(ch, 1)
ot: [{"new_val"=>2, "old_val"=>nil}]

@@ -0,0 +1,11 @@
desc: r.minval can't be confused with an empty array. This used to crash.
table_variable_name: tbl
tests:
- py: tbl.index_create("v")
ot: {"created": 1}
- py: tbl.index_wait("v")
- py: tbl.insert({"id": 1, "v": []})
ot: partial({"inserted": 1})
- py: tbl.between([r.minval], [r.maxval], left_bound="open", index="v").count()
ot: 0

@@ -0,0 +1,10 @@
desc: concatmap that doesn't return stream crashes server (#568)
table_variable_name: tbl
tests:
- cd: tbl.insert({'name':'Jim Brown'})
- js: tbl.concatMap(function(rec){return rec("name")})
py: tbl.concat_map(lambda rec:rec["name"])
rb: tbl.concat_map {|rec| rec[:name]}
ot: err("ReqlQueryLogicError", "Cannot convert STRING to SEQUENCE", [])

@@ -0,0 +1,25 @@
desc: Catch obvious sindex creation/dropping errors (#578)
table_variable_name: tbl
tests:
- js: tbl.index_create("578", function(rec){return 1})
py: tbl.index_create("578", lambda rec:1)
rb: tbl.index_create("578") {|rec| 1}
ot: {'created':1}
- cd: tbl.index_wait("578").pluck('index', 'ready')
ot: [{'ready':True, 'index':'578'}]
- js: tbl.index_create("578", function(rec){return 1})
py: tbl.index_create("578", lambda rec:1)
rb: tbl.index_create("578") {|rec| 1}
ot: err_regex("ReqlOpFailedError", "Index `578` already exists on table `[a-zA-Z0-9_]+.[a-zA-Z0-9_]+`[.]", [])
- js: tbl.index_drop("578")
py: tbl.index_drop("578")
rb: tbl.index_drop("578")
ot: {'dropped':1}
- js: tbl.index_drop("578")
py: tbl.index_drop("578")
rb: tbl.index_drop("578")
ot: err_regex("ReqlOpFailedError", "Index `578` does not exist on table `[a-zA-Z0-9_]+.[a-zA-Z0-9_]+`[.]", [])

@@ -0,0 +1,15 @@
desc: reject non-deterministic secondary indexes (#579)
table_variable_name: tbl
tests:
- cd: tbl.insert({'name':'Jim Brown'})
- js: tbl.index_create("579", function(rec){return r.js("1")})
py: tbl.index_create("579", lambda rec:r.js("1"))
rb: tbl.index_create("579") {|rec| r.js("1")}
ot: err("ReqlQueryLogicError", "Could not prove function deterministic. Index functions must be deterministic.", [])
- js: tbl.index_create("579", function(rec){return tbl.get(0)})
py: tbl.index_create("579", lambda rec:tbl.get(0))
rb: tbl.index_create("579") {|rec| tbl.get(0)}
ot: err("ReqlQueryLogicError", "Could not prove function deterministic. Index functions must be deterministic.", [])

@@ -0,0 +1,15 @@
desc: Regression test for issue #619 "Python driver doesn't support inserting objects w/ 'self' as a key"
tests:
- py: r.expr({"self":"foo"})
ot: ({'self':'foo'})
- py: r.expr(1).do(lambda x:{'self':x})
ot: ({'self':1})
- py: r.expr({"a":{"self":1}})
ot: ({"a":{"self":1}})
- py: r.expr({"self":{"self":{"self":1}}})
ot: ({"self":{"self":{"self":1}}})

@@ -0,0 +1,19 @@
desc: Regression tests for issue #665. Makes sharding not depend on operation's region.
tests:
# Create a table and do an insertion.
- cd: r.db('test').table_create('t665')
def: t = r.db('test').table('t665')
# Keys 1 and 4 are sharded to hash shards that, of the four hash
# shards, are not adjacent.
- cd: t.insert([{'id':1}, {'id':4}])
ot: ({'unchanged':0,
'skipped':0,
'replaced':0,
'inserted':2,
'errors':0,
'deleted':0
})
# clean up
- cd: r.db('test').table_drop('t665')

@@ -0,0 +1,10 @@
desc: fix type of `limit` and `zip` on streams (#678)
table_variable_name: tbl
tests:
- rb: tbl.map{|x| x}.limit(1).typeof
ot: ("STREAM")
- rb: r([1]).map{|x| x}.limit(1).typeof
ot: ("ARRAY")

@@ -0,0 +1,7 @@
desc: 718 -- another lazy crashing bug -- changed as of #1328 to allow referencing external variables
table_variable_name: tbl
tests:
- rb: r(4).do{|x| tbl.index_create('718') {|row| row[:id] % x}}
ot: ({'created':1})

@@ -0,0 +1,4 @@
desc: Regression tests for issue #730 (unbound ruby functions)
tests:
- rb: r.table_Create
ot: err_regex("NoMethodError", "undefined method `table_Create'")

@@ -0,0 +1,9 @@
desc: 757 -- another lazy crashing bug
tests:
- rb: r.db('test').table_create('metadata', {:primary_key => 'pagename'})
- rb: r.table('metadata').insert({'pagename' => 'homepage', 'versions' => [1]})
- rb: r.branch(r.table('metadata').get('homepage').has_fields('versions'), r.table('metadata').get('homepage'), r(false))
ot: ({'pagename':'homepage', 'versions':[1]})
- rb: r.db('test').table_drop('metadata')

@@ -0,0 +1,33 @@
desc: issue 763 check arg count for indexCreate in JS driver
table_variable_name: tbl
tests:
- js: tbl.indexCreate()
ot: err("ReqlCompileError", "Expected between 1 and 3 arguments but found 0.")
- js: tbl.indexCreate('a', 'b', 'c', 'd')
ot: err("ReqlCompileError", "Expected between 1 and 3 arguments but found 4.")
- js: tbl.indexCreate('a', 'b')
ot: err("ReqlQueryLogicError", "Expected type FUNCTION but found DATUM:")
- js: tbl.indexCreate('a')
ot: ({'created':1})
# The fix also changed arg checking for other variable-argument functions
- js: r('a').eq()
ot: err("ReqlCompileError", "Expected 2 or more arguments but found 1.")
- js: r('a').lt()
ot: err("ReqlCompileError", "Expected 2 or more arguments but found 1.")
- js: r(['a']).union()
ot: ['a']
- js: r.do()
ot: err("ReqlCompileError", "Expected 1 or more arguments but found 0.")
- js: r.add()
ot: err("ReqlCompileError", "Expected 1 or more arguments but found 0.")
- js: r.add(1)
ot: 1

@@ -0,0 +1,10 @@
desc: 767 -- better NaN checking
tests:
- js: r({a:NaN})
ot: builtin_err("TypeError", "Illegal non-finite number `NaN`.")
- js: r({a:Infinity})
ot: builtin_err("TypeError", "Illegal non-finite number `Infinity`.")
- py: r.expr(float('NaN'))
ot: err_regex('ValueError', 'Out of range float values are not JSON compliant.*')
- py: r.expr(float('Infinity'))
ot: err_regex('ValueError', 'Out of range float values are not JSON compliant.*')

@@ -0,0 +1,5 @@
desc: 831 -- Empty batched_replaces_t constructed
table_variable_name: tbl
tests:
- py: tbl.insert([True, True])
ot: ({'first_error':'Expected type OBJECT but found BOOL.', 'skipped':0, 'deleted':0, 'unchanged':0, 'errors':2, 'replaced':0, 'inserted':0})