Merge pull request #541 from riptano/new-pep8-fixes
Use autopep8 to cleanup dtest
ptnapoleon committed Sep 9, 2015
2 parents 1383392 + cfadc7c commit 0082fe9
Showing 47 changed files with 270 additions and 222 deletions.
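The file sections below all follow the same pattern: mechanical pycodestyle fixes such as spaces around arithmetic operators (E225/E226), a space after the # in comments (E265), two blank lines before top-level classes (E302), one import per line (E401), re-aligned continuation lines (E127/E128), and one exact-type check replaced with isinstance (E721). The commit records only the result, not the command that produced it; a minimal sketch of a repo-wide autopep8 pass might look like the following (the flags and the raised line-length limit are assumptions, and a non-whitespace fix like the isinstance change may have needed autopep8's aggressive mode or a manual pass):

    # Hypothetical sketch of a repo-wide autopep8 run; the commit does not
    # record the actual invocation, so the options below are assumptions.
    import os
    import autopep8

    for root, _, files in os.walk('.'):
        for name in files:
            if not name.endswith('.py'):
                continue
            path = os.path.join(root, name)
            with open(path) as f:
                source = f.read()
            # fix_code() returns the source with pycodestyle fixes applied
            # (whitespace, blank lines, imports, ...). dtest keeps long CQL
            # statements on one line, hence the assumed high line limit.
            fixed = autopep8.fix_code(source, options={'max_line_length': 200})
            if fixed != source:
                with open(path, 'w') as f:
                    f.write(fixed)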
2 changes: 1 addition & 1 deletion auth_roles_test.py
@@ -1168,7 +1168,7 @@ def assert_unauthenticated(self, message, user, password):
             self.cql_connection(node, user=user, password=password)
         host, error = response.exception.errors.popitem()
         pattern = 'Failed to authenticate to %s: code=0100 \[Bad credentials\] message="%s"' % (host, message)
-        assert type(error) == AuthenticationFailed, "Expected AuthenticationFailed, got %s" % error
+        assert isinstance(error, AuthenticationFailed), "Expected AuthenticationFailed, got %s" % error
         assert re.search(pattern, error.message), "Expected: %s" % pattern

     def prepare(self, nodes=1, roles_expiry=0):
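The auth_roles_test.py change above is the one fix in this diff that goes beyond whitespace: type(x) == T (pycodestyle E721) treats subclasses as mismatches, while isinstance accepts them. A self-contained illustration (the classes here are stand-ins, not the driver's):

    class AuthenticationFailed(Exception):
        pass


    class TokenAuthFailed(AuthenticationFailed):
        """Stand-in subclass; a driver may raise subtypes of its base error."""
        pass


    error = TokenAuthFailed('Bad credentials')
    print(type(error) == AuthenticationFailed)      # False -- exact type only
    print(isinstance(error, AuthenticationFailed))  # True  -- subclasses match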
5 changes: 3 additions & 2 deletions batch_test.py
@@ -7,6 +7,7 @@
 from dtest import Tester, debug
 from tools import since

+
 class TestBatch(Tester):

     def counter_batch_accepts_counter_mutations_test(self):
@@ -255,12 +256,12 @@ def assert_timedout(self, session, query, cl, acknowledged_by=None,
         except Timeout as e:
             if received_responses is not None:
                 msg = "Expecting received_responses to be {}, got: {}".format(
-                        received_responses, e.received_responses,)
+                    received_responses, e.received_responses,)
                 self.assertEqual(e.received_responses, received_responses, msg)
         except Unavailable as e:
             if received_responses is not None:
                 msg = "Expecting alive_replicas to be {}, got: {}".format(
-                        received_responses, e.alive_replicas,)
+                    received_responses, e.alive_replicas,)
                 self.assertEqual(e.alive_replicas, received_responses, msg)
         except Exception as e:
             assert False, "Expecting TimedOutException, got:" + str(e)
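Several changed pairs in this diff, like the two in batch_test.py above, differ only in leading whitespace: autopep8 re-indents continuation lines to satisfy pycodestyle's E127/E128, so the visible text is unchanged. A small before/after sketch of the rule (the exact indent depths in the diffs here are reconstructed, not verbatim):

    received = 2
    expected = 3

    # Before: the continuation line sits at an arbitrary depth (E128).
    msg = "Expecting received_responses to be {}, got: {}".format(
            expected, received)

    # After an autopep8 pass: a standard four-space hanging indent.
    msg = "Expecting received_responses to be {}, got: {}".format(
        expected, received)
    print(msg)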
7 changes: 4 additions & 3 deletions cfid_test.py
@@ -2,6 +2,7 @@
 from tools import since
 import os

+
 @since('2.1')
 class TestCFID(Tester):

@@ -22,15 +23,15 @@ def cfid_test(self):
         node1.flush()
         session.execute('drop table ks.cf;')

-        #get a list of cf directories
+        # get a list of cf directories
         try:
             cfs = os.listdir(node1.get_path() + "/data/ks")
         except OSError:
             self.fail("Path to sstables not valid.")

-        #check that there are 5 unique directories
+        # check that there are 5 unique directories
         self.assertEqual(len(cfs), 5)

-        #check that these are in fact column family directories
+        # check that these are in fact column family directories
         for dire in cfs:
             self.assertTrue(dire[0:2] == 'cf')
8 changes: 4 additions & 4 deletions commitlog_test.py
@@ -52,7 +52,7 @@ def prepare(self, configuration={}, create_test_keyspace=True, **kwargs):
     def _change_commitlog_perms(self, mod):
         path = self._get_commitlog_path()
         os.chmod(path, mod)
-        commitlogs = glob.glob(path+'/*')
+        commitlogs = glob.glob(path + '/*')
         for commitlog in commitlogs:
             os.chmod(commitlog, mod)

@@ -103,16 +103,16 @@ def _segment_size_test(self, segment_size_in_mb, compressed=False):
         smaller_found = False
         for i, f in enumerate(commitlogs):
             size = os.path.getsize(f)
-            size_in_mb = int(size/1024/1024)
+            size_in_mb = int(size / 1024 / 1024)
             debug('segment file {} {}; smaller already found: {}'.format(f, size_in_mb, smaller_found))
-            if size_in_mb < 1 or size < (segment_size*0.1):
+            if size_in_mb < 1 or size < (segment_size * 0.1):
                 continue  # commitlog not yet used

             try:
                 if compressed:
                     # if compression is used, we assume there will be at most a 50% compression ratio
                     self.assertLess(size, segment_size)
-                    self.assertGreater(size, segment_size/2)
+                    self.assertGreater(size, segment_size / 2)
                 else:
                     # if no compression is used, the size will be close to what we expect
                     assert_almost_equal(size, segment_size, error=0.05)
14 changes: 7 additions & 7 deletions compaction_test.py
@@ -73,7 +73,7 @@ def data_size_test(self):
         if output.find(table_name) != -1:
             output = output[output.find(table_name):]
             output = output[output.find("Space used (live)"):]
-            initialValue = int(output[output.find(":")+1:output.find("\n")].strip())
+            initialValue = int(output[output.find(":") + 1:output.find("\n")].strip())
         else:
             debug("datasize not found")
             debug(output)
@@ -84,7 +84,7 @@ def data_size_test(self):
         if output.find(table_name) != -1:
             output = output[output.find(table_name):]
             output = output[output.find("Space used (live)"):]
-            finalValue = int(output[output.find(":")+1:output.find("\n")].strip())
+            finalValue = int(output[output.find(":") + 1:output.find("\n")].strip())
         else:
             debug("datasize not found")

@@ -253,7 +253,7 @@ def large_compaction_warning_test(self):
         self.create_ks(session, 'ks', 1)

         mark = node.mark_log()
-        strlen = (1024 * 1024)/100
+        strlen = (1024 * 1024) / 100
         session.execute("CREATE TABLE large(userid text PRIMARY KEY, properties map<int, text>) with compression = {}")
         for i in range(200):  # ensures partition size larger than compaction_large_partition_warning_threshold_mb
             session.execute("UPDATE ks.large SET properties[%i] = '%s' WHERE userid = 'user'" % (i, get_random_word(strlen)))
@@ -284,7 +284,7 @@ def disable_autocompaction_nodetool_test(self):
         session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\'}}'.format(self.strategy))
         node.nodetool('disableautocompaction ks to_disable')
         for i in range(1000):
-            session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello'*100))
+            session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello' * 100))
             if i % 100 == 0:
                 node.flush()
         self.assertTrue(len(node.grep_log('Compacting.+to_disable')) == 0, 'Found compaction log items for {0}'.format(self.strategy))
@@ -304,7 +304,7 @@ def disable_autocompaction_schema_test(self):
         self.create_ks(session, 'ks', 1)
         session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\', \'enabled\':\'false\'}}'.format(self.strategy))
         for i in range(1000):
-            session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello'*100))
+            session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello' * 100))
             if i % 100 == 0:
                 node.flush()
         self.assertTrue(len(node.grep_log('Compacting.+to_disable')) == 0, 'Found compaction log items for {0}'.format(self.strategy))
@@ -333,7 +333,7 @@ def disable_autocompaction_alter_test(self):
         session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\'}}'.format(self.strategy))
         session.execute('ALTER TABLE to_disable WITH compaction = {{\'class\':\'{0}\', \'enabled\':\'false\'}}'.format(self.strategy))
         for i in range(1000):
-            session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello'*100))
+            session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello' * 100))
             if i % 100 == 0:
                 node.flush()
         self.assertTrue(len(node.grep_log('Compacting.+to_disable')) == 0, 'Found compaction log items for {0}'.format(self.strategy))
@@ -357,7 +357,7 @@ def disable_autocompaction_alter_and_nodetool_test(self):
         session.execute('CREATE TABLE to_disable (id int PRIMARY KEY, d TEXT) WITH compaction = {{\'class\':\'{0}\'}}'.format(self.strategy))
         node.nodetool('disableautocompaction ks to_disable')
         for i in range(1000):
-            session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello'*100))
+            session.execute('insert into to_disable (id, d) values ({0}, \'{1}\')'.format(i, 'hello' * 100))
             if i % 100 == 0:
                 node.flush()
         self.assertTrue(len(node.grep_log('Compacting.+to_disable')) == 0, 'Found compaction log items for {0}'.format(self.strategy))
15 changes: 8 additions & 7 deletions concurrent_schema_changes_test.py
@@ -19,6 +19,7 @@ def wait(delay=2):
     """
     time.sleep(delay)

+
 class TestConcurrentSchemaChanges(Tester):

     def __init__(self, *argv, **kwargs):
@@ -46,10 +47,10 @@ def prepare_for_changes(self, session, namespace='ns1'):
         session.execute(query)
         wait(1)
         session.execute("INSERT INTO cf_%s (col1, col2, col3) VALUES ('a', 'b', 'c');"
-            % namespace)
+                        % namespace)

         # create an index
-        session.execute("CREATE INDEX index_%s ON cf_%s(col2)"%(namespace, namespace))
+        session.execute("CREATE INDEX index_%s ON cf_%s(col2)" % (namespace, namespace))

         # create a column family that can be deleted later.
         query = """
@@ -111,7 +112,7 @@ def make_schema_changes(self, session, namespace='ns1'):
         session.execute(query)

         # add index
-        session.execute("CREATE INDEX index2_%s ON cf_%s(col3)"%(namespace, namespace))
+        session.execute("CREATE INDEX index2_%s ON cf_%s(col3)" % (namespace, namespace))

         # remove an index
         session.execute("DROP INDEX index_%s" % namespace)
@@ -516,10 +517,10 @@ def snapshot_test(self):

         ### restore the snapshots ##
         # clear the commitlogs and data
-        dirs = ( '%s/commitlogs' % node1.get_path(),
-                 '%s/commitlogs' % node2.get_path(),
-                 '%s/data/ks_ns2/cf_*/*' % node1.get_path(),
-                 '%s/data/ks_ns2/cf_*/*' % node2.get_path(),
+        dirs = ('%s/commitlogs' % node1.get_path(),
+                '%s/commitlogs' % node2.get_path(),
+                '%s/data/ks_ns2/cf_*/*' % node1.get_path(),
+                '%s/data/ks_ns2/cf_*/*' % node2.get_path(),
         )
         for dirr in dirs:
             for f in glob.glob(os.path.join(dirr)):
2 changes: 1 addition & 1 deletion configuration_test.py
@@ -120,7 +120,7 @@ def write_to_trigger_fsync(session, ks, table):
     """
     execute_concurrent_with_args(session,
                                  session.prepare('INSERT INTO "{ks}"."{table}" (key, a, b, c) VALUES (?, ?, ?, ?)'.format(ks=ks, table=table)),
-                                 ((x, x+1, x+2, x+3) for x in range(50000)))
+                                 ((x, x + 1, x + 2, x + 3) for x in range(50000)))


 def commitlog_size(node):
18 changes: 10 additions & 8 deletions consistency_test.py
@@ -187,6 +187,7 @@ class TestAvailability(TestHelper):
     """
     Test that we can read and write depending on the number of nodes that are alive and the consistency levels.
     """
+
     def _test_simple_strategy(self, combinations):
         """
         Helper test function for a single data center: invoke _test_insert_query_from_node() for each node
@@ -237,7 +238,7 @@ def _test_insert_query_from_node(self, session, dc_idx, rf_factors, num_nodes_al
         cluster = self.cluster

         self.log("Connected to %s for %s/%s/%s" %
-            (session.cluster.contact_points, self._name(write_cl), self._name(read_cl), self._name(serial_cl)))
+                 (session.cluster.contact_points, self._name(write_cl), self._name(read_cl), self._name(serial_cl)))

         start = 0
         end = 100
@@ -324,6 +325,7 @@ class TestAccuracy(TestHelper):
     """

     class Validation:
+
         def __init__(self, outer, sessions, nodes, rf_factors, start, end, write_cl, read_cl, serial_cl=None):
             self.outer = outer
             self.sessions = sessions
@@ -336,7 +338,7 @@ def __init__(self, outer, sessions, nodes, rf_factors, start, end, write_cl, rea
             self.serial_cl = serial_cl

             outer.log('Testing accuracy for %s/%s/%s (keys : %d to %d)'
-                % (outer._name(write_cl), outer._name(read_cl), outer._name(serial_cl), start, end))
+                      % (outer._name(write_cl), outer._name(read_cl), outer._name(serial_cl), start, end))

         def get_num_nodes(self, idx):
             """
@@ -387,8 +389,8 @@ def check_all_sessions(idx, n, val):
                     if outer.query_user(s, n, val, read_cl, check_ret=strong_consistency):
                         num = num + 1
                 assert num >= write_nodes, \
-                        "Failed to read value from sufficient number of nodes, required %d but got %d - [%d, %s]" \
-                        % (write_nodes, num, n, val)
+                    "Failed to read value from sufficient number of nodes, required %d but got %d - [%d, %s]" \
+                    % (write_nodes, num, n, val)

             for n in xrange(start, end):
                 age = 30
@@ -426,8 +428,8 @@ def check_all_sessions(idx, n, val):
                     if outer.query_counter(s, n, val, read_cl, check_ret=strong_consistency):
                         num = num + 1
                 assert num >= write_nodes, \
-                        "Failed to read value from sufficient number of nodes, required %d but got %d - [%d, %s]" \
-                        % (write_nodes, num, n, val)
+                    "Failed to read value from sufficient number of nodes, required %d but got %d - [%d, %s]" \
+                    % (write_nodes, num, n, val)

             for n in xrange(start, end):
                 c = outer.read_counter(sessions[0], n, ConsistencyLevel.ALL)
@@ -629,7 +631,7 @@ def short_read_test(self):
         assert len(res) == 3, 'Expecting 3 values, got %d (%s)' % (len(res), str(res))
         # value 0, 1 and 2 have been deleted
         for i in xrange(1, 4):
-            assert res[i-1][1] == 'value%d' % (i+2), 'Expecting value%d, got %s (%s)' % (i+2, res[i-1][1], str(res))
+            assert res[i - 1][1] == 'value%d' % (i + 2), 'Expecting value%d, got %s (%s)' % (i + 2, res[i - 1][1], str(res))

         truncate_statement = SimpleStatement('TRUNCATE cf', consistency_level=ConsistencyLevel.QUORUM)
         session.execute(truncate_statement)
@@ -771,7 +773,7 @@ def short_read_reversed_test(self):
         assert len(res) == 3, 'Expecting 3 values, got %d (%s)' % (len(res), str(res))
         # value 6, 7 and 8 have been deleted
         for i in xrange(0, 3):
-            assert res[i][1] == 'value%d' % (5-i), 'Expecting value%d, got %s (%s)' % (5-i, res[i][1], str(res))
+            assert res[i][1] == 'value%d' % (5 - i), 'Expecting value%d, got %s (%s)' % (5 - i, res[i][1], str(res))

         truncate_statement = SimpleStatement('TRUNCATE cf', consistency_level=ConsistencyLevel.QUORUM)
         session.execute(truncate_statement)
4 changes: 2 additions & 2 deletions consistent_bootstrap_test.py
@@ -12,7 +12,7 @@ def consistent_reads_after_move_test(self):
         debug("Creating a ring")
         cluster = self.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'write_request_timeout_in_ms': 60000,
-            'read_request_timeout_in_ms': 60000, 'dynamic_snitch_badness_threshold': 0.0}, batch_commitlog=True)
+                                                  'read_request_timeout_in_ms': 60000, 'dynamic_snitch_badness_threshold': 0.0}, batch_commitlog=True)

         cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
         node1, node2, node3 = cluster.nodelist()
@@ -51,7 +51,7 @@ def consistent_reads_after_bootstrap_test(self):
         debug("Creating a ring")
         cluster = self.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'write_request_timeout_in_ms': 60000,
-            'read_request_timeout_in_ms': 60000, 'dynamic_snitch_badness_threshold': 0.0}, batch_commitlog=True)
+                                                  'read_request_timeout_in_ms': 60000, 'dynamic_snitch_badness_threshold': 0.0}, batch_commitlog=True)

         cluster.populate(2).start()
         node1, node2 = cluster.nodelist()
15 changes: 9 additions & 6 deletions counter_tests.py
@@ -2,10 +2,13 @@
 from cassandra import ConsistencyLevel
 from cassandra.query import SimpleStatement

-import random, time, uuid
+import random
+import time
+import uuid
 from assertions import assert_invalid, assert_one
 from tools import rows_to_list, since

+
 class TestCounters(Tester):

     def simple_increment_test(self):
@@ -56,7 +59,7 @@ def upgrade_test(self):
                 c counter
             )
         """
-        query = query +  "WITH compression = { 'sstable_compression' : 'SnappyCompressor' }"
+        query = query + "WITH compression = { 'sstable_compression' : 'SnappyCompressor' }"

         session.execute(query)
         time.sleep(2)
@@ -69,7 +72,7 @@ def make_updates():
             upd = "UPDATE counterTable SET c = c + 1 WHERE k = %d;"
             batch = " ".join(["BEGIN COUNTER BATCH"] + [upd % x for x in keys] + ["APPLY BATCH;"])

-            kmap = { "k%d" % i : i for i in keys }
+            kmap = {"k%d" % i: i for i in keys}
             for i in range(0, updates):
                 query = SimpleStatement(batch, consistency_level=ConsistencyLevel.QUORUM)
                 session.execute(query)
@@ -130,7 +133,7 @@ def counter_consistency_test(self):
         for i in xrange(25):
             _id = str(uuid.uuid4())
             counters.append(
-                {_id: {'counter_one':1, 'counter_two':1}}
+                {_id: {'counter_one': 1, 'counter_two': 1}}
             )

         query = SimpleStatement("""
@@ -141,7 +144,7 @@ def counter_consistency_test(self):

         # increment a bunch of counters with CL.ONE
         for i in xrange(10000):
-            counter = counters[random.randint(0, len(counters)-1)]
+            counter = counters[random.randint(0, len(counters) - 1)]
             counter_id = counter.keys()[0]

             query = SimpleStatement("""
@@ -208,7 +211,7 @@ def multi_counter_update_test(self):
         expected_counts = {}

         # set up expectations
-        for i in range(1,6):
+        for i in range(1, 6):
             _id = uuid.uuid4()

             expected_counts[_id] = i
1 change: 1 addition & 0 deletions cql_prepared_test.py
@@ -3,6 +3,7 @@

 import time

+
 @since("1.2")
 class TestCQL(Tester):

2 changes: 1 addition & 1 deletion cql_tests.py
@@ -406,6 +406,7 @@ class AbortedQueriesTester(CQLTester):
     @jira_ticket CASSANDRA-7392
     Test that read-queries that take longer than read_request_timeout_in_ms time out
     """
+
     def local_query_test(self):
         """
         Check that a query running on the local coordinator node times out
@@ -469,7 +470,6 @@ def remote_query_test(self):
         assert_unavailable(lambda c: debug(c.execute(statement)), session)
         node2.watch_log_for("'SELECT \* FROM ks.test2 (.*)' timed out 1 time", from_mark=mark, timeout=60)

-
     def index_query_test(self):
         """
         Check that a secondary index query times out