STAR-801: Changes from DSEDB - DO NOT MERGE #27

Draft: wants to merge 16 commits into base STAR-99
2 changes: 2 additions & 0 deletions .gitignore
@@ -10,3 +10,5 @@ last_test_dir
upgrade
html/
doxygen/doxypy-0.4.2/
.pytest_cache/
.vscode/
42 changes: 36 additions & 6 deletions auth_test.py
@@ -541,10 +541,16 @@ def test_materialized_views_auth(self):
* Create a new user, 'cathy', with no permissions
* Create a ks, table
* Connect as cathy
*
* Try CREATE MV without ALTER permission on base table, assert throws Unauthorized
* Grant cathy ALTER permissions, then CREATE MV successfully
*
* Try to MODIFY base without WRITE permission on base, assert throws Unauthorized
* Grant cathy WRITE permissions on base, and modify base successfully
*
* Try to SELECT from the mv, assert throws Unauthorized
* Grant cathy SELECT permissions, and read from the MV successfully
* Grant cathy SELECT permissions on base, and read from the MV successfully
*
* Revoke cathy's ALTER permissions, assert DROP MV throws Unauthorized
* Restore cathy's ALTER permissions, DROP MV successfully
"""
@@ -565,12 +565,36 @@ def test_materialized_views_auth(self):
cassandra.execute("GRANT ALTER ON ks.cf TO cathy")
cathy.execute(create_mv)

# TRY SELECT MV without SELECT permission on base table
assert_unauthorized(cathy, "SELECT * FROM ks.mv1", "User cathy has no SELECT permission on <table ks.cf> or any of its parents")
# Try MODIFY base without WRITE permission on base
assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no MODIFY permission on <table ks.cf> or any of its parents")

# Grant SELECT permission and CREATE MV
cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
cathy.execute("SELECT * FROM ks.mv1")
if self.cluster.version() >= LooseVersion('4.0'):
# From 4.0 onward, only base MODIFY permission is required to update base with MV
# Grant WRITE permission on Base
cassandra.execute("GRANT MODIFY ON ks.cf TO cathy")
cathy.execute("INSERT INTO ks.cf(id, value) VALUES(1, '1')")

# TRY SELECT MV without SELECT permission on base table
assert_unauthorized(cathy, "SELECT * FROM ks.cf", "User cathy has no SELECT permission on <table ks.cf> or any of its parents")
assert_unauthorized(cathy, "SELECT * FROM ks.mv1", "User cathy has no SELECT permission on <table ks.cf> or any of its parents")

# Grant SELECT permission
cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
assert_one(cathy, "SELECT * FROM ks.cf", [1, '1'])
assert_one(cathy, "SELECT * FROM ks.mv1", ['1', 1])
else:
# Before 4.0, MODIFY on MV is required to insert to base
# Grant WRITE permission on Base
cassandra.execute("GRANT MODIFY ON ks.cf TO cathy")
assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no SELECT permission on <table ks.cf> or any of its parents")
cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no MODIFY permission on <table ks.mv1> or any of its parents")

# Grant WRITE permission on MV
cassandra.execute("GRANT MODIFY ON ks.mv1 TO cathy")
cathy.execute("INSERT INTO ks.cf(id, value) VALUES(1, '1')")
assert_one(cathy, "SELECT * FROM ks.cf", [1, '1'])
assert_one(cathy, "SELECT * FROM ks.mv1", ['1', 1])

# Revoke ALTER permission and try DROP MV
cassandra.execute("REVOKE ALTER ON ks.cf FROM cathy")
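The version branch added above is the heart of this change: from 4.0 onward, a write that maintains a materialized view needs only MODIFY on the base table, while earlier versions also require SELECT on the base and MODIFY on the MV itself. A minimal sketch of just the grant sequence, assuming the same superuser session and LooseVersion import the test already uses:

```python
from distutils.version import LooseVersion

def grant_base_write_path(cassandra, version):
    # 4.0+: MODIFY on the base table alone lets cathy INSERT into ks.cf;
    # the MV update is applied internally on her behalf.
    cassandra.execute("GRANT MODIFY ON ks.cf TO cathy")
    if version < LooseVersion('4.0'):
        # pre-4.0: the write path also reads the base row and writes the MV,
        # so SELECT on the base and MODIFY on the MV are both required.
        cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
        cassandra.execute("GRANT MODIFY ON ks.mv1 TO cathy")
```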
53 changes: 53 additions & 0 deletions bootstrap_test.py
@@ -882,7 +882,9 @@ def test_simultaneous_bootstrap(self):
# Repeat the select count(*) query, to help catch
# bugs like 9484, where count(*) fails at higher
# data loads.
logger.error(node1.nodetool('status').stdout)
for _ in range(5):
logger.error("Executing SELECT to node2")
assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE)

def test_cleanup(self):
@@ -1018,6 +1020,57 @@ def test_bootstrap_binary_disabled(self):
assert_bootstrap_state(self, node3, 'COMPLETED', user='cassandra', password='cassandra')
node3.wait_for_binary_interface()

@since('4.0')
@pytest.mark.no_vnodes
def test_simple_bootstrap_with_everywhere_strategy(self):
cluster = self.cluster
tokens = cluster.balanced_tokens(2)
cluster.set_configuration_options(values={'num_tokens': 1})

logger.debug("[node1, node2] tokens: %r" % (tokens,))

keys = 10000

# Create a single node cluster
cluster.populate(1)
node1 = cluster.nodelist()[0]
node1.set_configuration_options(values={'initial_token': tokens[0]})
cluster.start()

session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 'EverywhereStrategy')
create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

insert_statement = session.prepare("INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
execute_concurrent_with_args(session, insert_statement, [['k%d' % k] for k in range(keys)])

node1.flush()
node1.compact()

# Read the inserted data during the bootstrap process; we shouldn't
# get any errors
query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE)
session.shutdown()

# Bootstrap a new node on the current version
node2 = new_node(cluster)
node2.set_configuration_options(values={'initial_token': tokens[1]})
node2.start(wait_for_binary_proto=True)
node2.compact()

node1.cleanup()
logger.debug("node1 size for ks.cf after cleanup: %s" % float(data_size(node1,'ks','cf')))
node1.compact()
logger.debug("node1 size for ks.cf after compacting: %s" % float(data_size(node1,'ks','cf')))

logger.debug("node2 size for ks.cf after compacting: %s" % float(data_size(node2,'ks','cf')))

size1 = float(data_size(node1,'ks','cf'))
size2 = float(data_size(node2,'ks','cf'))
assert_almost_equal(size1, size2, error=0.3)

assert_bootstrap_state(self, node2, 'COMPLETED')

@since('4.1')
def test_invalid_host_id(self):
"""
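The new EverywhereStrategy test hinges on one invariant: every node holds a full replica, so once node1 has been cleaned up and both nodes compacted, their on-disk sizes for ks.cf should agree to within the 30% tolerance passed to assert_almost_equal. For readers unfamiliar with that helper, a hypothetical sketch of the tolerance math (not the real implementation):

```python
def assert_almost_equal_sketch(*values, error=0.3):
    # all values must lie within `error` (a fraction) of the largest one
    vmax, vmin = max(values), min(values)
    assert vmax - vmin <= vmax * error, \
        "values %r not within %.0f%% of max" % (values, error * 100)
```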
8 changes: 8 additions & 0 deletions byteman/guardrails/disk_usage_full.btm
@@ -0,0 +1,8 @@
RULE return FULL disk usage
CLASS org.apache.cassandra.service.disk.usage.DiskUsageMonitor
METHOD getState
AT EXIT
IF TRUE
DO
return org.apache.cassandra.service.disk.usage.DiskUsageState.FULL;
ENDRULE
8 changes: 8 additions & 0 deletions byteman/guardrails/disk_usage_stuffed.btm
@@ -0,0 +1,8 @@
RULE return STUFFED disk usage
CLASS org.apache.cassandra.service.disk.usage.DiskUsageMonitor
METHOD getState
AT EXIT
IF TRUE
DO
return org.apache.cassandra.service.disk.usage.DiskUsageState.STUFFED;
ENDRULE
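Both scripts pin DiskUsageMonitor.getState to a fixed DiskUsageState regardless of actual disk usage, which lets guardrail tests exercise the FULL and STUFFED paths deterministically. A hedged sketch of how a test might inject one, assuming ccm's Byteman support (install_byteman=True at populate time) and the node.byteman_submit helper used elsewhere in this suite:

```python
# start a single node with the Byteman agent attached
cluster.populate(1, install_byteman=True).start()
node = cluster.nodelist()[0]

# inject the FULL-disk rule; getState() now returns FULL at exit, so the
# disk-usage guardrail should begin rejecting writes on this node
node.byteman_submit(['./byteman/guardrails/disk_usage_full.btm'])
```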
3 changes: 2 additions & 1 deletion client_request_metrics_test.py
@@ -42,14 +42,15 @@ def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.ignore_log_patterns = (
'Testing write failures', # The error to simulate a write failure
'ERROR WRITE_FAILURE', # Logged in DEBUG mode for write failures
f"Scanned over {TOMBSTONE_FAILURE_THRESHOLD + 1} tombstones during query" # Caused by the read failure tests
f"Scanned over {TOMBSTONE_FAILURE_THRESHOLD + 1} (tombstones|tombstone rows) during query" # Caused by the read failure tests
)

def setup_once(self):
cluster = self.cluster
cluster.set_configuration_options({'read_request_timeout_in_ms': 3000,
'write_request_timeout_in_ms': 3000,
'phi_convict_threshold': 12,
'tombstone_warn_threshold': -1,
'tombstone_failure_threshold': TOMBSTONE_FAILURE_THRESHOLD,
'enable_materialized_views': 'true'})
cluster.populate(2, debug=True)
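The broadened regex accepts both message spellings ("tombstones" and "tombstone rows") that different versions emit when a read scans past tombstone_failure_threshold, and setting tombstone_warn_threshold to -1 presumably keeps the warning path from firing first. For context, a hedged sketch of the kind of workload that trips the failure (hypothetical table ks.tbl):

```python
# pile up tombstones in a single partition by deleting each inserted row
for i in range(TOMBSTONE_FAILURE_THRESHOLD + 10):
    session.execute("INSERT INTO ks.tbl (k, c, v) VALUES (0, %d, 0)" % i)
    session.execute("DELETE FROM ks.tbl WHERE k = 0 AND c = %d" % i)

# this read scans past the threshold, fails, and logs the
# "Scanned over ... during query" line matched by the pattern above
session.execute("SELECT * FROM ks.tbl WHERE k = 0")
```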
10 changes: 8 additions & 2 deletions compaction_test.py
@@ -339,7 +339,10 @@ def test_large_compaction_warning(self):
Check that we log a warning when the partition size is bigger than compaction_large_partition_warning_threshold_mb
"""
cluster = self.cluster
cluster.set_configuration_options({'compaction_large_partition_warning_threshold_mb': 1})
if self.supports_guardrails:
cluster.set_configuration_options({'guardrails': {'partition_size_warn_threshold_in_mb': 1}})
else:
cluster.set_configuration_options({'compaction_large_partition_warning_threshold_mb': 1})
cluster.populate(1).start()
[node] = cluster.nodelist()

@@ -361,7 +364,10 @@ def test_large_compaction_warning(self):
node.nodetool('compact ks large')
verb = 'Writing' if self.cluster.version() > '2.2' else 'Compacting'
sizematcher = '\d+ bytes' if self.cluster.version() < LooseVersion('3.6') else '\d+\.\d{3}(K|M|G)iB'
node.watch_log_for('{} large partition ks/large:user \({}'.format(verb, sizematcher), from_mark=mark, timeout=180)
log_message = '{} large partition ks/large:user \({}'.format(verb, sizematcher)
if self.supports_guardrails:
log_message = "Detected partition 'user' in ks.large of size 2MB is greater than the maximum recommended size \(1MB\)"
node.watch_log_for(log_message, from_mark=mark, timeout=180)

ret = list(session.execute("SELECT properties from ks.large where userid = 'user'"))
assert_length_equal(ret, 1)
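The two branches assert the same behaviour through different knobs: the legacy compaction_large_partition_warning_threshold_mb option versus the newer guardrail, which also changes the log line being watched. supports_guardrails is presumably a property on the dtest base class; a hypothetical sketch of its likely shape:

```python
@property
def supports_guardrails(self):
    # hypothetical: gate on the cluster version, since the guardrails
    # framework ships with the 4.0-based converged branches
    return self.cluster.version() >= LooseVersion('4.0')
```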
3 changes: 3 additions & 0 deletions conftest.py
@@ -43,6 +43,9 @@ def check_required_loopback_interfaces_available():


def pytest_addoption(parser):
parser.addoption("--sstable-format", action="store", default="bti",
help="SSTable format to be used by default for all newly created SSTables: "
"big or bti (default: bti)")
parser.addoption("--use-vnodes", action="store_true", default=False,
help="Determines wither or not to setup clusters using vnodes for tests")
parser.addoption("--use-off-heap-memtables", action="store_true", default=False,
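Once registered, the option is available to fixtures and tests through pytest's standard config API; a minimal sketch (hypothetical fixture name):

```python
import pytest

@pytest.fixture(scope='session')
def sstable_format(request):
    # "bti" unless the run was started with, e.g., pytest --sstable-format=big
    return request.config.getoption("--sstable-format")
```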
17 changes: 17 additions & 0 deletions cqlsh_tests/cqlshrc.sample.cloud
@@ -0,0 +1,17 @@
; Copyright DataStax, Inc.
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; Sample ~/.cqlshrc file with cloud configuration.
[connection]
secure_connect_bundle = /path/to/creds.zip
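To exercise this sample, a test could point cqlsh at it explicitly; a hedged sketch using this suite's run_cqlsh helper and assuming cqlsh's --cqlshrc option accepts an alternative config path:

```python
# hypothetical: run a trivial statement through the cloud config; the bundled
# secure-connect-test.zip stands in for real credentials
stdout, stderr = self.run_cqlsh(node1, cmds='SHOW VERSION',
                                cqlsh_options=['--cqlshrc', 'cqlsh_tests/cqlshrc.sample.cloud'])
```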
Binary file added cqlsh_tests/secure-connect-test.zip
102 changes: 100 additions & 2 deletions cqlsh_tests/test_cqlsh.py
@@ -23,6 +23,7 @@
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.query import BatchStatement, BatchType
from ccmlib import common
from ccmlib.node import ToolError

from .cqlsh_tools import monkeypatch_driver, unmonkeypatch_driver
from dtest import Tester, create_ks, create_cf
@@ -92,6 +93,7 @@ def run_cqlsh(self, node, cmds, cqlsh_options=None, env_vars=None):
logger.debug("Cqlsh command stderr:\n" + stderr)
return stdout, stderr


class TestCqlsh(Tester, CqlshMixin):

# override cluster options to enable user defined functions
@@ -1103,6 +1105,7 @@ def get_test_table_output(self, has_val=True, has_val_idx=True):
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND memtable = {}
AND crc_check_chance = 1.0
AND default_time_to_live = 0
AND extensions = {}
@@ -1192,6 +1195,7 @@ def get_users_table_output(self):
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND memtable = {}
AND crc_check_chance = 1.0
AND default_time_to_live = 0
AND extensions = {}
@@ -1298,6 +1302,7 @@ def get_users_by_state_mv_output(self):
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND memtable = {}
AND crc_check_chance = 1.0
AND default_time_to_live = 0
AND extensions = {}
@@ -1829,8 +1834,11 @@ def test_client_warnings(self):
"""
max_partitions_per_batch = 5
self.cluster.populate(3)
self.cluster.set_configuration_options({
'unlogged_batch_across_partitions_warn_threshold': str(max_partitions_per_batch)})

config_opts = {'unlogged_batch_across_partitions_warn_threshold': str(max_partitions_per_batch)}
if self.supports_guardrails:
config_opts = {"guardrails": config_opts}
self.cluster.set_configuration_options(config_opts)

self.cluster.start()

@@ -1880,6 +1888,52 @@ def test_connect_timeout(self):
stdout, stderr = self.run_cqlsh(node1, cmds='USE system', cqlsh_options=['--debug', '--connect-timeout=10'])
assert "Using connect timeout: 10 seconds" in stderr

@since('4.0')
def test_consistency_level_options(self):
"""
Tests for new cmdline consistency options:
- consistency-level
- serial-consistency-level
@jira_ticket STAR-432
"""
self.cluster.populate(1)
self.cluster.start()

node1, = self.cluster.nodelist()

def expect_output_no_errors(cmd, options, output):
stdout, stderr = self.run_cqlsh(node1, cmds=cmd, cqlsh_options=options)
assert output in stdout, stderr
assert stderr == ''

expect_output_no_errors('CONSISTENCY', [],
'Current consistency level is ONE.')

expect_output_no_errors('CONSISTENCY', ['--consistency-level', 'quorum'],
'Current consistency level is QUORUM.')

expect_output_no_errors('SERIAL CONSISTENCY', [],
'Current serial consistency level is SERIAL.')

expect_output_no_errors('SERIAL CONSISTENCY', ['--serial-consistency-level', 'local_serial'],
'Current serial consistency level is LOCAL_SERIAL.')

def expect_error(cmd, options, error_msg):
stdout, stderr = self.run_cqlsh(node1, cmds=cmd, cqlsh_options=options)
assert error_msg in stderr

expect_error('CONSISTENCY', ['--consistency-level', 'foop'],
'"foop" is not a valid consistency level')

expect_error('CONSISTENCY', ['--consistency-level', 'serial'],
'"serial" is not a valid consistency level')

expect_error('SERIAL CONSISTENCY', ['--serial-consistency-level', 'foop'],
'"foop" is not a valid serial consistency level')

expect_error('SERIAL CONSISTENCY', ['--serial-consistency-level', 'ONE'],
'"ONE" is not a valid serial consistency level')

@since('3.0.19')
def test_protocol_negotiation(self):
"""
@@ -2453,6 +2507,50 @@ def test_cjk_output(self):
"""
assert stdout_lines_sorted.find(expected) >= 0

@since('4.0')
def test_no_file_io(self):
def run_cqlsh_catch_toolerror(cmd, env):
"""
run_cqlsh will throw ToolError if cqlsh exits with a non-zero exit code.
"""
out = ""
err = ""
try:
out, err, _ = self.node1.run_cqlsh(cmd, env)
except ToolError as e:
return e.stdout, e.stderr
return out, err

create_ks(self.session, 'foo', rf=1)
create_cf(self.session, 'bar', key_type='int', columns={'name': 'text'})

cqlsh_stdout, cqlsh_stderr, _ = self.node1.run_cqlsh('COPY foo.bar TO \'/dev/null\';', [])
assert '0 rows exported to 1 files' in cqlsh_stdout
assert cqlsh_stderr == ''
cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('COPY foo.bar TO \'/dev/null\';', ['--no-file-io'])
assert cqlsh_stdout == ''
assert 'No file I/O permitted' in cqlsh_stderr

cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('DEBUG', [])
assert '(Pdb)' in cqlsh_stdout
cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('DEBUG', ['--no-file-io'])
assert cqlsh_stdout == ''
assert 'No file I/O permitted' in cqlsh_stderr

cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('CAPTURE \'nah\'', [])
assert cqlsh_stdout == 'Now capturing query output to \'nah\'.\n'
assert cqlsh_stderr == ''
cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('CAPTURE \'nah\'', ['--no-file-io'])
assert cqlsh_stdout == ''
assert 'No file I/O permitted' in cqlsh_stderr

cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('SOURCE \'nah\'', [])
assert cqlsh_stdout == ''
assert cqlsh_stderr == ''
cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('SOURCE \'nah\'', ['--no-file-io'])
assert cqlsh_stdout == ''
assert 'No file I/O permitted' in cqlsh_stderr


class TestCqlLogin(Tester, CqlshMixin):
"""