Respond to RFCs in #2908.

pull/2908/head
derekpierre 2022-04-12 09:36:08 -04:00
parent d6d5f400c3
commit 871094c548
5 changed files with 16 additions and 32 deletions


@@ -306,16 +306,16 @@ class WebController(InterfaceControlServer):
     @staticmethod
     def json_response_from_worker_pool_exception(exception):
-        json_response = {}
-        json_response['failure_message'] = f"{exception}; " \
-                                           f"{len(exception.failures)} concurrent failures recorded"
+        json_response = {
+            'failure_message': str(exception)
+        }
         if exception.failures:
             failures = []
             for value, exc_info in exception.failures.items():
-                failure = {'value': value}
-                _, exception, tb = exc_info
-                failure['error'] = str(exception)
-                failures.append(failure)
+                failures.append({
+                    'value': value,
+                    'error': str(exc_info[1])
+                })
             json_response['failures'] = failures
         return json_response
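
For orientation, a minimal sketch of the JSON body this method now produces. The failure keys below are made up, and exception.failures is assumed to map each value to a sys.exc_info()-style (type, exception, traceback) tuple, as the loop above expects:

# Illustrative only: the shape of the response after this change.
failures = {
    'worker-a': (ValueError, ValueError('bad response'), None),
    'worker-b': (TimeoutError, TimeoutError('no reply'), None),
}
json_response = {
    'failure_message': 'Some operations failed',   # str(exception)
    'failures': [
        {'value': value, 'error': str(exc_info[1])}
        for value, exc_info in failures.items()
    ],
}
# json_response['failures'] ->
#   [{'value': 'worker-a', 'error': 'bad response'},
#    {'value': 'worker-b', 'error': 'no reply'}]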
@@ -367,8 +367,8 @@ class WebController(InterfaceControlServer):
                 json_error_response=json_response_from_exception,
                 e=RuntimeError(json_response_from_exception['failure_message']),
                 error_message=WebController._captured_status_codes[__exception_code],
-                log_level='warn',
-                response_code=__exception_code)
+                response_code=__exception_code,
+                log_level='warn')

         #
         # Unhandled Server Errors


@@ -247,8 +247,7 @@ class WebEmitter:
         exception = f"{type(e).__name__}: {str(e)}" if str(e) else type(e).__name__
         message = f"{self} [{str(response_code)} - {error_message}] | ERROR: {exception}"
         logger = getattr(self.log, log_level)
-        # See #724 / 2156
-        message_cleaned_for_logger = message.replace("{", "<^<").replace("}", ">^>")
+        message_cleaned_for_logger = Logger.escape_format_string(message)
         logger(message_cleaned_for_logger)

     @staticmethod
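
The ad-hoc brace substitution (the workaround referenced by #724 / #2156) is replaced by a Logger helper. As a rough sketch of the underlying idea, not necessarily the exact nucypher implementation: Twisted's Logger treats log strings as str.format() templates, so literal braces must be doubled before logging.

# Hypothetical stand-in for Logger.escape_format_string.
def escape_format_string(string: str) -> str:
    # Double the braces so a PEP 3101-style formatter emits them literally.
    return string.replace("{", "{{").replace("}", "}}")

assert escape_format_string('ERROR: KeyError: {"value"}') == 'ERROR: KeyError: {{"value"}}'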
@@ -274,8 +273,8 @@ class WebEmitter:
                          json_error_response,
                          e,
                          error_message: str,
-                         log_level: str = 'info',
-                         response_code: int = 500):
+                         response_code: int,
+                         log_level: str = 'info'):
         self._log_exception(e, error_message, log_level, response_code)
         if self.crash_on_error:
             raise e
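
With this reorder, response_code becomes a required parameter and log_level keeps its default, which is what the reordered keyword arguments in the WebController hunk above accommodate. A small hypothetical usage sketch (abbreviated signature, not the full emitter API):

# Illustrative only: response_code must now be supplied; log_level may be omitted.
def emit_exception(json_error_response, e, error_message: str,
                   response_code: int, log_level: str = 'info'):
    print(f"[{response_code} - {error_message}] {log_level}: {e!r}")

emit_exception({}, RuntimeError("boom"), error_message="Internal Server Error",
               response_code=500)                      # log_level defaults to 'info'
emit_exception({}, RuntimeError("boom"), error_message="Bad Request",
               response_code=400, log_level='warn')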


@@ -190,13 +190,12 @@ class NucypherMiddlewareClient:
                              endpoint,
                              *args, **kwargs):
         # Use existing cached SSL certificate or fetch fresh copy and retry
-        cached_certificate_filepath = Path(
-            self.storage.generate_certificate_filepath(host=host, port=port))
-        if cached_certificate_filepath.exists():
+        cached_cert_filepath = Path(self.storage.generate_certificate_filepath(host=host, port=port))
+        if cached_cert_filepath.exists():
             # already cached try it
             try:
                 # Send request
-                response = self.invoke_method(method, endpoint, verify=cached_certificate_filepath,
+                response = self.invoke_method(method, endpoint, verify=cached_cert_filepath,
                                               *args, **kwargs)
             # successful use of cached certificate
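
Beyond the rename, the surrounding logic is unchanged; the comment above summarizes it. A simplified, hypothetical sketch of that cache-then-retry pattern (helper names invented for illustration):

from pathlib import Path

def request_with_node_cert(invoke, fetch_fresh_certificate, cert_path: Path, *args, **kwargs):
    # Try the cached certificate first, if one exists on disk.
    if cert_path.exists():
        try:
            return invoke(verify=cert_path, *args, **kwargs)
        except OSError:
            # e.g. the node re-issued its certificate since it was cached
            pass
    # Otherwise (or on failure) fetch a fresh certificate and retry.
    fresh_path = fetch_fresh_certificate()
    return invoke(verify=fresh_path, *args, **kwargs)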


@@ -98,17 +98,7 @@ class WorkerPoolException(Exception):
         # craft message
         msg = message_prefix
         if self.failures:
-            # Using one random failure
-            # Most probably they're all the same anyway.
-            value = list(self.failures)[0]
-            _, exception, tb = self.failures[value]
-            f = io.StringIO()
-            traceback.print_tb(tb, file=f)
-            traceback_str = f.getvalue()
-            msg = (f"{message_prefix} ({len(self.failures)} failures recorded); "
-                   f"for example, for {value}:\n"
-                   f"{traceback_str}\n"
-                   f"{exception}")
+            msg = f"{message_prefix} ({len(self.failures)} failures recorded)"
         super().__init__(msg)

     def get_tracebacks(self) -> Dict[Any, str]:
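
With the sampled traceback dropped from the exception message, per-failure detail presumably comes from get_tracebacks() instead (see its signature in the trailing context). A hedged sketch of what such a helper could look like, not necessarily the exact implementation:

import traceback
from typing import Any, Dict, Tuple

def get_tracebacks(failures: Dict[Any, Tuple]) -> Dict[Any, str]:
    # Render each recorded (type, exception, traceback) tuple into a printable string.
    return {
        value: "".join(traceback.format_exception(etype, exc, tb))
        for value, (etype, exc, tb) in failures.items()
    }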


@@ -170,10 +170,6 @@ def test_wait_for_successes_out_of_values(join_worker_pool):
         assert 'raise Exception(f"Operator for {value} failed")' in traceback
         assert f'Operator for {value} failed' in traceback
-
-    # This will be the last line in the displayed traceback;
-    # That's where the worker actually failed. (Operator for {value} failed)
-    assert 'raise Exception(f"Operator for {value} failed")' in message


 def test_wait_for_successes_timed_out(join_worker_pool):
     """