From ae45464a346875b684fdb8ac1fb300ad6360283a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 22 Oct 2025 09:33:05 -0500 Subject: [PATCH 01/46] PYTHON-5517 Updates to connection pool backoff --- pymongo/asynchronous/pool.py | 48 ++++++++++++------- pymongo/logger.py | 2 +- pymongo/monitoring.py | 31 ++++++++++++ pymongo/synchronous/pool.py | 48 ++++++++++++------- .../connection-logging.json | 10 ++-- .../pool-create-min-size-error.json | 6 +-- .../unified/auth-network-error.json | 4 +- test/load_balancer/sdam-error-handling.json | 14 ++---- 8 files changed, 105 insertions(+), 58 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 065686f43a..0f96e09b56 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -52,6 +52,7 @@ DocumentTooLarge, ExecutionTimeout, InvalidOperation, + NetworkTimeout, NotPrimaryError, OperationFailure, PyMongoError, @@ -723,6 +724,7 @@ class PoolState: PAUSED = 1 READY = 2 CLOSED = 3 + BACKOFF = 4 # Do *not* explicitly inherit from object or Jython won't call __del__ @@ -791,6 +793,7 @@ def __init__( self._pending = 0 self._client_id = client_id self._backoff = 0 + self._backoff_connection_time = -1 if self.enabled_for_cmap: assert self.opts._event_listeners is not None self.opts._event_listeners.publish_pool_created( @@ -817,6 +820,9 @@ def __init__( async def ready(self) -> None: # Take the lock to avoid the race condition described in PYTHON-2699. async with self.lock: + # Do not set the pool as ready if in backoff. + if self._backoff: + return if self.state != PoolState.READY: self.state = PoolState.READY if self.enabled_for_cmap: @@ -846,7 +852,7 @@ async def _reset( async with self.size_cond: if self.closed: return - # Clear the backoff state. + # Clear the backoff amount. 
self._backoff = 0 if self.opts.pause_enabled and pause and not self.opts.load_balanced: old_state, self.state = self.state, PoolState.PAUSED @@ -1029,26 +1035,34 @@ async def remove_stale_sockets(self, reference_generation: int) -> None: self.requests -= 1 self.size_cond.notify() - def _handle_connection_error(self, error: BaseException, phase: str, conn_id: int) -> None: + def _handle_connection_error(self, error: BaseException, phase: str) -> None: # Handle system overload condition for non-sdam pools. - # Look for an AutoReconnect error raised from a ConnectionResetError with - # errno == errno.ECONNRESET or raised from an OSError that we've created due to - # a closed connection. + # Look for an AutoReconnect or NetworkTimeout error. # If found, set backoff and add error labels. - if self.is_sdam or type(error) != AutoReconnect: + if self.is_sdam or type(error) not in (AutoReconnect, NetworkTimeout): return - self._backoff += 1 error._add_error_label("SystemOverloadedError") error._add_error_label("RetryableError") + self.backoff() + + def backoff(self): + """Set/increase backoff mode.""" + self._backoff += 1 + if self.state != PoolState.BACKOFF: + self.state = PoolState.BACKOFF + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_backoff(self.address, self._backoff) + self._backoff_connection_time = _backoff(self._backoff) + time.monotonic() + # Log the pool backoff message. 
if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - message=_ConnectionStatusMessage.POOL_BACKOFF, + message=_ConnectionStatusMessage.POOL_BACKOFF % self._backoff, clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], - driverConnectionId=conn_id, reason=_verbose_connection_error_reason(ConnectionClosedReason.POOL_BACKOFF), error=ConnectionClosedReason.POOL_BACKOFF, ) @@ -1082,10 +1096,6 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A driverConnectionId=conn_id, ) - # Apply backoff if applicable. - if self._backoff: - await asyncio.sleep(_backoff(self._backoff)) - # Pass a context to determine if we successfully create a configured socket. context = dict(has_created_socket=False) @@ -1114,7 +1124,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A error=ConnectionClosedReason.ERROR, ) if context["has_created_socket"]: - self._handle_connection_error(error, "handshake", conn_id) + self._handle_connection_error(error, "handshake") if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) _raise_connection_failure(self.address, error, timeout_details=details) @@ -1138,7 +1148,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A except BaseException as e: async with self.lock: self.active_contexts.discard(conn.cancel_context) - self._handle_connection_error(e, "hello", conn_id) + self._handle_connection_error(e, "hello") await conn.close_conn(ConnectionClosedReason.ERROR) raise @@ -1146,7 +1156,10 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A await handler.client._topology.receive_cluster_time(conn._cluster_time) # Clear the backoff state. 
- self._backoff = 0 + if self._backoff: + self._backoff = 0 + await self.ready() + return conn @contextlib.asynccontextmanager @@ -1342,6 +1355,9 @@ async def _get_conn( if await self._perished(conn): conn = None continue + # See if we need to wait for the backoff period. + elif self._backoff and (self._backoff_connection_time < time.monotonic()): + continue else: # We need to create a new connection try: conn = await self.connect(handler=handler) diff --git a/pymongo/logger.py b/pymongo/logger.py index ccfc45ed88..17cebb471a 100644 --- a/pymongo/logger.py +++ b/pymongo/logger.py @@ -42,7 +42,7 @@ class _ConnectionStatusMessage(str, enum.Enum): POOL_READY = "Connection pool ready" POOL_CLOSED = "Connection pool closed" POOL_CLEARED = "Connection pool cleared" - POOL_BACKOFF = "Connection pool backoff" + POOL_BACKOFF = "Connection pool backoff attempt number {%s}" CONN_CREATED = "Connection created" CONN_READY = "Connection ready" diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 0dfbbb915a..0d96c3bd92 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -914,6 +914,28 @@ class PoolClosedEvent(_PoolEvent): __slots__ = () +class PoolBackoffEvent(_PoolEvent): + """Published when a Connection Pool is backing off. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + :param attempt: The backoff attempt number. + + .. versionadded:: 4.16 + """ + + __slots__ = ("__attempt",) + + def __init__(self, address: _Address, attempt: int) -> None: + super().__init__(address) + self.__attempt = attempt + + @property + def attempt(self) -> int: + """The backoff attempt number.""" + return self.__attempt + + class ConnectionClosedReason: """An enum that defines values for `reason` on a :class:`ConnectionClosedEvent`. 
@@ -1830,6 +1852,15 @@ def publish_pool_closed(self, address: _Address) -> None: except Exception: _handle_exception() + def publish_pool_backoutt(self, address: _Address, attempt: int) -> None: + """Publish a :class:`PoolBackoffEvent` to all pool listeners.""" + event = PoolBackoffEvent(address, attempt) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_closed(event) + except Exception: + _handle_exception() + def publish_connection_created(self, address: _Address, connection_id: int) -> None: """Publish a :class:`ConnectionCreatedEvent` to all connection listeners. diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index d0c517f186..977fb2c126 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -49,6 +49,7 @@ DocumentTooLarge, ExecutionTimeout, InvalidOperation, + NetworkTimeout, NotPrimaryError, OperationFailure, PyMongoError, @@ -721,6 +722,7 @@ class PoolState: PAUSED = 1 READY = 2 CLOSED = 3 + BACKOFF = 4 # Do *not* explicitly inherit from object or Jython won't call __del__ @@ -789,6 +791,7 @@ def __init__( self._pending = 0 self._client_id = client_id self._backoff = 0 + self._backoff_connection_time = -1 if self.enabled_for_cmap: assert self.opts._event_listeners is not None self.opts._event_listeners.publish_pool_created( @@ -815,6 +818,9 @@ def __init__( def ready(self) -> None: # Take the lock to avoid the race condition described in PYTHON-2699. with self.lock: + # Do not set the pool as ready if in backoff. + if self._backoff: + return if self.state != PoolState.READY: self.state = PoolState.READY if self.enabled_for_cmap: @@ -844,7 +850,7 @@ def _reset( with self.size_cond: if self.closed: return - # Clear the backoff state. + # Clear the backoff amount. 
self._backoff = 0 if self.opts.pause_enabled and pause and not self.opts.load_balanced: old_state, self.state = self.state, PoolState.PAUSED @@ -1025,26 +1031,34 @@ def remove_stale_sockets(self, reference_generation: int) -> None: self.requests -= 1 self.size_cond.notify() - def _handle_connection_error(self, error: BaseException, phase: str, conn_id: int) -> None: + def _handle_connection_error(self, error: BaseException, phase: str) -> None: # Handle system overload condition for non-sdam pools. - # Look for an AutoReconnect error raised from a ConnectionResetError with - # errno == errno.ECONNRESET or raised from an OSError that we've created due to - # a closed connection. + # Look for an AutoReconnect or NetworkTimeout error. # If found, set backoff and add error labels. - if self.is_sdam or type(error) != AutoReconnect: + if self.is_sdam or type(error) not in (AutoReconnect, NetworkTimeout): return - self._backoff += 1 error._add_error_label("SystemOverloadedError") error._add_error_label("RetryableError") + self.backoff() + + def backoff(self): + """Set/increase backoff mode.""" + self._backoff += 1 + if self.state != PoolState.BACKOFF: + self.state = PoolState.BACKOFF + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_backoff(self.address, self._backoff) + self._backoff_connection_time = _backoff(self._backoff) + time.monotonic() + # Log the pool backoff message. 
if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - message=_ConnectionStatusMessage.POOL_BACKOFF, + message=_ConnectionStatusMessage.POOL_BACKOFF % self._backoff, clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], - driverConnectionId=conn_id, reason=_verbose_connection_error_reason(ConnectionClosedReason.POOL_BACKOFF), error=ConnectionClosedReason.POOL_BACKOFF, ) @@ -1078,10 +1092,6 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect driverConnectionId=conn_id, ) - # Apply backoff if applicable. - if self._backoff: - time.sleep(_backoff(self._backoff)) - # Pass a context to determine if we successfully create a configured socket. context = dict(has_created_socket=False) @@ -1110,7 +1120,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect error=ConnectionClosedReason.ERROR, ) if context["has_created_socket"]: - self._handle_connection_error(error, "handshake", conn_id) + self._handle_connection_error(error, "handshake") if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) _raise_connection_failure(self.address, error, timeout_details=details) @@ -1134,7 +1144,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect except BaseException as e: with self.lock: self.active_contexts.discard(conn.cancel_context) - self._handle_connection_error(e, "hello", conn_id) + self._handle_connection_error(e, "hello") conn.close_conn(ConnectionClosedReason.ERROR) raise @@ -1142,7 +1152,10 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect handler.client._topology.receive_cluster_time(conn._cluster_time) # Clear the backoff state. 
- self._backoff = 0 + if self._backoff: + self._backoff = 0 + self.ready() + return conn @contextlib.contextmanager @@ -1338,6 +1351,9 @@ def _get_conn( if self._perished(conn): conn = None continue + # See if we need to wait for the backoff period. + elif self._backoff and (self._backoff_connection_time < time.monotonic()): + continue else: # We need to create a new connection try: conn = self.connect(handler=handler) diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index 60190c7dc0..3e74259fd3 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -331,9 +331,7 @@ "uriOptions": { "retryReads": false, "appname": "clientAppName", - "heartbeatFrequencyMS": 10000, - "socketTimeoutMS": 500, - "connectTimeoutMS": 500 + "heartbeatFrequencyMS": 10000 }, "observeLogMessages": { "connection": "debug" @@ -357,9 +355,7 @@ "failCommands": [ "saslContinue" ], - "closeConnection": false, - "blockConnection": true, - "blockTimeMS": 1000, + "errorCode": 18, "appName": "clientAppName" } } @@ -372,7 +368,7 @@ "filter": {} }, "expectError": { - "isClientError": true + "isError": true } } ], diff --git a/test/connection_monitoring/pool-create-min-size-error.json b/test/connection_monitoring/pool-create-min-size-error.json index 8ec958780d..da9357b963 100644 --- a/test/connection_monitoring/pool-create-min-size-error.json +++ b/test/connection_monitoring/pool-create-min-size-error.json @@ -15,17 +15,13 @@ "isMaster", "hello" ], - "closeConnection": false, - "blockConnection": true, - "blockTimeMS": 1000, + "errorCode": 18, "appName": "poolCreateMinSizeErrorTest" } }, "poolOptions": { "minPoolSize": 1, "backgroundThreadIntervalMS": 50, - "socketTimeoutMS": 500, - "connectTimeoutMS": 500, "appName": "poolCreateMinSizeErrorTest" }, "operations": [ diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json 
b/test/discovery_and_monitoring/unified/auth-network-error.json index 656b291366..ccfc723617 100644 --- a/test/discovery_and_monitoring/unified/auth-network-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -53,9 +53,7 @@ "failCommands": [ "saslContinue" ], - "closeConnection": false, - "blockConnection": true, - "blockTimeMS": 1000, + "errorCode": 18, "appName": "authNetworkErrorTest" } } diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index b9842b8017..28823d5b95 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -32,8 +32,6 @@ "useMultipleMongoses": false, "uriOptions": { "appname": "lbSDAMErrorTestClient", - "socketTimeoutMS": 500, - "connectTimeoutMS": 500, "retryWrites": false }, "observeEvents": [ @@ -66,9 +64,7 @@ "id": "multiClient", "useMultipleMongoses": true, "uriOptions": { - "retryWrites": false, - "socketTimeoutMS": 500, - "connectTimeoutMS": 500 + "retryWrites": false }, "observeEvents": [ "connectionCreatedEvent", @@ -286,8 +282,7 @@ "isMaster", "hello" ], - "blockConnection": true, - "blockTimeMS": 1000, + "errorCode": 18, "appName": "lbSDAMErrorTestClient" } } @@ -302,7 +297,7 @@ } }, "expectError": { - "isClientError": true + "isError": true } } ], @@ -350,8 +345,7 @@ "failCommands": [ "saslContinue" ], - "blockConnection": true, - "blockTimeMS": 1000, + "errorCode": 18, "appName": "lbSDAMErrorTestClient" } } From a4dd0f14d6387314d569b8ddaca38f68407d5466 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 23 Oct 2025 06:53:19 -0500 Subject: [PATCH 02/46] wip add tests --- pymongo/asynchronous/pool.py | 3 +- pymongo/logger.py | 2 +- pymongo/monitoring.py | 16 +- pymongo/synchronous/pool.py | 3 +- .../test_connection_monitoring.py | 2 + .../connection-logging.json | 337 ++++++++++++++++++ .../pool-backoff-connection-close.json | 72 ++++ test/test_connection_monitoring.py | 2 + 
test/unified_format_shared.py | 4 + test/utils_shared.py | 5 + 10 files changed, 439 insertions(+), 7 deletions(-) create mode 100644 test/connection_monitoring/pool-backoff-connection-close.json diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 0f96e09b56..a41ece82a8 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1157,7 +1157,6 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A # Clear the backoff state. if self._backoff: - self._backoff = 0 await self.ready() return conn @@ -1242,7 +1241,7 @@ async def checkout( await self.checkin(conn) def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None: - if self.state != PoolState.READY: + if self.state not in (PoolState.READY, PoolState.BACKOFF): if emit_event: duration = time.monotonic() - checkout_started_time if self.enabled_for_cmap: diff --git a/pymongo/logger.py b/pymongo/logger.py index 17cebb471a..3b1fbebff2 100644 --- a/pymongo/logger.py +++ b/pymongo/logger.py @@ -42,7 +42,7 @@ class _ConnectionStatusMessage(str, enum.Enum): POOL_READY = "Connection pool ready" POOL_CLOSED = "Connection pool closed" POOL_CLEARED = "Connection pool cleared" - POOL_BACKOFF = "Connection pool backoff attempt number {%s}" + POOL_BACKOFF = "Connection pool backoff attempt number %s" CONN_CREATED = "Connection created" CONN_READY = "Connection ready" diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 0d96c3bd92..2e8f6944b2 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -137,6 +137,9 @@ def pool_cleared(self, event): def pool_closed(self, event): logging.info("[pool {0.address}] pool closed".format(event)) + def pool_backoff(self, event): + logging.info("[pool {0.address}] pool backoff attempt {0.attempt}".format(event)) + def connection_created(self, event): logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection created".format(event)) @@ -305,6 +308,15 @@ 
def pool_closed(self, event: PoolClosedEvent) -> None: """ raise NotImplementedError + def pool_backoff(self, event: PoolBackoffEvent) -> None: + """Abstract method to handle a `PoolBackoffEvent`. + + Emitted when a connection Pool is in backoff. + + :param event: An instance of :class:`PoolBackoffEvent`. + """ + raise NotImplementedError + def connection_created(self, event: ConnectionCreatedEvent) -> None: """Abstract method to handle a :class:`ConnectionCreatedEvent`. @@ -1852,12 +1864,12 @@ def publish_pool_closed(self, address: _Address) -> None: except Exception: _handle_exception() - def publish_pool_backoutt(self, address: _Address, attempt: int) -> None: + def publish_pool_backoff(self, address: _Address, attempt: int) -> None: """Publish a :class:`PoolBackoffEvent` to all pool listeners.""" event = PoolBackoffEvent(address, attempt) for subscriber in self.__cmap_listeners: try: - subscriber.pool_closed(event) + subscriber.pool_backoff(event) except Exception: _handle_exception() diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 977fb2c126..b563be139e 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1153,7 +1153,6 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect # Clear the backoff state. 
if self._backoff: - self._backoff = 0 self.ready() return conn @@ -1238,7 +1237,7 @@ def checkout( self.checkin(conn) def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None: - if self.state != PoolState.READY: + if self.state not in (PoolState.READY, PoolState.BACKOFF): if emit_event: duration = time.monotonic() - checkout_started_time if self.enabled_for_cmap: diff --git a/test/asynchronous/test_connection_monitoring.py b/test/asynchronous/test_connection_monitoring.py index c6dc6f0a69..f2502a7d54 100644 --- a/test/asynchronous/test_connection_monitoring.py +++ b/test/asynchronous/test_connection_monitoring.py @@ -52,6 +52,7 @@ ConnectionClosedReason, ConnectionCreatedEvent, ConnectionReadyEvent, + PoolBackoffEvent, PoolClearedEvent, PoolClosedEvent, PoolCreatedEvent, @@ -75,6 +76,7 @@ "ConnectionPoolReady": PoolReadyEvent, "ConnectionPoolCleared": PoolClearedEvent, "ConnectionPoolClosed": PoolClosedEvent, + "ConnectionPoolBackoff": PoolBackoffEvent, # Error types. 
"PoolClosedError": _PoolClosedError, "WaitQueueTimeoutError": WaitQueueTimeoutError, diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index 3e74259fd3..2d66c8b2cc 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -518,6 +518,343 @@ ] } ] + }, + { + "description": "Connection enters backoff on closed connection", + "runOnRequirements": [ + { + "auth": true, + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryReads": true, + "appname": "clientAppName", + "heartbeatFrequencyMS": 10000 + }, + "observeLogMessages": { + "connection": "debug" + } + } + }, + { + "database": { + "id": "database0", + "client": "client", + "databaseName": "ci-tests" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "clientAppName" + } + } + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "command": { + "find": "test" + }, + "commandName": "find" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection 
checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection closed", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "An error occurred while using the connection", + "error": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool backoff attempt number 1", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "Connection pool is in backoff", + "error": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout failed", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "An error occurred while trying to establish a new connection", + "error": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, 
+{ + "level": "debug", + "component": "connection", + "data": { + "message": "Connection ready", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked out", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked in", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] } ] } diff --git a/test/connection_monitoring/pool-backoff-connection-close.json b/test/connection_monitoring/pool-backoff-connection-close.json new file mode 100644 index 0000000000..571cd9f769 --- /dev/null +++ b/test/connection_monitoring/pool-backoff-connection-close.json @@ -0,0 +1,72 @@ +{ + "version": 1, + "style": "integration", + "description": "pool enters backoff on connection close", + "runOn": [ + { + "minServerVersion": "4.9.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": true + } + }, + "poolOptions": { + "minPoolSize": 0 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + } + ], + 
"events": [ + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionClosed" + }, + { + "type": "ConnectionPoolBackoff" + }, + { + "type": "ConnectionCheckOutFailed" + } + ], + "ignore": [ + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py index 1405824453..580d214541 100644 --- a/test/test_connection_monitoring.py +++ b/test/test_connection_monitoring.py @@ -51,6 +51,7 @@ ConnectionClosedReason, ConnectionCreatedEvent, ConnectionReadyEvent, + PoolBackoffEvent, PoolClearedEvent, PoolClosedEvent, PoolCreatedEvent, @@ -75,6 +76,7 @@ "ConnectionPoolReady": PoolReadyEvent, "ConnectionPoolCleared": PoolClearedEvent, "ConnectionPoolClosed": PoolClosedEvent, + "ConnectionPoolBackoff": PoolBackoffEvent, # Error types. "PoolClosedError": _PoolClosedError, "WaitQueueTimeoutError": WaitQueueTimeoutError, diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index 17dd73ec8c..99d36a7b35 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -64,6 +64,7 @@ ConnectionClosedEvent, ConnectionCreatedEvent, ConnectionReadyEvent, + PoolBackoffEvent, PoolClearedEvent, PoolClosedEvent, PoolCreatedEvent, @@ -618,6 +619,9 @@ def match_event(self, expectation, actual): self.test.assertIsInstance(actual.interrupt_connections, bool) elif name == "poolClosedEvent": self.test.assertIsInstance(actual, PoolClosedEvent) + elif name == "poolBackoffEvent": + self.test.assertIsInstance(actual, PoolBackoffEvent) + self.test.assertIsInstance(actual.attempt, int) elif name == "connectionCreatedEvent": self.test.assertIsInstance(actual, ConnectionCreatedEvent) elif name == "connectionReadyEvent": diff --git a/test/utils_shared.py b/test/utils_shared.py index f2e8852f0c..e64d600a46 100644 --- a/test/utils_shared.py +++ b/test/utils_shared.py @@ -48,6 +48,7 @@ 
ConnectionClosedEvent, ConnectionCreatedEvent, ConnectionReadyEvent, + PoolBackoffEvent, PoolClearedEvent, PoolClosedEvent, PoolCreatedEvent, @@ -142,6 +143,10 @@ def pool_closed(self, event): assert isinstance(event, PoolClosedEvent) self.add_event(event) + def pool_backoff(self, event): + assert isinstance(event, PoolBackoffEvent) + self.add_event(event) + class EventListener(BaseListener, monitoring.CommandListener): def __init__(self): From 25ab418f29135778c5d830b7b42dbd7711cb0490 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 23 Oct 2025 10:29:45 -0500 Subject: [PATCH 03/46] update tests --- test/asynchronous/test_pooling.py | 2 +- .../unified/auth-network-timeout-error.json | 63 +++---------------- test/test_pooling.py | 2 +- 3 files changed, 10 insertions(+), 57 deletions(-) diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py index 6cbdf7a65c..230fae7217 100644 --- a/test/asynchronous/test_pooling.py +++ b/test/asynchronous/test_pooling.py @@ -526,7 +526,7 @@ async def test_pool_check_backoff(self): await conn.conn.close() # Enable backoff. - cx_pool._backoff = 1 + cx_pool.backoff() # Swap pool's address with a bad one. 
address, cx_pool.address = cx_pool.address, ("foo.com", 1234) diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json index 3cf9576eba..fef02e8c84 100644 --- a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -71,8 +71,8 @@ "useMultipleMongoses": false, "observeEvents": [ "commandStartedEvent", - "serverDescriptionChangedEvent", - "poolClearedEvent" + "poolBackoffEvent", + "poolReadyEvent" ], "uriOptions": { "retryWrites": false, @@ -111,9 +111,6 @@ "_id": 4 } ] - }, - "expectError": { - "isError": true } }, { @@ -122,11 +119,7 @@ "arguments": { "client": "client", "event": { - "serverDescriptionChangedEvent": { - "newDescription": { - "type": "Unknown" - } - } + "poolBackoffEvent": {} }, "count": 1 } @@ -137,47 +130,7 @@ "arguments": { "client": "client", "event": { - "poolClearedEvent": {} - }, - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "serverDescriptionChangedEvent": { - "newDescription": { - "type": "Unknown" - } - } - }, - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "poolClearedEvent": {} + "poolReadyEvent": {} }, "count": 1 } @@ -194,10 +147,10 @@ "insert": "auth-network-timeout-error", "documents": [ { - "_id": 5 + "_id": 3 }, { - "_id": 6 + "_id": 4 } ] }, @@ -220,10 +173,10 @@ "_id": 2 }, { - "_id": 5 + "_id": 3 }, { - "_id": 6 + "_id": 4 } ] } diff --git a/test/test_pooling.py b/test/test_pooling.py index f3bfcf4ba2..e386130cda 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -524,7 +524,7 @@ def test_pool_check_backoff(self): 
conn.conn.close() # Enable backoff. - cx_pool._backoff = 1 + cx_pool.backoff() # Swap pool's address with a bad one. address, cx_pool.address = cx_pool.address, ("foo.com", 1234) From 58602c7811ecc9cea8b3b3a76002a9f30d0a3fc5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 23 Oct 2025 20:06:41 -0500 Subject: [PATCH 04/46] update sdam tests --- .../unified/auth-network-error.json | 85 ++++--------------- .../unified/auth-network-timeout-error.json | 27 ++++-- 2 files changed, 36 insertions(+), 76 deletions(-) diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json b/test/discovery_and_monitoring/unified/auth-network-error.json index ccfc723617..515d1aac5a 100644 --- a/test/discovery_and_monitoring/unified/auth-network-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -37,7 +37,7 @@ ], "tests": [ { - "description": "Reset server and pool after network error during authentication", + "description": "Backoff and retry after network connection error during authentication", "operations": [ { "name": "failPoint", @@ -47,13 +47,13 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "saslContinue" ], - "errorCode": 18, + "closeConnection": true, "appName": "authNetworkErrorTest" } } @@ -70,14 +70,15 @@ "useMultipleMongoses": false, "observeEvents": [ "commandStartedEvent", - "serverDescriptionChangedEvent", - "poolClearedEvent" + "poolBackoffEvent", + "poolReadyEvent", + "poolClearEvent" ], "uriOptions": { "retryWrites": false, - "socketTimeoutMS": 500, - "connectTimeoutMS": 500, - "appname": "authNetworkErrorTest" + "appname": "authNetworkErrorTest", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 } } }, @@ -110,9 +111,6 @@ "_id": 4 } ] - }, - "expectError": { - "isError": true } }, { @@ -121,65 +119,12 @@ "arguments": { "client": "client", "event": { - "serverDescriptionChangedEvent": { - "newDescription": { - "type": "Unknown" - } + 
"poolBackoffEvent": { + "attempt": 1 } }, "count": 1 } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "poolClearedEvent": {} - }, - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "serverDescriptionChangedEvent": { - "newDescription": { - "type": "Unknown" - } - } - }, - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "poolClearedEvent": {} - }, - "count": 1 - } } ], "expectEvents": [ @@ -193,10 +138,10 @@ "insert": "auth-network-error", "documents": [ { - "_id": 5 + "_id": 3 }, { - "_id": 6 + "_id": 4 } ] }, @@ -219,10 +164,10 @@ "_id": 2 }, { - "_id": 5 + "_id": 3 }, { - "_id": 6 + "_id": 4 } ] } diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json index fef02e8c84..6b8b29b039 100644 --- a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -37,7 +37,7 @@ ], "tests": [ { - "description": "Reset server and pool after network timeout error during authentication", + "description": "Backoff and retry after network timeout error during authentication", "operations": [ { "name": "failPoint", @@ -47,14 +47,13 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "saslContinue" ], - "blockConnection": true, - "blockTimeMS": 500, + "closeConnection": true, "appName": "authNetworkTimeoutErrorTest" } } @@ -72,7 +71,8 @@ "observeEvents": [ "commandStartedEvent", "poolBackoffEvent", - "poolReadyEvent" + "poolReadyEvent", + "poolClearEvent" 
], "uriOptions": { "retryWrites": false, @@ -119,7 +119,22 @@ "arguments": { "client": "client", "event": { - "poolBackoffEvent": {} + "poolBackoffEvent": { + "attempt": 1 + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolBackoffEvent": { + "attempt": 2 + } }, "count": 1 } From d3a495879c56f3f669053f8f062b7400eac1f1ce Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 07:51:01 -0500 Subject: [PATCH 05/46] wip update tests --- pymongo/asynchronous/helpers.py | 4 + pymongo/asynchronous/pool.py | 25 ++- pymongo/synchronous/helpers.py | 4 + pymongo/synchronous/pool.py | 25 ++- requirements/test.txt | 1 + .../unified/auth-network-error-fail.json | 154 ++++++++++++++++++ uv.lock | 112 ++++++++----- 7 files changed, 278 insertions(+), 47 deletions(-) create mode 100644 test/discovery_and_monitoring/unified/auth-network-error-fail.json diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py index 96241b947c..3883d78884 100644 --- a/pymongo/asynchronous/helpers.py +++ b/pymongo/asynchronous/helpers.py @@ -172,18 +172,22 @@ async def inner(self: Any, *args: Any, **kwargs: Any) -> Any: retry_policy = self._retry_policy attempt = 0 while True: + print("in retry overload", attempt, func, args, kwargs) try: res = await func(self, *args, **kwargs) await retry_policy.record_success(retry=attempt > 0) + print("finished retry overload", attempt, func, args, kwargs) return res except PyMongoError as exc: if not exc.has_error_label("RetryableError"): + print("retry overload no retryable overload", attempt, func, args, kwargs) raise attempt += 1 delay = 0 if exc.has_error_label("SystemOverloadedError"): delay = retry_policy.backoff(attempt) if not await retry_policy.should_retry(attempt, delay): + print("bailing on the retry", attempt, func, args, kwargs) raise # Implement exponential backoff on retry. 
diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index a41ece82a8..8dbc926168 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1041,6 +1041,7 @@ def _handle_connection_error(self, error: BaseException, phase: str) -> None: # If found, set backoff and add error labels. if self.is_sdam or type(error) not in (AutoReconnect, NetworkTimeout): return + print("handling connection error", id(self)) error._add_error_label("SystemOverloadedError") error._add_error_label("RetryableError") self.backoff() @@ -1326,6 +1327,7 @@ async def _get_conn( conn = None incremented = False emitted_event = False + try: async with self.lock: self.active_sockets += 1 @@ -1337,7 +1339,17 @@ async def _get_conn( self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self.max_connecting): timeout = deadline - time.monotonic() if deadline else None + if self._backoff and (self._backoff_connection_time > time.monotonic()): + timeout = 0.01 if not await _async_cond_wait(self._max_connecting_cond, timeout): + # Check whether we should continue to wait for the backoff condition. + if self._backoff and deadline is None or deadline < time.monotonic(): + print("looping?", id(self)) + if self._backoff_connection_time > time.monotonic(): + print("continue", id(self)) + continue + print("break", id(self)) + break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.conns or self._pending < self.max_connecting: @@ -1345,7 +1357,6 @@ async def _get_conn( emitted_event = True self._raise_wait_queue_timeout(checkout_started_time) self._raise_if_not_ready(checkout_started_time, emit_event=False) - try: conn = self.conns.popleft() except IndexError: @@ -1355,17 +1366,22 @@ async def _get_conn( conn = None continue # See if we need to wait for the backoff period. 
- elif self._backoff and (self._backoff_connection_time < time.monotonic()): + elif self._backoff and (self._backoff_connection_time > time.monotonic()): + print("wat", id(self)) continue else: # We need to create a new connection + print("trying a connection", id(self)) try: conn = await self.connect(handler=handler) finally: + print("finished trying a connection", id(self)) async with self._max_connecting_cond: self._pending -= 1 self._max_connecting_cond.notify() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. - except BaseException: + except BaseException as e: + print("got an exception", e, id(self)) if conn: # We checked out a socket but authentication failed. await conn.close_conn(ConnectionClosedReason.ERROR) @@ -1393,9 +1409,12 @@ async def _get_conn( error=ConnectionCheckOutFailedReason.CONN_ERROR, durationMS=duration, ) + print("raising the exception", id(self)) raise conn.active = True + if self._backoff: + print("finished get_conn", id(self)) return conn async def checkin(self, conn: AsyncConnection) -> None: diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py index 72d8978796..09c1d78f0d 100644 --- a/pymongo/synchronous/helpers.py +++ b/pymongo/synchronous/helpers.py @@ -172,18 +172,22 @@ def inner(self: Any, *args: Any, **kwargs: Any) -> Any: retry_policy = self._retry_policy attempt = 0 while True: + print("in retry overload", attempt, func, args, kwargs) try: res = func(self, *args, **kwargs) retry_policy.record_success(retry=attempt > 0) + print("finished retry overload", attempt, func, args, kwargs) return res except PyMongoError as exc: if not exc.has_error_label("RetryableError"): + print("retry overload no retryable overload", attempt, func, args, kwargs) raise attempt += 1 delay = 0 if exc.has_error_label("SystemOverloadedError"): delay = retry_policy.backoff(attempt) if not retry_policy.should_retry(attempt, delay): + print("bailing on the retry", attempt, func, args, kwargs) raise # Implement 
exponential backoff on retry. diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index b563be139e..785c811804 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1037,6 +1037,7 @@ def _handle_connection_error(self, error: BaseException, phase: str) -> None: # If found, set backoff and add error labels. if self.is_sdam or type(error) not in (AutoReconnect, NetworkTimeout): return + print("handling connection error", id(self)) error._add_error_label("SystemOverloadedError") error._add_error_label("RetryableError") self.backoff() @@ -1322,6 +1323,7 @@ def _get_conn( conn = None incremented = False emitted_event = False + try: with self.lock: self.active_sockets += 1 @@ -1333,7 +1335,17 @@ def _get_conn( self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self.max_connecting): timeout = deadline - time.monotonic() if deadline else None + if self._backoff and (self._backoff_connection_time > time.monotonic()): + timeout = 0.01 if not _cond_wait(self._max_connecting_cond, timeout): + # Check whether we should continue to wait for the backoff condition. + if self._backoff and deadline is None or deadline < time.monotonic(): + print("looping?", id(self)) + if self._backoff_connection_time > time.monotonic(): + print("continue", id(self)) + continue + print("break", id(self)) + break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.conns or self._pending < self.max_connecting: @@ -1341,7 +1353,6 @@ def _get_conn( emitted_event = True self._raise_wait_queue_timeout(checkout_started_time) self._raise_if_not_ready(checkout_started_time, emit_event=False) - try: conn = self.conns.popleft() except IndexError: @@ -1351,17 +1362,22 @@ def _get_conn( conn = None continue # See if we need to wait for the backoff period. 
- elif self._backoff and (self._backoff_connection_time < time.monotonic()): + elif self._backoff and (self._backoff_connection_time > time.monotonic()): + print("wat", id(self)) continue else: # We need to create a new connection + print("trying a connection", id(self)) try: conn = self.connect(handler=handler) finally: + print("finished trying a connection", id(self)) with self._max_connecting_cond: self._pending -= 1 self._max_connecting_cond.notify() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. - except BaseException: + except BaseException as e: + print("got an exception", e, id(self)) if conn: # We checked out a socket but authentication failed. conn.close_conn(ConnectionClosedReason.ERROR) @@ -1389,9 +1405,12 @@ def _get_conn( error=ConnectionCheckOutFailedReason.CONN_ERROR, durationMS=duration, ) + print("raising the exception", id(self)) raise conn.active = True + if self._backoff: + print("finished get_conn", id(self)) return conn def checkin(self, conn: Connection) -> None: diff --git a/requirements/test.txt b/requirements/test.txt index 135114feff..fa16e74d38 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -1,2 +1,3 @@ pytest>=8.2 pytest-asyncio>=0.24.0 +pytest-timeout diff --git a/test/discovery_and_monitoring/unified/auth-network-error-fail.json b/test/discovery_and_monitoring/unified/auth-network-error-fail.json new file mode 100644 index 0000000000..36d58240f3 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-error-fail.json @@ -0,0 +1,154 @@ +{ + "description": "auth-network-error-fail", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-error-fail", + "databaseName": "sdam-tests", + 
"documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Backoff and fail after network connection error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authNetworkErrorFailTest", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolBackoffEvent", + "poolReadyEvent", + "poolClearEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkErrorFailTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-error-fail" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "poolReadyEvent": {} + }, + { + "poolBackoffEvent": { + "attempt": 1 + } + }, + { + "poolBackoffEvent": { + "attempt": 2 + } + }, + { + "poolBackoffEvent": { + "attempt": 3 + } + }, + { + "poolBackoffEvent": { + "attempt": 4 + } + } + ] + } + ] + } + ] +} diff --git a/uv.lock b/uv.lock index 9c45c4cdb9..18fc29485b 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.9" resolution-markers = [ "python_full_version == '3.14.*'", @@ -1047,46 +1047,53 @@ dependencies = [ 
[[package]] name = "mypy" -version = "1.14.1" +version = "1.17.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, + { name = "pathspec" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051, upload-time = "2024-12-30T16:39:07.335Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002, upload-time = "2024-12-30T16:37:22.435Z" }, - { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400, upload-time = "2024-12-30T16:37:53.526Z" }, - { url = "https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172, upload-time = "2024-12-30T16:37:50.332Z" }, - { url = "https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732, upload-time = "2024-12-30T16:37:29.96Z" }, - { url = 
"https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197, upload-time = "2024-12-30T16:38:05.037Z" }, - { url = "https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836, upload-time = "2024-12-30T16:37:19.726Z" }, - { url = "https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432, upload-time = "2024-12-30T16:37:11.533Z" }, - { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515, upload-time = "2024-12-30T16:37:40.724Z" }, - { url = "https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791, upload-time = "2024-12-30T16:36:58.73Z" }, - { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203, upload-time = "2024-12-30T16:37:03.741Z" }, - { url = 
"https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900, upload-time = "2024-12-30T16:37:57.948Z" }, - { url = "https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869, upload-time = "2024-12-30T16:37:33.428Z" }, - { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668, upload-time = "2024-12-30T16:38:02.211Z" }, - { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060, upload-time = "2024-12-30T16:37:46.131Z" }, - { url = "https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167, upload-time = "2024-12-30T16:37:43.534Z" }, - { url = "https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341, upload-time = "2024-12-30T16:37:36.249Z" }, - { url = 
"https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991, upload-time = "2024-12-30T16:37:06.743Z" }, - { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016, upload-time = "2024-12-30T16:37:15.02Z" }, - { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097, upload-time = "2024-12-30T16:37:25.144Z" }, - { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728, upload-time = "2024-12-30T16:38:08.634Z" }, - { url = "https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965, upload-time = "2024-12-30T16:38:12.132Z" }, - { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660, upload-time = "2024-12-30T16:38:17.342Z" }, - { url = 
"https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198, upload-time = "2024-12-30T16:38:32.839Z" }, - { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276, upload-time = "2024-12-30T16:38:20.828Z" }, - { url = "https://files.pythonhosted.org/packages/ca/1f/186d133ae2514633f8558e78cd658070ba686c0e9275c5a5c24a1e1f0d67/mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35", size = 11200493, upload-time = "2024-12-30T16:38:26.935Z" }, - { url = "https://files.pythonhosted.org/packages/af/fc/4842485d034e38a4646cccd1369f6b1ccd7bc86989c52770d75d719a9941/mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc", size = 10357702, upload-time = "2024-12-30T16:38:50.623Z" }, - { url = "https://files.pythonhosted.org/packages/b4/e6/457b83f2d701e23869cfec013a48a12638f75b9d37612a9ddf99072c1051/mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9", size = 12091104, upload-time = "2024-12-30T16:38:53.735Z" }, - { url = "https://files.pythonhosted.org/packages/f1/bf/76a569158db678fee59f4fd30b8e7a0d75bcbaeef49edd882a0d63af6d66/mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb", size = 12830167, upload-time = "2024-12-30T16:38:56.437Z" }, - { url = 
"https://files.pythonhosted.org/packages/43/bc/0bc6b694b3103de9fed61867f1c8bd33336b913d16831431e7cb48ef1c92/mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60", size = 13013834, upload-time = "2024-12-30T16:38:59.204Z" }, - { url = "https://files.pythonhosted.org/packages/b0/79/5f5ec47849b6df1e6943d5fd8e6632fbfc04b4fd4acfa5a5a9535d11b4e2/mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c", size = 9781231, upload-time = "2024-12-30T16:39:05.124Z" }, - { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905, upload-time = "2024-12-30T16:38:42.021Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/8e/22/ea637422dedf0bf36f3ef238eab4e455e2a0dcc3082b5cc067615347ab8e/mypy-1.17.1.tar.gz", hash = "sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01", size = 3352570, upload-time = "2025-07-31T07:54:19.204Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/a9/3d7aa83955617cdf02f94e50aab5c830d205cfa4320cf124ff64acce3a8e/mypy-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972", size = 11003299, upload-time = "2025-07-31T07:54:06.425Z" }, + { url = "https://files.pythonhosted.org/packages/83/e8/72e62ff837dd5caaac2b4a5c07ce769c8e808a00a65e5d8f94ea9c6f20ab/mypy-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7", size = 10125451, upload-time = "2025-07-31T07:53:52.974Z" }, + { url = 
"https://files.pythonhosted.org/packages/7d/10/f3f3543f6448db11881776f26a0ed079865926b0c841818ee22de2c6bbab/mypy-1.17.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df", size = 11916211, upload-time = "2025-07-31T07:53:18.879Z" }, + { url = "https://files.pythonhosted.org/packages/06/bf/63e83ed551282d67bb3f7fea2cd5561b08d2bb6eb287c096539feb5ddbc5/mypy-1.17.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390", size = 12652687, upload-time = "2025-07-31T07:53:30.544Z" }, + { url = "https://files.pythonhosted.org/packages/69/66/68f2eeef11facf597143e85b694a161868b3b006a5fbad50e09ea117ef24/mypy-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94", size = 12896322, upload-time = "2025-07-31T07:53:50.74Z" }, + { url = "https://files.pythonhosted.org/packages/a3/87/8e3e9c2c8bd0d7e071a89c71be28ad088aaecbadf0454f46a540bda7bca6/mypy-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b", size = 9507962, upload-time = "2025-07-31T07:53:08.431Z" }, + { url = "https://files.pythonhosted.org/packages/46/cf/eadc80c4e0a70db1c08921dcc220357ba8ab2faecb4392e3cebeb10edbfa/mypy-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58", size = 10921009, upload-time = "2025-07-31T07:53:23.037Z" }, + { url = "https://files.pythonhosted.org/packages/5d/c1/c869d8c067829ad30d9bdae051046561552516cfb3a14f7f0347b7d973ee/mypy-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5", size = 10047482, upload-time = "2025-07-31T07:53:26.151Z" }, + { url = 
"https://files.pythonhosted.org/packages/98/b9/803672bab3fe03cee2e14786ca056efda4bb511ea02dadcedde6176d06d0/mypy-1.17.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd", size = 11832883, upload-time = "2025-07-31T07:53:47.948Z" }, + { url = "https://files.pythonhosted.org/packages/88/fb/fcdac695beca66800918c18697b48833a9a6701de288452b6715a98cfee1/mypy-1.17.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b", size = 12566215, upload-time = "2025-07-31T07:54:04.031Z" }, + { url = "https://files.pythonhosted.org/packages/7f/37/a932da3d3dace99ee8eb2043b6ab03b6768c36eb29a02f98f46c18c0da0e/mypy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5", size = 12751956, upload-time = "2025-07-31T07:53:36.263Z" }, + { url = "https://files.pythonhosted.org/packages/8c/cf/6438a429e0f2f5cab8bc83e53dbebfa666476f40ee322e13cac5e64b79e7/mypy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b", size = 9507307, upload-time = "2025-07-31T07:53:59.734Z" }, + { url = "https://files.pythonhosted.org/packages/17/a2/7034d0d61af8098ec47902108553122baa0f438df8a713be860f7407c9e6/mypy-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb", size = 11086295, upload-time = "2025-07-31T07:53:28.124Z" }, + { url = "https://files.pythonhosted.org/packages/14/1f/19e7e44b594d4b12f6ba8064dbe136505cec813549ca3e5191e40b1d3cc2/mypy-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403", size = 10112355, upload-time = "2025-07-31T07:53:21.121Z" }, + { url = 
"https://files.pythonhosted.org/packages/5b/69/baa33927e29e6b4c55d798a9d44db5d394072eef2bdc18c3e2048c9ed1e9/mypy-1.17.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056", size = 11875285, upload-time = "2025-07-31T07:53:55.293Z" }, + { url = "https://files.pythonhosted.org/packages/90/13/f3a89c76b0a41e19490b01e7069713a30949d9a6c147289ee1521bcea245/mypy-1.17.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341", size = 12737895, upload-time = "2025-07-31T07:53:43.623Z" }, + { url = "https://files.pythonhosted.org/packages/23/a1/c4ee79ac484241301564072e6476c5a5be2590bc2e7bfd28220033d2ef8f/mypy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb", size = 12931025, upload-time = "2025-07-31T07:54:17.125Z" }, + { url = "https://files.pythonhosted.org/packages/89/b8/7409477be7919a0608900e6320b155c72caab4fef46427c5cc75f85edadd/mypy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19", size = 9584664, upload-time = "2025-07-31T07:54:12.842Z" }, + { url = "https://files.pythonhosted.org/packages/5b/82/aec2fc9b9b149f372850291827537a508d6c4d3664b1750a324b91f71355/mypy-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7", size = 11075338, upload-time = "2025-07-31T07:53:38.873Z" }, + { url = "https://files.pythonhosted.org/packages/07/ac/ee93fbde9d2242657128af8c86f5d917cd2887584cf948a8e3663d0cd737/mypy-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81", size = 10113066, upload-time = "2025-07-31T07:54:14.707Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/68/946a1e0be93f17f7caa56c45844ec691ca153ee8b62f21eddda336a2d203/mypy-1.17.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6", size = 11875473, upload-time = "2025-07-31T07:53:14.504Z" }, + { url = "https://files.pythonhosted.org/packages/9f/0f/478b4dce1cb4f43cf0f0d00fba3030b21ca04a01b74d1cd272a528cf446f/mypy-1.17.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849", size = 12744296, upload-time = "2025-07-31T07:53:03.896Z" }, + { url = "https://files.pythonhosted.org/packages/ca/70/afa5850176379d1b303f992a828de95fc14487429a7139a4e0bdd17a8279/mypy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14", size = 12914657, upload-time = "2025-07-31T07:54:08.576Z" }, + { url = "https://files.pythonhosted.org/packages/53/f9/4a83e1c856a3d9c8f6edaa4749a4864ee98486e9b9dbfbc93842891029c2/mypy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a", size = 9593320, upload-time = "2025-07-31T07:53:01.341Z" }, + { url = "https://files.pythonhosted.org/packages/38/56/79c2fac86da57c7d8c48622a05873eaab40b905096c33597462713f5af90/mypy-1.17.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733", size = 11040037, upload-time = "2025-07-31T07:54:10.942Z" }, + { url = "https://files.pythonhosted.org/packages/4d/c3/adabe6ff53638e3cad19e3547268482408323b1e68bf082c9119000cd049/mypy-1.17.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd", size = 10131550, upload-time = "2025-07-31T07:53:41.307Z" }, + { url = 
"https://files.pythonhosted.org/packages/b8/c5/2e234c22c3bdeb23a7817af57a58865a39753bde52c74e2c661ee0cfc640/mypy-1.17.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0", size = 11872963, upload-time = "2025-07-31T07:53:16.878Z" }, + { url = "https://files.pythonhosted.org/packages/ab/26/c13c130f35ca8caa5f2ceab68a247775648fdcd6c9a18f158825f2bc2410/mypy-1.17.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a", size = 12710189, upload-time = "2025-07-31T07:54:01.962Z" }, + { url = "https://files.pythonhosted.org/packages/82/df/c7d79d09f6de8383fe800521d066d877e54d30b4fb94281c262be2df84ef/mypy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91", size = 12900322, upload-time = "2025-07-31T07:53:10.551Z" }, + { url = "https://files.pythonhosted.org/packages/b8/98/3d5a48978b4f708c55ae832619addc66d677f6dc59f3ebad71bae8285ca6/mypy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed", size = 9751879, upload-time = "2025-07-31T07:52:56.683Z" }, + { url = "https://files.pythonhosted.org/packages/29/cb/673e3d34e5d8de60b3a61f44f80150a738bff568cd6b7efb55742a605e98/mypy-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5d1092694f166a7e56c805caaf794e0585cabdbf1df36911c414e4e9abb62ae9", size = 10992466, upload-time = "2025-07-31T07:53:57.574Z" }, + { url = "https://files.pythonhosted.org/packages/0c/d0/fe1895836eea3a33ab801561987a10569df92f2d3d4715abf2cfeaa29cb2/mypy-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79d44f9bfb004941ebb0abe8eff6504223a9c1ac51ef967d1263c6572bbebc99", size = 10117638, upload-time = "2025-07-31T07:53:34.256Z" }, + { url = 
"https://files.pythonhosted.org/packages/97/f3/514aa5532303aafb95b9ca400a31054a2bd9489de166558c2baaeea9c522/mypy-1.17.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b01586eed696ec905e61bd2568f48740f7ac4a45b3a468e6423a03d3788a51a8", size = 11915673, upload-time = "2025-07-31T07:52:59.361Z" }, + { url = "https://files.pythonhosted.org/packages/ab/c3/c0805f0edec96fe8e2c048b03769a6291523d509be8ee7f56ae922fa3882/mypy-1.17.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43808d9476c36b927fbcd0b0255ce75efe1b68a080154a38ae68a7e62de8f0f8", size = 12649022, upload-time = "2025-07-31T07:53:45.92Z" }, + { url = "https://files.pythonhosted.org/packages/45/3e/d646b5a298ada21a8512fa7e5531f664535a495efa672601702398cea2b4/mypy-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:feb8cc32d319edd5859da2cc084493b3e2ce5e49a946377663cc90f6c15fb259", size = 12895536, upload-time = "2025-07-31T07:53:06.17Z" }, + { url = "https://files.pythonhosted.org/packages/14/55/e13d0dcd276975927d1f4e9e2ec4fd409e199f01bdc671717e673cc63a22/mypy-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d7598cf74c3e16539d4e2f0b8d8c318e00041553d83d4861f87c7a72e95ac24d", size = 9512564, upload-time = "2025-07-31T07:53:12.346Z" }, + { url = "https://files.pythonhosted.org/packages/1d/f3/8fcd2af0f5b806f6cf463efaffd3c9548a28f84220493ecd38d127b6b66d/mypy-1.17.1-py3-none-any.whl", hash = "sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9", size = 2283411, upload-time = "2025-07-31T07:53:24.664Z" }, ] [[package]] @@ -1116,6 +1123,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] +[[package]] +name = "pathspec" +version = "0.12.1" +source = 
{ registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + [[package]] name = "pip" version = "25.2" @@ -1247,6 +1263,7 @@ snappy = [ test = [ { name = "pytest" }, { name = "pytest-asyncio" }, + { name = "pytest-timeout" }, ] zstd = [ { name = "zstandard" }, @@ -1300,6 +1317,7 @@ requires-dist = [ { name = "pyopenssl", marker = "extra == 'ocsp'", specifier = ">=17.2.0" }, { name = "pytest", marker = "extra == 'test'", specifier = ">=8.2" }, { name = "pytest-asyncio", marker = "extra == 'test'", specifier = ">=0.24.0" }, + { name = "pytest-timeout", marker = "extra == 'test'" }, { name = "python-snappy", marker = "extra == 'snappy'" }, { name = "readthedocs-sphinx-search", marker = "extra == 'docs'", specifier = "~=0.3" }, { name = "requests", marker = "extra == 'ocsp'", specifier = "<3.0.0" }, @@ -1315,7 +1333,7 @@ provides-extras = ["aws", "docs", "encryption", "gssapi", "ocsp", "snappy", "tes [package.metadata.requires-dev] coverage = [ - { name = "coverage", specifier = ">=5,<=7.5" }, + { name = "coverage", specifier = ">=5,<=7.10.3" }, { name = "pytest-cov" }, ] dev = [{ name = "pre-commit", specifier = ">=4.0" }] @@ -1329,9 +1347,9 @@ perf = [{ name = "simplejson" }] pip = [{ name = "pip" }] pymongocrypt-source = [{ name = "pymongocrypt", git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master" }] typing = [ - { name = "mypy", specifier = "==1.14.1" }, + { name 
= "mypy", specifier = "==1.17.1" }, { name = "pip" }, - { name = "pyright", specifier = "==1.1.392.post0" }, + { name = "pyright", specifier = "==1.1.403" }, { name = "typing-extensions" }, ] @@ -1375,15 +1393,15 @@ wheels = [ [[package]] name = "pyright" -version = "1.1.392.post0" +version = "1.1.403" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nodeenv" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/66/df/3c6f6b08fba7ccf49b114dfc4bb33e25c299883fd763f93fad47ef8bc58d/pyright-1.1.392.post0.tar.gz", hash = "sha256:3b7f88de74a28dcfa90c7d90c782b6569a48c2be5f9d4add38472bdaac247ebd", size = 3789911, upload-time = "2025-01-15T15:01:20.913Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/f6/35f885264ff08c960b23d1542038d8da86971c5d8c955cfab195a4f672d7/pyright-1.1.403.tar.gz", hash = "sha256:3ab69b9f41c67fb5bbb4d7a36243256f0d549ed3608678d381d5f51863921104", size = 3913526, upload-time = "2025-07-09T07:15:52.882Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e7/b1/a18de17f40e4f61ca58856b9ef9b0febf74ff88978c3f7776f910071f567/pyright-1.1.392.post0-py3-none-any.whl", hash = "sha256:252f84458a46fa2f0fd4e2f91fc74f50b9ca52c757062e93f6c250c0d8329eb2", size = 5595487, upload-time = "2025-01-15T15:01:17.775Z" }, + { url = "https://files.pythonhosted.org/packages/49/b6/b04e5c2f41a5ccad74a1a4759da41adb20b4bc9d59a5e08d29ba60084d07/pyright-1.1.403-py3-none-any.whl", hash = "sha256:c0eeca5aa76cbef3fcc271259bbd785753c7ad7bcac99a9162b4c4c7daed23b3", size = 5684504, upload-time = "2025-07-09T07:15:50.958Z" }, ] [[package]] @@ -1432,6 +1450,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bc/16/4ea354101abb1287856baa4af2732be351c7bee728065aed451b678153fd/pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5", size = 24644, upload-time = "2025-06-12T10:47:45.932Z" }, ] +[[package]] +name = 
"pytest-timeout" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ac/82/4c9ecabab13363e72d880f2fb504c5f750433b2b6f16e99f4ec21ada284c/pytest_timeout-2.4.0.tar.gz", hash = "sha256:7e68e90b01f9eff71332b25001f85c75495fc4e3a836701876183c4bcfd0540a", size = 17973, upload-time = "2025-05-05T19:44:34.99Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/b6/3127540ecdf1464a00e5a01ee60a1b09175f6913f0644ac748494d9c4b21/pytest_timeout-2.4.0-py3-none-any.whl", hash = "sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2", size = 14382, upload-time = "2025-05-05T19:44:33.502Z" }, +] + [[package]] name = "python-dateutil" version = "2.9.0.post0" From 76c4ee62f7ded582bffe17b1ee581ddda3f4a351 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 09:27:18 -0500 Subject: [PATCH 06/46] Revert "Merge branch 'backpressure' of github.com:mongodb/mongo-python-driver into PYTHON-5517" This reverts commit 1895e0002ad3adcf426fbc3c6b600f5bfe885fbc, reversing changes made to d3a495879c56f3f669053f8f062b7400eac1f1ce. --- pymongo/asynchronous/helpers.py | 4 ++-- pymongo/synchronous/helpers.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py index d01158de7d..3883d78884 100644 --- a/pymongo/asynchronous/helpers.py +++ b/pymongo/asynchronous/helpers.py @@ -77,8 +77,8 @@ async def inner(*args: Any, **kwargs: Any) -> Any: return cast(F, inner) -_MAX_RETRIES = 5 -_BACKOFF_INITIAL = 0.1 +_MAX_RETRIES = 3 +_BACKOFF_INITIAL = 0.05 _BACKOFF_MAX = 10 # DRIVERS-3240 will determine these defaults. 
DEFAULT_RETRY_TOKEN_CAPACITY = 1000.0 diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py index 5992f85c82..09c1d78f0d 100644 --- a/pymongo/synchronous/helpers.py +++ b/pymongo/synchronous/helpers.py @@ -77,8 +77,8 @@ def inner(*args: Any, **kwargs: Any) -> Any: return cast(F, inner) -_MAX_RETRIES = 5 -_BACKOFF_INITIAL = 0.1 +_MAX_RETRIES = 3 +_BACKOFF_INITIAL = 0.05 _BACKOFF_MAX = 10 # DRIVERS-3240 will determine these defaults. DEFAULT_RETRY_TOKEN_CAPACITY = 1000.0 From 546976df33b4701766214bbd35b7b8462f2ef36e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 09:27:30 -0500 Subject: [PATCH 07/46] Revert "wip update tests" This reverts commit d3a495879c56f3f669053f8f062b7400eac1f1ce. --- pymongo/asynchronous/helpers.py | 4 - pymongo/asynchronous/pool.py | 25 +-- pymongo/synchronous/helpers.py | 4 - pymongo/synchronous/pool.py | 25 +-- requirements/test.txt | 1 - .../unified/auth-network-error-fail.json | 154 ------------------ uv.lock | 112 +++++-------- 7 files changed, 47 insertions(+), 278 deletions(-) delete mode 100644 test/discovery_and_monitoring/unified/auth-network-error-fail.json diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py index 3883d78884..96241b947c 100644 --- a/pymongo/asynchronous/helpers.py +++ b/pymongo/asynchronous/helpers.py @@ -172,22 +172,18 @@ async def inner(self: Any, *args: Any, **kwargs: Any) -> Any: retry_policy = self._retry_policy attempt = 0 while True: - print("in retry overload", attempt, func, args, kwargs) try: res = await func(self, *args, **kwargs) await retry_policy.record_success(retry=attempt > 0) - print("finished retry overload", attempt, func, args, kwargs) return res except PyMongoError as exc: if not exc.has_error_label("RetryableError"): - print("retry overload no retryable overload", attempt, func, args, kwargs) raise attempt += 1 delay = 0 if exc.has_error_label("SystemOverloadedError"): delay = retry_policy.backoff(attempt) if not await 
retry_policy.should_retry(attempt, delay): - print("bailing on the retry", attempt, func, args, kwargs) raise # Implement exponential backoff on retry. diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 8dbc926168..a41ece82a8 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1041,7 +1041,6 @@ def _handle_connection_error(self, error: BaseException, phase: str) -> None: # If found, set backoff and add error labels. if self.is_sdam or type(error) not in (AutoReconnect, NetworkTimeout): return - print("handling connection error", id(self)) error._add_error_label("SystemOverloadedError") error._add_error_label("RetryableError") self.backoff() @@ -1327,7 +1326,6 @@ async def _get_conn( conn = None incremented = False emitted_event = False - try: async with self.lock: self.active_sockets += 1 @@ -1339,17 +1337,7 @@ async def _get_conn( self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self.max_connecting): timeout = deadline - time.monotonic() if deadline else None - if self._backoff and (self._backoff_connection_time > time.monotonic()): - timeout = 0.01 if not await _async_cond_wait(self._max_connecting_cond, timeout): - # Check whether we should continue to wait for the backoff condition. - if self._backoff and deadline is None or deadline < time.monotonic(): - print("looping?", id(self)) - if self._backoff_connection_time > time.monotonic(): - print("continue", id(self)) - continue - print("break", id(self)) - break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. 
if self.conns or self._pending < self.max_connecting: @@ -1357,6 +1345,7 @@ async def _get_conn( emitted_event = True self._raise_wait_queue_timeout(checkout_started_time) self._raise_if_not_ready(checkout_started_time, emit_event=False) + try: conn = self.conns.popleft() except IndexError: @@ -1366,22 +1355,17 @@ async def _get_conn( conn = None continue # See if we need to wait for the backoff period. - elif self._backoff and (self._backoff_connection_time > time.monotonic()): - print("wat", id(self)) + elif self._backoff and (self._backoff_connection_time < time.monotonic()): continue else: # We need to create a new connection - print("trying a connection", id(self)) try: conn = await self.connect(handler=handler) finally: - print("finished trying a connection", id(self)) async with self._max_connecting_cond: self._pending -= 1 self._max_connecting_cond.notify() - # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. - except BaseException as e: - print("got an exception", e, id(self)) + except BaseException: if conn: # We checked out a socket but authentication failed. 
await conn.close_conn(ConnectionClosedReason.ERROR) @@ -1409,12 +1393,9 @@ async def _get_conn( error=ConnectionCheckOutFailedReason.CONN_ERROR, durationMS=duration, ) - print("raising the exception", id(self)) raise conn.active = True - if self._backoff: - print("finished get_conn", id(self)) return conn async def checkin(self, conn: AsyncConnection) -> None: diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py index 09c1d78f0d..72d8978796 100644 --- a/pymongo/synchronous/helpers.py +++ b/pymongo/synchronous/helpers.py @@ -172,22 +172,18 @@ def inner(self: Any, *args: Any, **kwargs: Any) -> Any: retry_policy = self._retry_policy attempt = 0 while True: - print("in retry overload", attempt, func, args, kwargs) try: res = func(self, *args, **kwargs) retry_policy.record_success(retry=attempt > 0) - print("finished retry overload", attempt, func, args, kwargs) return res except PyMongoError as exc: if not exc.has_error_label("RetryableError"): - print("retry overload no retryable overload", attempt, func, args, kwargs) raise attempt += 1 delay = 0 if exc.has_error_label("SystemOverloadedError"): delay = retry_policy.backoff(attempt) if not retry_policy.should_retry(attempt, delay): - print("bailing on the retry", attempt, func, args, kwargs) raise # Implement exponential backoff on retry. diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 785c811804..b563be139e 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1037,7 +1037,6 @@ def _handle_connection_error(self, error: BaseException, phase: str) -> None: # If found, set backoff and add error labels. 
if self.is_sdam or type(error) not in (AutoReconnect, NetworkTimeout): return - print("handling connection error", id(self)) error._add_error_label("SystemOverloadedError") error._add_error_label("RetryableError") self.backoff() @@ -1323,7 +1322,6 @@ def _get_conn( conn = None incremented = False emitted_event = False - try: with self.lock: self.active_sockets += 1 @@ -1335,17 +1333,7 @@ def _get_conn( self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self.max_connecting): timeout = deadline - time.monotonic() if deadline else None - if self._backoff and (self._backoff_connection_time > time.monotonic()): - timeout = 0.01 if not _cond_wait(self._max_connecting_cond, timeout): - # Check whether we should continue to wait for the backoff condition. - if self._backoff and deadline is None or deadline < time.monotonic(): - print("looping?", id(self)) - if self._backoff_connection_time > time.monotonic(): - print("continue", id(self)) - continue - print("break", id(self)) - break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.conns or self._pending < self.max_connecting: @@ -1353,6 +1341,7 @@ def _get_conn( emitted_event = True self._raise_wait_queue_timeout(checkout_started_time) self._raise_if_not_ready(checkout_started_time, emit_event=False) + try: conn = self.conns.popleft() except IndexError: @@ -1362,22 +1351,17 @@ def _get_conn( conn = None continue # See if we need to wait for the backoff period. 
- elif self._backoff and (self._backoff_connection_time > time.monotonic()): - print("wat", id(self)) + elif self._backoff and (self._backoff_connection_time < time.monotonic()): continue else: # We need to create a new connection - print("trying a connection", id(self)) try: conn = self.connect(handler=handler) finally: - print("finished trying a connection", id(self)) with self._max_connecting_cond: self._pending -= 1 self._max_connecting_cond.notify() - # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. - except BaseException as e: - print("got an exception", e, id(self)) + except BaseException: if conn: # We checked out a socket but authentication failed. conn.close_conn(ConnectionClosedReason.ERROR) @@ -1405,12 +1389,9 @@ def _get_conn( error=ConnectionCheckOutFailedReason.CONN_ERROR, durationMS=duration, ) - print("raising the exception", id(self)) raise conn.active = True - if self._backoff: - print("finished get_conn", id(self)) return conn def checkin(self, conn: Connection) -> None: diff --git a/requirements/test.txt b/requirements/test.txt index fa16e74d38..135114feff 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -1,3 +1,2 @@ pytest>=8.2 pytest-asyncio>=0.24.0 -pytest-timeout diff --git a/test/discovery_and_monitoring/unified/auth-network-error-fail.json b/test/discovery_and_monitoring/unified/auth-network-error-fail.json deleted file mode 100644 index 36d58240f3..0000000000 --- a/test/discovery_and_monitoring/unified/auth-network-error-fail.json +++ /dev/null @@ -1,154 +0,0 @@ -{ - "description": "auth-network-error-fail", - "schemaVersion": "1.4", - "runOnRequirements": [ - { - "minServerVersion": "4.4", - "auth": true, - "serverless": "forbid", - "topologies": [ - "single", - "replicaset", - "sharded" - ] - } - ], - "createEntities": [ - { - "client": { - "id": "setupClient", - "useMultipleMongoses": false - } - } - ], - "initialData": [ - { - "collectionName": "auth-network-error-fail", - "databaseName": "sdam-tests", - 
"documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - ], - "tests": [ - { - "description": "Backoff and fail after network connection error during authentication", - "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": "alwaysOn", - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authNetworkErrorFailTest", - "closeConnection": true - } - } - } - }, - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "client": { - "id": "client", - "useMultipleMongoses": false, - "observeEvents": [ - "commandStartedEvent", - "poolBackoffEvent", - "poolReadyEvent", - "poolClearEvent" - ], - "uriOptions": { - "retryWrites": false, - "appname": "authNetworkErrorFailTest" - } - } - }, - { - "database": { - "id": "database", - "client": "client", - "databaseName": "sdam-tests" - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "auth-network-error-fail" - } - } - ] - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client", - "eventType": "command", - "events": [] - }, - { - "client": "client", - "eventType": "cmap", - "events": [ - { - "poolReadyEvent": {} - }, - { - "poolBackoffEvent": { - "attempt": 1 - } - }, - { - "poolBackoffEvent": { - "attempt": 2 - } - }, - { - "poolBackoffEvent": { - "attempt": 3 - } - }, - { - "poolBackoffEvent": { - "attempt": 4 - } - } - ] - } - ] - } - ] -} diff --git a/uv.lock b/uv.lock index 18fc29485b..9c45c4cdb9 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 3 +revision = 2 requires-python = ">=3.9" resolution-markers = [ "python_full_version == '3.14.*'", @@ -1047,53 +1047,46 @@ dependencies = [ 
[[package]] name = "mypy" -version = "1.17.1" +version = "1.14.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, - { name = "pathspec" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8e/22/ea637422dedf0bf36f3ef238eab4e455e2a0dcc3082b5cc067615347ab8e/mypy-1.17.1.tar.gz", hash = "sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01", size = 3352570, upload-time = "2025-07-31T07:54:19.204Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/77/a9/3d7aa83955617cdf02f94e50aab5c830d205cfa4320cf124ff64acce3a8e/mypy-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972", size = 11003299, upload-time = "2025-07-31T07:54:06.425Z" }, - { url = "https://files.pythonhosted.org/packages/83/e8/72e62ff837dd5caaac2b4a5c07ce769c8e808a00a65e5d8f94ea9c6f20ab/mypy-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7", size = 10125451, upload-time = "2025-07-31T07:53:52.974Z" }, - { url = "https://files.pythonhosted.org/packages/7d/10/f3f3543f6448db11881776f26a0ed079865926b0c841818ee22de2c6bbab/mypy-1.17.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df", size = 11916211, upload-time = "2025-07-31T07:53:18.879Z" }, - { url = "https://files.pythonhosted.org/packages/06/bf/63e83ed551282d67bb3f7fea2cd5561b08d2bb6eb287c096539feb5ddbc5/mypy-1.17.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390", size = 12652687, upload-time = "2025-07-31T07:53:30.544Z" }, - { url = 
"https://files.pythonhosted.org/packages/69/66/68f2eeef11facf597143e85b694a161868b3b006a5fbad50e09ea117ef24/mypy-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94", size = 12896322, upload-time = "2025-07-31T07:53:50.74Z" }, - { url = "https://files.pythonhosted.org/packages/a3/87/8e3e9c2c8bd0d7e071a89c71be28ad088aaecbadf0454f46a540bda7bca6/mypy-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b", size = 9507962, upload-time = "2025-07-31T07:53:08.431Z" }, - { url = "https://files.pythonhosted.org/packages/46/cf/eadc80c4e0a70db1c08921dcc220357ba8ab2faecb4392e3cebeb10edbfa/mypy-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58", size = 10921009, upload-time = "2025-07-31T07:53:23.037Z" }, - { url = "https://files.pythonhosted.org/packages/5d/c1/c869d8c067829ad30d9bdae051046561552516cfb3a14f7f0347b7d973ee/mypy-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5", size = 10047482, upload-time = "2025-07-31T07:53:26.151Z" }, - { url = "https://files.pythonhosted.org/packages/98/b9/803672bab3fe03cee2e14786ca056efda4bb511ea02dadcedde6176d06d0/mypy-1.17.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd", size = 11832883, upload-time = "2025-07-31T07:53:47.948Z" }, - { url = "https://files.pythonhosted.org/packages/88/fb/fcdac695beca66800918c18697b48833a9a6701de288452b6715a98cfee1/mypy-1.17.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b", size = 12566215, upload-time = "2025-07-31T07:54:04.031Z" }, - { url = 
"https://files.pythonhosted.org/packages/7f/37/a932da3d3dace99ee8eb2043b6ab03b6768c36eb29a02f98f46c18c0da0e/mypy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5", size = 12751956, upload-time = "2025-07-31T07:53:36.263Z" }, - { url = "https://files.pythonhosted.org/packages/8c/cf/6438a429e0f2f5cab8bc83e53dbebfa666476f40ee322e13cac5e64b79e7/mypy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b", size = 9507307, upload-time = "2025-07-31T07:53:59.734Z" }, - { url = "https://files.pythonhosted.org/packages/17/a2/7034d0d61af8098ec47902108553122baa0f438df8a713be860f7407c9e6/mypy-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb", size = 11086295, upload-time = "2025-07-31T07:53:28.124Z" }, - { url = "https://files.pythonhosted.org/packages/14/1f/19e7e44b594d4b12f6ba8064dbe136505cec813549ca3e5191e40b1d3cc2/mypy-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403", size = 10112355, upload-time = "2025-07-31T07:53:21.121Z" }, - { url = "https://files.pythonhosted.org/packages/5b/69/baa33927e29e6b4c55d798a9d44db5d394072eef2bdc18c3e2048c9ed1e9/mypy-1.17.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056", size = 11875285, upload-time = "2025-07-31T07:53:55.293Z" }, - { url = "https://files.pythonhosted.org/packages/90/13/f3a89c76b0a41e19490b01e7069713a30949d9a6c147289ee1521bcea245/mypy-1.17.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341", size = 12737895, upload-time = "2025-07-31T07:53:43.623Z" }, - { url = 
"https://files.pythonhosted.org/packages/23/a1/c4ee79ac484241301564072e6476c5a5be2590bc2e7bfd28220033d2ef8f/mypy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb", size = 12931025, upload-time = "2025-07-31T07:54:17.125Z" }, - { url = "https://files.pythonhosted.org/packages/89/b8/7409477be7919a0608900e6320b155c72caab4fef46427c5cc75f85edadd/mypy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19", size = 9584664, upload-time = "2025-07-31T07:54:12.842Z" }, - { url = "https://files.pythonhosted.org/packages/5b/82/aec2fc9b9b149f372850291827537a508d6c4d3664b1750a324b91f71355/mypy-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7", size = 11075338, upload-time = "2025-07-31T07:53:38.873Z" }, - { url = "https://files.pythonhosted.org/packages/07/ac/ee93fbde9d2242657128af8c86f5d917cd2887584cf948a8e3663d0cd737/mypy-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81", size = 10113066, upload-time = "2025-07-31T07:54:14.707Z" }, - { url = "https://files.pythonhosted.org/packages/5a/68/946a1e0be93f17f7caa56c45844ec691ca153ee8b62f21eddda336a2d203/mypy-1.17.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6", size = 11875473, upload-time = "2025-07-31T07:53:14.504Z" }, - { url = "https://files.pythonhosted.org/packages/9f/0f/478b4dce1cb4f43cf0f0d00fba3030b21ca04a01b74d1cd272a528cf446f/mypy-1.17.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849", size = 12744296, upload-time = "2025-07-31T07:53:03.896Z" }, - { url = 
"https://files.pythonhosted.org/packages/ca/70/afa5850176379d1b303f992a828de95fc14487429a7139a4e0bdd17a8279/mypy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14", size = 12914657, upload-time = "2025-07-31T07:54:08.576Z" }, - { url = "https://files.pythonhosted.org/packages/53/f9/4a83e1c856a3d9c8f6edaa4749a4864ee98486e9b9dbfbc93842891029c2/mypy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a", size = 9593320, upload-time = "2025-07-31T07:53:01.341Z" }, - { url = "https://files.pythonhosted.org/packages/38/56/79c2fac86da57c7d8c48622a05873eaab40b905096c33597462713f5af90/mypy-1.17.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733", size = 11040037, upload-time = "2025-07-31T07:54:10.942Z" }, - { url = "https://files.pythonhosted.org/packages/4d/c3/adabe6ff53638e3cad19e3547268482408323b1e68bf082c9119000cd049/mypy-1.17.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd", size = 10131550, upload-time = "2025-07-31T07:53:41.307Z" }, - { url = "https://files.pythonhosted.org/packages/b8/c5/2e234c22c3bdeb23a7817af57a58865a39753bde52c74e2c661ee0cfc640/mypy-1.17.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0", size = 11872963, upload-time = "2025-07-31T07:53:16.878Z" }, - { url = "https://files.pythonhosted.org/packages/ab/26/c13c130f35ca8caa5f2ceab68a247775648fdcd6c9a18f158825f2bc2410/mypy-1.17.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a", size = 12710189, upload-time = "2025-07-31T07:54:01.962Z" }, - { url = 
"https://files.pythonhosted.org/packages/82/df/c7d79d09f6de8383fe800521d066d877e54d30b4fb94281c262be2df84ef/mypy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91", size = 12900322, upload-time = "2025-07-31T07:53:10.551Z" }, - { url = "https://files.pythonhosted.org/packages/b8/98/3d5a48978b4f708c55ae832619addc66d677f6dc59f3ebad71bae8285ca6/mypy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed", size = 9751879, upload-time = "2025-07-31T07:52:56.683Z" }, - { url = "https://files.pythonhosted.org/packages/29/cb/673e3d34e5d8de60b3a61f44f80150a738bff568cd6b7efb55742a605e98/mypy-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5d1092694f166a7e56c805caaf794e0585cabdbf1df36911c414e4e9abb62ae9", size = 10992466, upload-time = "2025-07-31T07:53:57.574Z" }, - { url = "https://files.pythonhosted.org/packages/0c/d0/fe1895836eea3a33ab801561987a10569df92f2d3d4715abf2cfeaa29cb2/mypy-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79d44f9bfb004941ebb0abe8eff6504223a9c1ac51ef967d1263c6572bbebc99", size = 10117638, upload-time = "2025-07-31T07:53:34.256Z" }, - { url = "https://files.pythonhosted.org/packages/97/f3/514aa5532303aafb95b9ca400a31054a2bd9489de166558c2baaeea9c522/mypy-1.17.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b01586eed696ec905e61bd2568f48740f7ac4a45b3a468e6423a03d3788a51a8", size = 11915673, upload-time = "2025-07-31T07:52:59.361Z" }, - { url = "https://files.pythonhosted.org/packages/ab/c3/c0805f0edec96fe8e2c048b03769a6291523d509be8ee7f56ae922fa3882/mypy-1.17.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43808d9476c36b927fbcd0b0255ce75efe1b68a080154a38ae68a7e62de8f0f8", size = 12649022, upload-time = "2025-07-31T07:53:45.92Z" }, - { url = 
"https://files.pythonhosted.org/packages/45/3e/d646b5a298ada21a8512fa7e5531f664535a495efa672601702398cea2b4/mypy-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:feb8cc32d319edd5859da2cc084493b3e2ce5e49a946377663cc90f6c15fb259", size = 12895536, upload-time = "2025-07-31T07:53:06.17Z" }, - { url = "https://files.pythonhosted.org/packages/14/55/e13d0dcd276975927d1f4e9e2ec4fd409e199f01bdc671717e673cc63a22/mypy-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d7598cf74c3e16539d4e2f0b8d8c318e00041553d83d4861f87c7a72e95ac24d", size = 9512564, upload-time = "2025-07-31T07:53:12.346Z" }, - { url = "https://files.pythonhosted.org/packages/1d/f3/8fcd2af0f5b806f6cf463efaffd3c9548a28f84220493ecd38d127b6b66d/mypy-1.17.1-py3-none-any.whl", hash = "sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9", size = 2283411, upload-time = "2025-07-31T07:53:24.664Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051, upload-time = "2024-12-30T16:39:07.335Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002, upload-time = "2024-12-30T16:37:22.435Z" }, + { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400, upload-time = "2024-12-30T16:37:53.526Z" }, + { url = 
"https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172, upload-time = "2024-12-30T16:37:50.332Z" }, + { url = "https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732, upload-time = "2024-12-30T16:37:29.96Z" }, + { url = "https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197, upload-time = "2024-12-30T16:38:05.037Z" }, + { url = "https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836, upload-time = "2024-12-30T16:37:19.726Z" }, + { url = "https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432, upload-time = "2024-12-30T16:37:11.533Z" }, + { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515, upload-time = "2024-12-30T16:37:40.724Z" }, + { url = 
"https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791, upload-time = "2024-12-30T16:36:58.73Z" }, + { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203, upload-time = "2024-12-30T16:37:03.741Z" }, + { url = "https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900, upload-time = "2024-12-30T16:37:57.948Z" }, + { url = "https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869, upload-time = "2024-12-30T16:37:33.428Z" }, + { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668, upload-time = "2024-12-30T16:38:02.211Z" }, + { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060, upload-time = "2024-12-30T16:37:46.131Z" }, + { url = 
"https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167, upload-time = "2024-12-30T16:37:43.534Z" }, + { url = "https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341, upload-time = "2024-12-30T16:37:36.249Z" }, + { url = "https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991, upload-time = "2024-12-30T16:37:06.743Z" }, + { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016, upload-time = "2024-12-30T16:37:15.02Z" }, + { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097, upload-time = "2024-12-30T16:37:25.144Z" }, + { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728, upload-time = "2024-12-30T16:38:08.634Z" }, + { url = 
"https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965, upload-time = "2024-12-30T16:38:12.132Z" }, + { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660, upload-time = "2024-12-30T16:38:17.342Z" }, + { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198, upload-time = "2024-12-30T16:38:32.839Z" }, + { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276, upload-time = "2024-12-30T16:38:20.828Z" }, + { url = "https://files.pythonhosted.org/packages/ca/1f/186d133ae2514633f8558e78cd658070ba686c0e9275c5a5c24a1e1f0d67/mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35", size = 11200493, upload-time = "2024-12-30T16:38:26.935Z" }, + { url = "https://files.pythonhosted.org/packages/af/fc/4842485d034e38a4646cccd1369f6b1ccd7bc86989c52770d75d719a9941/mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc", size = 10357702, upload-time = "2024-12-30T16:38:50.623Z" }, + { url = 
"https://files.pythonhosted.org/packages/b4/e6/457b83f2d701e23869cfec013a48a12638f75b9d37612a9ddf99072c1051/mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9", size = 12091104, upload-time = "2024-12-30T16:38:53.735Z" }, + { url = "https://files.pythonhosted.org/packages/f1/bf/76a569158db678fee59f4fd30b8e7a0d75bcbaeef49edd882a0d63af6d66/mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb", size = 12830167, upload-time = "2024-12-30T16:38:56.437Z" }, + { url = "https://files.pythonhosted.org/packages/43/bc/0bc6b694b3103de9fed61867f1c8bd33336b913d16831431e7cb48ef1c92/mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60", size = 13013834, upload-time = "2024-12-30T16:38:59.204Z" }, + { url = "https://files.pythonhosted.org/packages/b0/79/5f5ec47849b6df1e6943d5fd8e6632fbfc04b4fd4acfa5a5a9535d11b4e2/mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c", size = 9781231, upload-time = "2024-12-30T16:39:05.124Z" }, + { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905, upload-time = "2024-12-30T16:38:42.021Z" }, ] [[package]] @@ -1123,15 +1116,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] -[[package]] -name = "pathspec" -version = "0.12.1" -source 
= { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, -] - [[package]] name = "pip" version = "25.2" @@ -1263,7 +1247,6 @@ snappy = [ test = [ { name = "pytest" }, { name = "pytest-asyncio" }, - { name = "pytest-timeout" }, ] zstd = [ { name = "zstandard" }, @@ -1317,7 +1300,6 @@ requires-dist = [ { name = "pyopenssl", marker = "extra == 'ocsp'", specifier = ">=17.2.0" }, { name = "pytest", marker = "extra == 'test'", specifier = ">=8.2" }, { name = "pytest-asyncio", marker = "extra == 'test'", specifier = ">=0.24.0" }, - { name = "pytest-timeout", marker = "extra == 'test'" }, { name = "python-snappy", marker = "extra == 'snappy'" }, { name = "readthedocs-sphinx-search", marker = "extra == 'docs'", specifier = "~=0.3" }, { name = "requests", marker = "extra == 'ocsp'", specifier = "<3.0.0" }, @@ -1333,7 +1315,7 @@ provides-extras = ["aws", "docs", "encryption", "gssapi", "ocsp", "snappy", "tes [package.metadata.requires-dev] coverage = [ - { name = "coverage", specifier = ">=5,<=7.10.3" }, + { name = "coverage", specifier = ">=5,<=7.5" }, { name = "pytest-cov" }, ] dev = [{ name = "pre-commit", specifier = ">=4.0" }] @@ -1347,9 +1329,9 @@ perf = [{ name = "simplejson" }] pip = [{ name = "pip" }] pymongocrypt-source = [{ name = "pymongocrypt", git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master" }] typing = [ - { name = "mypy", specifier = "==1.17.1" }, + { 
name = "mypy", specifier = "==1.14.1" }, { name = "pip" }, - { name = "pyright", specifier = "==1.1.403" }, + { name = "pyright", specifier = "==1.1.392.post0" }, { name = "typing-extensions" }, ] @@ -1393,15 +1375,15 @@ wheels = [ [[package]] name = "pyright" -version = "1.1.403" +version = "1.1.392.post0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nodeenv" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fe/f6/35f885264ff08c960b23d1542038d8da86971c5d8c955cfab195a4f672d7/pyright-1.1.403.tar.gz", hash = "sha256:3ab69b9f41c67fb5bbb4d7a36243256f0d549ed3608678d381d5f51863921104", size = 3913526, upload-time = "2025-07-09T07:15:52.882Z" } +sdist = { url = "https://files.pythonhosted.org/packages/66/df/3c6f6b08fba7ccf49b114dfc4bb33e25c299883fd763f93fad47ef8bc58d/pyright-1.1.392.post0.tar.gz", hash = "sha256:3b7f88de74a28dcfa90c7d90c782b6569a48c2be5f9d4add38472bdaac247ebd", size = 3789911, upload-time = "2025-01-15T15:01:20.913Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/49/b6/b04e5c2f41a5ccad74a1a4759da41adb20b4bc9d59a5e08d29ba60084d07/pyright-1.1.403-py3-none-any.whl", hash = "sha256:c0eeca5aa76cbef3fcc271259bbd785753c7ad7bcac99a9162b4c4c7daed23b3", size = 5684504, upload-time = "2025-07-09T07:15:50.958Z" }, + { url = "https://files.pythonhosted.org/packages/e7/b1/a18de17f40e4f61ca58856b9ef9b0febf74ff88978c3f7776f910071f567/pyright-1.1.392.post0-py3-none-any.whl", hash = "sha256:252f84458a46fa2f0fd4e2f91fc74f50b9ca52c757062e93f6c250c0d8329eb2", size = 5595487, upload-time = "2025-01-15T15:01:17.775Z" }, ] [[package]] @@ -1450,18 +1432,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bc/16/4ea354101abb1287856baa4af2732be351c7bee728065aed451b678153fd/pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5", size = 24644, upload-time = "2025-06-12T10:47:45.932Z" }, ] -[[package]] -name = 
"pytest-timeout" -version = "2.4.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pytest" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ac/82/4c9ecabab13363e72d880f2fb504c5f750433b2b6f16e99f4ec21ada284c/pytest_timeout-2.4.0.tar.gz", hash = "sha256:7e68e90b01f9eff71332b25001f85c75495fc4e3a836701876183c4bcfd0540a", size = 17973, upload-time = "2025-05-05T19:44:34.99Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fa/b6/3127540ecdf1464a00e5a01ee60a1b09175f6913f0644ac748494d9c4b21/pytest_timeout-2.4.0-py3-none-any.whl", hash = "sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2", size = 14382, upload-time = "2025-05-05T19:44:33.502Z" }, -] - [[package]] name = "python-dateutil" version = "2.9.0.post0" From e52ecdffd363847e04de421e7320da4d5e842a82 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 09:27:41 -0500 Subject: [PATCH 08/46] wip update tests --- .../unified/auth-network-error-fail.json | 166 ++++++++++++++++++ 1 file changed, 166 insertions(+) create mode 100644 test/discovery_and_monitoring/unified/auth-network-error-fail.json diff --git a/test/discovery_and_monitoring/unified/auth-network-error-fail.json b/test/discovery_and_monitoring/unified/auth-network-error-fail.json new file mode 100644 index 0000000000..8d37dc9844 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-error-fail.json @@ -0,0 +1,166 @@ +{ + "description": "auth-network-error-fail", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-error-fail", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": 
[ + { + "description": "Backoff and fail after network connection error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 6 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authNetworkErrorFailTest", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolBackoffEvent", + "poolReadyEvent", + "poolClearEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkErrorFailTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-error-fail" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "poolReadyEvent": {} + }, + { + "poolBackoffEvent": { + "attempt": 1 + } + }, + { + "poolBackoffEvent": { + "attempt": 2 + } + }, + { + "poolBackoffEvent": { + "attempt": 3 + } + }, + { + "poolBackoffEvent": { + "attempt": 4 + } + }, + { + "poolBackoffEvent": { + "attempt": 5 + } + }, + { + "poolBackoffEvent": { + "attempt": 6 + } + } + ] + } + ] + } + ] +} From 5ef7656e6cbb60aa777029ccaf3707b510a93cf4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 09:29:42 -0500 Subject: [PATCH 09/46] update to branch --- pymongo/asynchronous/helpers.py | 4 ++-- pymongo/synchronous/helpers.py | 4 
++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py index 96241b947c..b29d02ea2c 100644 --- a/pymongo/asynchronous/helpers.py +++ b/pymongo/asynchronous/helpers.py @@ -77,8 +77,8 @@ async def inner(*args: Any, **kwargs: Any) -> Any: return cast(F, inner) -_MAX_RETRIES = 3 -_BACKOFF_INITIAL = 0.05 +_MAX_RETRIES = 5 +_BACKOFF_INITIAL = 0.1 _BACKOFF_MAX = 10 # DRIVERS-3240 will determine these defaults. DEFAULT_RETRY_TOKEN_CAPACITY = 1000.0 diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py index 72d8978796..2def3a8c14 100644 --- a/pymongo/synchronous/helpers.py +++ b/pymongo/synchronous/helpers.py @@ -77,8 +77,8 @@ def inner(*args: Any, **kwargs: Any) -> Any: return cast(F, inner) -_MAX_RETRIES = 3 -_BACKOFF_INITIAL = 0.05 +_MAX_RETRIES = 5 +_BACKOFF_INITIAL = 0.1 _BACKOFF_MAX = 10 # DRIVERS-3240 will determine these defaults. DEFAULT_RETRY_TOKEN_CAPACITY = 1000.0 From 6d8369f01367487d248068cf822f79005e0683b6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 09:31:49 -0500 Subject: [PATCH 10/46] wip --- pymongo/asynchronous/pool.py | 9 ++++++++- pymongo/synchronous/pool.py | 9 ++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index a41ece82a8..ded9a59cde 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1313,7 +1313,14 @@ async def _get_conn( self._raise_if_not_ready(checkout_started_time, emit_event=True) while not (self.requests < self.max_pool_size): timeout = deadline - time.monotonic() if deadline else None + if self._backoff and (self._backoff_connection_time > time.monotonic()): + timeout = 0.01 if not await _async_cond_wait(self.size_cond, timeout): + # Check whether we should continue to wait for the backoff condition. 
+ if self._backoff and deadline is None or deadline < time.monotonic(): + if self._backoff_connection_time > time.monotonic(): + continue + break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.requests < self.max_pool_size: @@ -1355,7 +1362,7 @@ async def _get_conn( conn = None continue # See if we need to wait for the backoff period. - elif self._backoff and (self._backoff_connection_time < time.monotonic()): + elif self._backoff and (self._backoff_connection_time > time.monotonic()): continue else: # We need to create a new connection try: diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index b563be139e..1ea1b807e3 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1309,7 +1309,14 @@ def _get_conn( self._raise_if_not_ready(checkout_started_time, emit_event=True) while not (self.requests < self.max_pool_size): timeout = deadline - time.monotonic() if deadline else None + if self._backoff and (self._backoff_connection_time > time.monotonic()): + timeout = 0.01 if not _cond_wait(self.size_cond, timeout): + # Check whether we should continue to wait for the backoff condition. + if self._backoff and deadline is None or deadline < time.monotonic(): + if self._backoff_connection_time > time.monotonic(): + continue + break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.requests < self.max_pool_size: @@ -1351,7 +1358,7 @@ def _get_conn( conn = None continue # See if we need to wait for the backoff period. 
- elif self._backoff and (self._backoff_connection_time < time.monotonic()): + elif self._backoff and (self._backoff_connection_time > time.monotonic()): continue else: # We need to create a new connection try: From 24542c9579fdc508ce2913b38e72d16ec988fc97 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 09:46:31 -0500 Subject: [PATCH 11/46] fix backoff logic --- pymongo/asynchronous/pool.py | 16 ++++++++-------- pymongo/synchronous/pool.py | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index ded9a59cde..dd3fb2e4e3 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1313,14 +1313,7 @@ async def _get_conn( self._raise_if_not_ready(checkout_started_time, emit_event=True) while not (self.requests < self.max_pool_size): timeout = deadline - time.monotonic() if deadline else None - if self._backoff and (self._backoff_connection_time > time.monotonic()): - timeout = 0.01 if not await _async_cond_wait(self.size_cond, timeout): - # Check whether we should continue to wait for the backoff condition. - if self._backoff and deadline is None or deadline < time.monotonic(): - if self._backoff_connection_time > time.monotonic(): - continue - break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.requests < self.max_pool_size: @@ -1339,12 +1332,19 @@ async def _get_conn( incremented = True while conn is None: # CMAP: we MUST wait for either maxConnecting OR for a socket - # to be checked back into the pool. + # to be checked back into the pool OR for the backoff period to expire. 
async with self._max_connecting_cond: self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self.max_connecting): timeout = deadline - time.monotonic() if deadline else None + if self._backoff and (self._backoff_connection_time > time.monotonic()): + timeout = 0.01 if not await _async_cond_wait(self._max_connecting_cond, timeout): + # Check whether we should continue to wait for the backoff condition. + if self._backoff and deadline is None or deadline < time.monotonic(): + if self._backoff_connection_time > time.monotonic(): + continue + break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.conns or self._pending < self.max_connecting: diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 1ea1b807e3..f6c916826a 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1309,14 +1309,7 @@ def _get_conn( self._raise_if_not_ready(checkout_started_time, emit_event=True) while not (self.requests < self.max_pool_size): timeout = deadline - time.monotonic() if deadline else None - if self._backoff and (self._backoff_connection_time > time.monotonic()): - timeout = 0.01 if not _cond_wait(self.size_cond, timeout): - # Check whether we should continue to wait for the backoff condition. - if self._backoff and deadline is None or deadline < time.monotonic(): - if self._backoff_connection_time > time.monotonic(): - continue - break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.requests < self.max_pool_size: @@ -1335,12 +1328,19 @@ def _get_conn( incremented = True while conn is None: # CMAP: we MUST wait for either maxConnecting OR for a socket - # to be checked back into the pool. + # to be checked back into the pool OR for the backoff period to expire. 
with self._max_connecting_cond: self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self.max_connecting): timeout = deadline - time.monotonic() if deadline else None + if self._backoff and (self._backoff_connection_time > time.monotonic()): + timeout = 0.01 if not _cond_wait(self._max_connecting_cond, timeout): + # Check whether we should continue to wait for the backoff condition. + if self._backoff and deadline is None or deadline < time.monotonic(): + if self._backoff_connection_time > time.monotonic(): + continue + break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.conns or self._pending < self.max_connecting: From 5e64aa9f32b1f5f798c760752635f5a48ae75ee3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 11:25:03 -0500 Subject: [PATCH 12/46] fix race condition --- pymongo/asynchronous/pool.py | 4 +++- pymongo/synchronous/pool.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index dd3fb2e4e3..9aece14ace 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1337,7 +1337,9 @@ async def _get_conn( self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self.max_connecting): timeout = deadline - time.monotonic() if deadline else None - if self._backoff and (self._backoff_connection_time > time.monotonic()): + if self._backoff: + if self._backoff_connection_time < time.monotonic(): + break timeout = 0.01 if not await _async_cond_wait(self._max_connecting_cond, timeout): # Check whether we should continue to wait for the backoff condition. 
diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index f6c916826a..048ea133fe 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1333,7 +1333,9 @@ def _get_conn( self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self.max_connecting): timeout = deadline - time.monotonic() if deadline else None - if self._backoff and (self._backoff_connection_time > time.monotonic()): + if self._backoff: + if self._backoff_connection_time < time.monotonic(): + break timeout = 0.01 if not _cond_wait(self._max_connecting_cond, timeout): # Check whether we should continue to wait for the backoff condition. From f67195e8bc988cc356b7024a2614378503d89e10 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 15:01:14 -0500 Subject: [PATCH 13/46] update to use durationms --- pymongo/asynchronous/pool.py | 8 ++++--- pymongo/logger.py | 2 +- pymongo/monitoring.py | 14 +++++------ pymongo/synchronous/pool.py | 8 ++++--- .../unified/auth-network-error-fail.json | 24 +++++-------------- .../unified/auth-network-error.json | 4 +--- .../unified/auth-network-timeout-error.json | 8 ++----- test/unified_format_shared.py | 2 +- 8 files changed, 28 insertions(+), 42 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 9aece14ace..d356c0f3f9 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1048,18 +1048,20 @@ def _handle_connection_error(self, error: BaseException, phase: str) -> None: def backoff(self): """Set/increase backoff mode.""" self._backoff += 1 + backoff_duration_sec = _backoff(self._backoff) + backoff_duration_ms = int(backoff_duration_sec * 1000) if self.state != PoolState.BACKOFF: self.state = PoolState.BACKOFF if self.enabled_for_cmap: assert self.opts._event_listeners is not None - self.opts._event_listeners.publish_pool_backoff(self.address, self._backoff) - self._backoff_connection_time = 
_backoff(self._backoff) + time.monotonic() + self.opts._event_listeners.publish_pool_backoff(self.address, backoff_duration_ms) + self._backoff_connection_time = backoff_duration_sec + time.monotonic() # Log the pool backoff message. if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - message=_ConnectionStatusMessage.POOL_BACKOFF % self._backoff, + message=_ConnectionStatusMessage.POOL_BACKOFF % backoff_duration_ms, clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], diff --git a/pymongo/logger.py b/pymongo/logger.py index 3b1fbebff2..052d042766 100644 --- a/pymongo/logger.py +++ b/pymongo/logger.py @@ -42,7 +42,7 @@ class _ConnectionStatusMessage(str, enum.Enum): POOL_READY = "Connection pool ready" POOL_CLOSED = "Connection pool closed" POOL_CLEARED = "Connection pool cleared" - POOL_BACKOFF = "Connection pool backoff attempt number %s" + POOL_BACKOFF = "Connection pool backoff %sms" CONN_CREATED = "Connection created" CONN_READY = "Connection ready" diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 2e8f6944b2..781439c213 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -931,21 +931,21 @@ class PoolBackoffEvent(_PoolEvent): :param address: The address (host, port) pair of the server this Pool is attempting to connect to. - :param attempt: The backoff attempt number. + :param duration_ms: The backoff duration in ms. .. 
versionadded:: 4.16 """ - __slots__ = ("__attempt",) + __slots__ = ("__duration_ms",) - def __init__(self, address: _Address, attempt: int) -> None: + def __init__(self, address: _Address, duration_ms: int) -> None: super().__init__(address) - self.__attempt = attempt + self.__duration_ms = duration_ms @property - def attempt(self) -> Optional[ObjectId]: - """The backoff attempt number.""" - return self.__attempt + def duration_ms(self) -> Optional[ObjectId]: + """The backoff duration in ms.""" + return self.__duration_ms class ConnectionClosedReason: diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 048ea133fe..762ca554ba 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1044,18 +1044,20 @@ def _handle_connection_error(self, error: BaseException, phase: str) -> None: def backoff(self): """Set/increase backoff mode.""" self._backoff += 1 + backoff_duration_sec = _backoff(self._backoff) + backoff_duration_ms = int(backoff_duration_sec * 1000) if self.state != PoolState.BACKOFF: self.state = PoolState.BACKOFF if self.enabled_for_cmap: assert self.opts._event_listeners is not None - self.opts._event_listeners.publish_pool_backoff(self.address, self._backoff) - self._backoff_connection_time = _backoff(self._backoff) + time.monotonic() + self.opts._event_listeners.publish_pool_backoff(self.address, backoff_duration_ms) + self._backoff_connection_time = backoff_duration_sec + time.monotonic() # Log the pool backoff message. 
if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - message=_ConnectionStatusMessage.POOL_BACKOFF % self._backoff, + message=_ConnectionStatusMessage.POOL_BACKOFF % backoff_duration_ms, clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], diff --git a/test/discovery_and_monitoring/unified/auth-network-error-fail.json b/test/discovery_and_monitoring/unified/auth-network-error-fail.json index 8d37dc9844..9b096d2f53 100644 --- a/test/discovery_and_monitoring/unified/auth-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/auth-network-error-fail.json @@ -129,34 +129,22 @@ "poolReadyEvent": {} }, { - "poolBackoffEvent": { - "attempt": 1 - } + "poolBackoffEvent": {} }, { - "poolBackoffEvent": { - "attempt": 2 - } + "poolBackoffEvent": {} }, { - "poolBackoffEvent": { - "attempt": 3 - } + "poolBackoffEvent": {} }, { - "poolBackoffEvent": { - "attempt": 4 - } + "poolBackoffEvent": {} }, { - "poolBackoffEvent": { - "attempt": 5 - } + "poolBackoffEvent": {} }, { - "poolBackoffEvent": { - "attempt": 6 - } + "poolBackoffEvent": {} } ] } diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json b/test/discovery_and_monitoring/unified/auth-network-error.json index 515d1aac5a..ebc9af1baf 100644 --- a/test/discovery_and_monitoring/unified/auth-network-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -119,9 +119,7 @@ "arguments": { "client": "client", "event": { - "poolBackoffEvent": { - "attempt": 1 - } + "poolBackoffEvent": {} }, "count": 1 } diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json index 6b8b29b039..f18c3ea7a6 100644 --- a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -119,9 +119,7 @@ "arguments": { "client": 
"client", "event": { - "poolBackoffEvent": { - "attempt": 1 - } + "poolBackoffEvent": {} }, "count": 1 } @@ -132,9 +130,7 @@ "arguments": { "client": "client", "event": { - "poolBackoffEvent": { - "attempt": 2 - } + "poolBackoffEvent": {} }, "count": 1 } diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index 99d36a7b35..72a18c141b 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -621,7 +621,7 @@ def match_event(self, expectation, actual): self.test.assertIsInstance(actual, PoolClosedEvent) elif name == "poolBackoffEvent": self.test.assertIsInstance(actual, PoolBackoffEvent) - self.test.assertIsInstance(actual.attempt, int) + self.test.assertIsInstance(actual.duration_ms, int) elif name == "connectionCreatedEvent": self.test.assertIsInstance(actual, ConnectionCreatedEvent) elif name == "connectionReadyEvent": From 873d1f14b2e6767dde2b192e2de7d81747c5d931 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 16:15:32 -0500 Subject: [PATCH 14/46] add test that transitions from backoff to clear --- .../unified/auth-network-error-fail.json | 153 +++++++++++++++++- .../unified/auth-network-error.json | 2 +- .../unified/auth-network-timeout-error.json | 2 +- 3 files changed, 153 insertions(+), 4 deletions(-) diff --git a/test/discovery_and_monitoring/unified/auth-network-error-fail.json b/test/discovery_and_monitoring/unified/auth-network-error-fail.json index 9b096d2f53..dcf280549e 100644 --- a/test/discovery_and_monitoring/unified/auth-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/auth-network-error-fail.json @@ -37,7 +37,7 @@ ], "tests": [ { - "description": "Backoff and fail after network connection error during authentication", + "description": "Backoff and fail after network connection errors during authentication", "operations": [ { "name": "failPoint", @@ -72,7 +72,7 @@ "commandStartedEvent", "poolBackoffEvent", "poolReadyEvent", - "poolClearEvent" + 
"poolClearedEvent" ], "uriOptions": { "retryWrites": false, @@ -149,6 +149,155 @@ ] } ] + }, + { + "description": "Backoff and clear the pool after network failures followed by server error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 6 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authNetworkErrorFailTest", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolBackoffEvent", + "poolReadyEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkErrorFailTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-error-fail" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authNetworkErrorFailTest", + "errorCode": 18 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "poolReadyEvent": {} + }, + { + 
"poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolClearedEvent": {} + } + ] + } + ] } ] } diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json b/test/discovery_and_monitoring/unified/auth-network-error.json index ebc9af1baf..11d0af3d02 100644 --- a/test/discovery_and_monitoring/unified/auth-network-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -72,7 +72,7 @@ "commandStartedEvent", "poolBackoffEvent", "poolReadyEvent", - "poolClearEvent" + "poolClearedEvent" ], "uriOptions": { "retryWrites": false, diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json index f18c3ea7a6..02ca9ae3d3 100644 --- a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -72,7 +72,7 @@ "commandStartedEvent", "poolBackoffEvent", "poolReadyEvent", - "poolClearEvent" + "poolClearedEvent" ], "uriOptions": { "retryWrites": false, From 02aec913a0e6d8c3d4ddc369fb6ed66aeff31948 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 16:30:51 -0500 Subject: [PATCH 15/46] clean up the tests --- .../unified/auth-network-error-fail.json | 90 ++++++------------- .../unified/auth-network-error.json | 2 +- .../unified/auth-network-timeout-error.json | 13 +-- 3 files changed, 28 insertions(+), 77 deletions(-) diff --git a/test/discovery_and_monitoring/unified/auth-network-error-fail.json b/test/discovery_and_monitoring/unified/auth-network-error-fail.json index dcf280549e..537a1cce25 100644 --- a/test/discovery_and_monitoring/unified/auth-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/auth-network-error-fail.json @@ -113,40 +113,17 @@ "expectError": { "isError": true 
} - } - ], - "expectEvents": [ - { - "client": "client", - "eventType": "command", - "events": [] }, { - "client": "client", - "eventType": "cmap", - "events": [ - { - "poolReadyEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { "poolBackoffEvent": {} }, - { - "poolBackoffEvent": {} - } - ] + "count": 5 + } } ] }, @@ -228,6 +205,17 @@ "isError": true } }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolBackoffEvent": {} + }, + "count": 5 + } + }, { "name": "failPoint", "object": "testRunner", @@ -259,43 +247,17 @@ "expectError": { "isError": true } - } - ], - "expectEvents": [ - { - "client": "client", - "eventType": "command", - "events": [] }, { - "client": "client", - "eventType": "cmap", - "events": [ - { - "poolReadyEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { "poolClearedEvent": {} - } - ] + }, + "count": 1 + } } ] } diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json b/test/discovery_and_monitoring/unified/auth-network-error.json index 11d0af3d02..03cb557215 100644 --- a/test/discovery_and_monitoring/unified/auth-network-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -121,7 +121,7 @@ "event": { "poolBackoffEvent": {} }, - "count": 1 + "count": 2 } } ], diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json index 02ca9ae3d3..b7e1485e04 100644 --- 
a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -121,18 +121,7 @@ "event": { "poolBackoffEvent": {} }, - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "poolBackoffEvent": {} - }, - "count": 1 + "count": 2 } }, { From 73ff3d6b21f32b718a1cf30bdd311b03f6dce9f3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 16:37:39 -0500 Subject: [PATCH 16/46] update logging test --- pymongo/asynchronous/pool.py | 3 ++- pymongo/logger.py | 2 +- pymongo/synchronous/pool.py | 3 ++- test/connection_logging/connection-logging.json | 5 ++++- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index d356c0f3f9..f789235d53 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1061,10 +1061,11 @@ def backoff(self): if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - message=_ConnectionStatusMessage.POOL_BACKOFF % backoff_duration_ms, + message=_ConnectionStatusMessage.POOL_BACKOFF, clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], + durationMS=backoff_duration_ms, reason=_verbose_connection_error_reason(ConnectionClosedReason.POOL_BACKOFF), error=ConnectionClosedReason.POOL_BACKOFF, ) diff --git a/pymongo/logger.py b/pymongo/logger.py index 052d042766..ccfc45ed88 100644 --- a/pymongo/logger.py +++ b/pymongo/logger.py @@ -42,7 +42,7 @@ class _ConnectionStatusMessage(str, enum.Enum): POOL_READY = "Connection pool ready" POOL_CLOSED = "Connection pool closed" POOL_CLEARED = "Connection pool cleared" - POOL_BACKOFF = "Connection pool backoff %sms" + POOL_BACKOFF = "Connection pool backoff" CONN_CREATED = "Connection created" CONN_READY = "Connection ready" diff --git a/pymongo/synchronous/pool.py 
b/pymongo/synchronous/pool.py index 762ca554ba..6387294051 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1057,10 +1057,11 @@ def backoff(self): if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - message=_ConnectionStatusMessage.POOL_BACKOFF % backoff_duration_ms, + message=_ConnectionStatusMessage.POOL_BACKOFF, clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], + durationMS=backoff_duration_ms, reason=_verbose_connection_error_reason(ConnectionClosedReason.POOL_BACKOFF), error=ConnectionClosedReason.POOL_BACKOFF, ) diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index 2d66c8b2cc..4928af2ef0 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -691,10 +691,13 @@ "level": "debug", "component": "connection", "data": { - "message": "Connection pool backoff attempt number 1", + "message": "Connection pool backoff", "serverHost": { "$$type": "string" }, + "durationMS": { + "$$type": "int" + }, "serverPort": { "$$type": [ "int", From c70b66c305c139a33e577ff4805fda0e1ce859e3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 16:47:31 -0500 Subject: [PATCH 17/46] fix typing --- pymongo/asynchronous/pool.py | 10 +++++----- pymongo/monitoring.py | 2 +- pymongo/synchronous/pool.py | 10 +++++----- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index f789235d53..648012aab3 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -793,7 +793,7 @@ def __init__( self._pending = 0 self._client_id = client_id self._backoff = 0 - self._backoff_connection_time = -1 + self._backoff_connection_time = 0.0 if self.enabled_for_cmap: assert self.opts._event_listeners is not None 
self.opts._event_listeners.publish_pool_created( @@ -1041,11 +1041,11 @@ def _handle_connection_error(self, error: BaseException, phase: str) -> None: # If found, set backoff and add error labels. if self.is_sdam or type(error) not in (AutoReconnect, NetworkTimeout): return - error._add_error_label("SystemOverloadedError") - error._add_error_label("RetryableError") + error._add_error_label("SystemOverloadedError") # type:ignore[attr-defined] + error._add_error_label("RetryableError") # type:ignore[attr-defined] self.backoff() - def backoff(self): + def backoff(self) -> None: """Set/increase backoff mode.""" self._backoff += 1 backoff_duration_sec = _backoff(self._backoff) @@ -1346,7 +1346,7 @@ async def _get_conn( timeout = 0.01 if not await _async_cond_wait(self._max_connecting_cond, timeout): # Check whether we should continue to wait for the backoff condition. - if self._backoff and deadline is None or deadline < time.monotonic(): + if self._backoff and (deadline is None or deadline < time.monotonic()): if self._backoff_connection_time > time.monotonic(): continue break diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 781439c213..5d7dcd37ed 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -943,7 +943,7 @@ def __init__(self, address: _Address, duration_ms: int) -> None: self.__duration_ms = duration_ms @property - def duration_ms(self) -> Optional[ObjectId]: + def duration_ms(self) -> int: """The backoff duration in ms.""" return self.__duration_ms diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 6387294051..edee12a329 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -791,7 +791,7 @@ def __init__( self._pending = 0 self._client_id = client_id self._backoff = 0 - self._backoff_connection_time = -1 + self._backoff_connection_time = 0.0 if self.enabled_for_cmap: assert self.opts._event_listeners is not None self.opts._event_listeners.publish_pool_created( @@ -1037,11 +1037,11 
@@ def _handle_connection_error(self, error: BaseException, phase: str) -> None: # If found, set backoff and add error labels. if self.is_sdam or type(error) not in (AutoReconnect, NetworkTimeout): return - error._add_error_label("SystemOverloadedError") - error._add_error_label("RetryableError") + error._add_error_label("SystemOverloadedError") # type:ignore[attr-defined] + error._add_error_label("RetryableError") # type:ignore[attr-defined] self.backoff() - def backoff(self): + def backoff(self) -> None: """Set/increase backoff mode.""" self._backoff += 1 backoff_duration_sec = _backoff(self._backoff) @@ -1342,7 +1342,7 @@ def _get_conn( timeout = 0.01 if not _cond_wait(self._max_connecting_cond, timeout): # Check whether we should continue to wait for the backoff condition. - if self._backoff and deadline is None or deadline < time.monotonic(): + if self._backoff and (deadline is None or deadline < time.monotonic()): if self._backoff_connection_time > time.monotonic(): continue break From f20cc0a671da18262d2bc954655836582faea06e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 22:05:21 -0500 Subject: [PATCH 18/46] add final test --- pymongo/asynchronous/pool.py | 13 +- pymongo/asynchronous/topology.py | 4 +- pymongo/synchronous/pool.py | 15 +- pymongo/synchronous/topology.py | 4 +- ...t-failure-does-not-clear-backoff-pool.json | 200 ++++++++++++++++++ 5 files changed, 230 insertions(+), 6 deletions(-) create mode 100644 test/discovery_and_monitoring/unified/auth-heartbeat-failure-does-not-clear-backoff-pool.json diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 648012aab3..38ebc1258a 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -847,11 +847,14 @@ async def _reset( pause: bool = True, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False, + from_server_description: bool = False, ) -> None: old_state = self.state async with self.size_cond: if self.closed: 
return + if from_server_description and self.state == PoolState.BACKOFF: + return # Clear the backoff amount. self._backoff = 0 if self.opts.pause_enabled and pause and not self.opts.load_balanced: @@ -951,10 +954,16 @@ async def update_is_writable(self, is_writable: Optional[bool]) -> None: _socket.update_is_writable(self.is_writable) # type: ignore[arg-type] async def reset( - self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False + self, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + from_server_description: bool = False, ) -> None: await self._reset( - close=False, service_id=service_id, interrupt_connections=interrupt_connections + close=False, + service_id=service_id, + interrupt_connections=interrupt_connections, + from_server_description=from_server_description, ) async def reset_without_pause(self) -> None: diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 1e91bbe79b..e7c74adb22 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -555,7 +555,9 @@ async def on_change( if reset_pool: server = self._servers.get(server_description.address) if server: - await server.pool.reset(interrupt_connections=interrupt_connections) + await server.pool.reset( + interrupt_connections=interrupt_connections, from_server_description=True + ) async def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new seedlist on an opened topology. 
diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index edee12a329..94a3ff051c 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -845,11 +845,14 @@ def _reset( pause: bool = True, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False, + from_server_description: bool = False, ) -> None: old_state = self.state with self.size_cond: if self.closed: return + if from_server_description and self.state == PoolState.BACKOFF: + return # Clear the backoff amount. self._backoff = 0 if self.opts.pause_enabled and pause and not self.opts.load_balanced: @@ -949,9 +952,17 @@ def update_is_writable(self, is_writable: Optional[bool]) -> None: _socket.update_is_writable(self.is_writable) # type: ignore[arg-type] def reset( - self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False + self, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + from_server_description: bool = False, ) -> None: - self._reset(close=False, service_id=service_id, interrupt_connections=interrupt_connections) + self._reset( + close=False, + service_id=service_id, + interrupt_connections=interrupt_connections, + from_server_description=from_server_description, + ) def reset_without_pause(self) -> None: self._reset(close=False, pause=False) diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 0f6592dfc0..4c86b9b797 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -555,7 +555,9 @@ def on_change( if reset_pool: server = self._servers.get(server_description.address) if server: - server.pool.reset(interrupt_connections=interrupt_connections) + server.pool.reset( + interrupt_connections=interrupt_connections, from_server_description=True + ) def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new seedlist on an opened topology. 
diff --git a/test/discovery_and_monitoring/unified/auth-heartbeat-failure-does-not-clear-backoff-pool.json b/test/discovery_and_monitoring/unified/auth-heartbeat-failure-does-not-clear-backoff-pool.json new file mode 100644 index 0000000000..a28bf3da04 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-heartbeat-failure-does-not-clear-backoff-pool.json @@ -0,0 +1,200 @@ +{ + "description": "heartbeat-failure-does-not-clear-backoff-pool", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "heartbeat-backoff-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "A heartbeat failure during backoff should not clear the pool", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "heartbeatBackoffFailTest", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolBackoffEvent", + "poolReadyEvent", + "poolClearedEvent", + "serverHeartbeatFailedEvent", + "serverHeartbeatSucceededEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "heartbeatBackoffFailTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "heartbeat-backoff-error" + } 
+ } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "appName": "heartbeatBackoffFailTest", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "poolReadyEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + } + ] + } + ] + } + ] +} From e905b9b5a20a6470ee6f57bb31ddf985f4219e76 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Oct 2025 22:10:26 -0500 Subject: [PATCH 19/46] fix ready condition --- pymongo/asynchronous/pool.py | 1 + pymongo/synchronous/pool.py | 1 + 2 files changed, 2 insertions(+) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 38ebc1258a..9675a5de9f 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1169,6 +1169,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A # Clear the backoff state. 
if self._backoff: + self._backoff = 0 await self.ready() return conn diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 94a3ff051c..616e18087e 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1167,6 +1167,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect # Clear the backoff state. if self._backoff: + self._backoff = 0 self.ready() return conn From d228f08eeafadf6749f07c97def316970e807ad6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 27 Oct 2025 17:21:35 -0500 Subject: [PATCH 20/46] wip incorporate design changes --- pymongo/asynchronous/pool.py | 47 +++-- pymongo/asynchronous/topology.py | 7 +- pymongo/monitoring.py | 15 +- pymongo/synchronous/pool.py | 39 ++-- pymongo/synchronous/topology.py | 7 +- .../unified/auth-network-error.json | 83 ++++++-- .../unified/auth-network-timeout-error.json | 86 ++++++-- ...son => backoff-heartbeat-clears-pool.json} | 0 ...l.json => backoff-network-error-fail.json} | 0 .../unified/backoff-network-error.json | 175 ++++++++++++++++ .../backoff-network-timeout-error.json | 186 ++++++++++++++++++ 11 files changed, 554 insertions(+), 91 deletions(-) rename test/discovery_and_monitoring/unified/{auth-heartbeat-failure-does-not-clear-backoff-pool.json => backoff-heartbeat-clears-pool.json} (100%) rename test/discovery_and_monitoring/unified/{auth-network-error-fail.json => backoff-network-error-fail.json} (100%) create mode 100644 test/discovery_and_monitoring/unified/backoff-network-error.json create mode 100644 test/discovery_and_monitoring/unified/backoff-network-timeout-error.json diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 9675a5de9f..16c8f1d441 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -820,8 +820,7 @@ def __init__( async def ready(self) -> None: # Take the lock to avoid the race condition described in PYTHON-2699. 
async with self.lock: - # Do not set the pool as ready if in backoff. - if self._backoff: + if self.state == PoolState.BACKOFF: return if self.state != PoolState.READY: self.state = PoolState.READY @@ -847,14 +846,11 @@ async def _reset( pause: bool = True, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False, - from_server_description: bool = False, ) -> None: old_state = self.state async with self.size_cond: if self.closed: return - if from_server_description and self.state == PoolState.BACKOFF: - return # Clear the backoff amount. self._backoff = 0 if self.opts.pause_enabled and pause and not self.opts.load_balanced: @@ -954,16 +950,12 @@ async def update_is_writable(self, is_writable: Optional[bool]) -> None: _socket.update_is_writable(self.is_writable) # type: ignore[arg-type] async def reset( - self, - service_id: Optional[ObjectId] = None, - interrupt_connections: bool = False, - from_server_description: bool = False, + self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False ) -> None: await self._reset( close=False, service_id=service_id, interrupt_connections=interrupt_connections, - from_server_description=from_server_description, ) async def reset_without_pause(self) -> None: @@ -1044,7 +1036,7 @@ async def remove_stale_sockets(self, reference_generation: int) -> None: self.requests -= 1 self.size_cond.notify() - def _handle_connection_error(self, error: BaseException, phase: str) -> None: + async def _handle_connection_error(self, error: BaseException, phase: str) -> None: # Handle system overload condition for non-sdam pools. # Look for an AutoReconnect or NetworkTimeout error. # If found, set backoff and add error labels. 
@@ -1052,19 +1044,22 @@ def _handle_connection_error(self, error: BaseException, phase: str) -> None: return error._add_error_label("SystemOverloadedError") # type:ignore[attr-defined] error._add_error_label("RetryableError") # type:ignore[attr-defined] - self.backoff() + await self.backoff() - def backoff(self) -> None: + async def backoff(self) -> None: """Set/increase backoff mode.""" - self._backoff += 1 - backoff_duration_sec = _backoff(self._backoff) - backoff_duration_ms = int(backoff_duration_sec * 1000) - if self.state != PoolState.BACKOFF: - self.state = PoolState.BACKOFF - if self.enabled_for_cmap: - assert self.opts._event_listeners is not None - self.opts._event_listeners.publish_pool_backoff(self.address, backoff_duration_ms) - self._backoff_connection_time = backoff_duration_sec + time.monotonic() + async with self.lock: + self._backoff += 1 + backoff_duration_sec = _backoff(self._backoff) + backoff_duration_ms = int(backoff_duration_sec * 1000) + if self.state != PoolState.BACKOFF: + self.state = PoolState.BACKOFF + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_backoff( + self.address, self._backoff, backoff_duration_ms + ) + self._backoff_connection_time = backoff_duration_sec + time.monotonic() # Log the pool backoff message. 
if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): @@ -1074,6 +1069,7 @@ def backoff(self) -> None: clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], + attempt=self._backoff, durationMS=backoff_duration_ms, reason=_verbose_connection_error_reason(ConnectionClosedReason.POOL_BACKOFF), error=ConnectionClosedReason.POOL_BACKOFF, @@ -1136,7 +1132,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A error=ConnectionClosedReason.ERROR, ) if context["has_created_socket"]: - self._handle_connection_error(error, "handshake") + await self._handle_connection_error(error, "handshake") if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) _raise_connection_failure(self.address, error, timeout_details=details) @@ -1148,9 +1144,11 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A self.active_contexts.discard(tmp_context) if tmp_context.cancelled: conn.cancel_context.cancel() + has_completed_hello = False try: if not self.is_sdam: await conn.hello() + has_completed_hello = True self.is_writable = conn.is_writable if handler: handler.contribute_socket(conn, completed_handshake=False) @@ -1160,7 +1158,8 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A except BaseException as e: async with self.lock: self.active_contexts.discard(conn.cancel_context) - self._handle_connection_error(e, "hello") + if not has_completed_hello: + await self._handle_connection_error(e, "hello") await conn.close_conn(ConnectionClosedReason.ERROR) raise diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index e7c74adb22..76cd2f4cb0 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -57,6 +57,7 @@ _SDAMStatusMessage, _ServerSelectionStatusMessage, ) +from pymongo.pool import PoolState from pymongo.pool_options import PoolOptions from 
pymongo.server_description import ServerDescription from pymongo.server_selectors import ( @@ -485,7 +486,7 @@ async def _process_change( server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single ): server = self._servers.get(server_description.address) - if server: + if server and server.pool.state != PoolState.BACKOFF: await server.pool.ready() suppress_event = sd_old == server_description @@ -555,9 +556,7 @@ async def on_change( if reset_pool: server = self._servers.get(server_description.address) if server: - await server.pool.reset( - interrupt_connections=interrupt_connections, from_server_description=True - ) + await server.pool.reset(interrupt_connections=interrupt_connections) async def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new seedlist on an opened topology. diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 5d7dcd37ed..3f2dc9d06a 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -931,17 +931,24 @@ class PoolBackoffEvent(_PoolEvent): :param address: The address (host, port) pair of the server this Pool is attempting to connect to. + :param attempt: The backoff attempt. :param duration_ms: The backoff duration in ms. .. 
versionadded:: 4.16 """ - __slots__ = ("__duration_ms",) + __slots__ = ("__attempt", "__duration_ms") - def __init__(self, address: _Address, duration_ms: int) -> None: + def __init__(self, address: _Address, attempt: int, duration_ms: int) -> None: super().__init__(address) + self.__attempt = attempt self.__duration_ms = duration_ms + @property + def attempt(self) -> int: + """The backoff attempt.""" + return self.__attempt + @property def duration_ms(self) -> int: """The backoff duration in ms.""" @@ -1864,9 +1871,9 @@ def publish_pool_closed(self, address: _Address) -> None: except Exception: _handle_exception() - def publish_pool_backoff(self, address: _Address, attempt: int) -> None: + def publish_pool_backoff(self, address: _Address, attempt: int, duration_ms: int) -> None: """Publish a :class:`PoolBackoffEvent` to all pool listeners.""" - event = PoolBackoffEvent(address, attempt) + event = PoolBackoffEvent(address, attempt, duration_ms) for subscriber in self.__cmap_listeners: try: subscriber.pool_backoff(event) diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 616e18087e..3ad099faf9 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -818,8 +818,7 @@ def __init__( def ready(self) -> None: # Take the lock to avoid the race condition described in PYTHON-2699. with self.lock: - # Do not set the pool as ready if in backoff. - if self._backoff: + if self.state == PoolState.BACKOFF: return if self.state != PoolState.READY: self.state = PoolState.READY @@ -845,14 +844,11 @@ def _reset( pause: bool = True, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False, - from_server_description: bool = False, ) -> None: old_state = self.state with self.size_cond: if self.closed: return - if from_server_description and self.state == PoolState.BACKOFF: - return # Clear the backoff amount. 
self._backoff = 0 if self.opts.pause_enabled and pause and not self.opts.load_balanced: @@ -952,16 +948,12 @@ def update_is_writable(self, is_writable: Optional[bool]) -> None: _socket.update_is_writable(self.is_writable) # type: ignore[arg-type] def reset( - self, - service_id: Optional[ObjectId] = None, - interrupt_connections: bool = False, - from_server_description: bool = False, + self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False ) -> None: self._reset( close=False, service_id=service_id, interrupt_connections=interrupt_connections, - from_server_description=from_server_description, ) def reset_without_pause(self) -> None: @@ -1054,15 +1046,18 @@ def _handle_connection_error(self, error: BaseException, phase: str) -> None: def backoff(self) -> None: """Set/increase backoff mode.""" - self._backoff += 1 - backoff_duration_sec = _backoff(self._backoff) - backoff_duration_ms = int(backoff_duration_sec * 1000) - if self.state != PoolState.BACKOFF: - self.state = PoolState.BACKOFF - if self.enabled_for_cmap: - assert self.opts._event_listeners is not None - self.opts._event_listeners.publish_pool_backoff(self.address, backoff_duration_ms) - self._backoff_connection_time = backoff_duration_sec + time.monotonic() + with self.lock: + self._backoff += 1 + backoff_duration_sec = _backoff(self._backoff) + backoff_duration_ms = int(backoff_duration_sec * 1000) + if self.state != PoolState.BACKOFF: + self.state = PoolState.BACKOFF + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_backoff( + self.address, self._backoff, backoff_duration_ms + ) + self._backoff_connection_time = backoff_duration_sec + time.monotonic() # Log the pool backoff message. 
if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): @@ -1072,6 +1067,7 @@ def backoff(self) -> None: clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], + attempt=self._backoff, durationMS=backoff_duration_ms, reason=_verbose_connection_error_reason(ConnectionClosedReason.POOL_BACKOFF), error=ConnectionClosedReason.POOL_BACKOFF, @@ -1146,9 +1142,11 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect self.active_contexts.discard(tmp_context) if tmp_context.cancelled: conn.cancel_context.cancel() + has_completed_hello = False try: if not self.is_sdam: conn.hello() + has_completed_hello = True self.is_writable = conn.is_writable if handler: handler.contribute_socket(conn, completed_handshake=False) @@ -1158,7 +1156,8 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect except BaseException as e: with self.lock: self.active_contexts.discard(conn.cancel_context) - self._handle_connection_error(e, "hello") + if not has_completed_hello: + self._handle_connection_error(e, "hello") conn.close_conn(ConnectionClosedReason.ERROR) raise diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 4c86b9b797..62c9485f4a 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -53,6 +53,7 @@ _SDAMStatusMessage, _ServerSelectionStatusMessage, ) +from pymongo.pool import PoolState from pymongo.pool_options import PoolOptions from pymongo.server_description import ServerDescription from pymongo.server_selectors import ( @@ -485,7 +486,7 @@ def _process_change( server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single ): server = self._servers.get(server_description.address) - if server: + if server and server.pool.state != PoolState.BACKOFF: server.pool.ready() suppress_event = sd_old == server_description @@ -555,9 +556,7 @@ def on_change( if reset_pool: server = 
self._servers.get(server_description.address) if server: - server.pool.reset( - interrupt_connections=interrupt_connections, from_server_description=True - ) + server.pool.reset(interrupt_connections=interrupt_connections) def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new seedlist on an opened topology. diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json b/test/discovery_and_monitoring/unified/auth-network-error.json index 03cb557215..c278665d68 100644 --- a/test/discovery_and_monitoring/unified/auth-network-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -37,7 +37,7 @@ ], "tests": [ { - "description": "Backoff and retry after network connection error during authentication", + "description": "Reset server and pool after network error during authentication", "operations": [ { "name": "failPoint", @@ -47,7 +47,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 2 + "times": 1 }, "data": { "failCommands": [ @@ -70,15 +70,12 @@ "useMultipleMongoses": false, "observeEvents": [ "commandStartedEvent", - "poolBackoffEvent", - "poolReadyEvent", + "serverDescriptionChangedEvent", "poolClearedEvent" ], "uriOptions": { "retryWrites": false, - "appname": "authNetworkErrorTest", - "connectTimeoutMS": 250, - "socketTimeoutMS": 250 + "appname": "authNetworkErrorTest" } } }, @@ -111,6 +108,9 @@ "_id": 4 } ] + }, + "expectError": { + "isError": true } }, { @@ -119,9 +119,64 @@ "arguments": { "client": "client", "event": { - "poolBackoffEvent": {} + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + 
"name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} }, - "count": 2 + "count": 1 } } ], @@ -136,10 +191,10 @@ "insert": "auth-network-error", "documents": [ { - "_id": 3 + "_id": 5 }, { - "_id": 4 + "_id": 6 } ] }, @@ -162,14 +217,14 @@ "_id": 2 }, { - "_id": 3 + "_id": 5 }, { - "_id": 4 + "_id": 6 } ] } ] } ] -} +} \ No newline at end of file diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json index b7e1485e04..c278665d68 100644 --- a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -1,5 +1,5 @@ { - "description": "auth-network-timeout-error", + "description": "auth-network-error", "schemaVersion": "1.4", "runOnRequirements": [ { @@ -23,7 +23,7 @@ ], "initialData": [ { - "collectionName": "auth-network-timeout-error", + "collectionName": "auth-network-error", "databaseName": "sdam-tests", "documents": [ { @@ -37,7 +37,7 @@ ], "tests": [ { - "description": "Backoff and retry after network timeout error during authentication", + "description": "Reset server and pool after network error during authentication", "operations": [ { "name": "failPoint", @@ -47,14 +47,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 2 + "times": 1 }, "data": { "failCommands": [ "saslContinue" ], "closeConnection": true, - "appName": "authNetworkTimeoutErrorTest" + "appName": "authNetworkErrorTest" } } } @@ -70,15 +70,12 @@ "useMultipleMongoses": false, "observeEvents": [ "commandStartedEvent", - "poolBackoffEvent", - "poolReadyEvent", + 
"serverDescriptionChangedEvent", "poolClearedEvent" ], "uriOptions": { "retryWrites": false, - "appname": "authNetworkTimeoutErrorTest", - "connectTimeoutMS": 250, - "socketTimeoutMS": 250 + "appname": "authNetworkErrorTest" } } }, @@ -93,7 +90,7 @@ "collection": { "id": "collection", "database": "database", - "collectionName": "auth-network-timeout-error" + "collectionName": "auth-network-error" } } ] @@ -111,6 +108,9 @@ "_id": 4 } ] + }, + "expectError": { + "isError": true } }, { @@ -119,9 +119,13 @@ "arguments": { "client": "client", "event": { - "poolBackoffEvent": {} + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } }, - "count": 2 + "count": 1 } }, { @@ -130,7 +134,47 @@ "arguments": { "client": "client", "event": { - "poolReadyEvent": {} + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} }, "count": 1 } @@ -144,13 +188,13 @@ { "commandStartedEvent": { "command": { - "insert": "auth-network-timeout-error", + "insert": "auth-network-error", "documents": [ { - "_id": 3 + "_id": 5 }, { - "_id": 4 + "_id": 6 } ] }, @@ -163,7 +207,7 @@ ], "outcome": [ { - "collectionName": "auth-network-timeout-error", + "collectionName": "auth-network-error", "databaseName": "sdam-tests", "documents": [ { @@ -173,14 +217,14 @@ "_id": 2 }, { - "_id": 3 + "_id": 5 }, { - "_id": 4 + "_id": 6 } ] } ] } ] -} +} \ No newline at end of file diff --git a/test/discovery_and_monitoring/unified/auth-heartbeat-failure-does-not-clear-backoff-pool.json 
b/test/discovery_and_monitoring/unified/backoff-heartbeat-clears-pool.json similarity index 100% rename from test/discovery_and_monitoring/unified/auth-heartbeat-failure-does-not-clear-backoff-pool.json rename to test/discovery_and_monitoring/unified/backoff-heartbeat-clears-pool.json diff --git a/test/discovery_and_monitoring/unified/auth-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json similarity index 100% rename from test/discovery_and_monitoring/unified/auth-network-error-fail.json rename to test/discovery_and_monitoring/unified/backoff-network-error-fail.json diff --git a/test/discovery_and_monitoring/unified/backoff-network-error.json b/test/discovery_and_monitoring/unified/backoff-network-error.json new file mode 100644 index 0000000000..03cb557215 --- /dev/null +++ b/test/discovery_and_monitoring/unified/backoff-network-error.json @@ -0,0 +1,175 @@ +{ + "description": "auth-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Backoff and retry after network connection error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "authNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", 
+ "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolBackoffEvent", + "poolReadyEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkErrorTest", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolBackoffEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json new file mode 100644 index 0000000000..b7e1485e04 --- /dev/null +++ b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json @@ -0,0 +1,186 @@ +{ + "description": "auth-network-timeout-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + 
"useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Backoff and retry after network timeout error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "authNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolBackoffEvent", + "poolReadyEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkTimeoutErrorTest", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-timeout-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolBackoffEvent": {} + }, + "count": 2 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-timeout-error", + "documents": [ + { + "_id": 3 + 
}, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} From 73e78b62d900e5b1003e6821a033fe44a4c957ad Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 06:30:32 -0500 Subject: [PATCH 21/46] update tests --- .../connection-logging.json | 19 +- .../unified/backoff-heartbeat-failure.json | 175 ++++++++++++++++++ ...ol.json => backoff-heartbeat-success.json} | 67 +++---- .../unified/backoff-network-error-fail.json | 120 +++++++----- .../unified/backoff-network-error.json | 67 ++++--- .../backoff-network-timeout-error.json | 66 ++++--- 6 files changed, 371 insertions(+), 143 deletions(-) create mode 100644 test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json rename test/discovery_and_monitoring/unified/{backoff-heartbeat-clears-pool.json => backoff-heartbeat-success.json} (83%) diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index 4928af2ef0..adb3d68e7f 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -539,8 +539,11 @@ "uriOptions": { "retryReads": true, "appname": "clientAppName", - "heartbeatFrequencyMS": 10000 + "heartbeatFrequencyMS": 5000 }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ], "observeLogMessages": { "connection": "debug" } @@ -556,6 +559,17 @@ ] } }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, { "name": "failPoint", "object": "testRunner", @@ -568,7 +582,8 @@ }, "data": { "failCommands": [ - "saslContinue" + "isMaster", + "hello" ], "closeConnection": true, "appName": 
"clientAppName" diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json new file mode 100644 index 0000000000..5a43b02076 --- /dev/null +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json @@ -0,0 +1,175 @@ +{ + "description": "heartbeat-failure-clears-backoff-pool", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "heartbeat-backoff-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "A heartbeat failure during backoff should clear the pool", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolBackoffEvent", + "poolReadyEvent", + "poolClearedEvent", + "serverHeartbeatFailedEvent", + "serverHeartbeatSucceededEvent" + ], + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 5000, + "appname": "heartbeatBackoffFailTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "heartbeat-backoff-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": 
"alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "appName": "heartbeatBackoffFailTest", + "closeConnection": true + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "poolReadyEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolBackoffEvent": {} + }, + { + "poolClearedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-clears-pool.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json similarity index 83% rename from test/discovery_and_monitoring/unified/backoff-heartbeat-clears-pool.json rename to test/discovery_and_monitoring/unified/backoff-heartbeat-success.json index a28bf3da04..c2aaef483a 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-clears-pool.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json @@ -1,5 +1,5 @@ { - "description": "heartbeat-failure-does-not-clear-backoff-pool", + "description": "heartbeat-success-backoff", "schemaVersion": "1.4", "runOnRequirements": [ { @@ -22,7 +22,7 @@ ], "initialData": [ { - "collectionName": "heartbeat-backoff-error", + "collectionName": "heartbeat-backoff-success", "databaseName": "sdam-tests", "documents": [ { @@ -36,26 +36,8 @@ ], "tests": [ { - "description": "A heartbeat failure during backoff should not clear the pool", + "description": "A heartbeat success during backoff not mark the pool as ready", "operations": [ - { 
- "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": "alwaysOn", - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "heartbeatBackoffFailTest", - "closeConnection": true - } - } - } - }, { "name": "createEntities", "object": "testRunner", @@ -75,6 +57,7 @@ ], "uriOptions": { "retryWrites": false, + "heartbeatFrequencyMS": 5000, "appname": "heartbeatBackoffFailTest" } } @@ -90,27 +73,21 @@ "collection": { "id": "collection", "database": "database", - "collectionName": "heartbeat-backoff-error" + "collectionName": "heartbeat-backoff-success" } } ] } }, { - "name": "insertMany", - "object": "collection", + "name": "waitForEvent", + "object": "testRunner", "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "expectError": { - "isError": true + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 } }, { @@ -133,14 +110,20 @@ } }, { - "name": "waitForEvent", - "object": "testRunner", + "name": "insertMany", + "object": "collection", "arguments": { - "client": "client", - "event": { - "serverHeartbeatFailedEvent": {} - }, - "count": 1 + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true } }, { diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index 537a1cce25..6edcade36b 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -1,10 +1,9 @@ { - "description": "auth-network-error-fail", + "description": "backoff-network-error-fail", "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.4", - "auth": true, "serverless": "forbid", "topologies": [ "single", @@ -23,7 +22,7 @@ ], "initialData": [ { - "collectionName": 
"auth-network-error-fail", + "collectionName": "backoff-network-error-fail", "databaseName": "sdam-tests", "documents": [ { @@ -37,28 +36,8 @@ ], "tests": [ { - "description": "Backoff and fail after network connection errors during authentication", + "description": "Backoff and fail after network connection errors during connection establishment", "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 6 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authNetworkErrorFailTest", - "closeConnection": true - } - } - } - }, { "name": "createEntities", "object": "testRunner", @@ -69,6 +48,7 @@ "id": "client", "useMultipleMongoses": false, "observeEvents": [ + "serverHeartbeatSucceededEvent", "commandStartedEvent", "poolBackoffEvent", "poolReadyEvent", @@ -76,6 +56,7 @@ ], "uriOptions": { "retryWrites": false, + "heartbeatFrequencyMS": 100000, "appname": "authNetworkErrorFailTest" } } @@ -91,12 +72,44 @@ "collection": { "id": "collection", "database": "database", - "collectionName": "auth-network-error-fail" + "collectionName": "backoff-network-error-fail" } } ] } }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 6 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "appName": "authNetworkErrorFailTest", + "closeConnection": true + } + } + } + }, { "name": "insertMany", "object": "collection", @@ -130,26 +143,7 @@ { "description": "Backoff and clear the pool after network failures followed by server error", "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - 
"client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 6 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authNetworkErrorFailTest", - "closeConnection": true - } - } - } - }, + { "name": "createEntities", "object": "testRunner", @@ -160,6 +154,7 @@ "id": "client", "useMultipleMongoses": false, "observeEvents": [ + "serverHeartbeatSucceededEvent", "commandStartedEvent", "poolBackoffEvent", "poolReadyEvent", @@ -167,6 +162,7 @@ ], "uriOptions": { "retryWrites": false, + "heartbeatFrequencyMS": 100000, "appname": "authNetworkErrorFailTest" } } @@ -182,12 +178,44 @@ "collection": { "id": "collection", "database": "database", - "collectionName": "auth-network-error-fail" + "collectionName": "backoff-network-error-fail" } } ] } }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 6 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "appName": "authNetworkErrorFailTest", + "closeConnection": true + } + } + } + }, { "name": "insertMany", "object": "collection", diff --git a/test/discovery_and_monitoring/unified/backoff-network-error.json b/test/discovery_and_monitoring/unified/backoff-network-error.json index 03cb557215..c63c9f0065 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error.json @@ -1,10 +1,9 @@ { - "description": "auth-network-error", + "description": "backoff-network-error", "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.4", - "auth": true, "serverless": "forbid", "topologies": [ "single", @@ -23,7 +22,7 @@ ], "initialData": [ { - "collectionName": 
"auth-network-error", + "collectionName": "backoff-network-error", "databaseName": "sdam-tests", "documents": [ { @@ -37,28 +36,8 @@ ], "tests": [ { - "description": "Backoff and retry after network connection error during authentication", + "description": "Backoff and retry after network connection error during connection establishment", "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "closeConnection": true, - "appName": "authNetworkErrorTest" - } - } - } - }, { "name": "createEntities", "object": "testRunner", @@ -69,6 +48,7 @@ "id": "client", "useMultipleMongoses": false, "observeEvents": [ + "serverHeartbeatSucceededEvent", "commandStartedEvent", "poolBackoffEvent", "poolReadyEvent", @@ -77,6 +57,7 @@ "uriOptions": { "retryWrites": false, "appname": "authNetworkErrorTest", + "heartbeatFrequencyMS": 100000, "connectTimeoutMS": 250, "socketTimeoutMS": 250 } @@ -93,12 +74,44 @@ "collection": { "id": "collection", "database": "database", - "collectionName": "auth-network-error" + "collectionName": "backoff-network-error" } } ] } }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "authNetworkErrorTest" + } + } + } + }, { "name": "insertMany", "object": "collection", @@ -133,7 +146,7 @@ { "commandStartedEvent": { "command": { - "insert": "auth-network-error", + "insert": "backoff-network-error", "documents": [ { "_id": 3 @@ -152,7 +165,7 @@ ], "outcome": [ { - 
"collectionName": "auth-network-error", + "collectionName": "backoff-network-error", "databaseName": "sdam-tests", "documents": [ { diff --git a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json index b7e1485e04..1b4ec97838 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json @@ -1,5 +1,5 @@ { - "description": "auth-network-timeout-error", + "description": "backoff-network-timeout-error", "schemaVersion": "1.4", "runOnRequirements": [ { @@ -23,7 +23,7 @@ ], "initialData": [ { - "collectionName": "auth-network-timeout-error", + "collectionName": "backoff-network-timeout-error", "databaseName": "sdam-tests", "documents": [ { @@ -37,28 +37,8 @@ ], "tests": [ { - "description": "Backoff and retry after network timeout error during authentication", + "description": "Backoff and retry after network timeout error during connection establishment", "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "closeConnection": true, - "appName": "authNetworkTimeoutErrorTest" - } - } - } - }, { "name": "createEntities", "object": "testRunner", @@ -69,6 +49,7 @@ "id": "client", "useMultipleMongoses": false, "observeEvents": [ + "serverHeartbeatSucceededEvent", "commandStartedEvent", "poolBackoffEvent", "poolReadyEvent", @@ -76,6 +57,7 @@ ], "uriOptions": { "retryWrites": false, + "heartbeatFrequencyMS": 100000, "appname": "authNetworkTimeoutErrorTest", "connectTimeoutMS": 250, "socketTimeoutMS": 250 @@ -93,12 +75,44 @@ "collection": { "id": "collection", "database": "database", - "collectionName": "auth-network-timeout-error" + "collectionName": "backoff-network-timeout-error" } 
} ] } }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "authNetworkTimeoutErrorTest" + } + } + } + }, { "name": "insertMany", "object": "collection", @@ -144,7 +158,7 @@ { "commandStartedEvent": { "command": { - "insert": "auth-network-timeout-error", + "insert": "backoff-network-timeout-error", "documents": [ { "_id": 3 @@ -163,7 +177,7 @@ ], "outcome": [ { - "collectionName": "auth-network-timeout-error", + "collectionName": "backoff-network-timeout-error", "databaseName": "sdam-tests", "documents": [ { From adc137535a37015e07a094fca3c994140fcef3aa Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 24 Oct 2025 15:26:46 -0400 Subject: [PATCH 22/46] PYTHON-5627 - Update feedback link (#2601) (cherry picked from commit 0c8a22b87d40d2afcec65de08c5f76505fa03091) --- doc/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/index.rst b/doc/index.rst index 85812d1b14..9a2c3eb6b2 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -37,7 +37,7 @@ project. Feature Requests / Feedback --------------------------- -Use our `feedback engine `_ +Use our `feedback engine `_ to send us feature requests and general feedback about PyMongo. 
Contributing From d6d43e7e6789a543b9fccc1cf1e0109025cd5143 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 06:48:45 -0500 Subject: [PATCH 23/46] fix tests --- test/asynchronous/utils.py | 1 + .../connection-logging.json | 3 +++ .../unified/backoff-network-error.json | 19 +++++++++++++++++-- test/unified_format_shared.py | 1 + test/utils.py | 1 + 5 files changed, 23 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/utils.py b/test/asynchronous/utils.py index 02ba46c71a..29ef4a3a2a 100644 --- a/test/asynchronous/utils.py +++ b/test/asynchronous/utils.py @@ -242,6 +242,7 @@ def __init__(self, address, options, is_sdam=False, client_id=None): self.opts = options self.operation_count = 0 self.conns = [] + self.state = 0 def stale_generation(self, gen, service_id): return self.gen.stale(gen, service_id) diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index adb3d68e7f..207618b4a6 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -713,6 +713,9 @@ "durationMS": { "$$type": "int" }, + "attempt": { + "$$type": "int" + }, "serverPort": { "$$type": [ "int", diff --git a/test/discovery_and_monitoring/unified/backoff-network-error.json b/test/discovery_and_monitoring/unified/backoff-network-error.json index c63c9f0065..740d0c06a3 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error.json @@ -132,9 +132,24 @@ "arguments": { "client": "client", "event": { - "poolBackoffEvent": {} + "poolBackoffEvent": { + "attempt": 1 + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolBackoffEvent": { + "attempt": 2 + } }, - "count": 2 + "count": 1 } } ], diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index 
72a18c141b..1bc60a34ec 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -622,6 +622,7 @@ def match_event(self, expectation, actual): elif name == "poolBackoffEvent": self.test.assertIsInstance(actual, PoolBackoffEvent) self.test.assertIsInstance(actual.duration_ms, int) + self.test.assertIsInstance(actual.attempt, int) elif name == "connectionCreatedEvent": self.test.assertIsInstance(actual, ConnectionCreatedEvent) elif name == "connectionReadyEvent": diff --git a/test/utils.py b/test/utils.py index bfc606fe83..80491f4d26 100644 --- a/test/utils.py +++ b/test/utils.py @@ -240,6 +240,7 @@ def __init__(self, address, options, is_sdam=False, client_id=None): self.opts = options self.operation_count = 0 self.conns = [] + self.state = 0 def stale_generation(self, gen, service_id): return self.gen.stale(gen, service_id) From 36d4490cfa938da9452ef008d852bda322e45cc6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 07:46:54 -0500 Subject: [PATCH 24/46] fix tests --- pymongo/asynchronous/pool.py | 5 +++-- pymongo/synchronous/pool.py | 5 +++-- test/asynchronous/test_pooling.py | 4 ++-- .../unified/backoff-heartbeat-failure.json | 4 ---- .../unified/backoff-heartbeat-success.json | 4 ---- .../unified/backoff-network-error-fail.json | 15 ++++++++++++--- test/test_pooling.py | 2 +- 7 files changed, 21 insertions(+), 18 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 16c8f1d441..2ee75ec1ae 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1355,8 +1355,9 @@ async def _get_conn( timeout = 0.01 if not await _async_cond_wait(self._max_connecting_cond, timeout): # Check whether we should continue to wait for the backoff condition. 
- if self._backoff and (deadline is None or deadline < time.monotonic()): - if self._backoff_connection_time > time.monotonic(): + curr_time = time.monotonic() + if self._backoff and (deadline is None or curr_time < deadline): + if self._backoff_connection_time > curr_time: continue break # Timed out, notify the next thread to ensure a diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 3ad099faf9..559936f3d9 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1353,8 +1353,9 @@ def _get_conn( timeout = 0.01 if not _cond_wait(self._max_connecting_cond, timeout): # Check whether we should continue to wait for the backoff condition. - if self._backoff and (deadline is None or deadline < time.monotonic()): - if self._backoff_connection_time > time.monotonic(): + curr_time = time.monotonic() + if self._backoff and (deadline is None or curr_time < deadline): + if self._backoff_connection_time > curr_time: continue break # Timed out, notify the next thread to ensure a diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py index 230fae7217..0d48746243 100644 --- a/test/asynchronous/test_pooling.py +++ b/test/asynchronous/test_pooling.py @@ -517,7 +517,7 @@ async def test_connection_timeout_message(self): async def test_pool_check_backoff(self): # Test that Pool recovers from two connection failures in a row. # This exercises code at the end of Pool._check(). - cx_pool = await self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=1) + cx_pool = await self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=10) self.addAsyncCleanup(cx_pool.close) async with cx_pool.checkout() as conn: @@ -526,7 +526,7 @@ async def test_pool_check_backoff(self): await conn.conn.close() # Enable backoff. - cx_pool.backoff() + await cx_pool.backoff() # Swap pool's address with a bad one. 
address, cx_pool.address = cx_pool.address, ("foo.com", 1234) diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json index 5a43b02076..176974b711 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json @@ -50,7 +50,6 @@ "observeEvents": [ "commandStartedEvent", "poolBackoffEvent", - "poolReadyEvent", "poolClearedEvent", "serverHeartbeatFailedEvent", "serverHeartbeatSucceededEvent" @@ -143,9 +142,6 @@ "client": "client", "eventType": "cmap", "events": [ - { - "poolReadyEvent": {} - }, { "poolBackoffEvent": {} }, diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json index c2aaef483a..d423f0e60d 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json @@ -50,7 +50,6 @@ "observeEvents": [ "commandStartedEvent", "poolBackoffEvent", - "poolReadyEvent", "poolClearedEvent", "serverHeartbeatFailedEvent", "serverHeartbeatSucceededEvent" @@ -154,9 +153,6 @@ "client": "client", "eventType": "cmap", "events": [ - { - "poolReadyEvent": {} - }, { "poolBackoffEvent": {} }, diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index 6edcade36b..b85a6f1db7 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -202,9 +202,7 @@ "client": "setupClient", "failPoint": { "configureFailPoint": "failCommand", - "mode": { - "times": 6 - }, + "mode": "always", "data": { "failCommands": [ "isMaster", @@ -244,6 +242,17 @@ "count": 5 } }, + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + } + } + }, { "name": "failPoint", "object": "testRunner", diff --git a/test/test_pooling.py b/test/test_pooling.py index e386130cda..4d17139953 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -515,7 +515,7 @@ def test_connection_timeout_message(self): def test_pool_check_backoff(self): # Test that Pool recovers from two connection failures in a row. # This exercises code at the end of Pool._check(). - cx_pool = self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=1) + cx_pool = self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=10) self.addCleanup(cx_pool.close) with cx_pool.checkout() as conn: From f936b1b6e8b8706f73e44280216bb9fe27a3c960 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 08:28:09 -0500 Subject: [PATCH 25/46] update backoff logic and fix test --- pymongo/asynchronous/pool.py | 18 +++++++----------- pymongo/synchronous/pool.py | 18 +++++++----------- .../unified/backoff-network-error-fail.json | 2 +- 3 files changed, 15 insertions(+), 23 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 2ee75ec1ae..f21975a790 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1348,17 +1348,16 @@ async def _get_conn( async with self._max_connecting_cond: self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self.max_connecting): - timeout = deadline - time.monotonic() if deadline else None + curr_time = time.monotonic() + timeout = deadline - curr_time if deadline else None if self._backoff: - if self._backoff_connection_time < time.monotonic(): + if self._backoff_connection_time < curr_time: break - timeout = 0.01 + if deadline is None or deadline > self._backoff_connection_time: + timeout = self._backoff_connection_time - curr_time if not await 
_async_cond_wait(self._max_connecting_cond, timeout): - # Check whether we should continue to wait for the backoff condition. - curr_time = time.monotonic() - if self._backoff and (deadline is None or curr_time < deadline): - if self._backoff_connection_time > curr_time: - continue + # Check whether a backoff period has expired. + if self._backoff and time.monotonic() > self._backoff_connection_time: break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. @@ -1376,9 +1375,6 @@ async def _get_conn( if await self._perished(conn): conn = None continue - # See if we need to wait for the backoff period. - elif self._backoff and (self._backoff_connection_time > time.monotonic()): - continue else: # We need to create a new connection try: conn = await self.connect(handler=handler) diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 559936f3d9..8d66177bdc 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1346,17 +1346,16 @@ def _get_conn( with self._max_connecting_cond: self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self.max_connecting): - timeout = deadline - time.monotonic() if deadline else None + curr_time = time.monotonic() + timeout = deadline - curr_time if deadline else None if self._backoff: - if self._backoff_connection_time < time.monotonic(): + if self._backoff_connection_time < curr_time: break - timeout = 0.01 + if deadline is None or deadline > self._backoff_connection_time: + timeout = self._backoff_connection_time - curr_time if not _cond_wait(self._max_connecting_cond, timeout): - # Check whether we should continue to wait for the backoff condition. - curr_time = time.monotonic() - if self._backoff and (deadline is None or curr_time < deadline): - if self._backoff_connection_time > curr_time: - continue + # Check whether a backoff period has expired. 
+ if self._backoff and time.monotonic() > self._backoff_connection_time: break # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. @@ -1374,9 +1373,6 @@ def _get_conn( if self._perished(conn): conn = None continue - # See if we need to wait for the backoff period. - elif self._backoff and (self._backoff_connection_time > time.monotonic()): - continue else: # We need to create a new connection try: conn = self.connect(handler=handler) diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index b85a6f1db7..fe0da5beba 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -202,7 +202,7 @@ "client": "setupClient", "failPoint": { "configureFailPoint": "failCommand", - "mode": "always", + "mode": "alwaysOn", "data": { "failCommands": [ "isMaster", From 714bc3106af8179a43234ab378662af472ac5981 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 10:06:59 -0500 Subject: [PATCH 26/46] fix test --- .../unified/backoff-network-error-fail.json | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index fe0da5beba..834877315c 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -265,10 +265,11 @@ }, "data": { "failCommands": [ - "saslContinue" + "hello", + "isMaster" ], "appName": "authNetworkErrorFailTest", - "errorCode": 18 + "errorCode": 1 } } } From 2748749f800a8ac6d79aab53f01b3384c65255cb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 14:36:17 -0500 Subject: [PATCH 27/46] address failure --- 
.../unified/backoff-network-error-fail.json | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index 834877315c..764b317c09 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -96,9 +96,7 @@ "client": "setupClient", "failPoint": { "configureFailPoint": "failCommand", - "mode": { - "times": 6 - }, + "mode": "alwaysOn", "data": { "failCommands": [ "isMaster", From 2c3c9add22fdab5b416c50bf1eba337606f586af Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 14:45:54 -0500 Subject: [PATCH 28/46] revert changes to lb test --- test/load_balancer/sdam-error-handling.json | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index 28823d5b95..0dd33d1db6 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -282,7 +282,7 @@ "isMaster", "hello" ], - "errorCode": 18, + "closeConnection": true, "appName": "lbSDAMErrorTestClient" } } @@ -297,7 +297,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -345,7 +345,7 @@ "failCommands": [ "saslContinue" ], - "errorCode": 18, + "closeConnection": true, "appName": "lbSDAMErrorTestClient" } } @@ -406,8 +406,7 @@ "failCommands": [ "getMore" ], - "closeConnection": true, - "appName": "lbSDAMErrorTestClient" + "closeConnection": true } } } @@ -512,4 +511,4 @@ ] } ] -} +} \ No newline at end of file From 84f3b68f0caa391ed8602625365d3952afd8f92d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 16:30:40 -0500 Subject: [PATCH 29/46] more test cleanup --- .../unified/backoff-heartbeat-failure.json | 2 +- 
.../unified/backoff-network-error-fail.json | 23 +++++++++- test/load_balancer/sdam-error-handling.json | 43 ++++++++++++++++--- 3 files changed, 61 insertions(+), 7 deletions(-) diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json index 176974b711..939d3d244a 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json @@ -56,7 +56,7 @@ ], "uriOptions": { "retryWrites": false, - "heartbeatFrequencyMS": 5000, + "heartbeatFrequencyMS": 10000, "appname": "heartbeatBackoffFailTest" } } diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index 764b317c09..16b4d327ac 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -38,6 +38,17 @@ { "description": "Backoff and fail after network connection errors during connection establishment", "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + } + } + }, { "name": "createEntities", "object": "testRunner", @@ -141,7 +152,17 @@ { "description": "Backoff and clear the pool after network failures followed by server error", "operations": [ - + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + } + } + }, { "name": "createEntities", "object": "testRunner", diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index 0dd33d1db6..bca4ffdbfc 100644 --- a/test/load_balancer/sdam-error-handling.json +++ 
b/test/load_balancer/sdam-error-handling.json @@ -41,7 +41,10 @@ "connectionCheckOutFailedEvent", "connectionCheckedInEvent", "connectionClosedEvent", - "poolClearedEvent" + "poolClearedEvent", + "poolBackoffEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" ] } }, @@ -260,13 +263,24 @@ ] }, { - "description": "errors during the initial connection hello are ignored", + "description": "errors during the initial connection hello trigger backoff", "runOnRequirements": [ { "minServerVersion": "4.4.7" } ], "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "singleClient", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, { "name": "failPoint", "object": "testRunner", @@ -295,9 +309,17 @@ "document": { "x": 1 } - }, - "expectError": { - "isClientError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolBackoffEvent": {} + }, + "count": 1 } } ], @@ -318,6 +340,17 @@ "connectionCheckOutFailedEvent": { "reason": "connectionError" } + }, + { + "poolBackoffEvent": { + "attempt": 1 + } + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} } ] } From ca6c981b3e7bfb94d1a2003ef9cf60ede5aa1b6b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 17:04:15 -0500 Subject: [PATCH 30/46] more test cleanup --- test/asynchronous/unified_format.py | 5 +++-- test/load_balancer/sdam-error-handling.json | 15 +-------------- test/unified_format.py | 5 +++-- 3 files changed, 7 insertions(+), 18 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 09bf7e83ea..8640c1f86d 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -1397,9 +1397,10 @@ async def run_scenario(self, spec, uri=None): for reason, flaky_test in flaky_tests: if re.match(flaky_test.lower(), self.id().lower()) is 
not None: func_name = self.id() - options = dict(reason=reason, reset_func=self.asyncSetUp, func_name=func_name) + options = dict( + reason=reason, reset_func=self.asyncSetUp, func_name=func_name, max_runs=3 + ) if "csot" in func_name.lower(): - options["max_runs"] = 3 options["affects_cpython_linux"] = True decorator = flaky(**options) await decorator(self._run_scenario)(spec, uri) diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index bca4ffdbfc..332f1b73f5 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -42,9 +42,7 @@ "connectionCheckedInEvent", "connectionClosedEvent", "poolClearedEvent", - "poolBackoffEvent", - "serverHeartbeatSucceededEvent", - "serverHeartbeatFailedEvent" + "poolBackoffEvent" ] } }, @@ -270,17 +268,6 @@ } ], "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "singleClient", - "event": { - "serverHeartbeatSucceededEvent": {} - }, - "count": 1 - } - }, { "name": "failPoint", "object": "testRunner", diff --git a/test/unified_format.py b/test/unified_format.py index 3496b2ad44..4c67b9b972 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1384,9 +1384,10 @@ def run_scenario(self, spec, uri=None): for reason, flaky_test in flaky_tests: if re.match(flaky_test.lower(), self.id().lower()) is not None: func_name = self.id() - options = dict(reason=reason, reset_func=self.setUp, func_name=func_name) + options = dict( + reason=reason, reset_func=self.setUp, func_name=func_name, max_runs=3 + ) if "csot" in func_name.lower(): - options["max_runs"] = 3 options["affects_cpython_linux"] = True decorator = flaky(**options) decorator(self._run_scenario)(spec, uri) From 94bc9a35b398a8b43653daad7d5d53c01ae72718 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 17:33:04 -0500 Subject: [PATCH 31/46] fix load balancer test --- 
test/load_balancer/sdam-error-handling.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index 332f1b73f5..97c7e156f3 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -302,7 +302,7 @@ "name": "waitForEvent", "object": "testRunner", "arguments": { - "client": "client", + "client": "singleClient", "event": { "poolBackoffEvent": {} }, From 85d2a6b25f65e81290c5e088ae0aa794feda8733 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Oct 2025 17:39:06 -0500 Subject: [PATCH 32/46] fix load balancer test --- test/load_balancer/sdam-error-handling.json | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index 97c7e156f3..5ff0fd73a9 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -323,15 +323,21 @@ "reason": "error" } }, + { + "poolBackoffEvent": { + "attempt": 1 + } + }, { "connectionCheckOutFailedEvent": { "reason": "connectionError" } }, { - "poolBackoffEvent": { - "attempt": 1 - } + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} }, { "connectionCheckedOutEvent": {} From c75ea231332cff727a6f8c16d2ed8e8348678cc8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Oct 2025 06:30:15 -0500 Subject: [PATCH 33/46] clean up tests --- .../unified/backoff-network-error-fail.json | 34 ++++--------------- .../unified/backoff-network-error.json | 4 +-- .../backoff-network-timeout-error.json | 6 ++-- 3 files changed, 11 insertions(+), 33 deletions(-) diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index 16b4d327ac..5d02e01d81 100644 --- 
a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -38,17 +38,6 @@ { "description": "Backoff and fail after network connection errors during connection establishment", "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": "off" - } - } - }, { "name": "createEntities", "object": "testRunner", @@ -68,7 +57,7 @@ "uriOptions": { "retryWrites": false, "heartbeatFrequencyMS": 100000, - "appname": "authNetworkErrorFailTest" + "appname": "backoffNetworkErrorFailTest" } } }, @@ -113,7 +102,7 @@ "isMaster", "hello" ], - "appName": "authNetworkErrorFailTest", + "appName": "backoffNetworkErrorFailTest", "closeConnection": true } } @@ -152,17 +141,6 @@ { "description": "Backoff and clear the pool after network failures followed by server error", "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": "off" - } - } - }, { "name": "createEntities", "object": "testRunner", @@ -182,7 +160,7 @@ "uriOptions": { "retryWrites": false, "heartbeatFrequencyMS": 100000, - "appname": "authNetworkErrorFailTest" + "appname": "backoffNetworkErrorFailClearTest" } } }, @@ -197,7 +175,7 @@ "collection": { "id": "collection", "database": "database", - "collectionName": "backoff-network-error-fail" + "collectionName": "backoff-network-error-fail-clear" } } ] @@ -227,7 +205,7 @@ "isMaster", "hello" ], - "appName": "authNetworkErrorFailTest", + "appName": "backoffNetworkErrorFailClearTest", "closeConnection": true } } @@ -287,7 +265,7 @@ "hello", "isMaster" ], - "appName": "authNetworkErrorFailTest", + "appName": "backoffNetworkErrorFailClearTest", "errorCode": 1 } } diff --git a/test/discovery_and_monitoring/unified/backoff-network-error.json 
b/test/discovery_and_monitoring/unified/backoff-network-error.json index 740d0c06a3..78b30ea1b1 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error.json @@ -56,7 +56,7 @@ ], "uriOptions": { "retryWrites": false, - "appname": "authNetworkErrorTest", + "appname": "backoffNetworkErrorTest", "heartbeatFrequencyMS": 100000, "connectTimeoutMS": 250, "socketTimeoutMS": 250 @@ -107,7 +107,7 @@ "hello" ], "closeConnection": true, - "appName": "authNetworkErrorTest" + "appName": "backoffNetworkErrorTest" } } } diff --git a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json index 1b4ec97838..a6127c8f47 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json @@ -4,7 +4,7 @@ "runOnRequirements": [ { "minServerVersion": "4.4", - "auth": true, + "backoff": true, "serverless": "forbid", "topologies": [ "single", @@ -58,7 +58,7 @@ "uriOptions": { "retryWrites": false, "heartbeatFrequencyMS": 100000, - "appname": "authNetworkTimeoutErrorTest", + "appname": "backoffNetworkTimeoutErrorTest", "connectTimeoutMS": 250, "socketTimeoutMS": 250 } @@ -108,7 +108,7 @@ "hello" ], "closeConnection": true, - "appName": "authNetworkTimeoutErrorTest" + "appName": "backoffNetworkTimeoutErrorTest" } } } From b2b4507e01fa1077f1bafa2d6efa1db3fdfb4317 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Oct 2025 07:37:04 -0500 Subject: [PATCH 34/46] try pypy 3.11 --- .evergreen/generated_configs/tasks.yml | 120 ++++++++++---------- .evergreen/scripts/generate_config_utils.py | 2 +- 2 files changed, 61 insertions(+), 61 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 65813db1cf..657c4a30ca 100644 --- 
a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -297,13 +297,13 @@ tasks: vars: PYTHON_VERSION: "3.14" tags: [test-no-orchestration, python-3.14] - - name: test-no-orchestration-pypy3.10 + - name: test-no-orchestration-pypy3.11 commands: - func: assume ec2 role - func: run tests vars: - PYTHON_VERSION: pypy3.10 - tags: [test-no-orchestration, python-pypy3.10] + PYTHON_VERSION: pypy3.11 + tags: [test-no-orchestration, python-pypy3.11] # No toolchain tests - name: test-no-toolchain-sync-noauth-nossl-standalone @@ -2492,7 +2492,7 @@ tasks: - python-3.14 - standalone-noauth-ssl - async - - name: test-server-version-pypy3.10-sync-noauth-nossl-standalone + - name: test-server-version-pypy3.11-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -2504,11 +2504,11 @@ tasks: AUTH: noauth SSL: nossl TOPOLOGY: standalone - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - server-version - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - sync - pr @@ -2639,7 +2639,7 @@ tasks: - python-3.14 - replica_set-noauth-ssl - sync - - name: test-server-version-pypy3.10-async-noauth-ssl-replica-set + - name: test-server-version-pypy3.11-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -2651,11 +2651,11 @@ tasks: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - async - name: test-server-version-python3.9-sync-noauth-nossl-replica-set-cov @@ -2788,7 +2788,7 @@ tasks: - python-3.14 - sharded_cluster-auth-nossl - async - - name: test-server-version-pypy3.10-sync-noauth-ssl-sharded-cluster + - name: test-server-version-pypy3.11-sync-noauth-ssl-sharded-cluster commands: - func: run server vars: @@ -2800,11 +2800,11 @@ tasks: AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + 
PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - server-version - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-noauth-ssl - sync - name: test-server-version-python3.9-async-noauth-ssl-sharded-cluster-cov @@ -3080,7 +3080,7 @@ tasks: - python-3.14 - sharded_cluster-auth-ssl - async - - name: test-server-version-pypy3.10-sync-auth-ssl-sharded-cluster + - name: test-server-version-pypy3.11-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3092,14 +3092,14 @@ tasks: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - server-version - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - sync - - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster + - name: test-server-version-pypy3.11-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3111,11 +3111,11 @@ tasks: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - async @@ -3186,7 +3186,7 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - sync - - name: test-standard-v4.2-pypy3.10-sync-auth-ssl-sharded-cluster + - name: test-standard-v4.2-pypy3.11-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3200,12 +3200,12 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - sync - pypy @@ -3319,7 +3319,7 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - async - - name: test-standard-v4.4-pypy3.10-async-auth-ssl-sharded-cluster + - name: test-standard-v4.4-pypy3.11-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3333,12 +3333,12 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster 
VERSION: "4.4" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - async - pypy @@ -3694,7 +3694,7 @@ tasks: - python-3.11 - standalone-noauth-nossl - sync - - name: test-standard-v7.0-pypy3.10-sync-noauth-nossl-standalone + - name: test-standard-v7.0-pypy3.11-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3708,12 +3708,12 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - test-standard - server-7.0 - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - sync - pypy @@ -3805,7 +3805,7 @@ tasks: - python-3.11 - standalone-noauth-nossl - async - - name: test-standard-v8.0-pypy3.10-async-noauth-nossl-standalone + - name: test-standard-v8.0-pypy3.11-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3819,12 +3819,12 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "8.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_async tags: - test-standard - server-8.0 - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - async - pypy @@ -3851,7 +3851,7 @@ tasks: - replica_set-noauth-ssl - async - pr - - name: test-standard-latest-pypy3.10-async-noauth-ssl-replica-set + - name: test-standard-latest-pypy3.11-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3865,12 +3865,12 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_async tags: - test-standard - server-latest - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - async - pypy @@ -3965,7 +3965,7 @@ tasks: - python-3.11 - replica_set-noauth-ssl - sync - - name: test-standard-rapid-pypy3.10-sync-noauth-ssl-replica-set + - name: test-standard-rapid-pypy3.11-sync-noauth-ssl-replica-set commands: - func: run 
server vars: @@ -3979,12 +3979,12 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - test-standard - server-rapid - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - sync - pypy @@ -4563,7 +4563,7 @@ tasks: - sharded_cluster-auth-ssl - auth - pr - - name: test-non-standard-v4.2-pypy3.10-noauth-nossl-standalone + - name: test-non-standard-v4.2-pypy3.11-noauth-nossl-standalone commands: - func: run server vars: @@ -4577,15 +4577,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-4.2 - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-v4.4-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-v4.4-pypy3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4599,15 +4599,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-4.4 - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-v5.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-non-standard-v5.0-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4621,15 +4621,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-5.0 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy - - name: test-non-standard-v6.0-pypy3.10-noauth-nossl-standalone + - name: test-non-standard-v6.0-pypy3.11-noauth-nossl-standalone commands: - func: run server vars: @@ -4643,15 +4643,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-6.0 - - python-pypy3.10 + - python-pypy3.11 - 
standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-v7.0-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-v7.0-pypy3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4665,15 +4665,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-7.0 - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-v8.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-non-standard-v8.0-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4687,15 +4687,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-8.0 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy - - name: test-non-standard-rapid-pypy3.10-noauth-nossl-standalone + - name: test-non-standard-rapid-pypy3.11-noauth-nossl-standalone commands: - func: run server vars: @@ -4709,15 +4709,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-rapid - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-latest-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-latest-pypy3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4731,11 +4731,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-latest - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - noauth - pypy diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index 632d34ea6f..c5054b6f48 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -23,7 +23,7 @@ ALL_VERSIONS = 
["4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"] -PYPYS = ["pypy3.10"] +PYPYS = ["pypy3.11"] ALL_PYTHONS = CPYTHONS + PYPYS MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] BATCHTIME_WEEK = 10080 From 7411be60f9b71c8d65583300bca7ab4f1386521c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Oct 2025 12:58:02 -0500 Subject: [PATCH 35/46] add logic for multiple pending connections --- pymongo/asynchronous/pool.py | 10 ++++++++-- pymongo/synchronous/pool.py | 10 ++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index f21975a790..74aa903596 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1083,6 +1083,10 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A Note that the pool does not keep a reference to the socket -- you must call checkin() when you're done with it. """ + # Mark whether we were in ready state before starting the process, to + # handle the case of multiple pending connections. 
+ was_ready = self.state == PoolState.READY + async with self.lock: conn_id = self.next_connection_id self.next_connection_id += 1 @@ -1131,7 +1135,9 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), error=ConnectionClosedReason.ERROR, ) - if context["has_created_socket"]: + if context["has_created_socket"] and not ( + was_ready and self.state == PoolState.BACKOFF + ): await self._handle_connection_error(error, "handshake") if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) @@ -1158,7 +1164,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A except BaseException as e: async with self.lock: self.active_contexts.discard(conn.cancel_context) - if not has_completed_hello: + if not has_completed_hello and not (was_ready and self.state == PoolState.BACKOFF): await self._handle_connection_error(e, "hello") await conn.close_conn(ConnectionClosedReason.ERROR) raise diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 8d66177bdc..dff4fb7ac7 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1081,6 +1081,10 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect Note that the pool does not keep a reference to the socket -- you must call checkin() when you're done with it. """ + # Mark whether we were in ready state before starting the process, to + # handle the case of multiple pending connections. 
+ was_ready = self.state == PoolState.READY + with self.lock: conn_id = self.next_connection_id self.next_connection_id += 1 @@ -1129,7 +1133,9 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), error=ConnectionClosedReason.ERROR, ) - if context["has_created_socket"]: + if context["has_created_socket"] and not ( + was_ready and self.state == PoolState.BACKOFF + ): self._handle_connection_error(error, "handshake") if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) @@ -1156,7 +1162,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect except BaseException as e: with self.lock: self.active_contexts.discard(conn.cancel_context) - if not has_completed_hello: + if not has_completed_hello and not (was_ready and self.state == PoolState.BACKOFF): self._handle_connection_error(e, "hello") conn.close_conn(ConnectionClosedReason.ERROR) raise From c2c8d408e3b8fa2157df489717ad78bd281076e5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Oct 2025 16:25:50 -0500 Subject: [PATCH 36/46] fix race condition in tests --- .../unified/backoff-heartbeat-failure.json | 1 + .../unified/backoff-heartbeat-success.json | 5 +++-- .../unified/backoff-network-error-fail.json | 6 ++++-- .../unified/backoff-network-error.json | 3 ++- .../unified/backoff-network-timeout-error.json | 3 ++- 5 files changed, 12 insertions(+), 6 deletions(-) diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json index 939d3d244a..effcf69b48 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json @@ -57,6 +57,7 @@ "uriOptions": { "retryWrites": false, "heartbeatFrequencyMS": 10000, + "serverMonitoringMode": "poll", "appname": 
"heartbeatBackoffFailTest" } } diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json index d423f0e60d..bd5b95124b 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json @@ -57,7 +57,8 @@ "uriOptions": { "retryWrites": false, "heartbeatFrequencyMS": 5000, - "appname": "heartbeatBackoffFailTest" + "serverMonitoringMode": "poll", + "appname": "heartbeatBackoffSuccessTest" } } }, @@ -102,7 +103,7 @@ "isMaster", "hello" ], - "appName": "heartbeatBackoffFailTest", + "appName": "heartbeatBackoffSuccessTest", "closeConnection": true } } diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index 5d02e01d81..e6c89724de 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -56,7 +56,8 @@ ], "uriOptions": { "retryWrites": false, - "heartbeatFrequencyMS": 100000, + "heartbeatFrequencyMS": 10000, + "serverMonitoringMode": "poll", "appname": "backoffNetworkErrorFailTest" } } @@ -159,7 +160,8 @@ ], "uriOptions": { "retryWrites": false, - "heartbeatFrequencyMS": 100000, + "heartbeatFrequencyMS": 10000, + "serverMonitoringMode": "poll", "appname": "backoffNetworkErrorFailClearTest" } } diff --git a/test/discovery_and_monitoring/unified/backoff-network-error.json b/test/discovery_and_monitoring/unified/backoff-network-error.json index 78b30ea1b1..e6b71d4e13 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error.json @@ -57,7 +57,8 @@ "uriOptions": { "retryWrites": false, "appname": "backoffNetworkErrorTest", - "heartbeatFrequencyMS": 100000, + "heartbeatFrequencyMS": 10000, + 
"serverMonitoringMode": "poll", "connectTimeoutMS": 250, "socketTimeoutMS": 250 } diff --git a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json index a6127c8f47..96a35143bc 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json @@ -57,8 +57,9 @@ ], "uriOptions": { "retryWrites": false, - "heartbeatFrequencyMS": 100000, + "heartbeatFrequencyMS": 10000, "appname": "backoffNetworkTimeoutErrorTest", + "serverMonitoringMode": "poll", "connectTimeoutMS": 250, "socketTimeoutMS": 250 } From d0aa7c745f2a1e5eca31a4570f033acdbd402afa Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Oct 2025 16:26:17 -0500 Subject: [PATCH 37/46] undo change to flaky condition --- test/asynchronous/unified_format.py | 5 ++--- test/unified_format.py | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 8640c1f86d..09bf7e83ea 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -1397,10 +1397,9 @@ async def run_scenario(self, spec, uri=None): for reason, flaky_test in flaky_tests: if re.match(flaky_test.lower(), self.id().lower()) is not None: func_name = self.id() - options = dict( - reason=reason, reset_func=self.asyncSetUp, func_name=func_name, max_runs=3 - ) + options = dict(reason=reason, reset_func=self.asyncSetUp, func_name=func_name) if "csot" in func_name.lower(): + options["max_runs"] = 3 options["affects_cpython_linux"] = True decorator = flaky(**options) await decorator(self._run_scenario)(spec, uri) diff --git a/test/unified_format.py b/test/unified_format.py index 4c67b9b972..3496b2ad44 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1384,10 +1384,9 @@ def run_scenario(self, spec, uri=None): for reason, 
flaky_test in flaky_tests: if re.match(flaky_test.lower(), self.id().lower()) is not None: func_name = self.id() - options = dict( - reason=reason, reset_func=self.setUp, func_name=func_name, max_runs=3 - ) + options = dict(reason=reason, reset_func=self.setUp, func_name=func_name) if "csot" in func_name.lower(): + options["max_runs"] = 3 options["affects_cpython_linux"] = True decorator = flaky(**options) decorator(self._run_scenario)(spec, uri) From 65eb2dcebfa33f4d54e55767f01c8b8acad3f844 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Oct 2025 21:34:43 -0500 Subject: [PATCH 38/46] fix test format --- .../unified/backoff-network-timeout-error.json | 1 - 1 file changed, 1 deletion(-) diff --git a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json index 96a35143bc..cdb67a7194 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json @@ -4,7 +4,6 @@ "runOnRequirements": [ { "minServerVersion": "4.4", - "backoff": true, "serverless": "forbid", "topologies": [ "single", From 063238dc54027082b0efcc8fda580539d2007552 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Oct 2025 21:49:48 -0500 Subject: [PATCH 39/46] update schema version --- .../unified/backoff-heartbeat-failure.json | 2 +- .../unified/backoff-heartbeat-success.json | 2 +- .../unified/backoff-network-error-fail.json | 2 +- .../discovery_and_monitoring/unified/backoff-network-error.json | 2 +- .../unified/backoff-network-timeout-error.json | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json index effcf69b48..2d4e0471f9 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json +++ 
b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json @@ -1,6 +1,6 @@ { "description": "heartbeat-failure-clears-backoff-pool", - "schemaVersion": "1.4", + "schemaVersion": "1.28", "runOnRequirements": [ { "minServerVersion": "4.4", diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json index bd5b95124b..4f790007b5 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json @@ -1,6 +1,6 @@ { "description": "heartbeat-success-backoff", - "schemaVersion": "1.4", + "schemaVersion": "1.28", "runOnRequirements": [ { "minServerVersion": "4.4", diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index e6c89724de..c128198415 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -1,6 +1,6 @@ { "description": "backoff-network-error-fail", - "schemaVersion": "1.4", + "schemaVersion": "1.28", "runOnRequirements": [ { "minServerVersion": "4.4", diff --git a/test/discovery_and_monitoring/unified/backoff-network-error.json b/test/discovery_and_monitoring/unified/backoff-network-error.json index e6b71d4e13..a3f62fd674 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error.json @@ -1,6 +1,6 @@ { "description": "backoff-network-error", - "schemaVersion": "1.4", + "schemaVersion": "1.28", "runOnRequirements": [ { "minServerVersion": "4.4", diff --git a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json index cdb67a7194..87c50bc2f2 100644 --- 
a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json @@ -1,6 +1,6 @@ { "description": "backoff-network-timeout-error", - "schemaVersion": "1.4", + "schemaVersion": "1.28", "runOnRequirements": [ { "minServerVersion": "4.4", From 0e9c29a4b3ab0e8c009fb286833545980b2c14d3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Oct 2025 21:53:37 -0500 Subject: [PATCH 40/46] formatting --- test/connection_logging/connection-logging.json | 6 +++--- .../unified/backoff-network-error.json | 2 +- test/load_balancer/sdam-error-handling.json | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index 207618b4a6..303ac80784 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -711,10 +711,10 @@ "$$type": "string" }, "durationMS": { - "$$type": "int" + "$$type": "int" }, "attempt": { - "$$type": "int" + "$$type": "int" }, "serverPort": { "$$type": [ @@ -793,7 +793,7 @@ } } }, -{ + { "level": "debug", "component": "connection", "data": { diff --git a/test/discovery_and_monitoring/unified/backoff-network-error.json b/test/discovery_and_monitoring/unified/backoff-network-error.json index a3f62fd674..b3b230ca6d 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error.json @@ -150,7 +150,7 @@ "attempt": 2 } }, - "count": 1 + "count": 1 } } ], diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index 5ff0fd73a9..45b969d8d4 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -537,4 +537,4 @@ ] } ] -} \ No newline at end of file +} From a2aec86ab871b9de5f2e0cc049e0fb124bf0c632 Mon Sep 17 00:00:00 2001 From: Steven 
Silvester Date: Wed, 29 Oct 2025 22:00:21 -0500 Subject: [PATCH 41/46] update tests --- test/connection_logging/connection-logging.json | 1 - test/load_balancer/sdam-error-handling.json | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index 303ac80784..f9f34ae95b 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -523,7 +523,6 @@ "description": "Connection enters backoff on closed connection", "runOnRequirements": [ { - "auth": true, "minServerVersion": "4.4" } ], diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index 45b969d8d4..7654eff6fb 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -1,6 +1,6 @@ { "description": "state change errors are correctly handled", - "schemaVersion": "1.4", + "schemaVersion": "1.28", "runOnRequirements": [ { "topologies": [ From fd6359768631cf3abc6421ba54692079943d40df Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 30 Oct 2025 07:28:08 -0500 Subject: [PATCH 42/46] fix supported schema version --- test/asynchronous/unified_format.py | 2 +- test/unified_format.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 09bf7e83ea..94ae84953a 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -430,7 +430,7 @@ class UnifiedSpecTestMixinV1(AsyncIntegrationTest): a class attribute ``TEST_SPEC``. 
""" - SCHEMA_VERSION = Version.from_string("1.22") + SCHEMA_VERSION = Version.from_string("1.28") RUN_ON_LOAD_BALANCER = True TEST_SPEC: Any TEST_PATH = "" # This gets filled in by generate_test_classes diff --git a/test/unified_format.py b/test/unified_format.py index 3496b2ad44..630a525c14 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -429,7 +429,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.22") + SCHEMA_VERSION = Version.from_string("1.28") RUN_ON_LOAD_BALANCER = True TEST_SPEC: Any TEST_PATH = "" # This gets filled in by generate_test_classes From 09647f0f0975b0eeb6530d0fd45d7d9bf4e88603 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 31 Oct 2025 13:02:10 -0500 Subject: [PATCH 43/46] address review --- pymongo/asynchronous/mongo_client.py | 6 +- pymongo/asynchronous/pool.py | 13 ++-- pymongo/synchronous/mongo_client.py | 6 +- pymongo/synchronous/pool.py | 13 ++-- .../unified/backoff-heartbeat-failure.json | 38 +++--------- .../unified/backoff-heartbeat-success.json | 37 ++++------- .../unified/backoff-network-error-fail.json | 21 ++++++- .../unified/backoff-network-error.json | 61 +------------------ .../backoff-network-timeout-error.json | 52 ++-------------- 9 files changed, 66 insertions(+), 181 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 20ed199b20..aa8adc0037 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2802,7 +2802,7 @@ async def run(self) -> T: if isinstance(exc, (ConnectionFailure, OperationFailure)): # ConnectionFailures do not supply a code property exc_code = getattr(exc, "code", None) - always_retryable = exc.has_error_label("RetryableError") + always_retryable = exc.has_error_label("RetryableError") and self._retryable overloaded = exc.has_error_label("SystemOverloadedError") if not always_retryable and ( 
self._is_not_eligible_for_retry() @@ -2825,7 +2825,9 @@ async def run(self) -> T: ): exc_to_check = exc.error retryable_write_label = exc_to_check.has_error_label("RetryableWriteError") - always_retryable = exc_to_check.has_error_label("RetryableError") + always_retryable = ( + exc_to_check.has_error_label("RetryableError") and self._retryable + ) overloaded = exc_to_check.has_error_label("SystemOverloadedError") if not self._retryable and not always_retryable: raise diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 74aa903596..8d1630d9b2 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1054,6 +1054,9 @@ async def backoff(self) -> None: backoff_duration_ms = int(backoff_duration_sec * 1000) if self.state != PoolState.BACKOFF: self.state = PoolState.BACKOFF + # Cancel other pending connections. + for context in self.active_contexts: + context.cancel() if self.enabled_for_cmap: assert self.opts._event_listeners is not None self.opts._event_listeners.publish_pool_backoff( @@ -1083,10 +1086,6 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A Note that the pool does not keep a reference to the socket -- you must call checkin() when you're done with it. """ - # Mark whether we were in ready state before starting the process, to - # handle the case of multiple pending connections. 
- was_ready = self.state == PoolState.READY - async with self.lock: conn_id = self.next_connection_id self.next_connection_id += 1 @@ -1135,9 +1134,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), error=ConnectionClosedReason.ERROR, ) - if context["has_created_socket"] and not ( - was_ready and self.state == PoolState.BACKOFF - ): + if context["has_created_socket"]: await self._handle_connection_error(error, "handshake") if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) @@ -1164,7 +1161,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A except BaseException as e: async with self.lock: self.active_contexts.discard(conn.cancel_context) - if not has_completed_hello and not (was_ready and self.state == PoolState.BACKOFF): + if not has_completed_hello: await self._handle_connection_error(e, "hello") await conn.close_conn(ConnectionClosedReason.ERROR) raise diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index e4a6003c1c..addd3414c3 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2792,7 +2792,7 @@ def run(self) -> T: if isinstance(exc, (ConnectionFailure, OperationFailure)): # ConnectionFailures do not supply a code property exc_code = getattr(exc, "code", None) - always_retryable = exc.has_error_label("RetryableError") + always_retryable = exc.has_error_label("RetryableError") and self._retryable overloaded = exc.has_error_label("SystemOverloadedError") if not always_retryable and ( self._is_not_eligible_for_retry() @@ -2815,7 +2815,9 @@ def run(self) -> T: ): exc_to_check = exc.error retryable_write_label = exc_to_check.has_error_label("RetryableWriteError") - always_retryable = exc_to_check.has_error_label("RetryableError") + always_retryable = ( + exc_to_check.has_error_label("RetryableError") and 
self._retryable + ) overloaded = exc_to_check.has_error_label("SystemOverloadedError") if not self._retryable and not always_retryable: raise diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index dff4fb7ac7..d05557f907 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1052,6 +1052,9 @@ def backoff(self) -> None: backoff_duration_ms = int(backoff_duration_sec * 1000) if self.state != PoolState.BACKOFF: self.state = PoolState.BACKOFF + # Cancel other pending connections. + for context in self.active_contexts: + context.cancel() if self.enabled_for_cmap: assert self.opts._event_listeners is not None self.opts._event_listeners.publish_pool_backoff( @@ -1081,10 +1084,6 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect Note that the pool does not keep a reference to the socket -- you must call checkin() when you're done with it. """ - # Mark whether we were in ready state before starting the process, to - # handle the case of multiple pending connections. 
- was_ready = self.state == PoolState.READY - with self.lock: conn_id = self.next_connection_id self.next_connection_id += 1 @@ -1133,9 +1132,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), error=ConnectionClosedReason.ERROR, ) - if context["has_created_socket"] and not ( - was_ready and self.state == PoolState.BACKOFF - ): + if context["has_created_socket"]: self._handle_connection_error(error, "handshake") if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) @@ -1162,7 +1159,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect except BaseException as e: with self.lock: self.active_contexts.discard(conn.cancel_context) - if not has_completed_hello and not (was_ready and self.state == PoolState.BACKOFF): + if not has_completed_hello: self._handle_connection_error(e, "hello") conn.close_conn(ConnectionClosedReason.ERROR) raise diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json index 2d4e0471f9..b2a98fa4f6 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json @@ -132,39 +132,21 @@ "arguments": { "client": "client", "event": { - "serverHeartbeatFailedEvent": {} + "poolBackoffEvent": {} }, "count": 1 } - } - ], - "expectEvents": [ + }, { - "client": "client", - "eventType": "cmap", - "events": [ - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} }, - { - "poolClearedEvent": {} - } - ] + "count": 1 + } } ] 
} diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json index 4f790007b5..ef07ab5d90 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json @@ -126,6 +126,17 @@ "isError": true } }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolBackoffEvent": {} + }, + "count": 1 + } + }, { "name": "failPoint", "object": "testRunner", @@ -148,32 +159,6 @@ "count": 1 } } - ], - "expectEvents": [ - { - "client": "client", - "eventType": "cmap", - "events": [ - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - }, - { - "poolBackoffEvent": {} - } - ] - } ] } ] diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index c128198415..43d84ad8f7 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -126,6 +126,23 @@ "isError": true } }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, { "name": "waitForEvent", "object": "testRunner", @@ -134,7 +151,7 @@ "event": { "poolBackoffEvent": {} }, - "count": 5 + "count": 1 } } ] @@ -238,7 +255,7 @@ "event": { "poolBackoffEvent": {} }, - "count": 5 + "count": 1 } }, { diff --git a/test/discovery_and_monitoring/unified/backoff-network-error.json b/test/discovery_and_monitoring/unified/backoff-network-error.json index b3b230ca6d..6da5973eaa 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error.json +++ 
b/test/discovery_and_monitoring/unified/backoff-network-error.json @@ -125,6 +125,9 @@ "_id": 4 } ] + }, + "expectError": { + "isError": true } }, { @@ -139,64 +142,6 @@ }, "count": 1 } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "poolBackoffEvent": { - "attempt": 2 - } - }, - "count": 1 - } - } - ], - "expectEvents": [ - { - "client": "client", - "eventType": "command", - "events": [ - { - "commandStartedEvent": { - "command": { - "insert": "backoff-network-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "commandName": "insert", - "databaseName": "sdam-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "backoff-network-error", - "databaseName": "sdam-tests", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] } ] } diff --git a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json index 87c50bc2f2..ee9daf224c 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json @@ -100,7 +100,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 2 + "times": 1 }, "data": { "failCommands": [ @@ -125,6 +125,9 @@ "_id": 4 } ] + }, + "expectError": { + "isError": true } }, { @@ -135,7 +138,7 @@ "event": { "poolBackoffEvent": {} }, - "count": 2 + "count": 1 } }, { @@ -149,51 +152,6 @@ "count": 1 } } - ], - "expectEvents": [ - { - "client": "client", - "eventType": "command", - "events": [ - { - "commandStartedEvent": { - "command": { - "insert": "backoff-network-timeout-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "commandName": "insert", - "databaseName": "sdam-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "backoff-network-timeout-error", - 
"databaseName": "sdam-tests", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } ] } ] From 1e6973b41b61c601f3ebac3361fd6756f9fa9b9c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 31 Oct 2025 15:58:27 -0500 Subject: [PATCH 44/46] add error label check and add runOnRequirement --- test/asynchronous/unified_format.py | 6 ++++++ .../unified/backoff-heartbeat-failure.json | 7 ++++++- .../unified/backoff-heartbeat-success.json | 7 ++++++- .../unified/backoff-network-error-fail.json | 13 +++++++++++-- .../unified/backoff-network-error.json | 7 ++++++- .../unified/backoff-network-timeout-error.json | 7 ++++++- test/unified_format.py | 6 ++++++ 7 files changed, 47 insertions(+), 6 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 94ae84953a..c51d1ac814 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -157,6 +157,11 @@ async def is_run_on_requirement_satisfied(requirement): min_version_satisfied = Version.from_string("4.2") <= server_version csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + pool_backoff_statisfied = True + req_pool_backoff = requirement.get("supportsPoolBackoff") + if req_pool_backoff is False: + pool_backoff_statisfied = False + return ( topology_satisfied and min_version_satisfied @@ -164,6 +169,7 @@ async def is_run_on_requirement_satisfied(requirement): and params_satisfied and auth_satisfied and csfle_satisfied + and pool_backoff_statisfied ) diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json index b2a98fa4f6..2e93054149 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json @@ -5,6 +5,7 @@ { "minServerVersion": "4.4", "serverless": "forbid", + "supportsPoolBackoff": true, 
"topologies": [ "single", "replicaset", @@ -123,7 +124,11 @@ ] }, "expectError": { - "isError": true + "isError": true, + "errorLabelsContain": [ + "SystemOverloadedError", + "RetryableError" + ] } }, { diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json index ef07ab5d90..eb8576ddba 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json @@ -5,6 +5,7 @@ { "minServerVersion": "4.4", "serverless": "forbid", + "supportsPoolBackoff": true, "topologies": [ "single", "replicaset", @@ -123,7 +124,11 @@ ] }, "expectError": { - "isError": true + "isError": true, + "errorLabelsContain": [ + "SystemOverloadedError", + "RetryableError" + ] } }, { diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index 43d84ad8f7..97cd223300 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -5,6 +5,7 @@ { "minServerVersion": "4.4", "serverless": "forbid", + "supportsPoolBackoff": true, "topologies": [ "single", "replicaset", @@ -123,7 +124,11 @@ ] }, "expectError": { - "isError": true + "isError": true, + "errorLabelsContain": [ + "SystemOverloadedError", + "RetryableError" + ] } }, { @@ -244,7 +249,11 @@ ] }, "expectError": { - "isError": true + "isError": true, + "errorLabelsContain": [ + "SystemOverloadedError", + "RetryableError" + ] } }, { diff --git a/test/discovery_and_monitoring/unified/backoff-network-error.json b/test/discovery_and_monitoring/unified/backoff-network-error.json index 6da5973eaa..74f6b2c6e8 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error.json @@ -5,6 +5,7 @@ { 
"minServerVersion": "4.4", "serverless": "forbid", + "supportsPoolBackoff": true, "topologies": [ "single", "replicaset", @@ -127,7 +128,11 @@ ] }, "expectError": { - "isError": true + "isError": true, + "errorLabelsContain": [ + "SystemOverloadedError", + "RetryableError" + ] } }, { diff --git a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json index ee9daf224c..d5ee64b91b 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json @@ -5,6 +5,7 @@ { "minServerVersion": "4.4", "serverless": "forbid", + "supportsPoolBackoff": true, "topologies": [ "single", "replicaset", @@ -127,7 +128,11 @@ ] }, "expectError": { - "isError": true + "isError": true, + "errorLabelsContain": [ + "SystemOverloadedError", + "RetryableError" + ] } }, { diff --git a/test/unified_format.py b/test/unified_format.py index 630a525c14..578c6c98ce 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -156,6 +156,11 @@ def is_run_on_requirement_satisfied(requirement): min_version_satisfied = Version.from_string("4.2") <= server_version csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + pool_backoff_statisfied = True + req_pool_backoff = requirement.get("supportsPoolBackoff") + if req_pool_backoff is False: + pool_backoff_statisfied = False + return ( topology_satisfied and min_version_satisfied @@ -163,6 +168,7 @@ def is_run_on_requirement_satisfied(requirement): and params_satisfied and auth_satisfied and csfle_satisfied + and pool_backoff_statisfied ) From d72f279b9fe6eb85a8018f6b82c73bc47e082833 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 3 Nov 2025 08:10:45 -0600 Subject: [PATCH 45/46] update retry behavior and tests --- pymongo/asynchronous/mongo_client.py | 8 +- pymongo/synchronous/mongo_client.py | 8 +- .../test_connection_monitoring.py | 19 + 
test/asynchronous/unified_format.py | 2 +- .../connection-logging.json | 3 +- ...koff-interrupting-pending-connections.json | 76 +++ ...ol-create-min-size-error-with-backoff.json | 65 +++ .../pool-create-min-size-error.json | 17 +- .../unified/backoff-heartbeat-failure.json | 6 +- .../unified/backoff-heartbeat-success.json | 170 ------ .../unified/backoff-network-error-fail.json | 10 +- .../unified/backoff-network-error.json | 36 +- .../backoff-network-timeout-error.json | 33 +- .../sdam-error-handling-pool-backoff.json | 541 ++++++++++++++++++ test/load_balancer/sdam-error-handling.json | 47 +- test/test_connection_monitoring.py | 19 + test/unified_format.py | 2 +- 17 files changed, 832 insertions(+), 230 deletions(-) create mode 100644 test/connection_monitoring/pool-backoff-interrupting-pending-connections.json create mode 100644 test/connection_monitoring/pool-create-min-size-error-with-backoff.json delete mode 100644 test/discovery_and_monitoring/unified/backoff-heartbeat-success.json create mode 100644 test/load_balancer/sdam-error-handling-pool-backoff.json diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index aa8adc0037..23b680ed69 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2802,7 +2802,10 @@ async def run(self) -> T: if isinstance(exc, (ConnectionFailure, OperationFailure)): # ConnectionFailures do not supply a code property exc_code = getattr(exc, "code", None) - always_retryable = exc.has_error_label("RetryableError") and self._retryable + always_retryable = ( + exc.has_error_label("RetryableError") + and self._client._options.retry_reads + ) overloaded = exc.has_error_label("SystemOverloadedError") if not always_retryable and ( self._is_not_eligible_for_retry() @@ -2826,7 +2829,8 @@ async def run(self) -> T: exc_to_check = exc.error retryable_write_label = exc_to_check.has_error_label("RetryableWriteError") always_retryable = ( - 
exc_to_check.has_error_label("RetryableError") and self._retryable + exc_to_check.has_error_label("RetryableError") + and self._client._options.retry_writes ) overloaded = exc_to_check.has_error_label("SystemOverloadedError") if not self._retryable and not always_retryable: diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index addd3414c3..96fd41d642 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2792,7 +2792,10 @@ def run(self) -> T: if isinstance(exc, (ConnectionFailure, OperationFailure)): # ConnectionFailures do not supply a code property exc_code = getattr(exc, "code", None) - always_retryable = exc.has_error_label("RetryableError") and self._retryable + always_retryable = ( + exc.has_error_label("RetryableError") + and self._client._options.retry_reads + ) overloaded = exc.has_error_label("SystemOverloadedError") if not always_retryable and ( self._is_not_eligible_for_retry() @@ -2816,7 +2819,8 @@ def run(self) -> T: exc_to_check = exc.error retryable_write_label = exc_to_check.has_error_label("RetryableWriteError") always_retryable = ( - exc_to_check.has_error_label("RetryableError") and self._retryable + exc_to_check.has_error_label("RetryableError") + and self._client._options.retry_writes ) overloaded = exc_to_check.has_error_label("SystemOverloadedError") if not self._retryable and not always_retryable: diff --git a/test/asynchronous/test_connection_monitoring.py b/test/asynchronous/test_connection_monitoring.py index f2502a7d54..e765fd68fa 100644 --- a/test/asynchronous/test_connection_monitoring.py +++ b/test/asynchronous/test_connection_monitoring.py @@ -32,6 +32,7 @@ async_wait_until, camel_to_snake, ) +from test.version import Version from bson.objectid import ObjectId from bson.son import SON @@ -145,6 +146,10 @@ async def ready(self, op): """Run the 'ready' operation.""" await self.pool.ready() + async def backoff(self, op): + """Run the 'backoff' operation.""" + 
await self.pool.backoff() + async def clear(self, op): """Run the 'clear' operation.""" if "interruptInUseConnections" in op: @@ -224,6 +229,20 @@ async def run_scenario(self, scenario_def, test): self.listener = CMAPListener() self._ops: list = [] + if "runOn" in test: + for run_reqs in test["runOn"]: + if "minServerVersion" in run_reqs: + other_version = Version.from_string(run_reqs["minServerVersion"]) + if async_client_context.version < other_version: + self.skipTest(f"Server version must be at least {other_version}") + if "maxServerVersion" in run_reqs: + other_version = Version.from_string(run_reqs["maxServerVersion"]) + if async_client_context.version > other_version: + self.skipTest(f"Server version must be at most {other_version}") + if "poolBackoff" in run_reqs: + if run_reqs["poolBackoff"] is False: + self.skipTest("We support poolBackoff") + # Configure the fail point before creating the client. if "failPoint" in test: fp = test["failPoint"] diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index c51d1ac814..2eb4f665db 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -158,7 +158,7 @@ async def is_run_on_requirement_satisfied(requirement): csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied pool_backoff_statisfied = True - req_pool_backoff = requirement.get("supportsPoolBackoff") + req_pool_backoff = requirement.get("poolBackoff") if req_pool_backoff is False: pool_backoff_statisfied = False diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index f9f34ae95b..23e8ba60b0 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -523,7 +523,8 @@ "description": "Connection enters backoff on closed connection", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "poolBackoff": true } ], "operations": [ diff --git 
a/test/connection_monitoring/pool-backoff-interrupting-pending-connections.json b/test/connection_monitoring/pool-backoff-interrupting-pending-connections.json new file mode 100644 index 0000000000..808f51838c --- /dev/null +++ b/test/connection_monitoring/pool-backoff-interrupting-pending-connections.json @@ -0,0 +1,76 @@ +{ + "version": 1, + "style": "integration", + "description": "backoff closes pending connections", + "runOn": [ + { + "minServerVersion": "4.9.0", + "poolBackoff": true + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 10000 + } + }, + "poolOptions": { + "minPoolSize": 0 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "backoff" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionPoolBackoff" + }, + { + "type": "ConnectionClosed" + }, + { + "type": "ConnectionCheckOutFailed" + } + ], + "ignore": [ + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/connection_monitoring/pool-create-min-size-error-with-backoff.json b/test/connection_monitoring/pool-create-min-size-error-with-backoff.json new file mode 100644 index 0000000000..a9debb8d38 --- /dev/null +++ b/test/connection_monitoring/pool-create-min-size-error-with-backoff.json @@ -0,0 +1,65 @@ +{ + "version": 1, + "style": "integration", + "description": "error during minPoolSize population clears pool", + "runOn": [ + { + "minServerVersion": "4.9.0", + "poolBackoff": true + } + ], + "failPoint": 
{ + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "errorCode": 18, + "appName": "poolCreateMinSizeErrorTest" + } + }, + "poolOptions": { + "minPoolSize": 1, + "backgroundThreadIntervalMS": 50, + "appName": "poolCreateMinSizeErrorTest" + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionPoolCleared", + "count": 1 + }, + { + "name": "wait", + "ms": 200 + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionClosed", + "address": 42, + "connectionId": 42, + "reason": "error" + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated" + ] +} diff --git a/test/connection_monitoring/pool-create-min-size-error.json b/test/connection_monitoring/pool-create-min-size-error.json index da9357b963..b1bca0c3fd 100644 --- a/test/connection_monitoring/pool-create-min-size-error.json +++ b/test/connection_monitoring/pool-create-min-size-error.json @@ -4,18 +4,21 @@ "description": "error during minPoolSize population clears pool", "runOn": [ { - "minServerVersion": "4.9.0" + "minServerVersion": "4.9.0", + "poolBackoff": false } ], "failPoint": { "configureFailPoint": "failCommand", - "mode": "alwaysOn", + "mode": { + "times": 50 + }, "data": { "failCommands": [ "isMaster", "hello" ], - "errorCode": 18, + "closeConnection": true, "appName": "poolCreateMinSizeErrorTest" } }, @@ -47,15 +50,15 @@ "type": "ConnectionCreated", "address": 42 }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, { "type": "ConnectionClosed", "address": 42, "connectionId": 42, "reason": "error" - }, - { - "type": "ConnectionPoolCleared", - "address": 42 } ], "ignore": [ diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json 
index 2e93054149..aa253edc71 100644 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json +++ b/test/discovery_and_monitoring/unified/backoff-heartbeat-failure.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.4", "serverless": "forbid", - "supportsPoolBackoff": true, + "poolBackoff": true, "topologies": [ "single", "replicaset", @@ -137,7 +137,9 @@ "arguments": { "client": "client", "event": { - "poolBackoffEvent": {} + "poolBackoffEvent": { + "attempt": 1 + } }, "count": 1 } diff --git a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json b/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json deleted file mode 100644 index eb8576ddba..0000000000 --- a/test/discovery_and_monitoring/unified/backoff-heartbeat-success.json +++ /dev/null @@ -1,170 +0,0 @@ -{ - "description": "heartbeat-success-backoff", - "schemaVersion": "1.28", - "runOnRequirements": [ - { - "minServerVersion": "4.4", - "serverless": "forbid", - "supportsPoolBackoff": true, - "topologies": [ - "single", - "replicaset", - "sharded" - ] - } - ], - "createEntities": [ - { - "client": { - "id": "setupClient", - "useMultipleMongoses": false - } - } - ], - "initialData": [ - { - "collectionName": "heartbeat-backoff-success", - "databaseName": "sdam-tests", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - ], - "tests": [ - { - "description": "A heartbeat success during backoff not mark the pool as ready", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "client": { - "id": "client", - "useMultipleMongoses": false, - "observeEvents": [ - "commandStartedEvent", - "poolBackoffEvent", - "poolClearedEvent", - "serverHeartbeatFailedEvent", - "serverHeartbeatSucceededEvent" - ], - "uriOptions": { - "retryWrites": false, - "heartbeatFrequencyMS": 5000, - "serverMonitoringMode": "poll", - "appname": "heartbeatBackoffSuccessTest" - } - } - }, - { - "database": { - "id": "database", - 
"client": "client", - "databaseName": "sdam-tests" - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "heartbeat-backoff-success" - } - } - ] - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "serverHeartbeatSucceededEvent": {} - }, - "count": 1 - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": "alwaysOn", - "data": { - "failCommands": [ - "isMaster", - "hello" - ], - "appName": "heartbeatBackoffSuccessTest", - "closeConnection": true - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "expectError": { - "isError": true, - "errorLabelsContain": [ - "SystemOverloadedError", - "RetryableError" - ] - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "poolBackoffEvent": {} - }, - "count": 1 - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": "off" - } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "serverHeartbeatSucceededEvent": {} - }, - "count": 1 - } - } - ] - } - ] -} diff --git a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json index 97cd223300..648008e944 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error-fail.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error-fail.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.4", "serverless": "forbid", - "supportsPoolBackoff": true, + "poolBackoff": true, "topologies": [ "single", "replicaset", @@ -154,7 
+154,9 @@ "arguments": { "client": "client", "event": { - "poolBackoffEvent": {} + "poolBackoffEvent": { + "attempt": 1 + } }, "count": 1 } @@ -262,7 +264,9 @@ "arguments": { "client": "client", "event": { - "poolBackoffEvent": {} + "poolBackoffEvent": { + "attempt": 1 + } }, "count": 1 } diff --git a/test/discovery_and_monitoring/unified/backoff-network-error.json b/test/discovery_and_monitoring/unified/backoff-network-error.json index 74f6b2c6e8..3101451d8b 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-error.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.4", "serverless": "forbid", - "supportsPoolBackoff": true, + "poolBackoff": true, "topologies": [ "single", "replicaset", @@ -147,6 +147,40 @@ }, "count": 1 } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "SystemOverloadedError", + "RetryableError" + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolBackoffEvent": { + "attempt": 2 + } + }, + "count": 1 + } } ] } diff --git a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json index d5ee64b91b..7969e4b908 100644 --- a/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json +++ b/test/discovery_and_monitoring/unified/backoff-network-timeout-error.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.4", "serverless": "forbid", - "supportsPoolBackoff": true, + "poolBackoff": true, "topologies": [ "single", "replicaset", @@ -101,7 +101,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ @@ -141,18 +141,43 @@ "arguments": { "client": "client", "event": { - 
"poolBackoffEvent": {} + "poolBackoffEvent": { + "attempt": 1 + } }, "count": 1 } }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "SystemOverloadedError", + "RetryableError" + ] + } + }, { "name": "waitForEvent", "object": "testRunner", "arguments": { "client": "client", "event": { - "poolReadyEvent": {} + "poolBackoffEvent": { + "attempt": 2 + } }, "count": 1 } diff --git a/test/load_balancer/sdam-error-handling-pool-backoff.json b/test/load_balancer/sdam-error-handling-pool-backoff.json new file mode 100644 index 0000000000..fa04b03bfb --- /dev/null +++ b/test/load_balancer/sdam-error-handling-pool-backoff.json @@ -0,0 +1,541 @@ +{ + "description": "state change errors are correctly handled", + "schemaVersion": "1.28", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ], + "poolBackoff": true + } + ], + "_yamlAnchors": { + "observedEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + }, + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "singleClient", + "useMultipleMongoses": false, + "uriOptions": { + "appname": "lbSDAMErrorTestClient", + "retryWrites": false + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent", + "poolBackoffEvent" + ] + } + }, + { + "database": { + "id": "singleDB", + "client": "singleClient", + "databaseName": "singleDB" + } + }, + { + "collection": { + "id": "singleColl", + "database": "singleDB", + "collectionName": "singleColl" + } + }, + { + "client": { + 
"id": "multiClient", + "useMultipleMongoses": true, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "multiDB", + "client": "multiClient", + "databaseName": "multiDB" + } + }, + { + "collection": { + "id": "multiColl", + "database": "multiDB", + "collectionName": "multiColl" + } + } + ], + "initialData": [ + { + "collectionName": "singleColl", + "databaseName": "singleDB", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + }, + { + "collectionName": "multiColl", + "databaseName": "multiDB", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "only connections for a specific serviceId are closed when pools are cleared", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "createFindCursor", + "object": "multiColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "createFindCursor", + "object": "multiColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor1" + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "close", + "object": "cursor1" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "multiClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "insertOne", + "object": "multiColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "errorCode": 11600 + } + }, + { + "name": "insertOne", + "object": "multiColl", + "arguments": { + "document": { + "x": 1 + } + } + } 
+ ], + "expectEvents": [ + { + "client": "multiClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "stale" + } + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "errors during the initial connection hello trigger backoff", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "lbSDAMErrorTestClient" + } + } + } + }, + { + "name": "insertOne", + "object": "singleColl", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "singleClient", + "event": { + "poolBackoffEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "poolBackoffEvent": { + "attempt": 1 + } + }, + { + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } + }, + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": 
{} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "errors during authentication are processed", + "runOnRequirements": [ + { + "auth": true + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "lbSDAMErrorTestClient" + } + } + } + }, + { + "name": "insertOne", + "object": "singleColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } + }, + { + "poolClearedEvent": {} + } + ] + } + ] + }, + { + "description": "stale errors are ignored", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createFindCursor", + "object": "singleColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "createFindCursor", + "object": "singleColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectError": { + "isClientError": true + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": 
"iterateUntilDocumentOrError", + "object": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1", + "expectError": { + "isClientError": true + } + }, + { + "name": "close", + "object": "cursor1" + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index 7654eff6fb..7c471abbda 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -1,11 +1,12 @@ { "description": "state change errors are correctly handled", - "schemaVersion": "1.28", + "schemaVersion": "1.4", "runOnRequirements": [ { "topologies": [ "load-balanced" - ] + ], + "poolBackoff": false } ], "_yamlAnchors": { @@ -41,8 +42,7 @@ "connectionCheckOutFailedEvent", "connectionCheckedInEvent", "connectionClosedEvent", - "poolClearedEvent", - "poolBackoffEvent" + "poolClearedEvent" ] } }, @@ -261,7 +261,7 @@ ] }, { - "description": "errors during the initial connection hello trigger backoff", + "description": "errors during the initial connection hello are ignored", "runOnRequirements": [ { "minServerVersion": "4.4.7" @@ -296,17 +296,9 @@ "document": { "x": 1 } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "singleClient", - "event": { - "poolBackoffEvent": {} - }, - "count": 1 + }, + "expectError": { + "isClientError": 
true } } ], @@ -323,27 +315,10 @@ "reason": "error" } }, - { - "poolBackoffEvent": { - "attempt": 1 - } - }, { "connectionCheckOutFailedEvent": { "reason": "connectionError" } - }, - { - "connectionCreatedEvent": {} - }, - { - "connectionReadyEvent": {} - }, - { - "connectionCheckedOutEvent": {} - }, - { - "connectionCheckedInEvent": {} } ] } @@ -398,6 +373,9 @@ { "connectionCreatedEvent": {} }, + { + "poolClearedEvent": {} + }, { "connectionClosedEvent": { "reason": "error" @@ -407,9 +385,6 @@ "connectionCheckOutFailedEvent": { "reason": "connectionError" } - }, - { - "poolClearedEvent": {} } ] } diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py index 580d214541..127b2bb140 100644 --- a/test/test_connection_monitoring.py +++ b/test/test_connection_monitoring.py @@ -32,6 +32,7 @@ wait_until, ) from test.utils_spec_runner import SpecRunnerThread, SpecTestCreator +from test.version import Version from bson.objectid import ObjectId from bson.son import SON @@ -145,6 +146,10 @@ def ready(self, op): """Run the 'ready' operation.""" self.pool.ready() + def backoff(self, op): + """Run the 'backoff' operation.""" + self.pool.backoff() + def clear(self, op): """Run the 'clear' operation.""" if "interruptInUseConnections" in op: @@ -224,6 +229,20 @@ def run_scenario(self, scenario_def, test): self.listener = CMAPListener() self._ops: list = [] + if "runOn" in test: + for run_reqs in test["runOn"]: + if "minServerVersion" in run_reqs: + other_version = Version.from_string(run_reqs["minServerVersion"]) + if client_context.version < other_version: + self.skipTest(f"Server version must be at least {other_version}") + if "maxServerVersion" in run_reqs: + other_version = Version.from_string(run_reqs["maxServerVersion"]) + if client_context.version > other_version: + self.skipTest(f"Server version must be at most {other_version}") + if "poolBackoff" in run_reqs: + if run_reqs["poolBackoff"] is False: + self.skipTest("We support poolBackoff") + # 
Configure the fail point before creating the client. if "failPoint" in test: fp = test["failPoint"] diff --git a/test/unified_format.py b/test/unified_format.py index 578c6c98ce..b715d18d63 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -157,7 +157,7 @@ def is_run_on_requirement_satisfied(requirement): csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied pool_backoff_statisfied = True - req_pool_backoff = requirement.get("supportsPoolBackoff") + req_pool_backoff = requirement.get("poolBackoff") if req_pool_backoff is False: pool_backoff_statisfied = False From 476c373903c12ec53b4dfa2616c819b5f57897a6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 3 Nov 2025 08:41:39 -0600 Subject: [PATCH 46/46] add connection-logging-pool-backoff tests --- .../connection-logging-pool-backoff.json | 379 ++++++++++++++++++ .../connection-logging.json | 364 +---------------- 2 files changed, 382 insertions(+), 361 deletions(-) create mode 100644 test/connection_logging/connection-logging-pool-backoff.json diff --git a/test/connection_logging/connection-logging-pool-backoff.json b/test/connection_logging/connection-logging-pool-backoff.json new file mode 100644 index 0000000000..6dcad2824d --- /dev/null +++ b/test/connection_logging/connection-logging-pool-backoff.json @@ -0,0 +1,379 @@ +{ + "description": "connection-logging", + "schemaVersion": "1.28", + "runOnRequirements": [ + { + "topologies": [ + "single" + ], + "poolBackoff": true + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient" + } + } + ], + "tests": [ + { + "description": "Connection enters backoff on closed connection", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "poolBackoff": true + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryReads": true, + "appname": "clientAppName", + "heartbeatFrequencyMS": 5000 + }, + 
"observeEvents": [ + "serverHeartbeatSucceededEvent" + ], + "observeLogMessages": { + "connection": "debug" + } + } + }, + { + "database": { + "id": "database0", + "client": "client", + "databaseName": "ci-tests" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "clientAppName" + } + } + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "command": { + "find": "test" + }, + "commandName": "find" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection closed", + "driverConnectionId": { + 
"$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "An error occurred while using the connection", + "error": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool backoff", + "serverHost": { + "$$type": "string" + }, + "durationMS": { + "$$type": "int" + }, + "attempt": { + "$$type": "int" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "Connection pool is in backoff", + "error": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout failed", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "An error occurred while trying to establish a new connection", + "error": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection ready", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked out", + "driverConnectionId": 
{ + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked in", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index 23e8ba60b0..3482f59777 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -355,7 +355,7 @@ "failCommands": [ "saslContinue" ], - "errorCode": 18, + "closeConnection": true, "appName": "clientAppName" } } @@ -368,7 +368,7 @@ "filter": {} }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -446,59 +446,6 @@ } } }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection closed", - "driverConnectionId": { - "$$type": [ - "int", - "long" - ] - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - }, - "reason": "An error occurred while using the connection", - "error": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection checkout failed", - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - }, - "reason": "An error occurred while trying to establish a new connection", - "error": { - "$$exists": true - }, - "durationMS": { - "$$type": [ - "double", - "int", - "long" - ] - } - } - }, { "level": "debug", "component": "connection", @@ -514,167 +461,6 @@ ] } } - } - ] - } - ] - }, - { - "description": "Connection enters backoff on closed connection", - 
"runOnRequirements": [ - { - "minServerVersion": "4.4", - "poolBackoff": true - } - ], - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "client": { - "id": "client", - "uriOptions": { - "retryReads": true, - "appname": "clientAppName", - "heartbeatFrequencyMS": 5000 - }, - "observeEvents": [ - "serverHeartbeatSucceededEvent" - ], - "observeLogMessages": { - "connection": "debug" - } - } - }, - { - "database": { - "id": "database0", - "client": "client", - "databaseName": "ci-tests" - } - } - ] - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "serverHeartbeatSucceededEvent": {} - }, - "count": 1 - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "isMaster", - "hello" - ], - "closeConnection": true, - "appName": "clientAppName" - } - } - } - }, - { - "object": "database0", - "name": "runCommand", - "arguments": { - "command": { - "find": "test" - }, - "commandName": "find" - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection pool created", - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection pool ready", - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection checkout started", - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "connection", - "data": { - 
"message": "Connection created", - "driverConnectionId": { - "$$type": [ - "int", - "long" - ] - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } }, { "level": "debug", @@ -702,32 +488,6 @@ } } }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection pool backoff", - "serverHost": { - "$$type": "string" - }, - "durationMS": { - "$$type": "int" - }, - "attempt": { - "$$type": "int" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - }, - "reason": "Connection pool is in backoff", - "error": { - "$$exists": true - } - } - }, { "level": "debug", "component": "connection", @@ -754,128 +514,10 @@ ] } } - }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection checkout started", - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection created", - "driverConnectionId": { - "$$type": [ - "int", - "long" - ] - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection ready", - "driverConnectionId": { - "$$type": [ - "int", - "long" - ] - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - }, - "durationMS": { - "$$type": [ - "double", - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection checked out", - "driverConnectionId": { - "$$type": [ - "int", - "long" - ] - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - }, - "durationMS": { - "$$type": [ - "double", - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "connection", - "data": { - "message": 
"Connection checked in", - "driverConnectionId": { - "$$type": [ - "int", - "long" - ] - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } } ] } ] } ] -} +} \ No newline at end of file