From 30c839bac746956881aea7b112e490791e4f88bf Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 16 Mar 2023 10:46:24 +0100 Subject: [PATCH 001/146] fix `hasOverflow` for multiple streams (#370) Instead of repeatedly checking stream 1, properly check all of them. --- chronos/apps/http/httpbodyrw.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos/apps/http/httpbodyrw.nim b/chronos/apps/http/httpbodyrw.nim index b0cda668d..ef9060fee 100644 --- a/chronos/apps/http/httpbodyrw.nim +++ b/chronos/apps/http/httpbodyrw.nim @@ -148,7 +148,7 @@ proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [Defect].} = # ``BoundedStreamReader`` at EOF. if bstream.streams[0].atEof(): for i in 1 ..< len(bstream.streams): - if not(bstream.streams[1].atEof()): + if not(bstream.streams[i].atEof()): return true false else: From 0688d2ef8f7a4c26c0fb23055d8b6679ed61a89f Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 21 Mar 2023 15:10:35 +0200 Subject: [PATCH 002/146] Add idle connection timeouts for HTTP client's connections pool. (#324) * Add idle connection timeouts for HTTP client's connections pool. Add timestamps and duration for both HTTP client requests/responses. Add test. * Add comments on `connectionFlag` decisions. * Address review comments. Adjust default idle connection timeout to 60 seconds. * Increase timeout for test. * Adjust timeout to lower value. * Address review comments. --- chronos/apps/http/httpclient.nim | 253 ++++++++++++++++++++++--------- tests/testhttpclient.nim | 63 ++++++++ 2 files changed, 246 insertions(+), 70 deletions(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index f63b24c4e..2997d0429 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -22,6 +22,12 @@ const ## Timeout for connecting to host (12 sec) HttpHeadersTimeout* = 120.seconds ## Timeout for receiving response headers (120 sec) + HttpConnectionIdleTimeout* = 60.seconds + ## Time after which idle connections are removed from the HttpSession's + ## connections pool (120 sec) + HttpConnectionCheckPeriod* = 10.seconds + ## Period of time between idle connections checks in HttpSession's + ## connection pool (10 sec) HttpMaxRedirections* = 10 ## Maximum number of Location redirections. 
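  # A brief usage sketch (not part of this patch): combining the new
  # idle-connection options with the `duration` fields added by this change.
  # The proc name `timedFetch`, the concrete timeout values and the
  # already-resolved `HttpAddress` parameter are illustrative assumptions;
  # `HttpSessionRef.new(idleTimeout = ..., idlePeriod = ...)`, `fetch()`,
  # `duration` and `closeWait()` come from the code and tests below.
  #
  # import chronos, chronos/apps/http/httpclient
  #
  # proc timedFetch(address: HttpAddress) {.async.} =
  #   let session = HttpSessionRef.new(idleTimeout = 30.seconds,
  #                                    idlePeriod = 5.seconds)
  #   let request = HttpClientRequestRef.new(session, address)
  #   try:
  #     let response = await request.fetch()
  #     # `request.duration` is filled in once the request has been sent.
  #     echo "status ", response.status, ", request took ", request.duration
  #   finally:
  #     await request.closeWait()
  #     await session.closeWait()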
HttpClientConnectionTrackerName* = "httpclient.connection" @@ -100,6 +106,7 @@ type error*: ref HttpError remoteHostname*: string flags*: set[HttpClientConnectionFlag] + timestamp*: Moment HttpClientConnectionRef* = ref HttpClientConnection @@ -109,6 +116,9 @@ type maxRedirections*: int connectTimeout*: Duration headersTimeout*: Duration + idleTimeout: Duration + idlePeriod: Duration + watcherFut: Future[void] connectionBufferSize*: int maxConnections*: int connectionsCount*: int @@ -140,6 +150,8 @@ type buffer*: seq[byte] writer*: HttpBodyWriter redirectCount: int + timestamp*: Moment + duration*: Duration HttpClientRequestRef* = ref HttpClientRequest @@ -160,6 +172,8 @@ type transferEncoding*: set[TransferEncodingFlags] contentLength*: uint64 contentType*: Opt[ContentTypeData] + timestamp*: Moment + duration*: Duration HttpClientResponseRef* = ref HttpClientResponse @@ -284,29 +298,75 @@ template checkClosed(reqresp: untyped): untyped = reqresp.setError(e) raise e +template setTimestamp(conn: HttpClientConnectionRef, + moment: Moment): untyped = + if not(isNil(conn)): + conn.timestamp = moment + +template setTimestamp( + reqresp: HttpClientRequestRef|HttpClientRequestRef + ): untyped = + if not(isNil(reqresp)): + let timestamp = Moment.now() + reqresp.timestamp = timestamp + reqresp.connection.setTimestamp(timestamp) + +template setTimestamp(resp: HttpClientResponseRef, moment: Moment): untyped = + if not(isNil(resp)): + resp.timestamp = moment + resp.connection.setTimestamp(moment) + +template setDuration( + reqresp: HttpClientRequestRef|HttpClientResponseRef + ): untyped = + if not(isNil(reqresp)): + let timestamp = Moment.now() + reqresp.duration = timestamp - reqresp.timestamp + reqresp.connection.setTimestamp(timestamp) + +template isReady(conn: HttpClientConnectionRef): bool = + (conn.state == HttpClientConnectionState.Ready) and + (HttpClientConnectionFlag.KeepAlive in conn.flags) and + (HttpClientConnectionFlag.Request notin conn.flags) and + (HttpClientConnectionFlag.Response notin conn.flags) + +template isIdle(conn: HttpClientConnectionRef, timestamp: Moment, + timeout: Duration): bool = + (timestamp - conn.timestamp) >= timeout + +proc sessionWatcher(session: HttpSessionRef) {.async.} + proc new*(t: typedesc[HttpSessionRef], flags: HttpClientFlags = {}, maxRedirections = HttpMaxRedirections, connectTimeout = HttpConnectTimeout, headersTimeout = HttpHeadersTimeout, connectionBufferSize = DefaultStreamBufferSize, - maxConnections = -1): HttpSessionRef {. + maxConnections = -1, + idleTimeout = HttpConnectionIdleTimeout, + idlePeriod = HttpConnectionCheckPeriod): HttpSessionRef {. raises: [Defect] .} = ## Create new HTTP session object. 
## ## ``maxRedirections`` - maximum number of HTTP 3xx redirections ## ``connectTimeout`` - timeout for ongoing HTTP connection ## ``headersTimeout`` - timeout for receiving HTTP response headers + ## ``idleTimeout`` - timeout to consider HTTP connection as idle + ## ``idlePeriod`` - period of time to check HTTP connections for inactivity doAssert(maxRedirections >= 0, "maxRedirections should not be negative") - HttpSessionRef( + var res = HttpSessionRef( flags: flags, maxRedirections: maxRedirections, connectTimeout: connectTimeout, headersTimeout: headersTimeout, connectionBufferSize: connectionBufferSize, maxConnections: maxConnections, - connections: initTable[string, seq[HttpClientConnectionRef]]() + idleTimeout: idleTimeout, + idlePeriod: idlePeriod, + connections: initTable[string, seq[HttpClientConnectionRef]](), ) + res.watcherFut = sessionWatcher(res) + res proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] {.raises: [Defect] .} = var res: set[TLSFlags] @@ -583,52 +643,6 @@ proc connect(session: HttpSessionRef, # If all attempts to connect to the remote host have failed. raiseHttpConnectionError("Could not connect to remote host") -proc acquireConnection( - session: HttpSessionRef, - ha: HttpAddress, - flags: set[HttpClientRequestFlag] - ): Future[HttpClientConnectionRef] {.async.} = - ## Obtain connection from ``session`` or establish a new one. - if (HttpClientFlag.NewConnectionAlways in session.flags) or - (HttpClientRequestFlag.DedicatedConnection in flags): - var default: seq[HttpClientConnectionRef] - let res = - try: - await session.connect(ha).wait(session.connectTimeout) - except AsyncTimeoutError: - raiseHttpConnectionError("Connection timed out") - res[].state = HttpClientConnectionState.Acquired - session.connections.mgetOrPut(ha.id, default).add(res) - inc(session.connectionsCount) - return res - else: - let conn = - block: - let conns = session.connections.getOrDefault(ha.id) - if len(conns) > 0: - var res: HttpClientConnectionRef = nil - for item in conns: - if item.state == HttpClientConnectionState.Ready: - res = item - break - res - else: - nil - if not(isNil(conn)): - conn[].state = HttpClientConnectionState.Acquired - return conn - else: - var default: seq[HttpClientConnectionRef] - let res = - try: - await session.connect(ha).wait(session.connectTimeout) - except AsyncTimeoutError: - raiseHttpConnectionError("Connection timed out") - res[].state = HttpClientConnectionState.Acquired - session.connections.mgetOrPut(ha.id, default).add(res) - inc(session.connectionsCount) - return res - proc removeConnection(session: HttpSessionRef, conn: HttpClientConnectionRef) {.async.} = let removeHost = @@ -644,6 +658,35 @@ proc removeConnection(session: HttpSessionRef, dec(session.connectionsCount) await conn.closeWait() +proc acquireConnection( + session: HttpSessionRef, + ha: HttpAddress, + flags: set[HttpClientRequestFlag] + ): Future[HttpClientConnectionRef] {.async.} = + ## Obtain connection from ``session`` or establish a new one. + var default: seq[HttpClientConnectionRef] + if (HttpClientFlag.NewConnectionAlways notin session.flags) and + (HttpClientRequestFlag.DedicatedConnection notin flags): + # Trying to reuse existing connection from our connection's pool. + let timestamp = Moment.now() + # We looking for non-idle connection at `Ready` state, all idle connections + # will be freed by sessionWatcher(). 
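    # A connection counts as idle when `timestamp - conn.timestamp >= idleTimeout`
    # (see `isIdle` above); sessionWatcher() wakes up every `idlePeriod` and
    # closes such connections.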
+ for connection in session.connections.getOrDefault(ha.id): + if connection.isReady() and + not(connection.isIdle(timestamp, session.idleTimeout)): + connection.state = HttpClientConnectionState.Acquired + return connection + + let connection = + try: + await session.connect(ha).wait(session.connectTimeout) + except AsyncTimeoutError: + raiseHttpConnectionError("Connection timed out") + connection.state = HttpClientConnectionState.Acquired + session.connections.mgetOrPut(ha.id, default).add(connection) + inc(session.connectionsCount) + return connection + proc releaseConnection(session: HttpSessionRef, connection: HttpClientConnectionRef) {.async.} = ## Return connection back to the ``session``. @@ -676,7 +719,9 @@ proc releaseConnection(session: HttpSessionRef, await session.removeConnection(connection) else: connection.state = HttpClientConnectionState.Ready - connection.flags = {} + connection.flags.excl({HttpClientConnectionFlag.Request, + HttpClientConnectionFlag.Response, + HttpClientConnectionFlag.NoBody}) proc releaseConnection(request: HttpClientRequestRef) {.async.} = let @@ -707,11 +752,55 @@ proc closeWait*(session: HttpSessionRef) {.async.} = ## ## This closes all the connections opened to remote servers. var pending: seq[Future[void]] - for items in session.connections.values(): - for item in items: - pending.add(closeWait(item)) + # Closing sessionWatcher to avoid race condition. + await cancelAndWait(session.watcherFut) + for connections in session.connections.values(): + for conn in connections: + pending.add(closeWait(conn)) await allFutures(pending) +proc sessionWatcher(session: HttpSessionRef) {.async.} = + while true: + let firstBreak = + try: + await sleepAsync(session.idlePeriod) + false + except CancelledError: + true + + if firstBreak: + break + + var idleConnections: seq[HttpClientConnectionRef] + let timestamp = Moment.now() + for _, connections in session.connections.mpairs(): + connections.keepItIf( + if isNil(it): + false + else: + if it.isReady() and it.isIdle(timestamp, session.idleTimeout): + idleConnections.add(it) + false + else: + true + ) + + if len(idleConnections) > 0: + dec(session.connectionsCount, len(idleConnections)) + var pending: seq[Future[void]] + let secondBreak = + try: + pending = idleConnections.mapIt(it.closeWait()) + await allFutures(pending) + false + except CancelledError: + # We still want to close connections to avoid socket leaks. + await allFutures(pending) + true + + if secondBreak: + break + proc closeWait*(request: HttpClientRequestRef) {.async.} = if request.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}: request.state = HttpReqRespState.Closing @@ -791,14 +880,26 @@ proc prepareResponse(request: HttpClientRequestRef, data: openArray[byte] let connectionFlag = block: case resp.version - of HttpVersion11, HttpVersion20: + of HttpVersion11: + # Keeping a connection open is the default on HTTP/1.1 requests. + # https://www.rfc-editor.org/rfc/rfc2068.html#section-19.7.1 let header = toLowerAscii(headers.getString(ConnectionHeader)) - if header == "keep-alive": - true - else: + if header == "close": false - else: + else: + true + of HttpVersion10: + # This is the default on HTTP/1.0 requests. false + else: + # HTTP/2 does not use the Connection header field (Section 7.6.1 of + # [HTTP]) to indicate connection-specific header fields. 
+ # https://httpwg.org/specs/rfc9113.html#rfc.section.8.2.2 + # + # HTTP/3 does not use the Connection header field to indicate + # connection-specific fields; + # https://httpwg.org/specs/rfc9114.html#rfc.section.4.2 + true let contentType = block: @@ -836,22 +937,25 @@ proc prepareResponse(request: HttpClientRequestRef, data: openArray[byte] proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {. async.} = var buffer: array[HttpMaxHeadersSize, byte] - let bytesRead = - try: - await req.connection.reader.readUntil(addr buffer[0], - len(buffer), HeadersMark).wait( - req.session.headersTimeout) - except CancelledError as exc: - raise exc - except AsyncTimeoutError: - raiseHttpReadError("Reading response headers timed out") - except AsyncStreamError: - raiseHttpReadError("Could not read response headers") + let timestamp = Moment.now() + req.connection.setTimestamp(timestamp) + let + bytesRead = + try: + await req.connection.reader.readUntil(addr buffer[0], + len(buffer), HeadersMark).wait( + req.session.headersTimeout) + except AsyncTimeoutError: + raiseHttpReadError("Reading response headers timed out") + except AsyncStreamError: + raiseHttpReadError("Could not read response headers") let response = prepareResponse(req, buffer.toOpenArray(0, bytesRead - 1)) if response.isErr(): raiseHttpProtocolError(response.error()) - return response.get() + let res = response.get() + res.setTimestamp(timestamp) + return res proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, meth: HttpMethod = MethodGet, @@ -1029,6 +1133,7 @@ proc send*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {. let headers = request.prepareRequest() request.connection.state = HttpClientConnectionState.RequestHeadersSending request.state = HttpReqRespState.Open + request.setTimestamp() await request.connection.writer.write(headers) request.connection.state = HttpClientConnectionState.RequestHeadersSent request.connection.state = HttpClientConnectionState.RequestBodySending @@ -1036,10 +1141,13 @@ proc send*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {. await request.connection.writer.write(request.buffer) request.connection.state = HttpClientConnectionState.RequestBodySent request.state = HttpReqRespState.Finished + request.setDuration() except CancelledError as exc: + request.setDuration() request.setError(newHttpInterruptError()) raise exc except AsyncStreamError: + request.setDuration() let error = newHttpWriteError("Could not send request headers") request.setError(error) raise error @@ -1079,13 +1187,16 @@ proc open*(request: HttpClientRequestRef): Future[HttpBodyWriter] {. try: let headers = request.prepareRequest() request.connection.state = HttpClientConnectionState.RequestHeadersSending + request.setTimestamp() await request.connection.writer.write(headers) request.connection.state = HttpClientConnectionState.RequestHeadersSent except CancelledError as exc: + request.setDuration() request.setError(newHttpInterruptError()) raise exc except AsyncStreamError: let error = newHttpWriteError("Could not send request headers") + request.setDuration() request.setError(error) raise error @@ -1123,6 +1234,7 @@ proc finish*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {. 
"Body writer instance must be closed before finish(request) call") request.state = HttpReqRespState.Finished request.connection.state = HttpClientConnectionState.RequestBodySent + request.setDuration() let response = try: await request.getResponse() @@ -1190,6 +1302,7 @@ proc finish*(response: HttpClientResponseRef) {.async.} = "Body reader instance must be closed before finish(response) call") response.connection.state = HttpClientConnectionState.ResponseBodyReceived response.state = HttpReqRespState.Finished + response.setDuration() proc getBodyBytes*(response: HttpClientResponseRef): Future[seq[byte]] {. async.} = diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index ec55ddf44..15f77d3f8 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -863,6 +863,65 @@ suite "HTTP client testing suite": return true + proc testIdleConnection(address: TransportAddress): Future[bool] {. + async.} = + let + ha = getAddress(address, HttpClientScheme.NonSecure, "/test") + + proc test( + session: HttpSessionRef, + a: HttpAddress + ): Future[TestResponseTuple] {.async.} = + + var + data: HttpResponseTuple + request = HttpClientRequestRef.new(session, a, version = HttpVersion11) + try: + data = await request.fetch() + finally: + await request.closeWait() + return (data.status, data.data.bytesToString(), 0) + + proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + if r.isOk(): + let request = r.get() + case request.uri.path + of "/test": + return await request.respond(Http200, "ok") + else: + return await request.respond(Http404, "Page not found") + else: + return dumbResponse() + + var server = createServer(address, process, false) + server.start() + let session = HttpSessionRef.new(idleTimeout = 1.seconds, + idlePeriod = 200.milliseconds) + try: + var f1 = test(session, ha) + var f2 = test(session, ha) + await allFutures(f1, f2) + check: + f1.finished() + f1.done() + f2.finished() + f2.done() + f1.read() == (200, "ok", 0) + f2.read() == (200, "ok", 0) + session.connectionsCount == 2 + + await sleepAsync(1500.milliseconds) + let resp = await test(session, ha) + check: + resp == (200, "ok", 0) + session.connectionsCount == 1 + finally: + await session.closeWait() + await server.stop() + await server.closeWait() + + return true + test "HTTP all request methods test": let address = initTAddress("127.0.0.1:30080") check waitFor(testMethods(address, false)) == 18 @@ -934,6 +993,10 @@ suite "HTTP client testing suite": let address = initTAddress("127.0.0.1:30080") check waitFor(testConnectionManagement(address)) == true + test "HTTP client idle connection test": + let address = initTAddress("127.0.0.1:30080") + check waitFor(testIdleConnection(address)) == true + test "Leaks test": proc getTrackerLeaks(tracker: string): bool = let tracker = getTracker(tracker) From b0af576c7c1f2cf98d1e13b4fcef1df3f55d05db Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 24 Mar 2023 17:34:45 +0200 Subject: [PATCH 003/146] Address #320 issue. 
(#372) --- chronos/transports/stream.nim | 192 +++++++++++++++++++--------------- 1 file changed, 105 insertions(+), 87 deletions(-) diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index a5f9788eb..ef9641698 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -1065,60 +1065,69 @@ when defined(windows): return retFuture proc continuationSocket(udata: pointer) {.gcsafe.} = - var ovl = cast[PtrCustomOverlapped](udata) - var server = cast[StreamServer](ovl.data.udata) + if retFuture.finished(): + # `retFuture` could become finished in 2 cases: + # 1. OS sends IOCP notification about failure, but we already failed + # `retFuture` with proper error. + # 2. `accept()` call has been cancelled. Cancellation callback closed + # accepting socket, so OS sends IOCP notification with an + # `ERROR_OPERATION_ABORTED` error. + return + var + ovl = cast[PtrCustomOverlapped](udata) + server = cast[StreamServer](ovl.data.udata) server.apending = false - if not(retFuture.finished()): - if server.status in {ServerStatus.Stopped, ServerStatus.Closed}: + + if server.status in {ServerStatus.Stopped, ServerStatus.Closed}: + retFuture.fail(getServerUseClosedError()) + server.asock.closeSocket() + server.clean() + else: + case ovl.data.errCode + of OSErrorCode(-1): + if setsockopt(SocketHandle(server.asock), cint(osdefs.SOL_SOCKET), + cint(osdefs.SO_UPDATE_ACCEPT_CONTEXT), + addr server.sock, + SockLen(sizeof(SocketHandle))) != 0'i32: + let err = osLastError() + server.asock.closeSocket() + if err == osdefs.WSAENOTSOCK: + # This can be happened when server get closed, but continuation + # was already scheduled, so we failing it not with OS error. + retFuture.fail(getServerUseClosedError()) + else: + let errorMsg = osErrorMsg(err) + retFuture.fail(getConnectionAbortedError(errorMsg)) + else: + var ntransp: StreamTransport + if not(isNil(server.init)): + let transp = server.init(server, server.asock) + ntransp = newStreamSocketTransport(server.asock, + server.bufferSize, + transp) + else: + ntransp = newStreamSocketTransport(server.asock, + server.bufferSize, nil) + # Start tracking transport + trackStream(ntransp) + retFuture.complete(ntransp) + of OSErrorCode(osdefs.ERROR_OPERATION_ABORTED): + # CancelIO() interrupt or close. + server.asock.closeSocket() retFuture.fail(getServerUseClosedError()) + server.clean() + of OSErrorCode(osdefs.WSAENETDOWN), + OSErrorCode(osdefs.WSAENETRESET), + OSErrorCode(osdefs.WSAECONNABORTED), + OSErrorCode(osdefs.WSAECONNRESET), + OSErrorCode(osdefs.WSAETIMEDOUT): server.asock.closeSocket() + retFuture.fail(getConnectionAbortedError(int(ovl.data.errCode))) server.clean() else: - case ovl.data.errCode - of OSErrorCode(-1): - if setsockopt(SocketHandle(server.asock), cint(osdefs.SOL_SOCKET), - cint(osdefs.SO_UPDATE_ACCEPT_CONTEXT), - addr server.sock, - SockLen(sizeof(SocketHandle))) != 0'i32: - let err = osLastError() - server.asock.closeSocket() - if err == osdefs.WSAENOTSOCK: - # This can be happened when server get closed, but continuation - # was already scheduled, so we failing it not with OS error. 
- retFuture.fail(getServerUseClosedError()) - else: - let errorMsg = osErrorMsg(err) - retFuture.fail(getConnectionAbortedError(errorMsg)) - else: - var ntransp: StreamTransport - if not(isNil(server.init)): - let transp = server.init(server, server.asock) - ntransp = newStreamSocketTransport(server.asock, - server.bufferSize, - transp) - else: - ntransp = newStreamSocketTransport(server.asock, - server.bufferSize, nil) - # Start tracking transport - trackStream(ntransp) - retFuture.complete(ntransp) - of OSErrorCode(osdefs.ERROR_OPERATION_ABORTED): - # CancelIO() interrupt or close. - server.asock.closeSocket() - retFuture.fail(getServerUseClosedError()) - server.clean() - of OSErrorCode(osdefs.WSAENETDOWN), - OSErrorCode(osdefs.WSAENETRESET), - OSErrorCode(osdefs.WSAECONNABORTED), - OSErrorCode(osdefs.WSAECONNRESET), - OSErrorCode(osdefs.WSAETIMEDOUT): - server.asock.closeSocket() - retFuture.fail(getConnectionAbortedError(int(ovl.data.errCode))) - server.clean() - else: - server.asock.closeSocket() - retFuture.fail(getTransportOsError(ovl.data.errCode)) + server.asock.closeSocket() + retFuture.fail(getTransportOsError(ovl.data.errCode)) proc cancellationSocket(udata: pointer) {.gcsafe.} = if server.apending: @@ -1126,50 +1135,59 @@ when defined(windows): server.asock.closeSocket() proc continuationPipe(udata: pointer) {.gcsafe.} = - var ovl = cast[PtrCustomOverlapped](udata) - var server = cast[StreamServer](ovl.data.udata) + if retFuture.finished(): + # `retFuture` could become finished in 2 cases: + # 1. OS sends IOCP notification about failure, but we already failed + # `retFuture` with proper error. + # 2. `accept()` call has been cancelled. Cancellation callback closed + # accepting socket, so OS sends IOCP notification with an + # `ERROR_OPERATION_ABORTED` error. + return + var + ovl = cast[PtrCustomOverlapped](udata) + server = cast[StreamServer](ovl.data.udata) server.apending = false - if not(retFuture.finished()): - if server.status in {ServerStatus.Stopped, ServerStatus.Closed}: + + if server.status in {ServerStatus.Stopped, ServerStatus.Closed}: + retFuture.fail(getServerUseClosedError()) + server.sock.closeHandle() + server.clean() + else: + if ovl.data.errCode == OSErrorCode(-1): + var ntransp: StreamTransport + var flags = {WinServerPipe} + if NoPipeFlash in server.flags: + flags.incl(WinNoPipeFlash) + if not(isNil(server.init)): + var transp = server.init(server, server.sock) + ntransp = newStreamPipeTransport(server.sock, server.bufferSize, + transp, flags) + else: + ntransp = newStreamPipeTransport(server.sock, server.bufferSize, + nil, flags) + server.sock = server.createAcceptPipe().valueOr: + server.sock = asyncInvalidSocket + server.errorCode = error + retFuture.fail(getTransportOsError(error)) + return + + trackStream(ntransp) + retFuture.complete(ntransp) + + elif int32(ovl.data.errCode) in {osdefs.ERROR_OPERATION_ABORTED, + osdefs.ERROR_PIPE_NOT_CONNECTED}: + # CancelIO() interrupt or close call. 
retFuture.fail(getServerUseClosedError()) - server.sock.closeHandle() server.clean() else: - if ovl.data.errCode == OSErrorCode(-1): - var ntransp: StreamTransport - var flags = {WinServerPipe} - if NoPipeFlash in server.flags: - flags.incl(WinNoPipeFlash) - if not(isNil(server.init)): - var transp = server.init(server, server.sock) - ntransp = newStreamPipeTransport(server.sock, server.bufferSize, - transp, flags) - else: - ntransp = newStreamPipeTransport(server.sock, server.bufferSize, - nil, flags) - server.sock = server.createAcceptPipe().valueOr: - server.sock = asyncInvalidSocket - server.errorCode = error - retFuture.fail(getTransportOsError(error)) - return - - trackStream(ntransp) - retFuture.complete(ntransp) - - elif int32(ovl.data.errCode) in {osdefs.ERROR_OPERATION_ABORTED, - osdefs.ERROR_PIPE_NOT_CONNECTED}: - # CancelIO() interrupt or close call. - retFuture.fail(getServerUseClosedError()) - server.clean() - else: - discard closeHandle(HANDLE(server.sock)) - server.sock = server.createAcceptPipe().valueOr: - server.sock = asyncInvalidSocket - server.errorCode = error - retFuture.fail(getTransportOsError(error)) - return - retFuture.fail(getTransportOsError(ovl.data.errCode)) + discard closeHandle(HANDLE(server.sock)) + server.sock = server.createAcceptPipe().valueOr: + server.sock = asyncInvalidSocket + server.errorCode = error + retFuture.fail(getTransportOsError(error)) + return + retFuture.fail(getTransportOsError(ovl.data.errCode)) proc cancellationPipe(udata: pointer) {.gcsafe.} = if server.apending: From 1394c9e04957928afc1db33d2e0965cfb677a1e0 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 24 Mar 2023 17:52:55 +0200 Subject: [PATCH 004/146] IOSelectors refactoring to properly support signals and processes. (AsyncProc 2) (#366) * ioselectors_epoll() refactoring. * ioselectors_kqueue() refactoring. * ioselectors_poll() initial refactor. * Remove `s.count` because it inconsistent and not used in `chronos`. * Remove Windows version of select() engine. * Add ability to switch event queue engine via `asyncEventEngine` command line option. * Make it possible to switch between engines. * Fix epoll regression. * Fix poll() engine issues. * Address review comments. * Add proper trick. * Address review comments. * Bump version to 3.1.0. --- chronos.nimble | 2 +- chronos/asyncloop.nim | 207 ++-- chronos/ioselects/ioselectors_epoll.nim | 1221 +++++++++++++--------- chronos/ioselects/ioselectors_kqueue.nim | 1149 ++++++++++---------- chronos/ioselects/ioselectors_poll.nim | 569 +++++----- chronos/ioselects/ioselectors_select.nim | 465 -------- chronos/osdefs.nim | 34 +- chronos/selectors2.nim | 168 ++- 8 files changed, 1855 insertions(+), 1960 deletions(-) delete mode 100644 chronos/ioselects/ioselectors_select.nim diff --git a/chronos.nimble b/chronos.nimble index 639d1d399..18f7a47d8 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -1,7 +1,7 @@ mode = ScriptMode.Verbose packageName = "chronos" -version = "3.0.11" +version = "3.1.0" author = "Status Research & Development GmbH" description = "Networking framework with async/await support" license = "MIT or Apache License 2.0" diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index ed8b43dda..ca1655433 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -567,24 +567,24 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or # We are ignoring SIGPIPE signal, because we are working with EPIPE. 
posix.signal(cint(SIGPIPE), SIG_IGN) - proc initAPI(disp: PDispatcher) {.raises: [Defect].} = + proc initAPI(disp: PDispatcher) = discard - proc newDispatcher*(): PDispatcher {.raises: [Defect].} = + proc newDispatcher*(): PDispatcher = ## Create new dispatcher. let selector = - try: - newSelector[SelectorData]() - except IOSelectorsException as exc: - raiseAsDefect exc, "Could not initialize selector" - except CatchableError as exc: - raiseAsDefect exc, "Could not initialize selector" + block: + let res = Selector.new(SelectorData) + if res.isErr(): raiseOsDefect(res.error(), + "Could not initialize selector") + res.get() + var res = PDispatcher( selector: selector, timers: initHeapQueue[TimerCallback](), - callbacks: initDeque[AsyncCallback](64), + callbacks: initDeque[AsyncCallback](asyncEventsCount), idlers: initDeque[AsyncCallback](), - keys: newSeq[ReadyKey](64), + keys: newSeq[ReadyKey](asyncEventsCount), trackers: initTable[string, TrackerBase]() ) res.callbacks.addLast(SentinelCallback) @@ -600,28 +600,18 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or ## Returns system specific OS queue. disp.selector + proc contains*(disp: PDispatcher, fd: AsyncFD): bool {.inline.} = + ## Returns ``true`` if ``fd`` is registered in thread's dispatcher. + cint(fd) in disp.selector + proc register2*(fd: AsyncFD): Result[void, OSErrorCode] = ## Register file descriptor ``fd`` in thread's dispatcher. - let loop = getThreadDispatcher() - try: - var data: SelectorData - loop.selector.registerHandle(int(fd), {}, data) - except CatchableError: - return err(osLastError()) - ok() + var data: SelectorData + getThreadDispatcher().selector.registerHandle2(cint(fd), {}, data) proc unregister2*(fd: AsyncFD): Result[void, OSErrorCode] = ## Unregister file descriptor ``fd`` from thread's dispatcher. - let loop = getThreadDispatcher() - try: - loop.selector.unregister(int(fd)) - except CatchableError: - return err(osLastError()) - ok() - - proc contains*(disp: PDispatcher, fd: AsyncFD): bool {.inline.} = - ## Returns ``true`` if ``fd`` is registered in thread's dispatcher. - int(fd) in disp.selector + getThreadDispatcher().selector.unregister2(cint(fd)) proc addReader2*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil): Result[void, OSErrorCode] = @@ -629,37 +619,27 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or ## call the callback ``cb`` with specified argument ``udata``. let loop = getThreadDispatcher() var newEvents = {Event.Read} - withData(loop.selector, int(fd), adata) do: + withData(loop.selector, cint(fd), adata) do: let acb = AsyncCallback(function: cb, udata: udata) adata.reader = acb if not(isNil(adata.writer.function)): newEvents.incl(Event.Write) do: return err(OSErrorCode(osdefs.EBADF)) - - try: - loop.selector.updateHandle(int(fd), newEvents) - except CatchableError: - return err(osLastError()) - ok() + loop.selector.updateHandle2(cint(fd), newEvents) proc removeReader2*(fd: AsyncFD): Result[void, OSErrorCode] = ## Stop watching the file descriptor ``fd`` for read availability. 
let loop = getThreadDispatcher() var newEvents: set[Event] - withData(loop.selector, int(fd), adata) do: + withData(loop.selector, cint(fd), adata) do: # We need to clear `reader` data, because `selectors` don't do it adata.reader = default(AsyncCallback) if not(isNil(adata.writer.function)): newEvents.incl(Event.Write) do: return err(OSErrorCode(osdefs.EBADF)) - - try: - loop.selector.updateHandle(int(fd), newEvents) - except CatchableError: - return err(osLastError()) - ok() + loop.selector.updateHandle2(cint(fd), newEvents) proc addWriter2*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil): Result[void, OSErrorCode] = @@ -667,37 +647,27 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or ## call the callback ``cb`` with specified argument ``udata``. let loop = getThreadDispatcher() var newEvents = {Event.Write} - withData(loop.selector, int(fd), adata) do: + withData(loop.selector, cint(fd), adata) do: let acb = AsyncCallback(function: cb, udata: udata) adata.writer = acb if not(isNil(adata.reader.function)): newEvents.incl(Event.Read) do: return err(OSErrorCode(osdefs.EBADF)) - - try: - loop.selector.updateHandle(int(fd), newEvents) - except CatchableError: - return err(osLastError()) - ok() + loop.selector.updateHandle2(cint(fd), newEvents) proc removeWriter2*(fd: AsyncFD): Result[void, OSErrorCode] = ## Stop watching the file descriptor ``fd`` for write availability. let loop = getThreadDispatcher() var newEvents: set[Event] - withData(loop.selector, int(fd), adata) do: + withData(loop.selector, cint(fd), adata) do: # We need to clear `writer` data, because `selectors` don't do it adata.writer = default(AsyncCallback) if not(isNil(adata.reader.function)): newEvents.incl(Event.Read) do: return err(OSErrorCode(osdefs.EBADF)) - - try: - loop.selector.updateHandle(int(fd), newEvents) - except CatchableError: - return err(osLastError()) - ok() + loop.selector.updateHandle2(cint(fd), newEvents) proc register*(fd: AsyncFD) {.raises: [Defect, OSError].} = ## Register file descriptor ``fd`` in thread's dispatcher. @@ -766,7 +736,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or if not isNil(aftercb): aftercb(cast[pointer](param)) - withData(loop.selector, int(fd), adata) do: + withData(loop.selector, cint(fd), adata) do: # We are scheduling reader and writer callbacks to be called # explicitly, so they can get an error and continue work. # Callbacks marked as deleted so we don't need to get REAL notifications @@ -795,27 +765,59 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or ## You can execute ``aftercb`` before actual socket close operation. closeSocket(fd, aftercb) - when ioselSupportedPlatform: - proc addSignal*(signal: int, cb: CallbackFunc, - udata: pointer = nil): int {. - raises: [Defect, IOSelectorsException, ValueError, OSError].} = + when asyncEventEngine in ["epoll", "kqueue"]: + proc addSignal2*(signal: int, cb: CallbackFunc, + udata: pointer = nil): Result[int, OSErrorCode] = ## Start watching signal ``signal``, and when signal appears, call the ## callback ``cb`` with specified argument ``udata``. Returns signal ## identifier code, which can be used to remove signal callback ## via ``removeSignal``. let loop = getThreadDispatcher() var data: SelectorData - result = loop.selector.registerSignal(signal, data) - withData(loop.selector, result, adata) do: + let sigfd = ? 
loop.selector.registerSignal(signal, data) + withData(loop.selector, sigfd, adata) do: + adata.reader = AsyncCallback(function: cb, udata: udata) + do: + return err(OSErrorCode(osdefs.EBADF)) + ok(sigfd) + + proc addProcess2*(pid: int, cb: CallbackFunc, + udata: pointer = nil): Result[int, OSErrorCode] = + ## Registers callback ``cb`` to be called when process with process + ## identifier ``pid`` exited. Returns process' descriptor, which can be + ## used to clear process callback via ``removeProcess``. + let loop = getThreadDispatcher() + var data: SelectorData + let procfd = ? loop.selector.registerProcess(pid, data) + withData(loop.selector, procfd, adata) do: adata.reader = AsyncCallback(function: cb, udata: udata) do: - raise newException(ValueError, "File descriptor not registered.") + return err(OSErrorCode(osdefs.EBADF)) + ok(procfd) - proc removeSignal*(sigfd: int) {. - raises: [Defect, IOSelectorsException].} = + proc removeSignal2*(sigfd: int): Result[void, OSErrorCode] = ## Remove watching signal ``signal``. - let loop = getThreadDispatcher() - loop.selector.unregister(sigfd) + getThreadDispatcher().selector.unregister2(cint(sigfd)) + + proc removeProcess2*(procfd: int): Result[void, OSErrorCode] = + ## Remove process' watching using process' descriptor ``procfd``. + getThreadDispatcher().selector.unregister2(cint(procfd)) + + proc addSignal*(signal: int, cb: CallbackFunc, + udata: pointer = nil): int {.raises: [Defect, OSError].} = + ## Start watching signal ``signal``, and when signal appears, call the + ## callback ``cb`` with specified argument ``udata``. Returns signal + ## identifier code, which can be used to remove signal callback + ## via ``removeSignal``. + addSignal2(signal, cb, udata).tryGet() + + proc removeSignal*(sigfd: int) {.raises: [Defect, OSError].} = + ## Remove watching signal ``signal``. + removeSignal2(sigfd).tryGet() + + proc removeProcess*(procfd: int) {.raises: [Defect, OSError].} = + ## Remove process' watching using process' descriptor ``procfd``. + removeProcess2(procfd).tryGet() proc poll*() {.gcsafe.} = ## Perform single asynchronous step. @@ -823,10 +825,6 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or var curTime = Moment.now() var curTimeout = 0 - when ioselSupportedPlatform: - let customSet = {Event.Timer, Event.Signal, Event.Process, - Event.Vnode} - # On reentrant `poll` calls from `processCallbacks`, e.g., `waitFor`, # complete pending work of the outer `processCallbacks` call. # On non-reentrant `poll` calls, this only removes sentinel element. @@ -837,15 +835,17 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or # Processing IO descriptors and all hardware events. 
let count = - try: - loop.selector.selectInto(curTimeout, loop.keys) - except IOSelectorsException: - raiseOsDefect(osLastError(), "poll(): Unable to get OS events") - for i in 0..", pure, final.} = object - ssi_signo*: uint32 - ssi_errno*: int32 - ssi_code*: int32 - ssi_pid*: uint32 - ssi_uid*: uint32 - ssi_fd*: int32 - ssi_tid*: uint32 - ssi_band*: uint32 - ssi_overrun*: uint32 - ssi_trapno*: uint32 - ssi_status*: int32 - ssi_int*: int32 - ssi_ptr*: uint64 - ssi_utime*: uint64 - ssi_stime*: uint64 - ssi_addr*: uint64 - pad* {.importc: "__pad".}: array[0..47, uint8] - -proc timerfd_create(clock_id: ClockId, flags: cint): cint - {.cdecl, importc: "timerfd_create", header: "".} -proc timerfd_settime(ufd: cint, flags: cint, - utmr: var Itimerspec, otmr: var Itimerspec): cint - {.cdecl, importc: "timerfd_settime", header: "".} -proc eventfd(count: cuint, flags: cint): cint - {.cdecl, importc: "eventfd", header: "".} - -when not defined(android): - proc signalfd(fd: cint, mask: var Sigset, flags: cint): cint - {.cdecl, importc: "signalfd", header: "".} - -when hasThreadSupport: - type - SelectorImpl[T] = object - epollFD: cint - numFD: int - fds: ptr SharedArray[SelectorKey[T]] - count: int - Selector*[T] = ptr SelectorImpl[T] -else: - type - SelectorImpl[T] = object - epollFD: cint - numFD: int - fds: seq[SelectorKey[T]] - count: int - Selector*[T] = ref SelectorImpl[T] type + SelectorImpl[T] = object + epollFd: cint + sigFd: Opt[cint] + pidFd: Opt[cint] + fds: Table[int32, SelectorKey[T]] + signals: Table[int32, SelectorKey[T]] + processes: Table[int32, SelectorKey[T]] + signalMask: Sigset + virtualHoles: Deque[int32] + virtualId: int32 + childrenExited: bool + pendingEvents: Deque[ReadyKey] + + Selector*[T] = ref SelectorImpl[T] + SelectEventImpl = object efd: cint + SelectEvent* = ptr SelectEventImpl -proc newSelector*[T](): Selector[T] {.raises: [Defect, OSError].} = - # Retrieve the maximum fd count (for current OS) via getrlimit() - var a = RLimit() - # Start with a reasonable size, checkFd() will grow this on demand - const numFD = 1024 - - var epollFD = epoll_create(MAX_EPOLL_EVENTS) - if epollFD < 0: - raiseOSError(osLastError()) - - when hasThreadSupport: - result = cast[Selector[T]](allocShared0(sizeof(SelectorImpl[T]))) - result.epollFD = epollFD - result.numFD = numFD - result.fds = allocSharedArray[SelectorKey[T]](numFD) +proc getVirtualId[T](s: Selector[T]): SelectResult[int32] = + if len(s.virtualHoles) > 0: + ok(s.virtualHoles.popLast()) else: - result = Selector[T]() - result.epollFD = epollFD - result.numFD = numFD - result.fds = newSeq[SelectorKey[T]](numFD) - - for i in 0 ..< numFD: - result.fds[i].ident = InvalidIdent - -proc close*[T](s: Selector[T]) = - let res = posix.close(s.epollFD) - when hasThreadSupport: - deallocSharedArray(s.fds) - deallocShared(cast[pointer](s)) - if res != 0: - raiseIOSelectorsError(osLastError()) - -proc newSelectEvent*(): SelectEvent {.raises: [Defect, OSError, IOSelectorsException].} = - let fdci = eventfd(0, 0) - if fdci == -1: - raiseIOSelectorsError(osLastError()) - setNonBlocking(fdci) - result = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl))) - result.efd = fdci - -proc trigger*(ev: SelectEvent) {.raises: [Defect, IOSelectorsException].} = - var data: uint64 = 1 - if posix.write(ev.efd, addr data, sizeof(uint64)) == -1: - raiseIOSelectorsError(osLastError()) - -proc close*(ev: SelectEvent) {.raises: [Defect, IOSelectorsException].} = - let res = posix.close(ev.efd) - deallocShared(cast[pointer](ev)) - if res != 0: - 
raiseIOSelectorsError(osLastError()) - -template checkFd(s, f) = - if f >= s.numFD: - var numFD = s.numFD - while numFD <= f: numFD *= 2 - when hasThreadSupport: - s.fds = reallocSharedArray(s.fds, numFD) + if s.virtualId == low(int32): + err(OSErrorCode(EMFILE)) else: - s.fds.setLen(numFD) - for i in s.numFD ..< numFD: - s.fds[i].ident = InvalidIdent - s.numFD = numFD + dec(s.virtualId) + ok(s.virtualId) -proc registerHandle*[T](s: Selector[T], fd: int | SocketHandle, - events: set[Event], data: T) {. - raises: [Defect, IOSelectorsException].} = - let fdi = int(fd) - s.checkFd(fdi) - doAssert(s.fds[fdi].ident == InvalidIdent, "Descriptor " & $fdi & " already registered") - s.setKey(fdi, events, 0, data) - if events != {}: - var epv = EpollEvent(events: EPOLLRDHUP) - epv.data.u64 = fdi.uint - if Event.Read in events: epv.events = epv.events or EPOLLIN - if Event.Write in events: epv.events = epv.events or EPOLLOUT - if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - inc(s.count) - -proc updateHandle*[T](s: Selector[T], fd: int | SocketHandle, events: set[Event]) {. - raises: [Defect, IOSelectorsException].} = - let maskEvents = {Event.Timer, Event.Signal, Event.Process, Event.Vnode, - Event.User, Event.Oneshot, Event.Error} - let fdi = int(fd) - s.checkFd(fdi) - var pkey = addr(s.fds[fdi]) - doAssert(pkey.ident != InvalidIdent, - "Descriptor " & $fdi & " is not registered in the selector!") - doAssert(pkey.events * maskEvents == {}) - if pkey.events != events: - var epv = EpollEvent(events: EPOLLRDHUP) - epv.data.u64 = fdi.uint - - if Event.Read in events: epv.events = epv.events or EPOLLIN - if Event.Write in events: epv.events = epv.events or EPOLLOUT - - if pkey.events == {}: - if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - inc(s.count) +proc isVirtualId(ident: int32): bool = + ident < 0'i32 + +proc toString(key: int32|cint|SocketHandle|int): string = + let fdi32 = when key is int32: key else: int32(key) + if isVirtualId(fdi32): + if fdi32 == -1: + "InvalidIdent" else: - if events != {}: - if epoll_ctl(s.epollFD, EPOLL_CTL_MOD, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - else: - if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - dec(s.count) - pkey.events = events - -proc unregister*[T](s: Selector[T], fd: int|SocketHandle) {.raises: [Defect, IOSelectorsException].} = - let fdi = int(fd) - s.checkFd(fdi) - var pkey = addr(s.fds[fdi]) + "V" & Base10.toString(uint32(-fdi32)) + else: + Base10.toString(uint32(fdi32)) + +template addKey[T](s: Selector[T], key: int32, skey: SelectorKey[T]) = + if s.fds.hasKeyOrPut(key, skey): + raiseAssert "Descriptor [" & key.toString() & + "] is already registered in the selector!" + +template getKey[T](s: Selector[T], key: int32): SelectorKey[T] = + let + defaultKey = SelectorKey[T](ident: InvalidIdent) + pkey = s.fds.getOrDefault(key, defaultKey) doAssert(pkey.ident != InvalidIdent, - "Descriptor " & $fdi & " is not registered in the selector!") - if pkey.events != {}: - when not defined(android): - if Event.Read in pkey.events or Event.Write in pkey.events or Event.User in pkey.events: - var epv = EpollEvent() - # TODO: Refactor all these EPOLL_CTL_DEL + dec(s.count) into a proc. 
- if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - dec(s.count) - elif Event.Timer in pkey.events: - if Event.Finished notin pkey.events: - var epv = EpollEvent() - if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - dec(s.count) - if posix.close(cint(fdi)) != 0: - raiseIOSelectorsError(osLastError()) - elif Event.Signal in pkey.events: - var epv = EpollEvent() - if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - var nmask, omask: Sigset - discard sigemptyset(nmask) - discard sigemptyset(omask) - discard sigaddset(nmask, cint(s.fds[fdi].param)) - unblockSignals(nmask, omask) - dec(s.count) - if posix.close(cint(fdi)) != 0: - raiseIOSelectorsError(osLastError()) - elif Event.Process in pkey.events: - if Event.Finished notin pkey.events: - var epv = EpollEvent() - if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - var nmask, omask: Sigset - discard sigemptyset(nmask) - discard sigemptyset(omask) - discard sigaddset(nmask, SIGCHLD) - unblockSignals(nmask, omask) - dec(s.count) - if posix.close(cint(fdi)) != 0: - raiseIOSelectorsError(osLastError()) - else: - if Event.Read in pkey.events or Event.Write in pkey.events or Event.User in pkey.events: - var epv = EpollEvent() - if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - dec(s.count) - elif Event.Timer in pkey.events: - if Event.Finished notin pkey.events: - var epv = EpollEvent() - if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - dec(s.count) - if posix.close(cint(fdi)) != 0: - raiseIOSelectorsError(osLastError()) - clearKey(pkey) - -proc unregister*[T](s: Selector[T], ev: SelectEvent) {. 
- raises: [Defect, IOSelectorsException].} = - let fdi = int(ev.efd) - s.checkFd(fdi) - var pkey = addr(s.fds[fdi]) - doAssert(pkey.ident != InvalidIdent, "Event is not registered in the queue!") - doAssert(Event.User in pkey.events) - var epv = EpollEvent() - if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - dec(s.count) - clearKey(pkey) + "Descriptor [" & key.toString() & + "] is not registered in the selector!") + pkey + +template checkKey[T](s: Selector[T], key: int32): bool = + s.fds.contains(key) + +proc addSignal[T](s: Selector[T], signal: int, skey: SelectorKey[T]) = + if s.signals.hasKeyOrPut(int32(signal), skey): + raiseAssert "Signal [" & $signal & "] is already registered in the selector" + +template addProcess[T](s: Selector[T], pid: int, skey: SelectorKey[T]) = + if s.processes.hasKeyOrPut(int32(pid), skey): + raiseAssert "Process [" & $pid & "] is already registered in the selector" + +proc freeKey[T](s: Selector[T], key: int32) = + s.fds.del(key) + if isVirtualId(key): + s.virtualHoles.addFirst(key) + +proc freeSignal[T](s: Selector[T], ident: int32) = + s.signals.del(ident) + +proc freeProcess[T](s: Selector[T], ident: int32) = + s.processes.del(ident) + +proc new*(t: typedesc[Selector], T: typedesc): SelectResult[Selector[T]] = + var nmask: Sigset + if sigemptyset(nmask) < 0: + return err(osLastError()) + let epollFd = epoll_create(asyncEventsCount) + if epollFd < 0: + return err(osLastError()) + let selector = Selector[T]( + epollFd: epollFd, + fds: initTable[int32, SelectorKey[T]](asyncInitialSize), + signalMask: nmask, + virtualId: -1'i32, # Should start with -1, because `InvalidIdent` == -1 + childrenExited: false, + virtualHoles: initDeque[int32](), + pendingEvents: initDeque[ReadyKey]() + ) + ok(selector) + +proc close2*[T](s: Selector[T]): SelectResult[void] = + s.fds.clear() + s.signals.clear() + s.processes.clear() + s.virtualHoles.clear() + s.virtualId = -1'i32 + if handleEintr(osdefs.close(s.epollFd)) != 0: + err(osLastError()) + else: + ok() -proc registerTimer*[T](s: Selector[T], timeout: int, oneshot: bool, - data: T): int {. 
- discardable, raises: [Defect, IOSelectorsException].} = - var - newTs: Itimerspec - oldTs: Itimerspec - let fdi = timerfd_create(CLOCK_MONOTONIC, 0).int - if fdi == -1: - raiseIOSelectorsError(osLastError()) - setNonBlocking(fdi.cint) - - s.checkFd(fdi) - doAssert(s.fds[fdi].ident == InvalidIdent) - - var events = {Event.Timer} - var epv = EpollEvent(events: EPOLLIN or EPOLLRDHUP) - epv.data.u64 = fdi.uint - - if oneshot: - newTs.it_interval.tv_sec = posix.Time(0) - newTs.it_interval.tv_nsec = 0 - newTs.it_value.tv_sec = posix.Time(timeout div 1_000) - newTs.it_value.tv_nsec = (timeout %% 1_000) * 1_000_000 - incl(events, Event.Oneshot) - epv.events = epv.events or EPOLLONESHOT +proc new*(t: typedesc[SelectEvent]): SelectResult[SelectEvent] = + let eFd = eventfd(0, EFD_CLOEXEC or EFD_NONBLOCK) + if eFd == -1: + return err(osLastError()) + var res = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl))) + res.efd = eFd + ok(res) + +proc trigger2*(event: SelectEvent): SelectResult[void] = + var data: uint64 = 1 + let res = handleEintr(osdefs.write(event.efd, addr data, sizeof(uint64))) + if res == -1: + err(osLastError()) + elif res != sizeof(uint64): + err(OSErrorCode(osdefs.EINVAL)) else: - newTs.it_interval.tv_sec = posix.Time(timeout div 1000) - newTs.it_interval.tv_nsec = (timeout %% 1_000) * 1_000_000 - newTs.it_value.tv_sec = newTs.it_interval.tv_sec - newTs.it_value.tv_nsec = newTs.it_interval.tv_nsec - - if timerfd_settime(fdi.cint, cint(0), newTs, oldTs) != 0: - raiseIOSelectorsError(osLastError()) - if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - s.setKey(fdi, events, 0, data) - inc(s.count) - result = fdi - -when not defined(android): - proc registerSignal*[T](s: Selector[T], signal: int, - data: T): int {. - discardable, raises: [Defect, OSError, IOSelectorsException].} = - var - nmask: Sigset - omask: Sigset - - discard sigemptyset(nmask) - discard sigemptyset(omask) - discard sigaddset(nmask, cint(signal)) - blockSignals(nmask, omask) - - let fdi = signalfd(-1, nmask, 0).int - if fdi == -1: - raiseIOSelectorsError(osLastError()) - setNonBlocking(fdi.cint) - - s.checkFd(fdi) - doAssert(s.fds[fdi].ident == InvalidIdent) - - var epv = EpollEvent(events: EPOLLIN or EPOLLRDHUP) - epv.data.u64 = fdi.uint - if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - s.setKey(fdi, {Event.Signal}, signal, data) - inc(s.count) - result = fdi - - proc registerProcess*[T](s: Selector, pid: int, - data: T): int {. 
- discardable, raises: [Defect, IOSelectorsException].} = - var - nmask: Sigset - omask: Sigset - - discard sigemptyset(nmask) - discard sigemptyset(omask) - discard sigaddset(nmask, posix.SIGCHLD) - blockSignals(nmask, omask) - - let fdi = signalfd(-1, nmask, 0).int - if fdi == -1: - raiseIOSelectorsError(osLastError()) - setNonBlocking(fdi.cint) - - s.checkFd(fdi) - doAssert(s.fds[fdi].ident == InvalidIdent) - - var epv = EpollEvent(events: EPOLLIN or EPOLLRDHUP) - epv.data.u64 = fdi.uint - epv.events = EPOLLIN or EPOLLRDHUP - if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, fdi.cint, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - s.setKey(fdi, {Event.Process, Event.Oneshot}, pid, data) - inc(s.count) - result = fdi - -proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) {.raises: [Defect, IOSelectorsException].} = - let fdi = int(ev.efd) - doAssert(s.fds[fdi].ident == InvalidIdent, "Event is already registered in the queue!") - s.setKey(fdi, {Event.User}, 0, data) - var epv = EpollEvent(events: EPOLLIN or EPOLLRDHUP) - epv.data.u64 = ev.efd.uint - if epoll_ctl(s.epollFD, EPOLL_CTL_ADD, ev.efd, addr epv) != 0: - raiseIOSelectorsError(osLastError()) - inc(s.count) + ok() + +proc close2*(event: SelectEvent): SelectResult[void] = + let evFd = event.efd + deallocShared(cast[pointer](event)) + let res = handleEintr(osdefs.close(evFd)) + if res == -1: + err(osLastError()) + else: + ok() -proc selectInto*[T](s: Selector[T], timeout: int, - results: var openArray[ReadyKey]): int {.raises: [Defect, IOSelectorsException].} = - var - resTable: array[MAX_EPOLL_EVENTS, EpollEvent] - maxres = MAX_EPOLL_EVENTS - i, k: int - - if maxres > len(results): - maxres = len(results) - - verifySelectParams(timeout) - - let count = epoll_wait(s.epollFD, addr(resTable[0]), maxres.cint, - timeout.cint) - if count < 0: - result = 0 - let err = osLastError() - if cint(err) != EINTR: - raiseIOSelectorsError(err) - elif count == 0: - result = 0 +proc init(t: typedesc[EpollEvent], fdi: cint, events: set[Event]): EpollEvent = + var res = uint32(EPOLLRDHUP) + if Event.Read in events: res = res or uint32(EPOLLIN) + if Event.Write in events: res = res or uint32(EPOLLOUT) + if Event.Oneshot in events: res = res or uint32(EPOLLONESHOT) + # We need this double conversion of type because otherwise in x64 environment + # negative cint could be converted to big uint64. 
+ EpollEvent(events: res, data: EpollData(u64: uint64(uint32(fdi)))) + +proc registerHandle2*[T](s: Selector[T], fd: cint, events: set[Event], + data: T): SelectResult[void] = + let skey = SelectorKey[T](ident: fd, events: events, param: 0, data: data) + + s.addKey(fd, skey) + + if events != {}: + let epollEvents = EpollEvent.init(fd, events) + if epoll_ctl(s.epollFd, EPOLL_CTL_ADD, fd, unsafeAddr(epollEvents)) != 0: + s.freeKey(fd) + return err(osLastError()) + ok() + +proc updateHandle2*[T](s: Selector[T], fd: cint, + events: set[Event]): SelectResult[void] = + const EventsMask = {Event.Timer, Event.Signal, Event.Process, Event.Vnode, + Event.User, Event.Oneshot, Event.Error} + s.fds.withValue(int32(fd), pkey): + doAssert(pkey[].events * EventsMask == {}, + "Descriptor [" & fd.toString() & "] could not be updated!") + if pkey[].events != events: + let epollEvents = EpollEvent.init(fd, events) + if pkey[].events == {}: + if epoll_ctl(s.epollFd, EPOLL_CTL_ADD, fd, + unsafeAddr(epollEvents)) != 0: + return err(osLastError()) + else: + if events != {}: + if epoll_ctl(s.epollFd, EPOLL_CTL_MOD, fd, + unsafeAddr(epollEvents)) != 0: + return err(osLastError()) + else: + if epoll_ctl(s.epollFd, EPOLL_CTL_DEL, fd, + unsafeAddr epollEvents) != 0: + return err(osLastError()) + pkey.events = events + do: + raiseAssert "Descriptor [" & fd.toString() & + "] is not registered in the selector!" + ok() + +proc blockSignal[T](s: Selector[T], signal: int): SelectResult[bool] = + let isMember = sigismember(s.signalMask, cint(signal)) + if isMember < 0: + err(osLastError()) + elif isMember > 0: + ok(false) + else: + var omask, nmask: Sigset + if sigemptyset(nmask) < 0: + return err(osLastError()) + if sigemptyset(omask) < 0: + return err(osLastError()) + if sigaddset(nmask, cint(signal)) < 0: + return err(osLastError()) + ? blockSignals(nmask, omask) + if sigaddset(s.signalMask, cint(signal)) < 0: + # Try to restore previous state of signals mask + let errorCode = osLastError() + discard unblockSignals(nmask, omask) + return err(errorCode) + ok(true) + +proc unblockSignal[T](s: Selector[T], signal: int): SelectResult[bool] = + let isMember = sigismember(s.signalMask, cint(signal)) + if isMember < 0: + err(osLastError()) + elif isMember == 0: + ok(false) else: - i = 0 - k = 0 - while i < count: - let fdi = int(resTable[i].data.u64) - let pevents = resTable[i].events - var pkey = addr(s.fds[fdi]) - doAssert(pkey.ident != InvalidIdent) - var rkey = ReadyKey(fd: fdi, events: {}) - - if (pevents and EPOLLERR) != 0 or (pevents and EPOLLHUP) != 0: - if (pevents and EPOLLHUP) != 0: - rkey.errorCode = OSErrorCode ECONNRESET + var omask, nmask: Sigset + if sigemptyset(nmask) < 0: + return err(osLastError()) + if sigemptyset(omask) < 0: + return err(osLastError()) + if sigaddset(nmask, cint(signal)) < 0: + return err(osLastError()) + ? unblockSignals(nmask, omask) + if sigdelset(s.signalMask, cint(signal)) < 0: + # Try to restore previous state of signals mask + let errorCode = osLastError() + discard blockSignals(nmask, omask) + return err(errorCode) + ok(true) + +template checkSignal(signal: int) = + doAssert((signal >= 0) and (signal <= int(high(int32))), + "Invalid signal value [" & $signal & "]") + +proc registerSignalEvent[T](s: Selector[T], signal: int, + events: set[Event], param: int, + data: T): SelectResult[cint] = + checkSignal(signal) + + let + fdi32 = ? 
s.getVirtualId() + selectorKey = SelectorKey[T](ident: signal, events: events, + param: param, data: data) + signalKey = SelectorKey[T](ident: fdi32, events: events, + param: param, data: data) + + s.addKey(fdi32, selectorKey) + s.addSignal(signal, signalKey) + + let mres = + block: + let res = s.blockSignal(signal) + if res.isErr(): + s.freeKey(fdi32) + s.freeSignal(int32(signal)) + return err(res.error()) + res.get() + + if not(mres): + raiseAssert "Signal [" & $signal & "] could have only one handler at " & + "the same time!" + + if s.sigFd.isSome(): + let res = signalfd(s.sigFd.get(), s.signalMask, + SFD_NONBLOCK or SFD_CLOEXEC) + if res == -1: + let errorCode = osLastError() + s.freeKey(fdi32) + s.freeSignal(int32(signal)) + discard s.unblockSignal(signal) + return err(errorCode) + else: + let sigFd = signalfd(-1, s.signalMask, SFD_NONBLOCK or SFD_CLOEXEC) + if sigFd == -1: + let errorCode = osLastError() + s.freeKey(fdi32) + s.freeSignal(int32(signal)) + discard s.unblockSignal(signal) + return err(errorCode) + + let fdKey = SelectorKey[T](ident: sigFd, events: {Event.Signal}) + s.addKey(sigFd, fdKey) + + let event = EpollEvent.init(sigFd, {Event.Read}) + if epoll_ctl(s.epollFd, EPOLL_CTL_ADD, sigFd, unsafeAddr(event)) != 0: + let errorCode = osLastError() + s.freeKey(fdi32) + s.freeSignal(int32(signal)) + s.freeKey(sigFd) + discard s.unblockSignal(signal) + discard handleEintr(osdefs.close(sigFd)) + return err(errorCode) + + s.sigFd = Opt.some(sigFd) + + ok(cint(fdi32)) + +proc registerSignal*[T](s: Selector[T], signal: int, + data: T): SelectResult[cint] = + registerSignalEvent(s, signal, {Event.Signal}, 0, data) + +proc registerTimer2*[T](s: Selector[T], timeout: int, oneshot: bool, + data: T): SelectResult[cint] = + let timerFd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC or TFD_NONBLOCK) + if timerFd == -1: + return err(osLastError()) + + let + fdi32 = int32(timerFd) + (key, event) = + if oneshot: + ( + SelectorKey[T](ident: timerFd, events: {Event.Timer, Event.Oneshot}, + param: 0, data: data), + EpollEvent.init(timerFd, {Event.Read, Event.Oneshot}) + ) + else: + ( + SelectorKey[T](ident: timerFd, events: {Event.Timer}, + param: 0, data: data), + EpollEvent.init(timerFd, {Event.Read}) + ) + var timeStruct = + if oneshot: + Itimerspec( + it_interval: Timespec(tv_sec: osdefs.Time(0), tv_nsec: 0), + it_value: Timespec(tv_sec: osdefs.Time(timeout div 1_000), + tv_nsec: (timeout %% 1000) * 1_000_000) + ) + else: + Itimerspec( + it_interval: Timespec(tv_sec: osdefs.Time(timeout div 1_000), + tv_nsec: 0), + it_value: Timespec(tv_sec: osdefs.Time(timeout div 1_000), + tv_nsec: 0), + ) + + s.addKey(fdi32, key) + + var oldTs = Itimerspec() + if timerfd_settime(timerFd, cint(0), timeStruct, oldTs) != 0: + let errorCode = osLastError() + s.freeKey(fdi32) + discard handleEintr(osdefs.close(timerFd)) + return err(errorCode) + + if epoll_ctl(s.epollFd, EPOLL_CTL_ADD, timerFd, unsafeAddr(event)) != 0: + let errorCode = osLastError() + s.freeKey(fdi32) + discard handleEintr(osdefs.close(timerFd)) + return err(errorCode) + + ok(cint(fdi32)) + +proc registerEvent2*[T](s: Selector[T], ev: SelectEvent, + data: T): SelectResult[cint] = + doAssert(not(isNil(ev))) + let + key = SelectorKey[T](ident: ev.efd, events: {Event.User}, + param: 0, data: data) + event = EpollEvent.init(ev.efd, {Event.Read}) + + s.addKey(ev.efd, key) + + if epoll_ctl(s.epollFd, EPOLL_CTL_ADD, ev.efd, unsafeAddr(event)) != 0: + s.freeKey(ev.efd) + return err(osLastError()) + + ok(ev.efd) + +template checkPid(pid: int) = + when 
sizeof(int) == 8: + doAssert(pid >= 0 and pid <= int(high(uint32)), + "Invalid process idientified (pid) value") + else: + doAssert(pid >= 0 and pid <= high(int32), + "Invalid process idientified (pid) value") + +proc registerProcess*[T](s: Selector, pid: int, data: T): SelectResult[cint] = + checkPid(pid) + + let + fdi32 = ? s.getVirtualId() + events = {Event.Process, Event.Oneshot} + selectorKey = SelectorKey[T](ident: pid, events: events, param: 0, + data: data) + processKey = SelectorKey[T](ident: fdi32, events: events, param: 0, + data: data) + + s.addProcess(pid, processKey) + s.addKey(fdi32, selectorKey) + + if s.pidFd.isNone(): + let res = registerSignalEvent(s, int(SIGCHLD), {Event.Signal}, 0, data) + if res.isErr(): + s.freeKey(fdi32) + s.freeProcess(int32(pid)) + return err(res.error()) + s.pidFd = Opt.some(cast[cint](res.get())) + + ok(cint(fdi32)) + +proc unregister2*[T](s: Selector[T], fd: cint): SelectResult[void] = + let + fdi32 = int32(fd) + pkey = s.getKey(fdi32) + + if pkey.events != {}: + if {Event.Read, Event.Write, Event.User} * pkey.events != {}: + if epoll_ctl(s.epollFd, EPOLL_CTL_DEL, cint(pkey.ident), nil) != 0: + return err(osLastError()) + + elif Event.Timer in pkey.events: + if Event.Finished notin pkey.events: + if epoll_ctl(s.epollFd, EPOLL_CTL_DEL, fd, nil) != 0: + let errorCode = osLastError() + discard handleEintr(osdefs.close(fd)) + return err(errorCode) + if handleEintr(osdefs.close(fd)) == -1: + return err(osLastError()) + + elif Event.Signal in pkey.events: + if not(s.signals.hasKey(int32(pkey.ident))): + raiseAssert "Signal " & pkey.ident.toString() & + " is not registered in the selector!" + let sigFd = + block: + doAssert(s.sigFd.isSome(), "signalfd descriptor is missing") + s.sigFd.get() + + s.freeSignal(int32(pkey.ident)) + + if len(s.signals) > 0: + let res = signalfd(sigFd, s.signalMask, SFD_NONBLOCK or SFD_CLOEXEC) + if res == -1: + let errorCode = osLastError() + discard s.unblockSignal(pkey.ident) + return err(errorCode) + else: + s.freeKey(sigFd) + s.sigFd = Opt.none(cint) + + if epoll_ctl(s.epollFd, EPOLL_CTL_DEL, sigFd, nil) != 0: + let errorCode = osLastError() + discard handleEintr(osdefs.close(sigFd)) + discard s.unblockSignal(pkey.ident) + return err(errorCode) + + if handleEintr(osdefs.close(sigFd)) != 0: + let errorCode = osLastError() + discard s.unblockSignal(pkey.ident) + return err(errorCode) + + let mres = ? s.unblockSignal(pkey.ident) + doAssert(mres, "Signal is not present in stored mask!") + + elif Event.Process in pkey.events: + if not(s.processes.hasKey(int32(pkey.ident))): + raiseAssert "Process " & pkey.ident.toString() & + " is not registered in the selector!" + + let pidFd = + block: + doAssert(s.pidFd.isSome(), "process descriptor is missing") + s.pidFd.get() + + s.freeProcess(int32(pkey.ident)) + + # We need to filter pending events queue for just unregistered process. 
+ if len(s.pendingEvents) > 0: + s.pendingEvents = + block: + var res = initDeque[ReadyKey](len(s.pendingEvents)) + for item in s.pendingEvents.items(): + if item.fd != fdi32: + res.addLast(item) + res + + if len(s.processes) == 0: + s.pidFd = Opt.none(cint) + let res = s.unregister2(pidFd) + if res.isErr(): + return err(res.error()) + + s.freeKey(fdi32) + ok() + +proc unregister2*[T](s: Selector[T], event: SelectEvent): SelectResult[void] = + s.unregister2(event.efd) + +proc prepareKey[T](s: Selector[T], event: EpollEvent): Opt[ReadyKey] = + let + defaultKey = SelectorKey[T](ident: InvalidIdent) + fdi32 = + block: + doAssert(event.data.u64 <= uint64(high(uint32)), + "Invalid user data value in epoll event object") + cast[int32](event.data.u64) + + var + pkey = s.getKey(fdi32) + rkey = ReadyKey(fd: fdi32) + + if (event.events and EPOLLERR) != 0: + rkey.events.incl(Event.Error) + rkey.errorCode = OSErrorCode(ECONNRESET) + + if (event.events and EPOLLHUP) != 0 or (event.events and EPOLLRDHUP) != 0: + rkey.events.incl(Event.Error) + rkey.errorCode = OSErrorCode(ECONNRESET) + + if (event.events and EPOLLOUT) != 0: + rkey.events.incl(Event.Write) + + if (event.events and EPOLLIN) != 0: + if Event.Read in pkey.events: + rkey.events.incl(Event.Read) + + elif Event.Timer in pkey.events: + var data: uint64 + rkey.events.incl(Event.Timer) + let res = handleEintr(osdefs.read(fdi32, addr data, sizeof(uint64))) + if res != sizeof(uint64): + rkey.events.incl(Event.Error) + rkey.errorCode = osLastError() + + elif Event.Signal in pkey.events: + var data: SignalFdInfo + let res = handleEintr(osdefs.read(fdi32, addr data, sizeof(SignalFdInfo))) + if res != sizeof(SignalFdInfo): + # We could not obtain `signal` number so we can't report an error to + # proper handler. + return Opt.none(ReadyKey) + if data.ssi_signo != uint32(SIGCHLD) or len(s.processes) == 0: + let skey = s.signals.getOrDefault(cast[int32](data.ssi_signo), + defaultKey) + if skey.ident == InvalidIdent: + # We do not have any handlers for received event so we can't report + # an error to proper handler. + return Opt.none(ReadyKey) + rkey.events.incl(Event.Signal) + rkey.fd = skey.ident + else: + # Indicate that SIGCHLD has been seen. + s.childrenExited = true + # Current signal processing. + let pidKey = s.processes.getOrDefault(cast[int32](data.ssi_pid), + defaultKey) + if pidKey.ident == InvalidIdent: + # We do not have any handlers with signal's pid. + return Opt.none(ReadyKey) + rkey.events.incl({Event.Process, Event.Oneshot, Event.Finished}) + rkey.fd = pidKey.ident + # Mark process descriptor inside fds table as finished. + var fdKey = s.fds.getOrDefault(int32(pidKey.ident), defaultKey) + if fdKey.ident != InvalidIdent: + fdKey.events.incl(Event.Finished) + s.fds[int32(pidKey.ident)] = fdKey + + elif Event.User in pkey.events: + var data: uint64 + let res = handleEintr(osdefs.read(fdi32, addr data, sizeof(uint64))) + if res != sizeof(uint64): + let errorCode = osLastError() + if errorCode == EAGAIN: + return Opt.none(ReadyKey) else: - # Try reading SO_ERROR from fd. 
- var error: cint - var size = SockLen sizeof(error) - if getsockopt(SocketHandle fdi, SOL_SOCKET, SO_ERROR, addr(error), - addr(size)) == 0'i32: - rkey.errorCode = OSErrorCode error + rkey.events.incl({Event.User, Event.Error}) + rkey.errorCode = errorCode + else: + rkey.events.incl(Event.User) + if Event.Oneshot in rkey.events: + if Event.Timer in rkey.events: + if epoll_ctl(s.epollFd, EPOLL_CTL_DEL, fdi32, nil) != 0: rkey.events.incl(Event.Error) - if (pevents and EPOLLOUT) != 0: - rkey.events.incl(Event.Write) - when not defined(android): - if (pevents and EPOLLIN) != 0: - if Event.Read in pkey.events: - rkey.events.incl(Event.Read) - elif Event.Timer in pkey.events: - var data: uint64 = 0 - if posix.read(cint(fdi), addr data, - sizeof(uint64)) != sizeof(uint64): - raiseIOSelectorsError(osLastError()) - rkey.events.incl(Event.Timer) - elif Event.Signal in pkey.events: - var data = SignalFdInfo() - if posix.read(cint(fdi), addr data, sizeof(SignalFdInfo)) != - sizeof(SignalFdInfo): - raiseIOSelectorsError(osLastError()) - rkey.events.incl(Event.Signal) - elif Event.Process in pkey.events: - var data = SignalFdInfo() - if posix.read(cint(fdi), addr data, sizeof(SignalFdInfo)) != - sizeof(SignalFdInfo): - raiseIOSelectorsError(osLastError()) - if data.ssi_pid == uint32(pkey.param): - rkey.events.incl(Event.Process) - else: - inc(i) - continue - elif Event.User in pkey.events: - var data: uint64 = 0 - if posix.read(cint(fdi), addr data, - sizeof(uint64)) != sizeof(uint64): - let err = osLastError() - if err == OSErrorCode(EAGAIN): - inc(i) - continue - else: - raiseIOSelectorsError(err) - rkey.events.incl(Event.User) + rkey.errorCode = osLastError() + # we are marking key with `Finished` event, to avoid double decrease. + rkey.events.incl(Event.Finished) + pkey.events.incl(Event.Finished) + s.fds[fdi32] = pkey + + ok(rkey) + +proc checkProcesses[T](s: Selector[T]) = + # If SIGCHLD has been seen we need to check all processes we are monitoring + # for completion, because in Linux SIGCHLD could be masked. 
+ # You can get more information in article "Signalfd is useless" - + # https://ldpreload.com/blog/signalfd-is-useless?reposted-on-request + if not(s.childrenExited): + return + + let + defaultKey = SelectorKey[T](ident: InvalidIdent) + flags = WNOHANG or WNOWAIT or WSTOPPED or WEXITED + s.childrenExited = false + for pid, pidKey in s.processes.pairs(): + var fdKey = s.fds.getOrDefault(int32(pidKey.ident), defaultKey) + if fdKey.ident != InvalidIdent: + if Event.Finished notin fdKey.events: + var sigInfo = SigInfo() + let res = handleEintr(osdefs.waitid(P_PID, cast[Id](pid), + sigInfo, flags)) + if (res == 0) and (cint(sigInfo.si_pid) == cint(pid)): + fdKey.events.incl(Event.Finished) + let rkey = ReadyKey(fd: pidKey.ident, events: fdKey.events) + s.pendingEvents.addLast(rkey) + s.fds[int32(pidKey.ident)] = fdKey + +proc selectInto2*[T](s: Selector[T], timeout: int, + readyKeys: var openArray[ReadyKey] + ): SelectResult[int] = + var + queueEvents: array[asyncEventsCount, EpollEvent] + k: int = 0 + + verifySelectParams(timeout, -1, int(high(cint))) + + let + maxEventsCount = min(len(queueEvents), len(readyKeys)) + maxPendingEventsCount = min(maxEventsCount, len(s.pendingEvents)) + maxNewEventsCount = max(maxEventsCount - maxPendingEventsCount, 0) + + let + eventsCount = + if maxNewEventsCount > 0: + let res = handleEintr(epoll_wait(s.epollFd, addr(queueEvents[0]), + cint(maxNewEventsCount), + cint(timeout))) + if res < 0: + return err(osLastError()) + res else: - if (pevents and EPOLLIN) != 0: - if Event.Read in pkey.events: - rkey.events.incl(Event.Read) - elif Event.Timer in pkey.events: - var data: uint64 = 0 - if posix.read(cint(fdi), addr data, - sizeof(uint64)) != sizeof(uint64): - raiseIOSelectorsError(osLastError()) - rkey.events.incl(Event.Timer) - elif Event.User in pkey.events: - var data: uint64 = 0 - if posix.read(cint(fdi), addr data, - sizeof(uint64)) != sizeof(uint64): - let err = osLastError() - if err == OSErrorCode(EAGAIN): - inc(i) - continue - else: - raiseIOSelectorsError(err) - rkey.events.incl(Event.User) - - if Event.Oneshot in pkey.events: - var epv = EpollEvent() - if epoll_ctl(s.epollFD, EPOLL_CTL_DEL, cint(fdi), addr epv) != 0: - raiseIOSelectorsError(osLastError()) - # we will not clear key until it will be unregistered, so - # application can obtain data, but we will decrease counter, - # because epoll is empty. - dec(s.count) - # we are marking key with `Finished` event, to avoid double decrease. - pkey.events.incl(Event.Finished) - - results[k] = rkey - inc(k) - inc(i) - result = k + 0 + + s.childrenExited = false + + for i in 0 ..< eventsCount: + let rkey = s.prepareKey(queueEvents[i]).valueOr: continue + readyKeys[k] = rkey + inc(k) + + s.checkProcesses() + + let pendingEventsCount = min(len(readyKeys) - eventsCount, + len(s.pendingEvents)) + + for i in 0 ..< pendingEventsCount: + readyKeys[k] = s.pendingEvents.popFirst() + inc(k) + + ok(k) + +proc select2*[T](s: Selector[T], timeout: int): SelectResult[seq[ReadyKey]] = + var res = newSeq[ReadyKey](asyncEventsCount) + let count = ? selectInto2(s, timeout, res) + res.setLen(count) + ok(res) + +proc newSelector*[T](): Selector[T] {. + raises: [Defect, OSError].} = + let res = Selector.new(T) + if res.isErr(): raiseOSError(res.error()) + res.get() + +proc close*[T](s: Selector[T]) {. + raises: [Defect, IOSelectorsException].} = + let res = s.close2() + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc newSelectEvent*(): SelectEvent {. 
+ raises: [Defect, IOSelectorsException].} = + let res = SelectEvent.new() + if res.isErr(): raiseIOSelectorsError(res.error()) + res.get() + +proc trigger*(event: SelectEvent) {. + raises: [Defect, IOSelectorsException].} = + let res = event.trigger2() + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc close*(event: SelectEvent) {. + raises: [Defect, IOSelectorsException].} = + let res = event.close2() + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc registerHandle*[T](s: Selector[T], fd: cint | SocketHandle, + events: set[Event], data: T) {. + raises: [Defect, IOSelectorsException].} = + let res = registerHandle2(s, fd, events, data) + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc updateHandle*[T](s: Selector[T], fd: cint | SocketHandle, + events: set[Event]) {. + raises: [Defect, IOSelectorsException].} = + let res = updateHandle2(s, fd, events) + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc unregister*[T](s: Selector[T], fd: cint | SocketHandle) {. + raises: [Defect, IOSelectorsException].} = + let res = unregister2(s, fd) + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc unregister*[T](s: Selector[T], event: SelectEvent) {. + raises: [Defect, IOSelectorsException].} = + let res = unregister2(s, event) + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc registerTimer*[T](s: Selector[T], timeout: int, oneshot: bool, + data: T): cint {. + discardable, raises: [Defect, IOSelectorsException].} = + let res = registerTimer2(s, timeout, oneshot, data) + if res.isErr(): raiseIOSelectorsError(res.error()) + res.get() + +proc registerEvent*[T](s: Selector[T], event: SelectEvent, + data: T) {. + raises: [Defect, IOSelectorsException].} = + let res = registerEvent2(s, event, data) + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc selectInto*[T](s: Selector[T], timeout: int, + readyKeys: var openArray[ReadyKey]): int {. 
+ raises: [Defect, IOSelectorsException].} = + let res = selectInto2(s, timeout, readyKeys) + if res.isErr(): raiseIOSelectorsError(res.error()) + res.get() proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] = - result = newSeq[ReadyKey](MAX_EPOLL_EVENTS) - let count = selectInto(s, timeout, result) - result.setLen(count) - -template isEmpty*[T](s: Selector[T]): bool = - (s.count == 0) - -proc contains*[T](s: Selector[T], fd: SocketHandle|int): bool {.inline.} = - let fdi = int(fd) - fdi < s.numFD and s.fds[fdi].ident != InvalidIdent - -proc setData*[T](s: Selector[T], fd: SocketHandle|int, data: T): bool = - let fdi = int(fd) - s.checkFd(fdi) - if fdi in s: - s.fds[fdi].data = data - result = true - -template withData*[T](s: Selector[T], fd: SocketHandle|int, value, - body: untyped) = - mixin checkFd - let fdi = int(fd) - if fdi in s: - var value = addr(s.fds[fdi].data) + let res = select2(s, timeout) + if res.isErr(): raiseIOSelectorsError(res.error()) + res.get() + +proc contains*[T](s: Selector[T], fd: SocketHandle|cint): bool {.inline.} = + s.checkKey(int32(fd)) + +proc setData*[T](s: Selector[T], fd: SocketHandle|cint, data: T): bool = + s.fds.withValue(int32(fd), skey): + skey[].data = data + return true + do: + return false + +template withData*[T](s: Selector[T], fd: SocketHandle|cint, value, + body: untyped) = + s.fds.withValue(int32(fd), skey): + var value = addr(skey[].data) body -template withData*[T](s: Selector[T], fd: SocketHandle|int, value, body1, - body2: untyped) = - let fdi = int(fd) - if fdi in s: - var value = addr(s.fds[fdi].data) +template withData*[T](s: Selector[T], fd: SocketHandle|cint, value, body1, + body2: untyped) = + s.fds.withValue(int32(fd), skey): + var value = addr(skey[].data) body1 - else: + do: body2 -proc getFd*[T](s: Selector[T]): int = - return s.epollFd.int +proc getFd*[T](s: Selector[T]): cint = s.epollFd diff --git a/chronos/ioselects/ioselectors_kqueue.nim b/chronos/ioselects/ioselectors_kqueue.nim index e346f8220..4ff746e58 100644 --- a/chronos/ioselects/ioselectors_kqueue.nim +++ b/chronos/ioselects/ioselectors_kqueue.nim @@ -6,66 +6,27 @@ # See the file "copying.txt", included in this # distribution, for details about the copyright. # - # This module implements BSD kqueue(). -import posix, times, kqueue +{.push raises: [Defect].} +import std/[kqueue, deques, tables] +import stew/base10 const - # Maximum number of events that can be returned. - MAX_KQUEUE_EVENTS = 64 # SIG_IGN and SIG_DFL declared in posix.nim as variables, but we need them # to be constants and GC-safe. 
- SIG_DFL = cast[proc(x: cint) {.raises: [],noconv,gcsafe.}](0) - SIG_IGN = cast[proc(x: cint) {.raises: [],noconv,gcsafe.}](1) + SIG_DFL = cast[proc(x: cint) {.raises: [], noconv, gcsafe.}](0) + SIG_IGN = cast[proc(x: cint) {.raises: [], noconv, gcsafe.}](1) -when defined(kqcache): - const CACHE_EVENTS = true +type + SelectorImpl[T] = object + kqFd: cint + fds: Table[int32, SelectorKey[T]] + virtualHoles: Deque[int32] + virtualId: int32 -when defined(macosx) or defined(freebsd) or defined(dragonfly): - when defined(macosx): - const MAX_DESCRIPTORS_ID = 29 # KERN_MAXFILESPERPROC (MacOS) - else: - const MAX_DESCRIPTORS_ID = 27 # KERN_MAXFILESPERPROC (FreeBSD) - proc sysctl(name: ptr cint, namelen: cuint, oldp: pointer, oldplen: ptr csize_t, - newp: pointer, newplen: csize_t): cint - {.importc: "sysctl",header: """#include - #include """} -elif defined(netbsd) or defined(openbsd): - # OpenBSD and NetBSD don't have KERN_MAXFILESPERPROC, so we are using - # KERN_MAXFILES, because KERN_MAXFILES is always bigger, - # than KERN_MAXFILESPERPROC. - const MAX_DESCRIPTORS_ID = 7 # KERN_MAXFILES - proc sysctl(name: ptr cint, namelen: cuint, oldp: pointer, oldplen: ptr csize_t, - newp: pointer, newplen: csize_t): cint - {.importc: "sysctl",header: """#include - #include """} - -when hasThreadSupport: - type - SelectorImpl[T] = object - kqFD: cint - maxFD: int - changes: ptr SharedArray[KEvent] - fds: ptr SharedArray[SelectorKey[T]] - count: int - changesLock: Lock - changesSize: int - changesLength: int - sock: cint - Selector*[T] = ptr SelectorImpl[T] -else: - type - SelectorImpl[T] = object - kqFD: cint - maxFD: int - changes: seq[KEvent] - fds: seq[SelectorKey[T]] - count: int - sock: cint - Selector*[T] = ref SelectorImpl[T] + Selector*[T] = ref SelectorImpl[T] -type SelectEventImpl = object rfd: cint wfd: cint @@ -74,272 +35,338 @@ type # SelectEvent is declared as `ptr` to be placed in `shared memory`, # so you can share one SelectEvent handle between threads. -proc getUnique[T](s: Selector[T]): int {.inline.} = - # we create duplicated handles to get unique indexes for our `fds` array. - result = posix.fcntl(s.sock, F_DUPFD, s.sock) - if result == -1: - raiseIOSelectorsError(osLastError()) - -proc newSelector*[T](): owned(Selector[T]) = - var maxFD = 0.cint - var size = csize_t(sizeof(cint)) - var namearr = [1.cint, MAX_DESCRIPTORS_ID.cint] - # Obtain maximum number of opened file descriptors for process - if sysctl(addr(namearr[0]), 2, cast[pointer](addr maxFD), addr size, - nil, 0) != 0: - raiseIOSelectorsError(osLastError()) - - var kqFD = kqueue() - if kqFD < 0: - raiseIOSelectorsError(osLastError()) - - # we allocating empty socket to duplicate it handle in future, to get unique - # indexes for `fds` array. This is needed to properly identify - # {Event.Timer, Event.Signal, Event.Process} events. 
- let usock = posix.socket(posix.AF_INET, posix.SOCK_STREAM, - posix.IPPROTO_TCP).cint - if usock == -1: - let err = osLastError() - discard posix.close(kqFD) - raiseIOSelectorsError(err) - - when hasThreadSupport: - result = cast[Selector[T]](allocShared0(sizeof(SelectorImpl[T]))) - result.fds = allocSharedArray[SelectorKey[T]](maxFD) - result.changes = allocSharedArray[KEvent](MAX_KQUEUE_EVENTS) - result.changesSize = MAX_KQUEUE_EVENTS - initLock(result.changesLock) +proc getVirtualId[T](s: Selector[T]): SelectResult[int32] = + if len(s.virtualHoles) > 0: + ok(s.virtualHoles.popLast()) + else: + if s.virtualId == low(int32): + err(OSErrorCode(EMFILE)) + else: + dec(s.virtualId) + ok(s.virtualId) + +proc isVirtualId(ident: int32): bool = + ident < 0'i32 + +proc toString(key: int32|cint|SocketHandle|int): string = + let fdi32 = when key is int32: key else: int32(key) + if isVirtualId(fdi32): + if fdi32 == -1: + "InvalidIdent" + else: + "V" & Base10.toString(uint32(-fdi32)) else: - result = Selector[T]() - result.fds = newSeq[SelectorKey[T]](maxFD) - result.changes = newSeqOfCap[KEvent](MAX_KQUEUE_EVENTS) - - for i in 0 ..< maxFD: - result.fds[i].ident = InvalidIdent - - result.sock = usock - result.kqFD = kqFD - result.maxFD = maxFD.int - -proc close*[T](s: Selector[T]) = - let res1 = posix.close(s.kqFD) - let res2 = posix.close(s.sock) - when hasThreadSupport: - deinitLock(s.changesLock) - deallocSharedArray(s.fds) - deallocShared(cast[pointer](s)) - if res1 != 0 or res2 != 0: - raiseIOSelectorsError(osLastError()) - -proc newSelectEvent*(): SelectEvent = + Base10.toString(uint32(fdi32)) + +template addKey[T](s: Selector[T], key: int32, skey: SelectorKey[T]) = + if s.fds.hasKeyOrPut(key, skey): + raiseAssert "Descriptor [" & key.toString() & + "] is already registered in the selector!" 
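# The negative "virtual" descriptors handed out above (for timers, signals and
# processes, which have no kernel file descriptor of their own on kqueue) are
# recycled through a deque of holes. A minimal stand-alone sketch of that
# allocation scheme; the names below are local to the example, not part of the
# selector itself:
import std/deques

type VirtualIds = object
  nextId: int32        # counts down from -1; -1 is reserved for InvalidIdent
  holes: Deque[int32]  # previously freed ids, reused first

proc acquire(v: var VirtualIds): int32 =
  if v.holes.len > 0:
    result = v.holes.popLast()
  else:
    dec(v.nextId)
    result = v.nextId

proc release(v: var VirtualIds, id: int32) =
  v.holes.addFirst(id)

var ids = VirtualIds(nextId: -1'i32, holes: initDeque[int32]())
let first = ids.acquire()          # -2, like the first registered timer/signal
doAssert ids.acquire() == -3'i32   # next registration gets the next id down
ids.release(first)
doAssert ids.acquire() == first    # a freed id is handed out again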
+ +template getKey[T](s: Selector[T], key: int32): SelectorKey[T] = + let + defaultKey = SelectorKey[T](ident: InvalidIdent) + pkey = s.fds.getOrDefault(key, defaultKey) + doAssert(pkey.ident != InvalidIdent, "Descriptor [" & key.toString() & + "] is not registered in the selector!") + pkey + +template checkKey[T](s: Selector[T], key: int32): bool = + s.fds.contains(key) + +proc freeKey[T](s: Selector[T], key: int32) = + s.fds.del(key) + if isVirtualId(key): + s.virtualHoles.addFirst(key) + +template getIdent(event: KEvent): int32 = + doAssert(event.ident <= uint(high(uint32)), + "Invalid event ident value [" & Base10.toString(event.ident) & + "] in the kqueue event object") + cast[int32](uint32(event.ident)) + +template getUdata(event: KEvent): int32 = + let udata = cast[uint](event.udata) + doAssert(event.ident <= uint(high(uint32)), + "Invalid event udata value [" & Base10.toString(udata) & + "] in the kqueue event object with ident [" & + Base10.toString(event.ident) & "]") + cast[int32](uint32(udata)) + +proc new*(t: typedesc[Selector], T: typedesc): SelectResult[Selector[T]] = + let kqFd = + block: + let res = handleEintr(kqueue()) + if res == -1: + return err(osLastError()) + cint(res) + + let selector = Selector[T]( + kqFd: kqFd, + fds: initTable[int32, SelectorKey[T]](asyncInitialSize), + virtualId: -1'i32, # Should start with -1, because `InvalidIdent` == -1 + virtualHoles: initDeque[int32]() + ) + ok(selector) + +proc close2*[T](s: Selector[T]): SelectResult[void] = + s.fds.clear() + s.virtualHoles.clear() + s.virtualId = -1'i32 + if handleEintr(osdefs.close(s.kqFd)) != 0: + err(osLastError()) + else: + ok() + +proc new*(t: typedesc[SelectEvent]): SelectResult[SelectEvent] = var fds: array[2, cint] - if posix.pipe(fds) != 0: - raiseIOSelectorsError(osLastError()) - setNonBlocking(fds[0]) - setNonBlocking(fds[1]) - result = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl))) - result.rfd = fds[0] - result.wfd = fds[1] - -proc trigger*(ev: SelectEvent) = + when declared(pipe2): + if osdefs.pipe2(fds, osdefs.O_NONBLOCK or osdefs.O_CLOEXEC) == -1: + return err(osLastError()) + + var res = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl))) + res.rfd = fds[0] + res.wfd = fds[1] + ok(res) + else: + if osdefs.pipe(fds) == -1: + return err(osLastError()) + + let res1 = setDescriptorFlags(fds[0], true, true) + if res1.isErr(): + discard closeFd(fds[0]) + discard closeFd(fds[1]) + return err(res1.error()) + let res2 = setDescriptorFlags(fds[1], true, true) + if res2.isErr(): + discard closeFd(fds[0]) + discard closeFd(fds[1]) + return err(res2.error()) + + var res = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl))) + res.rfd = fds[0] + res.wfd = fds[1] + ok(res) + +proc trigger2*(event: SelectEvent): SelectResult[void] = var data: uint64 = 1 - if posix.write(ev.wfd, addr data, sizeof(uint64)) != sizeof(uint64): - raiseIOSelectorsError(osLastError()) + let res = handleEintr(osdefs.write(event.wfd, addr data, sizeof(uint64))) + if res == -1: + err(osLastError()) + elif res != sizeof(uint64): + err(OSErrorCode(osdefs.EINVAL)) + else: + ok() + +proc close2*(ev: SelectEvent): SelectResult[void] = + let + rfd = ev.rfd + wfd = ev.wfd -proc close*(ev: SelectEvent) = - let res1 = posix.close(ev.rfd) - let res2 = posix.close(ev.wfd) deallocShared(cast[pointer](ev)) - if res1 != 0 or res2 != 0: - raiseIOSelectorsError(osLastError()) - -template checkFd(s, f) = - if f >= s.maxFD: - raiseIOSelectorsError("Maximum number of descriptors is exhausted!") - -when hasThreadSupport: - 
template withChangeLock[T](s: Selector[T], body: untyped) = - acquire(s.changesLock) - {.locks: [s.changesLock].}: - try: - body - finally: - release(s.changesLock) -else: - template withChangeLock(s, body: untyped) = - body -when hasThreadSupport: - template modifyKQueue[T](s: Selector[T], nident: uint, nfilter: cshort, - nflags: cushort, nfflags: cuint, ndata: int, - nudata: pointer) = - mixin withChangeLock - s.withChangeLock(): - if s.changesLength == s.changesSize: - # if cache array is full, we allocating new with size * 2 - let newSize = s.changesSize shl 1 - let rdata = allocSharedArray[KEvent](newSize) - copyMem(rdata, s.changes, s.changesSize * sizeof(KEvent)) - s.changesSize = newSize - s.changes[s.changesLength] = KEvent(ident: nident, - filter: nfilter, flags: nflags, - fflags: nfflags, data: ndata, - udata: nudata) - inc(s.changesLength) - - when not declared(CACHE_EVENTS): - template flushKQueue[T](s: Selector[T]) = - mixin withChangeLock - s.withChangeLock(): - if s.changesLength > 0: - if kevent(s.kqFD, addr(s.changes[0]), cint(s.changesLength), - nil, 0, nil) == -1: - raiseIOSelectorsError(osLastError()) - s.changesLength = 0 -else: - template modifyKQueue[T](s: Selector[T], nident: uint, nfilter: cshort, - nflags: cushort, nfflags: cuint, ndata: int, - nudata: pointer) = - s.changes.add(KEvent(ident: nident, - filter: nfilter, flags: nflags, - fflags: nfflags, data: ndata, - udata: nudata)) - - when not declared(CACHE_EVENTS): - template flushKQueue[T](s: Selector[T]) = - let length = cint(len(s.changes)) - if length > 0: - if kevent(s.kqFD, addr(s.changes[0]), length, - nil, 0, nil) == -1: - raiseIOSelectorsError(osLastError()) - s.changes.setLen(0) - -proc registerHandle*[T](s: Selector[T], fd: int | SocketHandle, - events: set[Event], data: T) = - let fdi = int(fd) - s.checkFd(fdi) - doAssert(s.fds[fdi].ident == InvalidIdent) - s.setKey(fdi, events, 0, data) + if closeFd(rfd) != 0: + let errorCode = osLastError() + discard closeFd(wfd) + err(errorCode) + else: + if closeFd(wfd) != 0: + err(osLastError()) + else: + ok() + +template modifyKQueue(changes: var openArray[KEvent], index: int, nident: uint, + nfilter: cshort, nflags: cushort, nfflags: cuint, + ndata: int, nudata: pointer) = + changes[index] = KEvent(ident: nident, filter: nfilter, flags: nflags, + fflags: nfflags, data: ndata, udata: nudata) + +proc registerHandle2*[T](s: Selector[T], fd: cint, events: set[Event], + data: T): SelectResult[void] = + let selectorKey = SelectorKey[T](ident: fd, events: events, + param: 0, data: data) + s.addKey(fd, selectorKey) if events != {}: + var + changes: array[2, KEvent] + k = 0 if Event.Read in events: - modifyKQueue(s, uint(fdi), EVFILT_READ, EV_ADD, 0, 0, nil) - inc(s.count) + changes.modifyKQueue(k, uint(uint32(fd)), EVFILT_READ, EV_ADD, 0, 0, nil) + inc(k) if Event.Write in events: - modifyKQueue(s, uint(fdi), EVFILT_WRITE, EV_ADD, 0, 0, nil) - inc(s.count) - - when not declared(CACHE_EVENTS): - flushKQueue(s) - -proc updateHandle*[T](s: Selector[T], fd: int | SocketHandle, - events: set[Event]) = - let maskEvents = {Event.Timer, Event.Signal, Event.Process, Event.Vnode, + changes.modifyKQueue(k, uint(uint32(fd)), EVFILT_WRITE, EV_ADD, 0, 0, nil) + inc(k) + if k > 0: + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(k), nil, + 0, nil)) == -1: + s.freeKey(fd) + return err(osLastError()) + ok() + +proc updateHandle2*[T](s: Selector[T], fd: cint, + events: set[Event]): SelectResult[void] = + let EventsMask = {Event.Timer, Event.Signal, Event.Process, Event.Vnode, 
Event.User, Event.Oneshot, Event.Error} - let fdi = int(fd) - s.checkFd(fdi) - var pkey = addr(s.fds[fdi]) - doAssert(pkey.ident != InvalidIdent, - "Descriptor $# is not registered in the queue!" % $fdi) - doAssert(pkey.events * maskEvents == {}) - - if pkey.events != events: - if (Event.Read in pkey.events) and (Event.Read notin events): - modifyKQueue(s, fdi.uint, EVFILT_READ, EV_DELETE, 0, 0, nil) - dec(s.count) - if (Event.Write in pkey.events) and (Event.Write notin events): - modifyKQueue(s, fdi.uint, EVFILT_WRITE, EV_DELETE, 0, 0, nil) - dec(s.count) - if (Event.Read notin pkey.events) and (Event.Read in events): - modifyKQueue(s, fdi.uint, EVFILT_READ, EV_ADD, 0, 0, nil) - inc(s.count) - if (Event.Write notin pkey.events) and (Event.Write in events): - modifyKQueue(s, fdi.uint, EVFILT_WRITE, EV_ADD, 0, 0, nil) - inc(s.count) - - when not declared(CACHE_EVENTS): - flushKQueue(s) - - pkey.events = events + s.fds.withValue(int32(fd), pkey): + doAssert(pkey[].events * EventsMask == {}, + "Descriptor [" & fd.toString() & "] could not be updated!") + if pkey.events != events: + var + changes: array[4, KEvent] + k = 0 + if (Event.Read in pkey[].events) and (Event.Read notin events): + changes.modifyKQueue(k, uint(uint32(fd)), EVFILT_READ, EV_DELETE, + 0, 0, nil) + inc(k) + if (Event.Write in pkey[].events) and (Event.Write notin events): + changes.modifyKQueue(k, uint(uint32(fd)), EVFILT_WRITE, EV_DELETE, + 0, 0, nil) + inc(k) + if (Event.Read notin pkey[].events) and (Event.Read in events): + changes.modifyKQueue(k, uint(uint32(fd)), EVFILT_READ, EV_ADD, + 0, 0, nil) + inc(k) + if (Event.Write notin pkey[].events) and (Event.Write in events): + changes.modifyKQueue(k, uint(uint32(fd)), EVFILT_WRITE, EV_ADD, + 0, 0, nil) + inc(k) + if k > 0: + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(k), nil, + 0, nil)) == -1: + return err(osLastError()) + pkey[].events = events + do: + raiseAssert "Descriptor [" & fd.toString() & + "] is not registered in the selector!" + ok() proc registerTimer*[T](s: Selector[T], timeout: int, oneshot: bool, - data: T): int {.discardable.} = - let fdi = getUnique(s) - s.checkFd(fdi) - doAssert(s.fds[fdi].ident == InvalidIdent) - - let events = if oneshot: {Event.Timer, Event.Oneshot} else: {Event.Timer} - let flags: cushort = if oneshot: EV_ONESHOT or EV_ADD else: EV_ADD - - s.setKey(fdi, events, 0, data) + data: T): SelectResult[cint] = + let + fdi32 = ? s.getVirtualId() + events = if oneshot: {Event.Timer, Event.Oneshot} else: {Event.Timer} + flags: cushort = if oneshot: EV_ONESHOT or EV_ADD else: EV_ADD + selectorKey = SelectorKey[T](ident: fdi32, events: events, param: timeout, + data: data) + var changes: array[1, KEvent] + s.addKey(fdi32, selectorKey) # EVFILT_TIMER on Open/Net(BSD) has granularity of only milliseconds, # but MacOS and FreeBSD allow use `0` as `fflags` to use milliseconds # too - modifyKQueue(s, fdi.uint, EVFILT_TIMER, flags, 0, cint(timeout), nil) - - when not declared(CACHE_EVENTS): - flushKQueue(s) - - inc(s.count) - result = fdi + changes.modifyKQueue(0, uint(uint32(fdi32)), EVFILT_TIMER, flags, 0, + cint(timeout), nil) + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, 0, nil)) == -1: + s.freeKey(fdi32) + return err(osLastError()) + + ok(cint(fdi32)) + +proc blockSignal(signal: int): SelectResult[void] = + var omask, nmask: Sigset + if sigemptyset(nmask) < 0: + return err(osLastError()) + if sigemptyset(omask) < 0: + return err(osLastError()) + if sigaddset(nmask, cint(signal)) < 0: + return err(osLastError()) + ? 
blockSignals(nmask, omask) + ok() + +proc unblockSignal(signal: int): SelectResult[void] = + var omask, nmask: Sigset + if sigemptyset(nmask) < 0: + return err(osLastError()) + if sigemptyset(omask) < 0: + return err(osLastError()) + if sigaddset(nmask, cint(signal)) < 0: + return err(osLastError()) + ? unblockSignals(nmask, omask) + ok() + +template checkSignal(signal: int) = + doAssert((signal >= 0) and (signal <= int(high(int32))), + "Invalid signal value [" & $signal & "]") proc registerSignal*[T](s: Selector[T], signal: int, - data: T): int {.discardable.} = - let fdi = getUnique(s) - s.checkFd(fdi) - doAssert(s.fds[fdi].ident == InvalidIdent) - - s.setKey(fdi, {Event.Signal}, signal, data) - var nmask, omask: Sigset - discard sigemptyset(nmask) - discard sigemptyset(omask) - discard sigaddset(nmask, cint(signal)) - blockSignals(nmask, omask) - # to be compatible with linux semantic we need to "eat" signals - posix.signal(cint(signal), SIG_IGN) - - modifyKQueue(s, signal.uint, EVFILT_SIGNAL, EV_ADD, 0, 0, - cast[pointer](fdi)) - - when not declared(CACHE_EVENTS): - flushKQueue(s) - - inc(s.count) - result = fdi + data: T): SelectResult[cint] = + checkSignal(signal) + + let + fdi32 = ? s.getVirtualId() + events = {Event.Signal} + selectorKey = SelectorKey[T](ident: fdi32, events: events, + param: signal, data: data) + + var changes: array[1, KEvent] + s.addKey(fdi32, selectorKey) + + let res = blockSignal(signal) + if res.isErr(): + s.freeKey(fdi32) + return err(res.error()) + + # To be compatible with linux semantic we need to "eat" signals + signal(cint(signal), SIG_IGN) + changes.modifyKQueue(0, uint(signal), EVFILT_SIGNAL, EV_ADD, 0, 0, + cast[pointer](uint32(fdi32))) + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, 0, nil)) == -1: + let errorCode = osLastError() + s.freeKey(fdi32) + discard unblockSignal(signal) + return err(errorCode) + + ok(cint(fdi32)) + +template checkPid(pid: int) = + when sizeof(int) == 8: + doAssert(pid >= 0 and pid <= int(high(uint32)), + "Invalid process idientified (pid) value") + else: + doAssert(pid >= 0 and pid <= high(int32), + "Invalid process idientified (pid) value") proc registerProcess*[T](s: Selector[T], pid: int, - data: T): int {.discardable.} = - let fdi = getUnique(s) - s.checkFd(fdi) - doAssert(s.fds[fdi].ident == InvalidIdent) - - var kflags: cushort = EV_ONESHOT or EV_ADD - setKey(s, fdi, {Event.Process, Event.Oneshot}, pid, data) - - modifyKQueue(s, pid.uint, EVFILT_PROC, kflags, NOTE_EXIT, 0, - cast[pointer](fdi)) - - when not declared(CACHE_EVENTS): - flushKQueue(s) - - inc(s.count) - result = fdi - -proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) = - let fdi = ev.rfd.int - doAssert(s.fds[fdi].ident == InvalidIdent, "Event is already registered in the queue!") - setKey(s, fdi, {Event.User}, 0, data) - - modifyKQueue(s, fdi.uint, EVFILT_READ, EV_ADD, 0, 0, nil) - - when not declared(CACHE_EVENTS): - flushKQueue(s) - - inc(s.count) + data: T): SelectResult[cint] = + checkPid(pid) + + let + fdi32 = ? 
s.getVirtualId() + events = {Event.Process, Event.Oneshot} + flags: cushort = EV_ONESHOT or EV_ADD + selectorKey = SelectorKey[T](ident: fdi32, events: events, + param: pid, data: data) + var changes: array[1, KEvent] + s.addKey(fdi32, selectorKey) + + changes.modifyKQueue(0, uint(uint32(pid)), EVFILT_PROC, flags, NOTE_EXIT, + 0, cast[pointer](uint32(fdi32))) + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, 0, nil)) == -1: + s.freeKey(fdi32) + return err(osLastError()) + + ok(cint(fdi32)) + +proc registerEvent2*[T](s: Selector[T], ev: SelectEvent, + data: T): SelectResult[cint] = + doAssert(not(isNil(ev))) + let + selectorKey = SelectorKey[T](ident: ev.rfd, events: {Event.User}, + param: 0, data: data) + + var changes: array[1, KEvent] + s.addKey(ev.rfd, selectorKey) + + changes.modifyKQueue(0, uint(uint32(ev.rfd)), EVFILT_READ, EV_ADD, 0, 0, nil) + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, 0, nil)) == -1: + s.freeKey(ev.rfd) + return err(osLastError()) + + ok(ev.rfd) template processVnodeEvents(events: set[Event]): cuint = - var rfflags = 0.cuint + var rfflags = cuint(0) if events == {Event.VnodeWrite, Event.VnodeDelete, Event.VnodeExtend, Event.VnodeAttrib, Event.VnodeLink, Event.VnodeRename, Event.VnodeRevoke}: @@ -355,271 +382,329 @@ template processVnodeEvents(events: set[Event]): cuint = if Event.VnodeRevoke in events: rfflags = rfflags or NOTE_REVOKE rfflags -proc registerVnode*[T](s: Selector[T], fd: cint, events: set[Event], data: T) = - let fdi = fd.int - setKey(s, fdi, {Event.Vnode} + events, 0, data) - var fflags = processVnodeEvents(events) +proc registerVnode2*[T](s: Selector[T], fd: cint, events: set[Event], + data: T): SelectResult[cint] = + let + events = {Event.Vnode} + events + fflags = processVnodeEvents(events) + selectorKey = SelectorKey[T](ident: fd, events: events, + param: 0, data: data) + + var changes: array[1, KEvent] + s.addKey(fd, selectorKey) - modifyKQueue(s, fdi.uint, EVFILT_VNODE, EV_ADD or EV_CLEAR, fflags, 0, nil) + changes.modifyKQueue(0, uint(uint32(fd)), EVFILT_VNODE, EV_ADD or EV_CLEAR, + fflags, 0, nil) + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, 0, nil)) == -1: + s.freeKey(fd) + return err(osLastError()) - when not declared(CACHE_EVENTS): - flushKQueue(s) + ok(fd) - inc(s.count) +proc unregister2*[T](s: Selector[T], fd: cint): SelectResult[void] = + let + fdi32 = int32(fd) + pkey = s.getKey(fdi32) -proc unregister*[T](s: Selector[T], fd: int|SocketHandle) = - let fdi = int(fd) - s.checkFd(fdi) - var pkey = addr(s.fds[fdi]) - doAssert(pkey.ident != InvalidIdent, - "Descriptor [" & $fdi & "] is not registered in the queue!") + var changes: array[2, KEvent] + var k = 0 if pkey.events != {}: if pkey.events * {Event.Read, Event.Write} != {}: if Event.Read in pkey.events: - modifyKQueue(s, uint(fdi), EVFILT_READ, EV_DELETE, 0, 0, nil) - dec(s.count) + changes.modifyKQueue(k, uint(uint32(fdi32)), EVFILT_READ, EV_DELETE, + 0, 0, nil) + inc(k) if Event.Write in pkey.events: - modifyKQueue(s, uint(fdi), EVFILT_WRITE, EV_DELETE, 0, 0, nil) - dec(s.count) - when not declared(CACHE_EVENTS): - flushKQueue(s) + changes.modifyKQueue(k, uint(uint32(fdi32)), EVFILT_WRITE, EV_DELETE, + 0, 0, nil) + inc(k) + if k > 0: + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(k), nil, + 0, nil)) == -1: + return err(osLastError()) + elif Event.Timer in pkey.events: if Event.Finished notin pkey.events: - modifyKQueue(s, uint(fdi), EVFILT_TIMER, EV_DELETE, 0, 0, nil) - when not declared(CACHE_EVENTS): - flushKQueue(s) - 
dec(s.count) - if posix.close(cint(pkey.ident)) != 0: - raiseIOSelectorsError(osLastError()) + changes.modifyKQueue(0, uint(uint32(fdi32)), EVFILT_TIMER, EV_DELETE, + 0, 0, nil) + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, + 0, nil)) == -1: + return err(osLastError()) + elif Event.Signal in pkey.events: - var nmask, omask: Sigset - let signal = cint(pkey.param) - discard sigemptyset(nmask) - discard sigemptyset(omask) - discard sigaddset(nmask, signal) - unblockSignals(nmask, omask) - posix.signal(signal, SIG_DFL) - modifyKQueue(s, uint(pkey.param), EVFILT_SIGNAL, EV_DELETE, 0, 0, nil) - when not declared(CACHE_EVENTS): - flushKQueue(s) - dec(s.count) - if posix.close(cint(pkey.ident)) != 0: - raiseIOSelectorsError(osLastError()) + let sig = cint(pkey.param) + osdefs.signal(sig, SIG_DFL) + changes.modifyKQueue(0, uint(uint32(pkey.param)), EVFILT_SIGNAL, + EV_DELETE, 0, 0, nil) + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, + 0, nil)) == -1: + discard unblockSignal(sig) + return err(osLastError()) + + ? unblockSignal(sig) + elif Event.Process in pkey.events: if Event.Finished notin pkey.events: - modifyKQueue(s, uint(pkey.param), EVFILT_PROC, EV_DELETE, 0, 0, nil) - when not declared(CACHE_EVENTS): - flushKQueue(s) - dec(s.count) - if posix.close(cint(pkey.ident)) != 0: - raiseIOSelectorsError(osLastError()) + changes.modifyKQueue(0, uint(uint32(pkey.param)), EVFILT_PROC, + EV_DELETE, 0, 0, nil) + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, + 0, nil)) == -1: + return err(osLastError()) + elif Event.Vnode in pkey.events: - modifyKQueue(s, uint(fdi), EVFILT_VNODE, EV_DELETE, 0, 0, nil) - when not declared(CACHE_EVENTS): - flushKQueue(s) - dec(s.count) + changes.modifyKQueue(0, uint(uint32(fdi32)), EVFILT_VNODE, EV_DELETE, + 0, 0, nil) + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, + 0, nil)) == -1: + return err(osLastError()) + elif Event.User in pkey.events: - modifyKQueue(s, uint(fdi), EVFILT_READ, EV_DELETE, 0, 0, nil) - when not declared(CACHE_EVENTS): - flushKQueue(s) - dec(s.count) - - clearKey(pkey) - -proc unregister*[T](s: Selector[T], ev: SelectEvent) = - let fdi = int(ev.rfd) - s.checkFd(fdi) - var pkey = addr(s.fds[fdi]) - doAssert(pkey.ident != InvalidIdent, "Event is not registered in the queue!") - doAssert(Event.User in pkey.events) - modifyKQueue(s, uint(fdi), EVFILT_READ, EV_DELETE, 0, 0, nil) - when not declared(CACHE_EVENTS): - flushKQueue(s) - clearKey(pkey) - dec(s.count) + changes.modifyKQueue(0, uint(uint32(fdi32)), EVFILT_READ, EV_DELETE, + 0, 0, nil) + if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, + 0, nil)) == -1: + return err(osLastError()) + + s.freeKey(fdi32) + ok() + +proc unregister2*[T](s: Selector[T], event: SelectEvent): SelectResult[void] = + s.unregister2(event.rfd) + +proc prepareKey[T](s: Selector[T], event: KEvent): Opt[ReadyKey] = + let fdi32 = event.getIdent() + + var rkey = ReadyKey(fd: fdi32, events: {}) + var pkey = + case event.filter: + of EVFILT_READ, EVFILT_WRITE, EVFILT_TIMER, EVFILT_VNODE: + s.getKey(fdi32) + of EVFILT_SIGNAL, EVFILT_PROC: + let virtualFd = event.getUdata() + s.getKey(virtualFd) + else: + raiseAssert "Unsupported kqueue filter [" & $event.filter & "] reported!" 
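# (Illustration) The EVFILT_SIGNAL and EVFILT_PROC branches above look the key
# up through `udata`, where registerSignal/registerProcess stored the virtual
# descriptor, while the other filters use `ident` directly. A stand-alone
# sketch of that udata round-trip, mirroring the casts used by registerSignal
# and getUdata; the names below are local to the example:
block:
  let virtualFd = -2'i32
  let stored = cast[pointer](uint32(virtualFd))            # placed in KEvent.udata
  let recovered = cast[int32](uint32(cast[uint](stored)))  # read back in prepareKey
  doAssert recovered == virtualFd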
+ + case event.filter + of EVFILT_READ: + if (event.flags and EV_EOF) != 0: + rkey.events.incl(Event.Error) + rkey.errorCode = OSErrorCode(ECONNRESET) + + if Event.User in pkey.events: + var data: uint64 = 0 + if handleEintr(osdefs.read(cint(event.ident), addr data, + sizeof(uint64))) != sizeof(uint64): + let errorCode = osLastError() + if errorCode == EAGAIN: + # Someone already consumed event data + return Opt.none(ReadyKey) + else: + rkey.events.incl(Event.Error) + rkey.errorCode = errorCode + rkey.events.incl(Event.User) + else: + rkey.events.incl(Event.Read) + + of EVFILT_WRITE: + if (event.flags and EV_EOF) != 0: + rkey.events.incl(Event.Error) + rkey.errorCode = OSErrorCode(ECONNRESET) + + rkey.events.incl(Event.Write) + + of EVFILT_TIMER: + rkey.events.incl(Event.Timer) + if Event.Oneshot in pkey.events: + # we are marking key with `Finished` event, to avoid double decrease. + pkey.events.incl(Event.Finished) + rkey.events.incl({Event.Oneshot, Event.Finished}) + s.fds[fdi32] = pkey + + of EVFILT_VNODE: + rkey.events.incl(Event.Vnode) + if (event.fflags and NOTE_DELETE) != 0: rkey.events.incl(Event.VnodeDelete) + if (event.fflags and NOTE_WRITE) != 0: rkey.events.incl(Event.VnodeWrite) + if (event.fflags and NOTE_EXTEND) != 0: rkey.events.incl(Event.VnodeExtend) + if (event.fflags and NOTE_ATTRIB) != 0: rkey.events.incl(Event.VnodeAttrib) + if (event.fflags and NOTE_LINK) != 0: rkey.events.incl(Event.VnodeLink) + if (event.fflags and NOTE_RENAME) != 0: rkey.events.incl(Event.VnodeRename) + if (event.fflags and NOTE_REVOKE) != 0: rkey.events.incl(Event.VnodeRevoke) + + of EVFILT_SIGNAL: + rkey.events.incl(Event.Signal) + rkey.fd = pkey.ident + + of EVFILT_PROC: + rkey.events.incl({Event.Process, Event.Oneshot, Event.Finished}) + rkey.fd = pkey.ident + pkey.events.incl(Event.Finished) + s.fds[int32(pkey.ident)] = pkey -proc selectInto*[T](s: Selector[T], timeout: int, - results: var openArray[ReadyKey]): int = - var - tv: Timespec - resTable: array[MAX_KQUEUE_EVENTS, KEvent] - ptv = addr tv - maxres = MAX_KQUEUE_EVENTS + else: + raiseAssert "Unsupported kqueue filter [" & $event.filter & "] reported!" 
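# The selectInto2 implementation below restarts kevent() whenever it fails
# with EINTR, and the same idiom backs the handleEintr calls used throughout
# this file. A stand-alone sketch of that retry pattern; the template name and
# the stdin read are local to the example, not the chronos helper itself:
import std/posix

template retryOnEintr(op: untyped): untyped =
  var res = op
  while res == -1 and errno == EINTR:
    res = op
  res

var buf: array[64, byte]
let bytesRead = retryOnEintr(read(cint(0), addr buf[0], csize_t(len(buf))))
echo "read returned ", bytesRead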
- verifySelectParams(timeout) + ok(rkey) - if timeout != -1: - if timeout >= 1000: - tv.tv_sec = posix.Time(timeout div 1_000) - tv.tv_nsec = (timeout %% 1_000) * 1_000_000 - else: - tv.tv_sec = posix.Time(0) - tv.tv_nsec = timeout * 1_000_000 - else: - ptv = nil +proc selectInto2*[T](s: Selector[T], timeout: int, + readyKeys: var openArray[ReadyKey] + ): SelectResult[int] = + var + tv: Timespec + queueEvents: array[asyncEventsCount, KEvent] - if maxres > len(results): - maxres = len(results) + verifySelectParams(timeout, -1, high(int)) - var count = 0 - when not declared(CACHE_EVENTS): - count = kevent(s.kqFD, nil, cint(0), addr(resTable[0]), cint(maxres), ptv) - else: - when hasThreadSupport: - s.withChangeLock(): - if s.changesLength > 0: - count = kevent(s.kqFD, addr(s.changes[0]), cint(s.changesLength), - addr(resTable[0]), cint(maxres), ptv) - s.changesLength = 0 + let + ptrTimeout = + if timeout != -1: + if timeout >= 1000: + tv.tv_sec = Time(timeout div 1_000) + tv.tv_nsec = (timeout %% 1_000) * 1_000_000 else: - count = kevent(s.kqFD, nil, cint(0), addr(resTable[0]), cint(maxres), - ptv) - else: - let length = cint(len(s.changes)) - if length > 0: - count = kevent(s.kqFD, addr(s.changes[0]), length, - addr(resTable[0]), cint(maxres), ptv) - s.changes.setLen(0) + tv.tv_sec = Time(0) + tv.tv_nsec = timeout * 1_000_000 + addr tv else: - count = kevent(s.kqFD, nil, cint(0), addr(resTable[0]), cint(maxres), - ptv) - - if count < 0: - result = 0 - let err = osLastError() - if cint(err) != EINTR: - raiseIOSelectorsError(err) - elif count == 0: - result = 0 - else: - var i = 0 - var k = 0 # do not delete this, because `continue` used in cycle. - var pkey: ptr SelectorKey[T] - while i < count: - let kevent = addr(resTable[i]) - var rkey = ReadyKey(fd: int(kevent.ident), events: {}) - - if (kevent.flags and EV_ERROR) != 0: - rkey.events = {Event.Error} - rkey.errorCode = OSErrorCode(kevent.data) - - case kevent.filter: - of EVFILT_READ: - pkey = addr(s.fds[int(kevent.ident)]) - rkey.events.incl(Event.Read) - if Event.User in pkey.events: - var data: uint64 = 0 - if posix.read(cint(kevent.ident), addr data, - sizeof(uint64)) != sizeof(uint64): - let err = osLastError() - if err == OSErrorCode(EAGAIN): - # someone already consumed event data - inc(i) + nil + maxEventsCount = cint(min(asyncEventsCount, len(readyKeys))) + eventsCount = + block: + var res = 0 + while true: + res = kevent(s.kqFd, nil, cint(0), addr(queueEvents[0]), + maxEventsCount, ptrTimeout) + if res < 0: + let errorCode = osLastError() + if errorCode == EINTR: continue - else: - raiseIOSelectorsError(err) - rkey.events = {Event.User} - of EVFILT_WRITE: - pkey = addr(s.fds[int(kevent.ident)]) - rkey.events.incl(Event.Write) - rkey.events = {Event.Write} - of EVFILT_TIMER: - pkey = addr(s.fds[int(kevent.ident)]) - if Event.Oneshot in pkey.events: - # we will not clear key until it will be unregistered, so - # application can obtain data, but we will decrease counter, - # because kqueue is empty. - dec(s.count) - # we are marking key with `Finished` event, to avoid double decrease. 
- pkey.events.incl(Event.Finished) - rkey.events.incl(Event.Timer) - of EVFILT_VNODE: - pkey = addr(s.fds[int(kevent.ident)]) - rkey.events.incl(Event.Vnode) - if (kevent.fflags and NOTE_DELETE) != 0: - rkey.events.incl(Event.VnodeDelete) - if (kevent.fflags and NOTE_WRITE) != 0: - rkey.events.incl(Event.VnodeWrite) - if (kevent.fflags and NOTE_EXTEND) != 0: - rkey.events.incl(Event.VnodeExtend) - if (kevent.fflags and NOTE_ATTRIB) != 0: - rkey.events.incl(Event.VnodeAttrib) - if (kevent.fflags and NOTE_LINK) != 0: - rkey.events.incl(Event.VnodeLink) - if (kevent.fflags and NOTE_RENAME) != 0: - rkey.events.incl(Event.VnodeRename) - if (kevent.fflags and NOTE_REVOKE) != 0: - rkey.events.incl(Event.VnodeRevoke) - of EVFILT_SIGNAL: - pkey = addr(s.fds[cast[int](kevent.udata)]) - rkey.fd = cast[int](kevent.udata) - rkey.events.incl(Event.Signal) - of EVFILT_PROC: - rkey.fd = cast[int](kevent.udata) - pkey = addr(s.fds[cast[int](kevent.udata)]) - # we will not clear key, until it will be unregistered, so - # application can obtain data, but we will decrease counter, - # because kqueue is empty. - dec(s.count) - # we are marking key with `Finished` event, to avoid double decrease. - pkey.events.incl(Event.Finished) - rkey.events.incl(Event.Process) - else: - doAssert(true, "Unsupported kqueue filter in the queue!") - - if (kevent.flags and EV_EOF) != 0: - # TODO this error handling needs to be rethought. - # `fflags` can sometimes be `0x80000000` and thus we use 'cast' - # here: - if kevent.fflags != 0: - rkey.errorCode = cast[OSErrorCode](kevent.fflags) - else: - # This assumes we are dealing with sockets. - # TODO: For future-proofing it might be a good idea to give the - # user access to the raw `kevent`. - rkey.errorCode = OSErrorCode(ECONNRESET) - rkey.events.incl(Event.Error) + return err(errorCode) + else: + break + res + + var k = 0 + for i in 0 ..< eventsCount: + let rkey = s.prepareKey(queueEvents[i]).valueOr: continue + readyKeys[k] = rkey + inc(k) + + ok(k) + +proc select2*[T](s: Selector[T], + timeout: int): Result[seq[ReadyKey], OSErrorCode] = + var res = newSeq[ReadyKey](asyncEventsCount) + let count = ? selectInto2(s, timeout, res) + res.setLen(count) + ok(res) + +proc newSelector*[T](): owned(Selector[T]) {. + raises: [Defect, IOSelectorsException].} = + let res = Selector.new(T) + if res.isErr(): + raiseIOSelectorsError(res.error()) + res.get() + +proc newSelectEvent*(): SelectEvent {. + raises: [Defect, IOSelectorsException].} = + let res = SelectEvent.new() + if res.isErr(): + raiseIOSelectorsError(res.error()) + res.get() + +proc trigger*(ev: SelectEvent) {. + raises: [Defect, IOSelectorsException].} = + let res = ev.trigger2() + if res.isErr(): + raiseIOSelectorsError(res.error()) + +proc close*(ev: SelectEvent) {. + raises: [Defect, IOSelectorsException].} = + let res = ev.close2() + if res.isErr(): + raiseIOSelectorsError(res.error()) + +proc registerHandle*[T](s: Selector[T], fd: cint | SocketHandle, + events: set[Event], data: T) {. + raises: [Defect, IOSelectorsException].} = + let res = registerHandle2(s, cint(fd), events, data) + if res.isErr(): + raiseIOSelectorsError(res.error()) + +proc updateHandle*[T](s: Selector[T], fd: cint | SocketHandle, + events: set[Event]) {. + raises: [Defect, IOSelectorsException].} = + let res = updateHandle2(s, cint(fd), events) + if res.isErr(): + raiseIOSelectorsError(res.error()) + +proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) {. 
+ raises: [Defect, IOSelectorsException].} = + let res = registerEvent2(s, ev, data) + if res.isErr(): + raiseIOSelectorsError(res.error()) + +proc registerVnode*[T](s: Selector[T], fd: cint, events: set[Event], data: T) {. + raises: [Defect, IOSelectorsException].} = + let res = registerVnode2(s, fd, events, data) + if res.isErr(): + raiseIOSelectorsError(res.error()) + +proc unregister*[T](s: Selector[T], event: SelectEvent) {. + raises: [Defect, IOSelectorsException].} = + let res = unregister2(s, event) + if res.isErr(): + raiseIOSelectorsError(res.error()) + +proc unregister*[T](s: Selector[T], fd: cint|SocketHandle) {. + raises: [Defect, IOSelectorsException].} = + let res = unregister2(s, fd) + if res.isErr(): + raiseIOSelectorsError(res.error()) - results[k] = rkey - inc(k) - inc(i) - result = k - -proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] = - result = newSeq[ReadyKey](MAX_KQUEUE_EVENTS) - let count = selectInto(s, timeout, result) - result.setLen(count) - -template isEmpty*[T](s: Selector[T]): bool = - (s.count == 0) - -proc contains*[T](s: Selector[T], fd: SocketHandle|int): bool {.inline.} = - let fdi = fd.int - fdi < s.maxFD and s.fds[fd.int].ident != InvalidIdent - -proc setData*[T](s: Selector[T], fd: SocketHandle|int, data: T): bool = - let fdi = int(fd) - if fdi in s: - s.fds[fdi].data = data - result = true - -template withData*[T](s: Selector[T], fd: SocketHandle|int, value, - body: untyped) = - let fdi = int(fd) - if fdi in s: - var value = addr(s.fds[fdi].data) +proc selectInto*[T](s: Selector[T], timeout: int, + results: var openArray[ReadyKey]): int {. + raises: [Defect, IOSelectorsException].} = + let res = selectInto2(s, timeout, results) + if res.isErr(): + raiseIOSelectorsError(res.error()) + res.get() + +proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] {. + raises: [Defect, IOSelectorsException].} = + let res = select2(s, timeout) + if res.isErr(): + raiseIOSelectorsError(res.error()) + res.get() + +proc close*[T](s: Selector[T]) {.raises: [Defect, IOSelectorsException].} = + let res = s.close2() + if res.isErr(): + raiseIOSelectorsError(res.error()) + +proc contains*[T](s: Selector[T], fd: SocketHandle|cint): bool {.inline.} = + s.checkKey(int32(fd)) + +proc setData*[T](s: Selector[T], fd: SocketHandle|cint, data: T): bool = + s.fds.withValue(int32(fd), skey): + skey[].data = data + return true + do: + return false + +template withData*[T](s: Selector[T], fd: SocketHandle|cint, value, + body: untyped) = + s.fds.withValue(int32(fd), skey): + var value = addr(skey[].data) body -template withData*[T](s: Selector[T], fd: SocketHandle|int, value, body1, - body2: untyped) = - let fdi = int(fd) - if fdi in s: - var value = addr(s.fds[fdi].data) +template withData*[T](s: Selector[T], fd: SocketHandle|cint, value, body1, + body2: untyped) = + s.fds.withValue(int32(fd), skey): + var value = addr(skey[].data) body1 - else: + do: body2 - -proc getFd*[T](s: Selector[T]): int = - return s.kqFD.int +proc getFd*[T](s: Selector[T]): cint = s.kqFd diff --git a/chronos/ioselects/ioselectors_poll.nim b/chronos/ioselects/ioselectors_poll.nim index 8c2e9f5eb..9ff8ad12e 100644 --- a/chronos/ioselects/ioselectors_poll.nim +++ b/chronos/ioselects/ioselectors_poll.nim @@ -8,31 +8,19 @@ # # This module implements Posix poll(). 
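# A stand-alone sketch of the poll(2) pattern this backend wraps: build an
# array of pollfd records, call poll(), then translate `revents` back into
# readiness information. Only std posix names are used; the descriptor choice
# (stdin) and the 100 ms timeout are arbitrary for the example:
import std/posix

var fds = [TPollfd(fd: cint(0), events: POLLIN, revents: 0)]
let ready = poll(addr fds[0], Tnfds(len(fds)), 100)
if ready > 0 and (fds[0].revents and POLLIN) != 0:
  echo "descriptor is readable"
elif ready == 0:
  echo "poll(2) timed out"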
+import std/tables +import stew/base10 -import posix, times - -# Maximum number of events that can be returned -const MAX_POLL_EVENTS = 64 - -when hasThreadSupport: - type - SelectorImpl[T] = object - maxFD : int - pollcnt: int - fds: ptr SharedArray[SelectorKey[T]] - pollfds: ptr SharedArray[TPollFd] - count: int - lock: Lock - Selector*[T] = ptr SelectorImpl[T] +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} else: - type - SelectorImpl[T] = object - maxFD : int - pollcnt: int - fds: seq[SelectorKey[T]] - pollfds: seq[TPollFd] - count: int - Selector*[T] = ref SelectorImpl[T] + {.push raises: [].} + +type + SelectorImpl[T] = object + fds: Table[int32, SelectorKey[T]] + pollfds: seq[TPollFd] + Selector*[T] = ref SelectorImpl[T] type SelectEventImpl = object @@ -40,271 +28,316 @@ type wfd: cint SelectEvent* = ptr SelectEventImpl -when hasThreadSupport: - template withPollLock[T](s: Selector[T], body: untyped) = - acquire(s.lock) - {.locks: [s.lock].}: - try: - body - finally: - release(s.lock) -else: - template withPollLock(s, body: untyped) = - body +proc toString(key: int32): string = + Base10.toString(uint32(key)) -proc newSelector*[T](): Selector[T] = - var a = RLimit() - if getrlimit(posix.RLIMIT_NOFILE, a) != 0: - raiseIOSelectorsError(osLastError()) - var maxFD = int(a.rlim_max) - - when hasThreadSupport: - result = cast[Selector[T]](allocShared0(sizeof(SelectorImpl[T]))) - result.maxFD = maxFD - result.fds = allocSharedArray[SelectorKey[T]](maxFD) - result.pollfds = allocSharedArray[TPollFd](maxFD) - initLock(result.lock) - else: - result = Selector[T]() - result.maxFD = maxFD - result.fds = newSeq[SelectorKey[T]](maxFD) - result.pollfds = newSeq[TPollFd](maxFD) +template addKey[T](s: Selector[T], key: int32, skey: SelectorKey[T]) = + if s.fds.hasKeyOrPut(key, skey): + raiseAssert "Descriptor [" & key.toString() & + "] is already registered in the selector!" - for i in 0 ..< maxFD: - result.fds[i].ident = InvalidIdent +template getKey[T](s: Selector[T], key: int32): SelectorKey[T] = + let + defaultKey = SelectorKey[T](ident: InvalidIdent) + pkey = s.fds.getOrDefault(key, defaultKey) + doAssert(pkey.ident != InvalidIdent, + "Descriptor [" & key.toString() & + "] is not registered in the selector!") + pkey + +template checkKey[T](s: Selector[T], key: int32): bool = + s.fds.contains(key) + +proc freeKey[T](s: Selector[T], key: int32) = + s.fds.del(key) + +proc new*(t: typedesc[Selector], T: typedesc): SelectResult[Selector[T]] = + let selector = Selector[T]( + fds: initTable[int32, SelectorKey[T]](asyncInitialSize) + ) + ok(selector) + +proc close2*[T](s: Selector[T]): SelectResult[void] = + s.fds.clear() + s.pollfds.clear() + +proc new*(t: typedesc[SelectEvent]): SelectResult[SelectEvent] = + let flags = {DescriptorFlag.NonBlock, DescriptorFlag.CloseOnExec} + let pipes = ? 
createOsPipe(flags, flags)
+  var res = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl)))
+  res.rfd = pipes.read
+  res.wfd = pipes.write
+  ok(res)
+
+proc trigger2*(event: SelectEvent): SelectResult[void] =
+  var data: uint64 = 1
+  let res = handleEintr(osdefs.write(event.wfd, addr data, sizeof(uint64)))
+  if res == -1:
+    err(osLastError())
+  elif res != sizeof(uint64):
+    err(OSErrorCode(osdefs.EINVAL))
+  else:
+    ok()
+
+proc close2*(event: SelectEvent): SelectResult[void] =
+  let
+    rfd = event.rfd
+    wfd = event.wfd
+  deallocShared(cast[pointer](event))
+  let rres = handleEintr(osdefs.close(rfd))
+  if rres == -1:
+    discard osdefs.close(wfd)
+    return err(osLastError())
+  let wres = handleEintr(osdefs.close(wfd))
+  if wres == -1:
+    err(osLastError())
+  else:
+    ok()

-proc close*[T](s: Selector[T]) =
-  when hasThreadSupport:
-    deinitLock(s.lock)
-    deallocSharedArray(s.fds)
-    deallocSharedArray(s.pollfds)
-    deallocShared(cast[pointer](s))
+template toPollEvents(events: set[Event]): cshort =
+  var res = cshort(0)
+  if Event.Read in events: res = res or POLLIN
+  if Event.Write in events: res = res or POLLOUT
+  res

 template pollAdd[T](s: Selector[T], sock: cint, events: set[Event]) =
-  withPollLock(s):
-    var pollev: cshort = 0
-    if Event.Read in events: pollev = pollev or POLLIN
-    if Event.Write in events: pollev = pollev or POLLOUT
-    s.pollfds[s.pollcnt].fd = cint(sock)
-    s.pollfds[s.pollcnt].events = pollev
-    inc(s.count)
-    inc(s.pollcnt)
+  s.pollfds.add(TPollFd(fd: sock, events: toPollEvents(events), revents: 0))

 template pollUpdate[T](s: Selector[T], sock: cint, events: set[Event]) =
-  withPollLock(s):
-    var i = 0
-    var pollev: cshort = 0
-    if Event.Read in events: pollev = pollev or POLLIN
-    if Event.Write in events: pollev = pollev or POLLOUT
-
-    while i < s.pollcnt:
-      if s.pollfds[i].fd == sock:
-        s.pollfds[i].events = pollev
-        break
-      inc(i)
-    doAssert(i < s.pollcnt,
-             "Descriptor [" & $sock & "] is not registered in the queue!")
+  var updated = false
+  for mitem in s.pollfds.mitems():
+    if mitem.fd == sock:
+      mitem.events = toPollEvents(events)
+      updated = true
+      break
+  if not(updated):
+    raiseAssert "Descriptor [" & $sock & "] is not registered in the queue!"

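# The SelectEvent used by this backend is a self-pipe: trigger2 above writes a
# single uint64 to the write end, and registerEvent2 below watches the read end
# for Event.Read. A stand-alone sketch of that wake-up round-trip using raw
# POSIX calls; all names are local to the example:
import std/posix

var pfds: array[2, cint]
doAssert pipe(pfds) == 0
var wake: uint64 = 1
discard write(pfds[1], addr wake, csize_t(sizeof(uint64)))   # "trigger"
var got: uint64
discard read(pfds[0], addr got, csize_t(sizeof(uint64)))     # consume the wake-up
doAssert got == 1
discard close(pfds[0])
discard close(pfds[1])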
template pollRemove[T](s: Selector[T], sock: cint) = - withPollLock(s): - var i = 0 - while i < s.pollcnt: - if s.pollfds[i].fd == sock: - if i == s.pollcnt - 1: - s.pollfds[i].fd = 0 - s.pollfds[i].events = 0 - s.pollfds[i].revents = 0 - else: - while i < (s.pollcnt - 1): - s.pollfds[i].fd = s.pollfds[i + 1].fd - s.pollfds[i].events = s.pollfds[i + 1].events - inc(i) - break - inc(i) - dec(s.pollcnt) - dec(s.count) - -template checkFd(s, f) = - if f >= s.maxFD: - raiseIOSelectorsError("Maximum number of descriptors is exhausted!") - -proc registerHandle*[T](s: Selector[T], fd: int | SocketHandle, - events: set[Event], data: T) = - var fdi = int(fd) - s.checkFd(fdi) - doAssert(s.fds[fdi].ident == InvalidIdent) - setKey(s, fdi, events, 0, data) - if events != {}: s.pollAdd(fdi.cint, events) - -proc updateHandle*[T](s: Selector[T], fd: int | SocketHandle, - events: set[Event]) = - let maskEvents = {Event.Timer, Event.Signal, Event.Process, Event.Vnode, - Event.User, Event.Oneshot, Event.Error} - let fdi = int(fd) - s.checkFd(fdi) - var pkey = addr(s.fds[fdi]) - doAssert(pkey.ident != InvalidIdent, - "Descriptor [" & $fdi & "] is not registered in the queue!") - doAssert(pkey.events * maskEvents == {}) - - if pkey.events != events: - if pkey.events == {}: - s.pollAdd(fd.cint, events) - else: - if events != {}: - s.pollUpdate(fd.cint, events) + let index = + block: + var res = -1 + for key, item in s.pollfds.pairs(): + if item.fd == sock: + res = key + break + res + if index < 0: + raiseAssert "Descriptor [" & $sock & "] is not registered in the queue!" + else: + s.pollfds.del(index) + +proc registerHandle2*[T](s: Selector[T], fd: cint, events: set[Event], + data: T): SelectResult[void] = + let skey = SelectorKey[T](ident: fd, events: events, param: 0, data: data) + + s.addKey(fd, skey) + if events != {}: + s.pollAdd(fd, events) + ok() + +proc updateHandle2*[T](s: Selector[T], fd: cint, + events: set[Event]): SelectResult[void] = + const EventsMask = {Event.Timer, Event.Signal, Event.Process, Event.Vnode, + Event.User, Event.Oneshot, Event.Error} + s.fds.withValue(int32(fd), pkey): + doAssert(pkey[].events * EventsMask == {}, + "Descriptor [" & fd.toString() & "] could not be updated!") + if pkey[].events != events: + if pkey[].events == {}: + s.pollAdd(fd, events) else: - s.pollRemove(fd.cint) - pkey.events = events - -proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) = - var fdi = int(ev.rfd) - doAssert(s.fds[fdi].ident == InvalidIdent, "Event is already registered in the queue!") - var events = {Event.User} - setKey(s, fdi, events, 0, data) - events.incl(Event.Read) - s.pollAdd(fdi.cint, events) - -proc unregister*[T](s: Selector[T], fd: int|SocketHandle) = - let fdi = int(fd) - s.checkFd(fdi) - var pkey = addr(s.fds[fdi]) - doAssert(pkey.ident != InvalidIdent, - "Descriptor [" & $fdi & "] is not registered in the queue!") - pkey.ident = InvalidIdent + if events != {}: + s.pollUpdate(fd, events) + else: + s.pollRemove(fd) + pkey.events = events + do: + raiseAssert "Descriptor [" & fd.toString() & + "] is not registered in the selector!" 
+ ok() + +proc registerEvent2*[T](s: Selector[T], ev: SelectEvent, + data: T): SelectResult[cint] = + doAssert(not(isNil(ev))) + let + key = SelectorKey[T](ident: ev.rfd, events: {Event.User}, + param: 0, data: data) + + s.addKey(ev.rfd, key) + s.pollAdd(ev.rfd, {Event.Read}.toPollEvents()) + ok(ev.rfd) + +proc unregister2*[T](s: Selector[T], fd: cint): SelectResult[void] = + let pkey = s.getKey(fd) if pkey.events != {}: - pkey.events = {} - s.pollRemove(fdi.cint) - -proc unregister*[T](s: Selector[T], ev: SelectEvent) = - let fdi = int(ev.rfd) - s.checkFd(fdi) - var pkey = addr(s.fds[fdi]) - doAssert(pkey.ident != InvalidIdent, "Event is not registered in the queue!") - doAssert(Event.User in pkey.events) - pkey.ident = InvalidIdent - pkey.events = {} - s.pollRemove(fdi.cint) - -proc newSelectEvent*(): SelectEvent = - var fds: array[2, cint] - if posix.pipe(fds) != 0: - raiseIOSelectorsError(osLastError()) - setNonBlocking(fds[0]) - setNonBlocking(fds[1]) - result = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl))) - result.rfd = fds[0] - result.wfd = fds[1] - -proc trigger*(ev: SelectEvent) = - var data: uint64 = 1 - if posix.write(ev.wfd, addr data, sizeof(uint64)) != sizeof(uint64): - raiseIOSelectorsError(osLastError()) + if {Event.Read, Event.Write, Event.User} * pkey.events != {}: + s.pollRemove(fd) + s.freeKey(fd) + ok() + +proc unregister2*[T](s: Selector[T], event: SelectEvent): SelectResult[void] = + s.unregister2(event.rfd) + +proc prepareKey[T](s: Selector[T], event: var TPollfd): Opt[ReadyKey] = + let + defaultKey = SelectorKey[T](ident: InvalidIdent) + fdi32 = int32(event.fd) + revents = event.revents + + var + pkey = s.getKey(fdi32) + rkey = ReadyKey(fd: event.fd) + + # Cleanup all the received events. + event.revents = 0 + + if (revents and POLLIN) != 0: + if Event.User in pkey.events: + var data: uint64 = 0 + let res = handleEintr(osdefs.read(event.fd, addr data, sizeof(uint64))) + if res != sizeof(uint64): + let errorCode = osLastError() + if errorCode == EAGAIN: + return Opt.none(ReadyKey) + else: + rkey.events.incl({Event.User, Event.Error}) + rkey.errorCode = errorCode + else: + rkey.events.incl(Event.User) + else: + rkey.events.incl(Event.Read) + + if (revents and POLLOUT) != 0: + rkey.events.incl(Event.Write) + + if (revents and POLLERR) != 0 or (revents and POLLHUP) != 0 or + (revents and POLLNVAL) != 0: + rkey.events.incl(Event.Error) + + ok(rkey) + +proc selectInto2*[T](s: Selector[T], timeout: int, + readyKeys: var openArray[ReadyKey]): SelectResult[int] = + var k = 0 -proc close*(ev: SelectEvent) = - let res1 = posix.close(ev.rfd) - let res2 = posix.close(ev.wfd) - deallocShared(cast[pointer](ev)) - if res1 != 0 or res2 != 0: - raiseIOSelectorsError(osLastError()) + verifySelectParams(timeout, -1, int(high(cint))) + + let + maxEventsCount = min(len(s.pollfds), len(readyKeys)) + eventsCount = + if maxEventsCount > 0: + let res = handleEintr(poll(addr(s.pollfds[0]), Tnfds(maxEventsCount), + timeout)) + if res < 0: + return err(osLastError()) + res + else: + 0 + + for i in 0 ..< len(s.pollfds): + if s.pollfds[i].revents != 0: + let rkey = s.prepareKey(s.pollfds[i]).valueOr: continue + readyKeys[k] = rkey + inc(k) + if k == eventsCount: break + + ok(k) + +proc select2*[T](s: Selector[T], timeout: int): SelectResult[seq[ReadyKey]] = + var res = newSeq[ReadyKey](asyncEventsCount) + let count = ? selectInto2(s, timeout, res) + res.setLen(count) + ok(res) + +proc newSelector*[T](): Selector[T] {. 
+ raises: [Defect, OSError].} = + let res = Selector.new(T) + if res.isErr(): raiseOSError(res.error) + res.get() + +proc close*[T](s: Selector[T]) {. + raises: [Defect, IOSelectorsException].} = + let res = s.close2() + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc newSelectEvent*(): SelectEvent {. + raises: [Defect, IOSelectorsException].} = + let res = SelectEvent.new() + if res.isErr(): raiseIOSelectorsError(res.error()) + res.get() + +proc trigger*(event: SelectEvent) {. + raises: [Defect, IOSelectorsException].} = + let res = event.trigger2() + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc close*(event: SelectEvent) {. + raises: [Defect, IOSelectorsException].} = + let res = event.close2() + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc registerHandle*[T](s: Selector[T], fd: cint | SocketHandle, + events: set[Event], data: T) {. + raises: [Defect, IOSelectorsException].} = + let res = registerHandle2(s, cint(fd), events, data) + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc updateHandle*[T](s: Selector[T], fd: cint | SocketHandle, + events: set[Event]) {. + raises: [Defect, IOSelectorsException].} = + let res = updateHandle2(s, cint(fd), events) + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc unregister*[T](s: Selector[T], fd: cint | SocketHandle) {. + raises: [Defect, IOSelectorsException].} = + let res = unregister2(s, cint(fd)) + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc unregister*[T](s: Selector[T], event: SelectEvent) {. + raises: [Defect, IOSelectorsException].} = + let res = unregister2(s, event) + if res.isErr(): raiseIOSelectorsError(res.error()) + +proc registerEvent*[T](s: Selector[T], event: SelectEvent, + data: T) {. + raises: [Defect, IOSelectorsException].} = + let res = registerEvent2(s, event, data) + if res.isErr(): raiseIOSelectorsError(res.error()) proc selectInto*[T](s: Selector[T], timeout: int, - results: var openArray[ReadyKey]): int = - var maxres = MAX_POLL_EVENTS - if maxres > len(results): - maxres = len(results) - - verifySelectParams(timeout) - - s.withPollLock(): - let count = posix.poll(addr(s.pollfds[0]), Tnfds(s.pollcnt), timeout) - if count < 0: - result = 0 - let err = osLastError() - if cint(err) != EINTR: - raiseIOSelectorsError(err) - elif count == 0: - result = 0 - else: - var i = 0 - var k = 0 - var rindex = 0 - while (i < s.pollcnt) and (k < count) and (rindex < maxres): - let revents = s.pollfds[i].revents - if revents != 0: - let fd = s.pollfds[i].fd - var pkey = addr(s.fds[fd]) - var rkey = ReadyKey(fd: int(fd), events: {}) - - if (revents and POLLIN) != 0: - rkey.events.incl(Event.Read) - if Event.User in pkey.events: - var data: uint64 = 0 - if posix.read(fd, addr data, sizeof(uint64)) != sizeof(uint64): - let err = osLastError() - if err != OSErrorCode(EAGAIN): - raiseIOSelectorsError(err) - else: - # someone already consumed event data - inc(i) - continue - rkey.events = {Event.User} - if (revents and POLLOUT) != 0: - rkey.events.incl(Event.Write) - if (revents and POLLERR) != 0 or (revents and POLLHUP) != 0 or - (revents and POLLNVAL) != 0: - rkey.events.incl(Event.Error) - results[rindex] = rkey - s.pollfds[i].revents = 0 - inc(rindex) - inc(k) - inc(i) - result = k + readyKeys: var openArray[ReadyKey]): int {. 
+ raises: [Defect, IOSelectorsException].} = + let res = selectInto2(s, timeout, readyKeys) + if res.isErr(): raiseIOSelectorsError(res.error()) + res.get() proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] = - result = newSeq[ReadyKey](MAX_POLL_EVENTS) - let count = selectInto(s, timeout, result) - result.setLen(count) - -template isEmpty*[T](s: Selector[T]): bool = - (s.count == 0) - -proc contains*[T](s: Selector[T], fd: SocketHandle|int): bool {.inline.} = - return s.fds[fd.int].ident != InvalidIdent - -proc getData*[T](s: Selector[T], fd: SocketHandle|int): var T = - let fdi = int(fd) - s.checkFd(fdi) - if fdi in s: - result = s.fds[fdi].data - -proc setData*[T](s: Selector[T], fd: SocketHandle|int, data: T): bool = - let fdi = int(fd) - s.checkFd(fdi) - if fdi in s: - s.fds[fdi].data = data - result = true - -template withData*[T](s: Selector[T], fd: SocketHandle|int, value, + let res = select2(s, timeout) + if res.isErr(): raiseIOSelectorsError(res.error()) + res.get() + +proc contains*[T](s: Selector[T], fd: SocketHandle|cint): bool {.inline.} = + s.checkKey(int32(fd)) + +proc setData*[T](s: Selector[T], fd: SocketHandle|cint, data: T): bool = + s.fds.withValue(int32(fd), skey): + skey[].data = data + return true + do: + return false + +template withData*[T](s: Selector[T], fd: SocketHandle|cint, value, body: untyped) = - mixin checkFd - let fdi = int(fd) - s.checkFd(fdi) - if fdi in s: - var value = addr(s.getData(fdi)) + s.fds.withValue(int32(fd), skey): + var value = addr(skey[].data) body -template withData*[T](s: Selector[T], fd: SocketHandle|int, value, body1, +template withData*[T](s: Selector[T], fd: SocketHandle|cint, value, body1, body2: untyped) = - mixin checkFd - let fdi = int(fd) - s.checkFd(fdi) - if fdi in s: - var value = addr(s.getData(fdi)) + s.fds.withValue(int32(fd), skey): + var value = addr(skey[].data) body1 - else: + do: body2 - -proc getFd*[T](s: Selector[T]): int = - return -1 +proc getFd*[T](s: Selector[T]): int = -1 diff --git a/chronos/ioselects/ioselectors_select.nim b/chronos/ioselects/ioselectors_select.nim deleted file mode 100644 index 9a2914f78..000000000 --- a/chronos/ioselects/ioselectors_select.nim +++ /dev/null @@ -1,465 +0,0 @@ -# -# -# Nim's Runtime Library -# (c) Copyright 2016 Eugene Kabanov -# -# See the file "copying.txt", included in this -# distribution, for details about the copyright. -# - -# This module implements Posix and Windows select(). 
- -import times, nativesockets - -when defined(windows): - import winlean - when defined(gcc): - {.passl: "-lws2_32".} - elif defined(vcc): - {.passl: "ws2_32.lib".} - const platformHeaders = """#include - #include """ - const EAGAIN = WSAEWOULDBLOCK -else: - const platformHeaders = """#include - #include - #include - #include """ -type - Fdset {.importc: "fd_set", header: platformHeaders, pure, final.} = object -var - FD_SETSIZE {.importc: "FD_SETSIZE", header: platformHeaders.}: cint - -proc IOFD_SET(fd: SocketHandle, fdset: ptr Fdset) - {.cdecl, importc: "FD_SET", header: platformHeaders, inline.} -proc IOFD_CLR(fd: SocketHandle, fdset: ptr Fdset) - {.cdecl, importc: "FD_CLR", header: platformHeaders, inline.} -proc IOFD_ZERO(fdset: ptr Fdset) - {.cdecl, importc: "FD_ZERO", header: platformHeaders, inline.} - -when defined(windows): - proc IOFD_ISSET(fd: SocketHandle, fdset: ptr Fdset): cint - {.stdcall, importc: "FD_ISSET", header: platformHeaders, inline.} - proc ioselect(nfds: cint, readFds, writeFds, exceptFds: ptr Fdset, - timeout: ptr Timeval): cint - {.stdcall, importc: "select", header: platformHeaders.} -else: - proc IOFD_ISSET(fd: SocketHandle, fdset: ptr Fdset): cint - {.cdecl, importc: "FD_ISSET", header: platformHeaders, inline.} - proc ioselect(nfds: cint, readFds, writeFds, exceptFds: ptr Fdset, - timeout: ptr Timeval): cint - {.cdecl, importc: "select", header: platformHeaders.} - -when hasThreadSupport: - type - SelectorImpl[T] = object - rSet: Fdset - wSet: Fdset - eSet: Fdset - maxFD: int - fds: ptr SharedArray[SelectorKey[T]] - count: int - lock: Lock - Selector*[T] = ptr SelectorImpl[T] -else: - type - SelectorImpl[T] = object - rSet: Fdset - wSet: Fdset - eSet: Fdset - maxFD: int - fds: seq[SelectorKey[T]] - count: int - Selector*[T] = ref SelectorImpl[T] - -type - SelectEventImpl = object - rsock: SocketHandle - wsock: SocketHandle - SelectEvent* = ptr SelectEventImpl - -when hasThreadSupport: - template withSelectLock[T](s: Selector[T], body: untyped) = - acquire(s.lock) - {.locks: [s.lock].}: - try: - body - finally: - release(s.lock) -else: - template withSelectLock[T](s: Selector[T], body: untyped) = - body - -proc newSelector*[T](): Selector[T] = - when hasThreadSupport: - result = cast[Selector[T]](allocShared0(sizeof(SelectorImpl[T]))) - result.fds = allocSharedArray[SelectorKey[T]](FD_SETSIZE) - initLock result.lock - else: - result = Selector[T]() - result.fds = newSeq[SelectorKey[T]](FD_SETSIZE) - - for i in 0 ..< FD_SETSIZE: - result.fds[i].ident = InvalidIdent - - IOFD_ZERO(addr result.rSet) - IOFD_ZERO(addr result.wSet) - IOFD_ZERO(addr result.eSet) - -proc close*[T](s: Selector[T]) = - when hasThreadSupport: - deallocSharedArray(s.fds) - deallocShared(cast[pointer](s)) - -when defined(windows): - proc newSelectEvent*(): SelectEvent = - var ssock = createNativeSocket() - var wsock = createNativeSocket() - var rsock: SocketHandle = INVALID_SOCKET - var saddr = Sockaddr_in() - - saddr.sin_family = winlean.AF_INET - saddr.sin_port = 0 - saddr.sin_addr.s_addr = INADDR_ANY - if bindAddr(ssock, cast[ptr SockAddr](addr(saddr)), - sizeof(saddr).SockLen) < 0'i32: - raiseIOSelectorsError(osLastError()) - - if winlean.listen(ssock, 1) != 0: - raiseIOSelectorsError(osLastError()) - - var namelen = sizeof(saddr).SockLen - if getsockname(ssock, cast[ptr SockAddr](addr(saddr)), - addr(namelen)) != 0'i32: - raiseIOSelectorsError(osLastError()) - - saddr.sin_addr.s_addr = 0x0100007F - if winlean.connect(wsock, cast[ptr SockAddr](addr(saddr)), - sizeof(saddr).SockLen) 
!= 0: - raiseIOSelectorsError(osLastError()) - namelen = sizeof(saddr).SockLen - rsock = winlean.accept(ssock, cast[ptr SockAddr](addr(saddr)), - cast[ptr SockLen](addr(namelen))) - if rsock == SocketHandle(-1): - raiseIOSelectorsError(osLastError()) - - if winlean.closesocket(ssock) != 0: - raiseIOSelectorsError(osLastError()) - - var mode = clong(1) - if ioctlsocket(rsock, FIONBIO, addr(mode)) != 0: - raiseIOSelectorsError(osLastError()) - mode = clong(1) - if ioctlsocket(wsock, FIONBIO, addr(mode)) != 0: - raiseIOSelectorsError(osLastError()) - - result = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl))) - result.rsock = rsock - result.wsock = wsock - - proc trigger*(ev: SelectEvent) = - var data: uint64 = 1 - if winlean.send(ev.wsock, cast[pointer](addr data), - cint(sizeof(uint64)), 0) != sizeof(uint64): - raiseIOSelectorsError(osLastError()) - - proc close*(ev: SelectEvent) = - let res1 = winlean.closesocket(ev.rsock) - let res2 = winlean.closesocket(ev.wsock) - deallocShared(cast[pointer](ev)) - if res1 != 0 or res2 != 0: - raiseIOSelectorsError(osLastError()) - -else: - proc newSelectEvent*(): SelectEvent = - var fds: array[2, cint] - if posix.pipe(fds) != 0: - raiseIOSelectorsError(osLastError()) - setNonBlocking(fds[0]) - setNonBlocking(fds[1]) - result = cast[SelectEvent](allocShared0(sizeof(SelectEventImpl))) - result.rsock = SocketHandle(fds[0]) - result.wsock = SocketHandle(fds[1]) - - proc trigger*(ev: SelectEvent) = - var data: uint64 = 1 - if posix.write(cint(ev.wsock), addr data, sizeof(uint64)) != sizeof(uint64): - raiseIOSelectorsError(osLastError()) - - proc close*(ev: SelectEvent) = - let res1 = posix.close(cint(ev.rsock)) - let res2 = posix.close(cint(ev.wsock)) - deallocShared(cast[pointer](ev)) - if res1 != 0 or res2 != 0: - raiseIOSelectorsError(osLastError()) - -proc setSelectKey[T](s: Selector[T], fd: SocketHandle, events: set[Event], - data: T) = - var i = 0 - let fdi = int(fd) - while i < FD_SETSIZE: - if s.fds[i].ident == InvalidIdent: - var pkey = addr(s.fds[i]) - pkey.ident = fdi - pkey.events = events - pkey.data = data - break - inc(i) - if i >= FD_SETSIZE: - raiseIOSelectorsError("Maximum number of descriptors is exhausted!") - -proc getKey[T](s: Selector[T], fd: SocketHandle): ptr SelectorKey[T] = - var i = 0 - let fdi = int(fd) - while i < FD_SETSIZE: - if s.fds[i].ident == fdi: - result = addr(s.fds[i]) - break - inc(i) - doAssert(i < FD_SETSIZE, - "Descriptor [" & $int(fd) & "] is not registered in the queue!") - -proc delKey[T](s: Selector[T], fd: SocketHandle) = - var empty: T - var i = 0 - while i < FD_SETSIZE: - if s.fds[i].ident == fd.int: - s.fds[i].ident = InvalidIdent - s.fds[i].events = {} - s.fds[i].data = empty - break - inc(i) - doAssert(i < FD_SETSIZE, - "Descriptor [" & $int(fd) & "] is not registered in the queue!") - -proc registerHandle*[T](s: Selector[T], fd: int | SocketHandle, - events: set[Event], data: T) = - when not defined(windows): - let fdi = int(fd) - s.withSelectLock(): - s.setSelectKey(fd, events, data) - when not defined(windows): - if fdi > s.maxFD: s.maxFD = fdi - if Event.Read in events: - IOFD_SET(fd, addr s.rSet) - inc(s.count) - if Event.Write in events: - IOFD_SET(fd, addr s.wSet) - IOFD_SET(fd, addr s.eSet) - inc(s.count) - -proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) = - when not defined(windows): - let fdi = int(ev.rsock) - s.withSelectLock(): - s.setSelectKey(ev.rsock, {Event.User}, data) - when not defined(windows): - if fdi > s.maxFD: s.maxFD = fdi - IOFD_SET(ev.rsock, addr 
s.rSet) - inc(s.count) - -proc updateHandle*[T](s: Selector[T], fd: int | SocketHandle, - events: set[Event]) = - let maskEvents = {Event.Timer, Event.Signal, Event.Process, Event.Vnode, - Event.User, Event.Oneshot, Event.Error} - s.withSelectLock(): - var pkey = s.getKey(fd) - doAssert(pkey.events * maskEvents == {}) - if pkey.events != events: - if (Event.Read in pkey.events) and (Event.Read notin events): - IOFD_CLR(fd, addr s.rSet) - dec(s.count) - if (Event.Write in pkey.events) and (Event.Write notin events): - IOFD_CLR(fd, addr s.wSet) - IOFD_CLR(fd, addr s.eSet) - dec(s.count) - if (Event.Read notin pkey.events) and (Event.Read in events): - IOFD_SET(fd, addr s.rSet) - inc(s.count) - if (Event.Write notin pkey.events) and (Event.Write in events): - IOFD_SET(fd, addr s.wSet) - IOFD_SET(fd, addr s.eSet) - inc(s.count) - pkey.events = events - -proc unregister*[T](s: Selector[T], fd: SocketHandle|int) = - s.withSelectLock(): - let fd = fd.SocketHandle - var pkey = s.getKey(fd) - if Event.Read in pkey.events or Event.User in pkey.events: - IOFD_CLR(fd, addr s.rSet) - dec(s.count) - if Event.Write in pkey.events: - IOFD_CLR(fd, addr s.wSet) - IOFD_CLR(fd, addr s.eSet) - dec(s.count) - s.delKey(fd) - -proc unregister*[T](s: Selector[T], ev: SelectEvent) = - let fd = ev.rsock - s.withSelectLock(): - var pkey = s.getKey(fd) - IOFD_CLR(fd, addr s.rSet) - dec(s.count) - s.delKey(fd) - -proc selectInto*[T](s: Selector[T], timeout: int, - results: var openArray[ReadyKey]): int = - var tv = Timeval() - var ptv = addr tv - var rset, wset, eset: Fdset - - verifySelectParams(timeout) - - if timeout != -1: - when defined(genode): - tv.tv_sec = Time(timeout div 1_000) - else: - tv.tv_sec = timeout.int32 div 1_000 - tv.tv_usec = (timeout.int32 %% 1_000) * 1_000 - else: - ptv = nil - - s.withSelectLock(): - rset = s.rSet - wset = s.wSet - eset = s.eSet - - var count = ioselect(cint(s.maxFD) + 1, addr(rset), addr(wset), - addr(eset), ptv) - if count < 0: - result = 0 - when defined(windows): - raiseIOSelectorsError(osLastError()) - else: - let err = osLastError() - if cint(err) != EINTR: - raiseIOSelectorsError(err) - elif count == 0: - result = 0 - else: - var rindex = 0 - var i = 0 - var k = 0 - - while (i < FD_SETSIZE) and (k < count): - if s.fds[i].ident != InvalidIdent: - var flag = false - var pkey = addr(s.fds[i]) - var rkey = ReadyKey(fd: int(pkey.ident), events: {}) - let fd = SocketHandle(pkey.ident) - if IOFD_ISSET(fd, addr rset) != 0: - if Event.User in pkey.events: - var data: uint64 = 0 - if recv(fd, cast[pointer](addr(data)), - sizeof(uint64).cint, 0) != sizeof(uint64): - let err = osLastError() - if cint(err) != EAGAIN: - raiseIOSelectorsError(err) - else: - inc(i) - inc(k) - continue - else: - flag = true - rkey.events = {Event.User} - else: - flag = true - rkey.events = {Event.Read} - if IOFD_ISSET(fd, addr wset) != 0: - rkey.events.incl(Event.Write) - if IOFD_ISSET(fd, addr eset) != 0: - rkey.events.incl(Event.Error) - flag = true - if flag: - results[rindex] = rkey - inc(rindex) - inc(k) - inc(i) - result = rindex - -proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] = - result = newSeq[ReadyKey](FD_SETSIZE) - var count = selectInto(s, timeout, result) - result.setLen(count) - -proc flush*[T](s: Selector[T]) = discard - -template isEmpty*[T](s: Selector[T]): bool = - (s.count == 0) - -proc contains*[T](s: Selector[T], fd: SocketHandle|int): bool {.inline.} = - s.withSelectLock(): - result = false - - let fdi = int(fd) - for i in 0..", pure, final.} = object + ssi_signo*: 
uint32 + ssi_errno*: int32 + ssi_code*: int32 + ssi_pid*: uint32 + ssi_uid*: uint32 + ssi_fd*: int32 + ssi_tid*: uint32 + ssi_band*: uint32 + ssi_overrun*: uint32 + ssi_trapno*: uint32 + ssi_status*: int32 + ssi_int*: int32 + ssi_ptr*: uint64 + ssi_utime*: uint64 + ssi_stime*: uint64 + ssi_addr*: uint64 + pad* {.importc: "__pad".}: array[0..47, uint8] + proc epoll_create*(size: cint): cint {.importc: "epoll_create", header: "", sideEffect.} @@ -933,9 +953,19 @@ elif defined(linux): timeout: cint): cint {. importc: "epoll_wait", header: "", sideEffect.} + proc timerfd_create*(clock_id: ClockId, flags: cint): cint {. + cdecl, importc: "timerfd_create", header: "".} + proc timerfd_settime*(ufd: cint, flags: cint, + utmr: var Itimerspec, otmr: var Itimerspec): cint {. + cdecl, importc: "timerfd_settime", header: "".} + proc eventfd*(count: cuint, flags: cint): cint {. + cdecl, importc: "eventfd", header: "".} + proc signalfd*(fd: cint, mask: var Sigset, flags: cint): cint {. + cdecl, importc: "signalfd", header: "".} + else: - import std/[posix, os] - export posix, os + import std/posix + export posix var IP_MULTICAST_TTL* {.importc: "IP_MULTICAST_TTL", header: "".}: cint diff --git a/chronos/selectors2.nim b/chronos/selectors2.nim index d9ef778cb..bbb52a5d6 100644 --- a/chronos/selectors2.nim +++ b/chronos/selectors2.nim @@ -31,22 +31,32 @@ # support - changes could potentially be backported to nim but are not # backwards-compatible. -import os, nativesockets - -const hasThreadSupport = compileOption("threads") and defined(threadsafe) - -const ioselSupportedPlatform* = defined(macosx) or defined(freebsd) or - defined(netbsd) or defined(openbsd) or - defined(dragonfly) or - (defined(linux) and not defined(android)) - ## This constant is used to determine whether the destination platform is - ## fully supported by ``ioselectors`` module. +import stew/results +import osdefs, osutils +export results + +const + asyncEventsCount* {.intdefine.} = 64 + ## Number of epoll events retrieved by syscall. + asyncInitialSize* {.intdefine.} = 64 + ## Initial size of Selector[T]'s array of file descriptors. + asyncEventEngine* {.strdefine.} = + when defined(linux): + "epoll" + elif defined(macosx) or defined(macos) or defined(ios) or + defined(freebsd) or defined(netbsd) or defined(openbsd) or + defined(dragonfly): + "kqueue" + elif defined(posix): + "poll" + else: + "" + ## Engine type which is going to be used by module. -const bsdPlatform = defined(macosx) or defined(freebsd) or - defined(netbsd) or defined(openbsd) or - defined(dragonfly) + hasThreadSupport = compileOption("threads") when defined(nimdoc): + type Selector*[T] = ref object ## An object which holds descriptors to be checked for read/write status @@ -236,30 +246,16 @@ when defined(nimdoc): ## For *poll* and *select* selectors ``-1`` is returned. 
else: - import strutils - when hasThreadSupport: - import locks - - type - SharedArray[T] = UncheckedArray[T] - - proc allocSharedArray[T](nsize: int): ptr SharedArray[T] = - result = cast[ptr SharedArray[T]](allocShared0(sizeof(T) * nsize)) + type + IOSelectorsException* = object of CatchableError - proc reallocSharedArray[T](sa: ptr SharedArray[T], nsize: int): ptr SharedArray[T] = - result = cast[ptr SharedArray[T]](reallocShared(sa, sizeof(T) * nsize)) + SelectResult*[T] = Result[T, OSErrorCode] - proc deallocSharedArray[T](sa: ptr SharedArray[T]) = - deallocShared(cast[pointer](sa)) - type Event* {.pure.} = enum Read, Write, Timer, Signal, Process, Vnode, User, Error, Oneshot, Finished, VnodeWrite, VnodeDelete, VnodeExtend, VnodeAttrib, VnodeLink, VnodeRename, VnodeRevoke - type - IOSelectorsException* = object of CatchableError - ReadyKey* = object fd* : int events*: set[Event] @@ -285,78 +281,54 @@ else: var err = newException(IOSelectorsException, msg) raise err - proc setNonBlocking(fd: cint) {.inline.} = - setBlocking(fd.SocketHandle, false) - - when not defined(windows): - import posix - - template setKey(s, pident, pevents, pparam, pdata: untyped) = - var skey = addr(s.fds[pident]) - skey.ident = pident - skey.events = pevents - skey.param = pparam - skey.data = data - - when ioselSupportedPlatform: - template blockSignals(newmask: var Sigset, oldmask: var Sigset) = + when asyncEventEngine in ["epoll", "kqueue"]: + proc blockSignals(newmask: Sigset, + oldmask: var Sigset): Result[void, OSErrorCode] = + var nmask = newmask + # We do this trick just because Nim's posix.nim has declaration like + # this: + # proc pthread_sigmask(a1: cint; a2, a3: var Sigset): cint + # proc sigprocmask*(a1: cint, a2, a3: var Sigset): cint when hasThreadSupport: - if posix.pthread_sigmask(SIG_BLOCK, newmask, oldmask) == -1: - raiseIOSelectorsError(osLastError()) + if pthread_sigmask(SIG_BLOCK, nmask, oldmask) == -1: + err(osLastError()) + else: + ok() else: - if posix.sigprocmask(SIG_BLOCK, newmask, oldmask) == -1: - raiseIOSelectorsError(osLastError()) - - template unblockSignals(newmask: var Sigset, oldmask: var Sigset) = + if sigprocmask(SIG_BLOCK, nmask, oldmask) == -1: + err(osLastError()) + else: + ok() + + proc unblockSignals(newmask: Sigset, + oldmask: var Sigset): Result[void, OSErrorCode] = + # We do this trick just because Nim's posix.nim has declaration like + # this: + # proc pthread_sigmask(a1: cint; a2, a3: var Sigset): cint + # proc sigprocmask*(a1: cint, a2, a3: var Sigset): cint + var nmask = newmask when hasThreadSupport: - if posix.pthread_sigmask(SIG_UNBLOCK, newmask, oldmask) == -1: - raiseIOSelectorsError(osLastError()) + if pthread_sigmask(SIG_UNBLOCK, nmask, oldmask) == -1: + err(osLastError()) + else: + ok() else: - if posix.sigprocmask(SIG_UNBLOCK, newmask, oldmask) == -1: - raiseIOSelectorsError(osLastError()) + if sigprocmask(SIG_UNBLOCK, nmask, oldmask) == -1: + err(osLastError()) + else: + ok() - template clearKey[T](key: ptr SelectorKey[T]) = - var empty: T - key.ident = InvalidIdent - key.events = {} - key.data = empty - - proc verifySelectParams(timeout: int) = + template verifySelectParams(timeout, min, max: int) = # Timeout of -1 means: wait forever # Anything higher is the time to wait in milliseconds. 
-  doAssert(timeout >= -1, "Cannot select with a negative value, got " & $timeout)
-
-  when defined(linux):
-    include ./ioselects/ioselectors_epoll
-  elif bsdPlatform:
-    include ./ioselects/ioselectors_kqueue
-  elif defined(windows):
-    include ./ioselects/ioselectors_select
-  elif defined(solaris):
-    include ./ioselects/ioselectors_poll # need to replace it with event ports
-  elif defined(genode):
-    include ./ioselects/ioselectors_select # TODO: use the native VFS layer
-  elif defined(nintendoswitch):
-    include ./ioselects/ioselectors_select
-  else:
-    include ./ioselects/ioselectors_poll
-
-proc register*[T](s: Selector[T], fd: int | SocketHandle,
-                  events: set[Event], data: T) {.deprecated: "use registerHandle instead".} =
-  ## **Deprecated since v0.18.0:** Use ``registerHandle`` instead.
-  s.registerHandle(fd, events, data)
-
-proc setEvent*(ev: SelectEvent) {.deprecated: "use trigger instead",
-    raises: [Defect, IOSelectorsException].} =
-  ## Trigger event ``ev``.
-  ##
-  ## **Deprecated since v0.18.0:** Use ``trigger`` instead.
-  ev.trigger()
-
-proc update*[T](s: Selector[T], fd: int | SocketHandle,
-                events: set[Event]) {.deprecated: "use updateHandle instead".} =
-  ## Update file/socket descriptor ``fd``, registered in selector
-  ## ``s`` with new events set ``event``.
-  ##
-  ## **Deprecated since v0.18.0:** Use ``updateHandle`` instead.
-  s.updateHandle()
+  doAssert((timeout >= min) and (timeout <= max),
+           "Cannot select with incorrect timeout value, got " & $timeout)
+
+when asyncEventEngine == "epoll":
+  include ./ioselects/ioselectors_epoll
+elif asyncEventEngine == "kqueue":
+  include ./ioselects/ioselectors_kqueue
+elif asyncEventEngine == "poll":
+  include ./ioselects/ioselectors_poll
+else:
+  {.fatal: "Event engine `" & asyncEventEngine & "` is not supported!".}

From 229de5f842dd38a42a55d7dc4cedf38b5d034b63 Mon Sep 17 00:00:00 2001
From: Jacek Sieka 
Date: Fri, 31 Mar 2023 07:35:04 +0200
Subject: [PATCH 005/146] Compile-time configuration (#371)

This PR moves all compile-time configuration to a single module,
simplifying documentation and access to these features.

Upcoming features may be enabled either individually, or through a new
`chronosPreviewV4` catch-all designed to allow code to be prepared for
increased strictness in future chronos releases.

`-d:chronosDebug` may be used to enable the existing debugging helpers
together.
---
 README.md                 |  6 +++++
 chronos.nimble            |  7 ++---
 chronos/asyncfutures2.nim | 42 ++++++++++++++++-------------
 chronos/asyncloop.nim     |  6 ++---
 chronos/asyncmacro2.nim   |  8 +++---
 chronos/config.nim        | 57 +++++++++++++++++++++++++++++++++++++++
 chronos/debugutils.nim    |  8 +++---
 tests/testaddress.nim     |  2 +-
 tests/testasyncstream.nim |  2 +-
 tests/testbugs.nim        |  2 +-
 tests/testdatagram.nim    |  2 +-
 tests/testfut.nim         |  2 +-
 tests/testhttpclient.nim  |  2 +-
 tests/testhttpserver.nim  |  2 +-
 tests/testmacro.nim       |  2 +-
 tests/testnet.nim         |  2 +-
 tests/testratelimit.nim   |  4 ++-
 tests/testserver.nim      |  2 +-
 tests/testshttpserver.nim |  2 +-
 tests/testsignal.nim      |  2 +-
 tests/testsoon.nim        |  2 +-
 tests/teststream.nim      |  2 +-
 tests/testsync.nim        |  2 +-
 tests/testtime.nim        |  2 +-
 tests/testutils.nim       | 12 ++++-----
 25 files changed, 126 insertions(+), 56 deletions(-)
 create mode 100644 chronos/config.nim

diff --git a/README.md b/README.md
index c63d2542f..c0cc2309f 100644
--- a/README.md
+++ b/README.md
@@ -333,6 +333,12 @@ Known `async` backends include:
 ``none`` can be used when a library supports both a synchronous and asynchronous API, to disable the latter.
+### Compile-time configuration + +`chronos` contains several compile-time [configuration options](./chronos/config.nim) enabling stricter compile-time checks and debugging helpers whose runtime cost may be significant. + +Strictness options generally will become default in future chronos releases and allow adapting existing code without changing the new version - see the [`config.nim`](./chronos/config.nim) module for more information. + ## TODO * Pipe/Subprocess Transports. * Multithreading Stream/Datagram servers diff --git a/chronos.nimble b/chronos.nimble index 18f7a47d8..dfb343c70 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -32,10 +32,11 @@ proc run(args, path: string) = task test, "Run all tests": for args in [ - "-d:useSysAssert -d:useGcAssert", - "-d:chronosStackTrace -d:chronosStrictException", + "-d:debug -d:chronosDebug", + "-d:debug -d:chronosPreviewV4", + "-d:debug -d:chronosDebug -d:useSysAssert -d:useGcAssert", "-d:release", - "-d:release -d:chronosFutureTracking", + "-d:release -d:chronosPreviewV4", ]: run args, "tests/testall" task test_libbacktrace, "test with libbacktrace": diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index 249bb9811..bac4ba113 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -10,7 +10,7 @@ import std/sequtils import stew/base10 -import ./srcloc +import "."/[config, srcloc] export srcloc when defined(nimHasStacktracesModule): @@ -24,7 +24,7 @@ const LocCreateIndex* = 0 LocCompleteIndex* = 1 -when defined(chronosStackTrace): +when chronosStackTrace: type StackTrace = string type @@ -41,11 +41,11 @@ type mustCancel*: bool id*: uint - when defined(chronosStackTrace): + when chronosStackTrace: errorStackTrace*: StackTrace stackTrace: StackTrace ## For debugging purposes only. - when defined(chronosFutureTracking): + when chronosFutureTracking: next*: FutureBase prev*: FutureBase @@ -54,7 +54,7 @@ type # How much refactoring is needed to make this a regular non-ref type? # Obviously, it will still be allocated on the heap when necessary. Future*[T] = ref object of FutureBase ## Typed future. - when defined(chronosStrictException): + when chronosStrictException: closure*: iterator(f: Future[T]): FutureBase {.raises: [Defect, CatchableError], gcsafe.} else: closure*: iterator(f: Future[T]): FutureBase {.raises: [Defect, CatchableError, Exception], gcsafe.} @@ -80,23 +80,27 @@ type tail*: FutureBase count*: uint -var currentID* {.threadvar.}: uint -currentID = 0'u +when chronosFutureId: + var currentID* {.threadvar.}: uint +else: + template id*(f: FutureBase): uint = + cast[uint](addr f[]) -when defined(chronosFutureTracking): +when chronosFutureTracking: var futureList* {.threadvar.}: FutureList futureList = FutureList() template setupFutureBase(loc: ptr SrcLoc) = new(result) - currentID.inc() result.state = FutureState.Pending - when defined(chronosStackTrace): + when chronosStackTrace: result.stackTrace = getStackTrace() - result.id = currentID + when chronosFutureId: + currentID.inc() + result.id = currentID result.location[LocCreateIndex] = loc - when defined(chronosFutureTracking): + when chronosFutureTracking: result.next = nil result.prev = futureList.tail if not(isNil(futureList.tail)): @@ -160,7 +164,7 @@ proc done*(future: FutureBase): bool {.inline.} = ## This is an alias for ``completed(future)`` procedure. 
completed(future) -when defined(chronosFutureTracking): +when chronosFutureTracking: proc futureDestructor(udata: pointer) = ## This procedure will be called when Future[T] got finished, cancelled or ## failed and all Future[T].callbacks are already scheduled and processed. @@ -188,7 +192,7 @@ proc checkFinished(future: FutureBase, loc: ptr SrcLoc) = msg.add("\n " & $future.location[LocCompleteIndex]) msg.add("\n Second completion location:") msg.add("\n " & $loc) - when defined(chronosStackTrace): + when chronosStackTrace: msg.add("\n Stack trace to moment of creation:") msg.add("\n" & indent(future.stackTrace.strip(), 4)) msg.add("\n Stack trace to moment of secondary completion:") @@ -212,7 +216,7 @@ proc finish(fut: FutureBase, state: FutureState) = item = default(AsyncCallback) # release memory as early as possible fut.callbacks = default(seq[AsyncCallback]) # release seq as well - when defined(chronosFutureTracking): + when chronosFutureTracking: scheduleDestructor(fut) proc complete[T](future: Future[T], val: T, loc: ptr SrcLoc) = @@ -240,7 +244,7 @@ proc fail[T](future: Future[T], error: ref CatchableError, loc: ptr SrcLoc) = if not(future.cancelled()): checkFinished(FutureBase(future), loc) future.error = error - when defined(chronosStackTrace): + when chronosStackTrace: future.errorStackTrace = if getStackTrace(error) == "": getStackTrace() else: @@ -258,7 +262,7 @@ proc cancelAndSchedule(future: FutureBase, loc: ptr SrcLoc) = if not(future.finished()): checkFinished(future, loc) future.error = newCancelledError() - when defined(chronosStackTrace): + when chronosStackTrace: future.errorStackTrace = getStackTrace() future.finish(FutureState.Cancelled) @@ -472,7 +476,7 @@ proc `$`(stackTraceEntries: seq[StackTraceEntry]): string = return exc.msg # Shouldn't actually happen since we set the formatting # string -when defined(chronosStackTrace): +when chronosStackTrace: proc injectStacktrace(future: FutureBase) = const header = "\nAsync traceback:\n" @@ -500,7 +504,7 @@ proc internalCheckComplete*(fut: FutureBase) {. raises: [Defect, CatchableError].} = # For internal use only. Used in asyncmacro if not(isNil(fut.error)): - when defined(chronosStackTrace): + when chronosStackTrace: injectStacktrace(fut) raise fut.error diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index ca1655433..abf3edcc2 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -14,9 +14,9 @@ else: {.push raises: [].} from nativesockets import Port -import std/[tables, strutils, heapqueue, lists, options, deques] +import std/[tables, strutils, heapqueue, options, deques] import stew/results -import "."/[osdefs, osutils, timer] +import "."/[config, osdefs, osutils, timer] export Port export timer, results @@ -1263,7 +1263,7 @@ proc getTracker*(id: string): TrackerBase = let loop = getThreadDispatcher() result = loop.trackers.getOrDefault(id, nil) -when defined(chronosFutureTracking): +when chronosFutureTracking: iterator pendingFutures*(): FutureBase = ## Iterates over the list of pending Futures (Future[T] objects which not ## yet completed, cancelled or failed). diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim index 61bf3b51d..3fb0bb719 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/asyncmacro2.nim @@ -193,12 +193,12 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = # here the possibility of transporting more specific error types here # for example by casting exceptions coming out of `await`.. 
   let raises = nnkBracket.newTree()
-  when not defined(chronosStrictException):
-    raises.add(newIdentNode("Exception"))
-  else:
+  when chronosStrictException:
     raises.add(newIdentNode("CatchableError"))
     when (NimMajor, NimMinor) < (1, 4):
       raises.add(newIdentNode("Defect"))
+  else:
+    raises.add(newIdentNode("Exception"))

   closureIterator.addPragma(nnkExprColonExpr.newTree(
     newIdentNode("raises"),
@@ -328,5 +328,5 @@ macro async*(prc: untyped): untyped =
       result.add asyncSingleProc(oneProc)
   else:
     result = asyncSingleProc(prc)
-  when defined(nimDumpAsync):
+  when chronosDumpAsync:
     echo repr result
diff --git a/chronos/config.nim b/chronos/config.nim
new file mode 100644
index 000000000..abc9c375f
--- /dev/null
+++ b/chronos/config.nim
@@ -0,0 +1,57 @@
+## Compile-time configuration options for chronos that control the availability
+## of various strictness and debuggability options. In general, debug helpers
+## are enabled when `debug` is defined while strictness options are introduced
+## in transition periods leading up to a breaking release that starts enforcing
+## them and removes the option.
+##
+## `chronosPreviewV4` is a preview flag to enable v4 semantics - in particular,
+## it enables strict exception checking and disables parts of the deprecated
+## API and other changes being prepared for the upcoming release
+##
+## `chronosDebug` can be defined to enable several debugging helpers that come
+## with a runtime cost - it is recommended to not enable these in production
+## code.
+when (NimMajor, NimMinor) >= (1, 4):
+  const
+    chronosStrictException* {.booldefine.}: bool = defined(chronosPreviewV4)
+      ## Require that `async` code raises only derivatives of `CatchableError` and
+      ## not `Exception` - forward declarations, methods and `proc` types used
+      ## from within `async` code may need to be explicitly annotated with
+      ## `raises: [CatchableError]` when this mode is enabled.
+ + chronosStackTrace* {.booldefine.}: bool = defined(chronosDebug) + ## Include stack traces in futures for creation and completion points + + chronosFutureId* {.booldefine.}: bool = defined(chronosDebug) + ## Generate a unique `id` for every future - when disabled, the address of + ## the future will be used instead + + chronosFutureTracking* {.booldefine.}: bool = defined(chronosDebug) + ## Keep track of all pending futures and allow iterating over them - + ## useful for detecting hung tasks + + chronosDumpAsync* {.booldefine.}: bool = defined(nimDumpAsync) + ## Print code generated by {.async.} transformation +else: + # 1.2 doesn't support `booldefine` in `when` properly + const + chronosStrictException*: bool = + defined(chronosPreviewV4) or defined(chronosStrictException) + chronosStackTrace*: bool = defined(chronosDebug) or defined(chronosStackTrace) + chronosFutureId*: bool = defined(chronosDebug) or defined(chronosFutureId) + chronosFutureTracking*: bool = + defined(chronosDebug) or defined(chronosFutureTracking) + chronosDumpAsync*: bool = defined(nimDumpAsync) + +when defined(debug) or defined(chronosConfig): + import std/macros + + static: + hint("Chronos configuration:") + template printOption(name: string, value: untyped) = + hint(name & ": " & $value) + printOption("chronosStrictException", chronosStrictException) + printOption("chronosStackTrace", chronosStackTrace) + printOption("chronosFutureId", chronosFutureId) + printOption("chronosFutureTracking", chronosFutureTracking) + printOption("chronosDumpAsync", chronosDumpAsync) diff --git a/chronos/debugutils.nim b/chronos/debugutils.nim index 451189a39..17d6412bc 100644 --- a/chronos/debugutils.nim +++ b/chronos/debugutils.nim @@ -12,10 +12,10 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import ./asyncloop +import "."/[asyncloop, config] export asyncloop -when defined(chronosFutureTracking): +when chronosFutureTracking: import stew/base10 const @@ -34,7 +34,7 @@ proc dumpPendingFutures*(filter = AllFutureStates): string = ## not yet finished). ## 2. Future[T] objects with ``FutureState.Finished/Cancelled/Failed`` state ## which callbacks are scheduled, but not yet fully processed. - when defined(chronosFutureTracking): + when chronosFutureTracking: var count = 0'u var res = "" for item in pendingFutures(): @@ -62,7 +62,7 @@ proc pendingFuturesCount*(filter: set[FutureState]): uint = ## ## If ``filter`` is equal to ``AllFutureStates`` Operation's complexity is ## O(1), otherwise operation's complexity is O(n). 
- when defined(chronosFutureTracking): + when chronosFutureTracking: if filter == AllFutureStates: pendingFuturesCount() else: diff --git a/tests/testaddress.nim b/tests/testaddress.nim index 040fc2880..e505ff515 100644 --- a/tests/testaddress.nim +++ b/tests/testaddress.nim @@ -8,7 +8,7 @@ import unittest2 import ../chronos -when defined(nimHasUsed): {.used.} +{.used.} suite "TransportAddress test suite": test "initTAddress(string)": diff --git a/tests/testasyncstream.nim b/tests/testasyncstream.nim index b8c729bb4..fd581cb9a 100644 --- a/tests/testasyncstream.nim +++ b/tests/testasyncstream.nim @@ -10,7 +10,7 @@ import bearssl/[x509] import ../chronos import ../chronos/streams/[tlsstream, chunkstream, boundstream] -when defined(nimHasUsed): {.used.} +{.used.} # To create self-signed certificate and key you can use openssl # openssl req -new -x509 -sha256 -newkey rsa:2048 -nodes \ diff --git a/tests/testbugs.nim b/tests/testbugs.nim index d31ea994e..19a8edbac 100644 --- a/tests/testbugs.nim +++ b/tests/testbugs.nim @@ -8,7 +8,7 @@ import unittest2 import ../chronos -when defined(nimHasUsed): {.used.} +{.used.} suite "Asynchronous issues test suite": const HELLO_PORT = 45679 diff --git a/tests/testdatagram.nim b/tests/testdatagram.nim index 149ce9d19..1eea48958 100644 --- a/tests/testdatagram.nim +++ b/tests/testdatagram.nim @@ -9,7 +9,7 @@ import std/[strutils, net] import unittest2 import ../chronos -when defined(nimHasUsed): {.used.} +{.used.} suite "Datagram Transport test suite": const diff --git a/tests/testfut.nim b/tests/testfut.nim index 23015b979..fa250a19b 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -8,7 +8,7 @@ import unittest2 import ../chronos, ../chronos/unittest2/asynctests -when defined(nimHasUsed): {.used.} +{.used.} suite "Future[T] behavior test suite": proc testFuture1(): Future[int] {.async.} = diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index 15f77d3f8..e04e2ab8c 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -10,7 +10,7 @@ import unittest2 import ../chronos, ../chronos/apps/http/[httpserver, shttpserver, httpclient] import stew/base10 -when defined(nimHasUsed): {.used.} +{.used.} # To create self-signed certificate and key you can use openssl # openssl req -new -x509 -sha256 -newkey rsa:2048 -nodes \ diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index b2000400c..8b71ca2e8 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -11,7 +11,7 @@ import ../chronos, ../chronos/apps/http/httpserver, ../chronos/apps/http/httpcommon import stew/base10 -when defined(nimHasUsed): {.used.} +{.used.} suite "HTTP server testing suite": type diff --git a/tests/testmacro.nim b/tests/testmacro.nim index e6c98125c..f50015fb1 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -9,7 +9,7 @@ import unittest2 import macros import ../chronos -when defined(nimHasUsed): {.used.} +{.used.} type RetValueType = proc(n: int): Future[int] {.async.} diff --git a/tests/testnet.nim b/tests/testnet.nim index ff784293d..419195d83 100644 --- a/tests/testnet.nim +++ b/tests/testnet.nim @@ -8,7 +8,7 @@ import unittest2 import ../chronos/transports/[osnet, ipnet] -when defined(nimHasUsed): {.used.} +{.used.} suite "Network utilities test suite": diff --git a/tests/testratelimit.nim b/tests/testratelimit.nim index 32b8e95dc..4c78664d9 100644 --- a/tests/testratelimit.nim +++ b/tests/testratelimit.nim @@ -6,7 +6,9 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import 
unittest +{.used.} + +import unittest2 import ../chronos import ../chronos/ratelimit diff --git a/tests/testserver.nim b/tests/testserver.nim index 8828bb227..e7e834e2d 100644 --- a/tests/testserver.nim +++ b/tests/testserver.nim @@ -8,7 +8,7 @@ import unittest2 import ../chronos -when defined(nimHasUsed): {.used.} +{.used.} suite "Server's test suite": type diff --git a/tests/testshttpserver.nim b/tests/testshttpserver.nim index 596a8daad..4b55b2701 100644 --- a/tests/testshttpserver.nim +++ b/tests/testshttpserver.nim @@ -10,7 +10,7 @@ import unittest2 import ../chronos, ../chronos/apps/http/shttpserver import stew/base10 -when defined(nimHasUsed): {.used.} +{.used.} # To create self-signed certificate and key you can use openssl # openssl req -new -x509 -sha256 -newkey rsa:2048 -nodes \ diff --git a/tests/testsignal.nim b/tests/testsignal.nim index 1163a8cc8..5eca5a94f 100644 --- a/tests/testsignal.nim +++ b/tests/testsignal.nim @@ -8,7 +8,7 @@ import unittest2 import ../chronos -when defined(nimHasUsed): {.used.} +{.used.} when not defined(windows): import posix diff --git a/tests/testsoon.nim b/tests/testsoon.nim index 8b76113b4..69bffd41a 100644 --- a/tests/testsoon.nim +++ b/tests/testsoon.nim @@ -8,7 +8,7 @@ import unittest2 import ../chronos -when defined(nimHasUsed): {.used.} +{.used.} suite "callSoon() tests suite": const CallSoonTests = 10 diff --git a/tests/teststream.nim b/tests/teststream.nim index e0ee9f3d9..90fd55de2 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -9,7 +9,7 @@ import std/[strutils, os] import unittest2 import ".."/chronos, ".."/chronos/osdefs -when defined(nimHasUsed): {.used.} +{.used.} when defined(windows): proc get_osfhandle*(fd: FileHandle): HANDLE {. diff --git a/tests/testsync.nim b/tests/testsync.nim index 9acea50d5..4e0968229 100644 --- a/tests/testsync.nim +++ b/tests/testsync.nim @@ -8,7 +8,7 @@ import unittest2 import ../chronos -when defined(nimHasUsed): {.used.} +{.used.} suite "Asynchronous sync primitives test suite": var testLockResult {.threadvar.}: string diff --git a/tests/testtime.nim b/tests/testtime.nim index 430db8f8b..aac926efd 100644 --- a/tests/testtime.nim +++ b/tests/testtime.nim @@ -9,7 +9,7 @@ import std/os import unittest2 import ../chronos, ../chronos/timer -when defined(nimHasUsed): {.used.} +{.used.} static: doAssert Moment.high - Moment.low == Duration.high diff --git a/tests/testutils.nim b/tests/testutils.nim index 67a59ab3b..f45819007 100644 --- a/tests/testutils.nim +++ b/tests/testutils.nim @@ -6,12 +6,12 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import unittest2 -import ../chronos +import ../chronos, ../chronos/config -when defined(nimHasUsed): {.used.} +{.used.} suite "Asynchronous utilities test suite": - when defined(chronosFutureTracking): + when chronosFutureTracking: proc getCount(): uint = # This procedure counts number of Future[T] in double-linked list via list # iteration. 
@@ -21,7 +21,7 @@ suite "Asynchronous utilities test suite": res test "Future clean and leaks test": - when defined(chronosFutureTracking): + when chronosFutureTracking: if pendingFuturesCount(WithoutFinished) == 0'u: if pendingFuturesCount(OnlyFinished) > 0'u: poll() @@ -33,7 +33,7 @@ suite "Asynchronous utilities test suite": skip() test "FutureList basics test": - when defined(chronosFutureTracking): + when chronosFutureTracking: var fut1 = newFuture[void]() check: getCount() == 1'u @@ -65,7 +65,7 @@ suite "Asynchronous utilities test suite": skip() test "FutureList async procedure test": - when defined(chronosFutureTracking): + when chronosFutureTracking: proc simpleProc() {.async.} = await sleepAsync(10.milliseconds) From ab5a8c2e0f6941fe3debd61dff0293790079d1b0 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Mon, 3 Apr 2023 14:34:35 +0200 Subject: [PATCH 006/146] Add `localAddress` support to `stream.connect` (#362) * Add `localAddress` support to `stream.connect` * fix windows * TransportAddress() instead of AnyAddress * tweak flags * Better flags * try to workaround nim 1.2 issue * Handle ReusePort in createStreamServer and improve tests * Rename ClientFlags to SocketFlags --------- Co-authored-by: Diego --- chronos/transports/stream.nim | 92 +++++++++++++++++++++++++++++++++-- tests/testasyncstream.nim | 2 +- tests/teststream.nim | 43 ++++++++++++++++ 3 files changed, 131 insertions(+), 6 deletions(-) diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index ef9641698..2c74085a8 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -50,7 +50,12 @@ type # get stuck on transport `close()`. # Please use this flag only if you are making both client and server in # the same thread. - TcpNoDelay + TcpNoDelay # deprecated: Use SocketFlags.TcpNoDelay + + SocketFlags* {.pure.} = enum + TcpNoDelay, + ReuseAddr, + ReusePort StreamTransportTracker* = ref object of TrackerBase @@ -699,7 +704,9 @@ when defined(windows): proc connect*(address: TransportAddress, bufferSize = DefaultStreamBufferSize, child: StreamTransport = nil, - flags: set[TransportFlags] = {}): Future[StreamTransport] = + localAddress = TransportAddress(), + flags: set[SocketFlags] = {}, + ): Future[StreamTransport] = ## Open new connection to remote peer with address ``address`` and create ## new transport object ``StreamTransport`` for established connection. ## ``bufferSize`` is size of internal buffer for transport. 
@@ -724,7 +731,35 @@ when defined(windows): retFuture.fail(getTransportOsError(osLastError())) return retFuture - if not(bindToDomain(sock, raddress.getDomain())): + if SocketFlags.ReuseAddr in flags: + if not(setSockOpt(sock, SOL_SOCKET, SO_REUSEADDR, 1)): + let err = osLastError() + sock.closeSocket() + retFuture.fail(getTransportOsError(err)) + return retFuture + if SocketFlags.ReusePort in flags: + if not(setSockOpt(sock, SOL_SOCKET, SO_REUSEPORT, 1)): + let err = osLastError() + sock.closeSocket() + retFuture.fail(getTransportOsError(err)) + return retFuture + + if localAddress != TransportAddress(): + if localAddress.family != address.family: + sock.closeSocket() + retFuture.fail(newException(TransportOsError, + "connect local address domain is not equal to target address domain")) + return retFuture + var + localAddr: Sockaddr_storage + localAddrLen: SockLen + localAddress.toSAddr(localAddr, localAddrLen) + if bindSocket(SocketHandle(sock), + cast[ptr SockAddr](addr localAddr), localAddrLen) != 0: + sock.closeSocket() + retFuture.fail(getTransportOsError(osLastError())) + return retFuture + elif not(bindToDomain(sock, raddress.getDomain())): let err = wsaGetLastError() sock.closeSocket() retFuture.fail(getTransportOsError(err)) @@ -1496,7 +1531,9 @@ else: proc connect*(address: TransportAddress, bufferSize = DefaultStreamBufferSize, child: StreamTransport = nil, - flags: set[TransportFlags] = {}): Future[StreamTransport] = + localAddress = TransportAddress(), + flags: set[SocketFlags] = {}, + ): Future[StreamTransport] = ## Open new connection to remote peer with address ``address`` and create ## new transport object ``StreamTransport`` for established connection. ## ``bufferSize`` - size of internal buffer for transport. @@ -1523,12 +1560,40 @@ else: return retFuture if address.family in {AddressFamily.IPv4, AddressFamily.IPv6}: - if TransportFlags.TcpNoDelay in flags: + if SocketFlags.TcpNoDelay in flags: if not(setSockOpt(sock, osdefs.IPPROTO_TCP, osdefs.TCP_NODELAY, 1)): let err = osLastError() sock.closeSocket() retFuture.fail(getTransportOsError(err)) return retFuture + if SocketFlags.ReuseAddr in flags: + if not(setSockOpt(sock, SOL_SOCKET, SO_REUSEADDR, 1)): + let err = osLastError() + sock.closeSocket() + retFuture.fail(getTransportOsError(err)) + return retFuture + if SocketFlags.ReusePort in flags: + if not(setSockOpt(sock, SOL_SOCKET, SO_REUSEPORT, 1)): + let err = osLastError() + sock.closeSocket() + retFuture.fail(getTransportOsError(err)) + return retFuture + + if localAddress != TransportAddress(): + if localAddress.family != address.family: + sock.closeSocket() + retFuture.fail(newException(TransportOsError, + "connect local address domain is not equal to target address domain")) + return retFuture + var + localAddr: Sockaddr_storage + localAddrLen: SockLen + localAddress.toSAddr(localAddr, localAddrLen) + if bindSocket(SocketHandle(sock), + cast[ptr SockAddr](addr localAddr), localAddrLen) != 0: + sock.closeSocket() + retFuture.fail(getTransportOsError(osLastError())) + return retFuture proc continuation(udata: pointer) = if not(retFuture.finished()): @@ -1776,6 +1841,16 @@ proc join*(server: StreamServer): Future[void] = retFuture.complete() return retFuture +proc connect*(address: TransportAddress, + bufferSize = DefaultStreamBufferSize, + child: StreamTransport = nil, + flags: set[TransportFlags], + localAddress = TransportAddress()): Future[StreamTransport] = + # Retro compatibility with TransportFlags + var mappedFlags: set[SocketFlags] + if TcpNoDelay in 
flags: mappedFlags.incl(SocketFlags.TcpNoDelay) + address.connect(bufferSize, child, localAddress, mappedFlags) + proc close*(server: StreamServer) = ## Release ``server`` resources. ## @@ -1864,6 +1939,13 @@ proc createStreamServer*(host: TransportAddress, if sock == asyncInvalidSocket: discard closeFd(SocketHandle(serverSocket)) raiseTransportOsError(err) + if ServerFlags.ReusePort in flags: + if not(setSockOpt(serverSocket, osdefs.SOL_SOCKET, + osdefs.SO_REUSEPORT, 1)): + let err = osLastError() + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(serverSocket)) + raiseTransportOsError(err) # TCP flags are not useful for Unix domain sockets. if ServerFlags.TcpNoDelay in flags: if not(setSockOpt(serverSocket, osdefs.IPPROTO_TCP, diff --git a/tests/testasyncstream.nim b/tests/testasyncstream.nim index fd581cb9a..47a6c9424 100644 --- a/tests/testasyncstream.nim +++ b/tests/testasyncstream.nim @@ -958,7 +958,7 @@ suite "TLSStream test suite": key = TLSPrivateKey.init(pemkey) cert = TLSCertificate.init(pemcert) - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(address, serveClient, {ServerFlags.ReuseAddr}) server.start() var conn = await connect(address) var creader = newAsyncStreamReader(conn) diff --git a/tests/teststream.nim b/tests/teststream.nim index 90fd55de2..c76ccf6fa 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -1259,6 +1259,47 @@ suite "Stream Transport test suite": await allFutures(rtransp.closeWait(), wtransp.closeWait()) return buffer == message + proc testConnectBindLocalAddress() {.async.} = + let dst1 = initTAddress("127.0.0.1:33335") + let dst2 = initTAddress("127.0.0.1:33336") + let dst3 = initTAddress("127.0.0.1:33337") + + proc client(server: StreamServer, transp: StreamTransport) {.async.} = + await transp.closeWait() + + # We use ReuseAddr here only to be able to reuse the same IP/Port when there's a TIME_WAIT socket. It's useful when + # running the test multiple times or if a test ran previously used the same port. + let servers = + [createStreamServer(dst1, client, {ReuseAddr}), + createStreamServer(dst2, client, {ReuseAddr}), + createStreamServer(dst3, client, {ReusePort})] + + for server in servers: + server.start() + + let ta = initTAddress("0.0.0.0:35000") + + # It works cause there's no active listening socket bound to ta and we are using ReuseAddr + var transp1 = await connect(dst1, localAddress = ta, flags={SocketFlags.ReuseAddr}) + var transp2 = await connect(dst2, localAddress = ta, flags={SocketFlags.ReuseAddr}) + + # It works cause even thought there's an active listening socket bound to dst3, we are using ReusePort + var transp3 = await connect(dst2, localAddress = dst3, flags={SocketFlags.ReusePort}) + + expect(TransportOsError): + var transp2 = await connect(dst3, localAddress = ta) + + expect(TransportOsError): + var transp3 = await connect(dst3, localAddress = initTAddress(":::35000")) + + await transp1.closeWait() + await transp2.closeWait() + await transp3.closeWait() + + for server in servers: + server.stop() + await server.closeWait() + markFD = getCurrentFD() for i in 0.. Date: Sat, 8 Apr 2023 19:34:57 +0300 Subject: [PATCH 007/146] Fix some compilation warnings and expose tracker names. (#376) * Fix asyncsync compilation warnings. * Fix tracker names should be public. 
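
With the tracker names exported, downstream test suites can query the chronos
leak trackers without hard-coding the string literals. The following is a
minimal illustrative sketch only (not part of this patch), assuming the
`isLeaked` and `dump` callbacks exposed by `TrackerBase` in this chronos
version:

    import chronos

    proc checkStreamTransportLeaks() =
      # Look up the transport tracker via the exported constant instead of
      # the raw "stream.transport" string literal.
      let tracker = getTracker(StreamTransportTrackerName)
      if not(isNil(tracker)) and not(isNil(tracker.isLeaked)):
        doAssert(not(tracker.isLeaked()), tracker.dump())
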
--- chronos/asyncsync.nim | 10 ++++++++-- chronos/transports/datagram.nim | 2 +- chronos/transports/stream.nim | 4 ++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/chronos/asyncsync.nim b/chronos/asyncsync.nim index 12feffdc3..4a0a13ec7 100644 --- a/chronos/asyncsync.nim +++ b/chronos/asyncsync.nim @@ -154,7 +154,10 @@ proc wakeUpFirst(lock: AsyncLock): bool {.inline.} = res = true break if i > 0: - lock.waiters.delete(0, i - 1) + when compiles(lock.waiters.delete(0 .. (i - 1))): + lock.waiters.delete(0 .. (i - 1)) + else: + lock.waiters.delete(0, i - 1) res proc checkAll(lock: AsyncLock): bool {.inline.} = @@ -272,7 +275,10 @@ proc wakeupNext(waiters: var seq[Future[void]]) {.inline.} = break if i > 0: - waiters.delete(0, i - 1) + when compiles(waiters.delete(0 .. (i - 1))): + waiters.delete(0 .. (i - 1)) + else: + waiters.delete(0, i - 1) proc full*[T](aq: AsyncQueue[T]): bool {.inline.} = ## Return ``true`` if there are ``maxsize`` items in the queue. diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index 32eba3213..f372af2b1 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -61,7 +61,7 @@ type closed*: int64 const - DgramTransportTrackerName = "datagram.transport" + DgramTransportTrackerName* = "datagram.transport" proc remoteAddress*(transp: DatagramTransport): TransportAddress {. raises: [Defect, TransportOsError].} = diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 2c74085a8..4264e77f9 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -71,8 +71,8 @@ type gcsafe, raises: [Defect].} const - StreamTransportTrackerName = "stream.transport" - StreamServerTrackerName = "stream.server" + StreamTransportTrackerName* = "stream.transport" + StreamServerTrackerName* = "stream.server" when defined(windows): type From 3118f8c1b2f9fc4448f7002372878806f1695a75 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sun, 30 Apr 2023 09:20:08 +0300 Subject: [PATCH 008/146] Fix cast[pointer] issues on nim-devel. (#381) * Fix cast[pointer] issues on nim-devel. * More cast[T] fixes. * Fix compilation error. * Add oserrno.nim Further fixes for Windows and Linux. * MacOS fixes. * More Windows fixes and attempt to fix 1.2, 1.4 branches. * Implicitly import/export oserrno. * Replace oserrno with osdefs. * Return back oserrno. * epoll to oserrno. * datagram/stream to oserrno. * common to oserrno. * test to oserrno. 
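The change below re-types the platform errno and Winsock constants as `OSErrorCode`, so call sites can dispatch on `osLastError()` directly instead of comparing through `int(err)`. A rough standalone illustration of the resulting call-site pattern follows; the constant names and numeric values are placeholders for the sketch, not the real platform definitions, which live in the new chronos/oserrno.nim shown further down:

  from std/os import OSErrorCode

  const
    SketchEINTR  = OSErrorCode(4)    # illustrative values only
    SketchEAGAIN = OSErrorCode(11)

  proc shouldRetry(err: OSErrorCode): bool =
    # With typed constants, the selector and the `of` values share one type,
    # so no int(...) conversions are needed at the call site.
    case err
    of SketchEINTR, SketchEAGAIN: true
    else: false

  when isMainModule:
    doAssert shouldRetry(OSErrorCode(11))
    doAssert not shouldRetry(OSErrorCode(2))
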
--- chronos/asyncloop.nim | 75 +- chronos/ioselects/ioselectors_epoll.nim | 11 +- chronos/ioselects/ioselectors_kqueue.nim | 20 +- chronos/osdefs.nim | 87 +- chronos/oserrno.nim | 1346 ++++++++++++++++++++++ chronos/osutils.nim | 12 +- chronos/selectors2.nim | 4 +- chronos/transports/common.nim | 67 +- chronos/transports/datagram.nim | 30 +- chronos/transports/osnet.nim | 13 +- chronos/transports/stream.nim | 310 ++--- tests/teststream.nim | 9 +- 12 files changed, 1690 insertions(+), 294 deletions(-) create mode 100644 chronos/oserrno.nim diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index abf3edcc2..6e3bd9acd 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -152,14 +152,15 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or defined(openbsd) or defined(dragonfly) or defined(macos) or defined(linux) or defined(android) or defined(solaris): import "."/selectors2 - from posix import EINTR, EAGAIN, EINPROGRESS, EWOULDBLOCK, MSG_PEEK, - MSG_NOSIGNAL, + import "."/oserrno + from posix import MSG_PEEK, MSG_NOSIGNAL, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE export SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE + export oserrno type CallbackFunc* = proc (arg: pointer) {.gcsafe, raises: [Defect].} @@ -282,6 +283,12 @@ proc raiseOsDefect*(error: OSErrorCode, msg = "") {.noreturn, noinline.} = raise (ref Defect)(msg: msg & "\n[" & $int(error) & "] " & osErrorMsg(error) & "\n" & getStackTrace()) +func toPointer(error: OSErrorCode): pointer = + when sizeof(int) == 8: + cast[pointer](uint64(uint32(error))) + else: + cast[pointer](uint32(error)) + func toException*(v: OSErrorCode): ref OSError = newOSError(v) # This helper will allow to use `tryGet()` and raise OSError for # Result[T, OSErrorCode] values. @@ -518,27 +525,30 @@ when defined(windows): ## Closes a socket and ensures that it is unregistered. let loop = getThreadDispatcher() loop.handles.excl(fd) - let param = - if closeFd(SocketHandle(fd)) == 0: - OSErrorCode(0) - else: - osLastError() - if not isNil(aftercb): - var acb = AsyncCallback(function: aftercb, udata: cast[pointer](param)) - loop.callbacks.addLast(acb) + let + param = toPointer( + if closeFd(SocketHandle(fd)) == 0: + OSErrorCode(0) + else: + osLastError() + ) + if not(isNil(aftercb)): + loop.callbacks.addLast(AsyncCallback(function: aftercb, udata: param)) proc closeHandle*(fd: AsyncFD, aftercb: CallbackFunc = nil) = ## Closes a (pipe/file) handle and ensures that it is unregistered. let loop = getThreadDispatcher() loop.handles.excl(fd) - let param = - if closeFd(HANDLE(fd)) == 0: - OSErrorCode(0) - else: - osLastError() - if not isNil(aftercb): - var acb = AsyncCallback(function: aftercb, udata: cast[pointer](param)) - loop.callbacks.addLast(acb) + let + param = toPointer( + if closeFd(HANDLE(fd)) == 0: + OSErrorCode(0) + else: + osLastError() + ) + + if not(isNil(aftercb)): + loop.callbacks.addLast(AsyncCallback(function: aftercb, udata: param)) proc contains*(disp: PDispatcher, fd: AsyncFD): bool = ## Returns ``true`` if ``fd`` is registered in thread's dispatcher. 
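The `toPointer` helper introduced above packs a 32-bit OS error code into the `pointer`-typed `udata` argument of the close callbacks, widening it through an unsigned integer before the cast. A standalone sketch of the same pack/unpack round-trip, for illustration only (`packError` and `unpackError` are invented names, not part of the patch):

  proc packError(code: uint32): pointer =
    # Widen first, then cast, so the value occupies the low 32 bits of the
    # pointer on 64-bit targets and fits exactly on 32-bit ones.
    when sizeof(int) == 8:
      cast[pointer](uint64(code))
    else:
      cast[pointer](code)

  proc unpackError(udata: pointer): uint32 =
    when sizeof(int) == 8:
      uint32(cast[uint64](udata))
    else:
      cast[uint32](udata)

  when isMainModule:
    doAssert unpackError(packError(997'u32)) == 997'u32
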
@@ -720,21 +730,22 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or let loop = getThreadDispatcher() proc continuation(udata: pointer) = - let param = - if SocketHandle(fd) in loop.selector: - let ures = unregister2(fd) - if ures.isErr(): - discard closeFd(cint(fd)) - ures.error() - else: - if closeFd(cint(fd)) != 0: - osLastError() + let + param = toPointer( + if SocketHandle(fd) in loop.selector: + let ures = unregister2(fd) + if ures.isErr(): + discard closeFd(cint(fd)) + ures.error() else: - OSErrorCode(0) - else: - OSErrorCode(osdefs.EBADF) - if not isNil(aftercb): - aftercb(cast[pointer](param)) + if closeFd(cint(fd)) != 0: + osLastError() + else: + OSErrorCode(0) + else: + OSErrorCode(osdefs.EBADF) + ) + if not(isNil(aftercb)): aftercb(param) withData(loop.selector, cint(fd), adata) do: # We are scheduling reader and writer callbacks to be called diff --git a/chronos/ioselects/ioselectors_epoll.nim b/chronos/ioselects/ioselectors_epoll.nim index 187479fff..3eed8707b 100644 --- a/chronos/ioselects/ioselectors_epoll.nim +++ b/chronos/ioselects/ioselectors_epoll.nim @@ -42,7 +42,7 @@ proc getVirtualId[T](s: Selector[T]): SelectResult[int32] = ok(s.virtualHoles.popLast()) else: if s.virtualId == low(int32): - err(OSErrorCode(EMFILE)) + err(oserrno.EMFILE) else: dec(s.virtualId) ok(s.virtualId) @@ -139,7 +139,7 @@ proc trigger2*(event: SelectEvent): SelectResult[void] = if res == -1: err(osLastError()) elif res != sizeof(uint64): - err(OSErrorCode(osdefs.EINVAL)) + err(oserrno.EINVAL) else: ok() @@ -521,11 +521,11 @@ proc prepareKey[T](s: Selector[T], event: EpollEvent): Opt[ReadyKey] = if (event.events and EPOLLERR) != 0: rkey.events.incl(Event.Error) - rkey.errorCode = OSErrorCode(ECONNRESET) + rkey.errorCode = oserrno.ECONNRESET if (event.events and EPOLLHUP) != 0 or (event.events and EPOLLRDHUP) != 0: rkey.events.incl(Event.Error) - rkey.errorCode = OSErrorCode(ECONNRESET) + rkey.errorCode = oserrno.ECONNRESET if (event.events and EPOLLOUT) != 0: rkey.events.incl(Event.Write) @@ -580,7 +580,8 @@ proc prepareKey[T](s: Selector[T], event: EpollEvent): Opt[ReadyKey] = let res = handleEintr(osdefs.read(fdi32, addr data, sizeof(uint64))) if res != sizeof(uint64): let errorCode = osLastError() - if errorCode == EAGAIN: + case errorCode + of oserrno.EAGAIN: return Opt.none(ReadyKey) else: rkey.events.incl({Event.User, Event.Error}) diff --git a/chronos/ioselects/ioselectors_kqueue.nim b/chronos/ioselects/ioselectors_kqueue.nim index 4ff746e58..dc9567114 100644 --- a/chronos/ioselects/ioselectors_kqueue.nim +++ b/chronos/ioselects/ioselectors_kqueue.nim @@ -58,6 +58,12 @@ proc toString(key: int32|cint|SocketHandle|int): string = else: Base10.toString(uint32(fdi32)) +proc toPointer(data: int32): pointer = + when sizeof(int) == 8: + cast[pointer](uint64(uint32(data))) + else: + cast[pointer](uint32(data)) + template addKey[T](s: Selector[T], key: int32, skey: SelectorKey[T]) = if s.fds.hasKeyOrPut(key, skey): raiseAssert "Descriptor [" & key.toString() & @@ -154,7 +160,7 @@ proc trigger2*(event: SelectEvent): SelectResult[void] = if res == -1: err(osLastError()) elif res != sizeof(uint64): - err(OSErrorCode(osdefs.EINVAL)) + err(oserrno.EINVAL) else: ok() @@ -310,7 +316,7 @@ proc registerSignal*[T](s: Selector[T], signal: int, # To be compatible with linux semantic we need to "eat" signals signal(cint(signal), SIG_IGN) changes.modifyKQueue(0, uint(signal), EVFILT_SIGNAL, EV_ADD, 0, 0, - cast[pointer](uint32(fdi32))) + fdi32.toPointer()) if handleEintr(kevent(s.kqFd, 
addr(changes[0]), cint(1), nil, 0, nil)) == -1: let errorCode = osLastError() s.freeKey(fdi32) @@ -341,7 +347,7 @@ proc registerProcess*[T](s: Selector[T], pid: int, s.addKey(fdi32, selectorKey) changes.modifyKQueue(0, uint(uint32(pid)), EVFILT_PROC, flags, NOTE_EXIT, - 0, cast[pointer](uint32(fdi32))) + 0, fdi32.toPointer()) if handleEintr(kevent(s.kqFd, addr(changes[0]), cint(1), nil, 0, nil)) == -1: s.freeKey(fdi32) return err(osLastError()) @@ -490,14 +496,14 @@ proc prepareKey[T](s: Selector[T], event: KEvent): Opt[ReadyKey] = of EVFILT_READ: if (event.flags and EV_EOF) != 0: rkey.events.incl(Event.Error) - rkey.errorCode = OSErrorCode(ECONNRESET) + rkey.errorCode = oserrno.ECONNRESET if Event.User in pkey.events: var data: uint64 = 0 if handleEintr(osdefs.read(cint(event.ident), addr data, sizeof(uint64))) != sizeof(uint64): let errorCode = osLastError() - if errorCode == EAGAIN: + if errorCode == oserrno.EAGAIN: # Someone already consumed event data return Opt.none(ReadyKey) else: @@ -510,7 +516,7 @@ proc prepareKey[T](s: Selector[T], event: KEvent): Opt[ReadyKey] = of EVFILT_WRITE: if (event.flags and EV_EOF) != 0: rkey.events.incl(Event.Error) - rkey.errorCode = OSErrorCode(ECONNRESET) + rkey.errorCode = oserrno.ECONNRESET rkey.events.incl(Event.Write) @@ -577,7 +583,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int, maxEventsCount, ptrTimeout) if res < 0: let errorCode = osLastError() - if errorCode == EINTR: + if errorCode == oserrno.EINTR: continue return err(errorCode) else: diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 07eb90cc8..0d7bfabe6 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -6,18 +6,10 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) - -from std/os import osLastError, osErrorMsg, OSErrorCode, raiseOSError, - newOSError -export osLastError, osErrorMsg, OSError, OSErrorCode, raiseOSError, newOSError +import oserrno +export oserrno when defined(windows): - from std/winlean import SocketHandle, SockLen, SockAddr, InAddr, - In6_addr, Sockaddr_in, Sockaddr_in6, Sockaddr_storage, - AddrInfo - export SocketHandle, SockLen, SockAddr, InAddr, - In6_addr, Sockaddr_in, Sockaddr_in6, Sockaddr_storage, AddrInfo - # Prerequisites for constants template WSAIORW*(x, y): untyped = (IOC_INOUT or x or y) template WSAIOW*(x, y): untyped = @@ -25,6 +17,49 @@ when defined(windows): ((clong(sizeof(int32)) and clong(IOCPARM_MASK)) shl 16) or (x shl 8) or y type + Sockaddr_storage* {.final, pure.} = object + ss_family*: uint16 + ss_pad1: array[6, byte] + ss_align: int64 + ss_pad2: array[112, byte] + + InAddr* {.final, pure, union.} = object + s_addr*: uint32 + + In6Addr* {.final, pure, union.} = object + s_addr*: array[16, byte] + + Sockaddr_in* {.final, pure.} = object + sin_family*: uint16 + sin_port*: uint16 + sin_addr*: InAddr + sin_zero*: array[0..7, char] + + Sockaddr_in6* {.final, pure.} = object + sin6_family*: uint16 + sin6_port*: uint16 + sin6_flowinfo*: uint32 + sin6_addr*: In6Addr + sin6_scope_id*: uint32 + + SockLen* = cuint + + SockAddr* {.final, pure.} = object + sa_family*: uint16 + sa_data*: array[14, char] + + AddrInfo* {.final, pure.} = object + ai_flags*: cint ## Input flags. + ai_family*: cint ## Address family of socket. + ai_socktype*: cint ## Socket type. + ai_protocol*: cint ## Protocol of socket. + ai_addrlen*: csize_t ## Length of socket address. + ai_canonname*: pointer ## Canonical name of service location. + ai_addr*: ptr SockAddr ## Socket address of socket. 
+ ai_next*: ptr AddrInfo ## Pointer to next in list. + + SocketHandle* = distinct int + HANDLE* = distinct uint GUID* {.final, pure.} = object D1*: uint32 @@ -104,36 +139,6 @@ when defined(windows): PIPE_UNLIMITED_INSTANCES* = 255'u32 DEFAULT_PIPE_SIZE* = 65536'u32 - ERROR_SUCCESS* = 0 - ERROR_FILE_NOT_FOUND* = 2 - ERROR_TOO_MANY_OPEN_FILES* = 4 - ERROR_ACCESS_DENIED* = 5 - ERROR_BROKEN_PIPE* = 109 - ERROR_BUFFER_OVERFLOW* = 111 - ERROR_PIPE_BUSY* = 231 - ERROR_NO_DATA* = 232 - ERROR_PIPE_NOT_CONNECTED* = 233 - ERROR_PIPE_CONNECTED* = 535 - ERROR_OPERATION_ABORTED* = 995 - ERROR_IO_PENDING* = 997 - ERROR_CONNECTION_REFUSED* = 1225 - ERROR_CONNECTION_ABORTED* = 1236 - - WSAEMFILE* = 10024 - WSAENETDOWN* = 10050 - WSAENETRESET* = 10052 - WSAECONNABORTED* = 10053 - WSAECONNRESET* = 10054 - WSAENOBUFS* = 10055 - WSAETIMEDOUT* = 10060 - WSAEADDRINUSE* = 10048 - WSAEDISCON* = 10101 - WSANOTINITIALISED* = 10093 - WSAENOTSOCK* = 10038 - WSAEINPROGRESS* = 10036 - WSAEINTR* = 10004 - WSAEWOULDBLOCK* = 10035 - ERROR_NETNAME_DELETED* = 64 STATUS_PENDING* = 0x103 IOCPARM_MASK* = 0x7f'u32 @@ -1283,8 +1288,6 @@ when defined(posix): INVALID_SOCKET* = SocketHandle(-1) INVALID_HANDLE_VALUE* = cint(-1) -proc `==`*(x: OSErrorCode, y: int): bool = - int(x) == y proc `==`*(x: SocketHandle, y: int): bool = x == SocketHandle(y) diff --git a/chronos/oserrno.nim b/chronos/oserrno.nim new file mode 100644 index 000000000..5cacb223c --- /dev/null +++ b/chronos/oserrno.nim @@ -0,0 +1,1346 @@ +# +# Chronos Posix OS error codes +# (c) Copyright 2023-Present +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +from std/os import osLastError, osErrorMsg, OSErrorCode, raiseOSError, + newOSError, `==` +export osLastError, osErrorMsg, OSError, OSErrorCode, raiseOSError, newOSError, + `==` + +when defined(netbsd): + ## Source: https://github.com/NetBSD/src/blob/trunk/sys/sys/errno.h + const + EPERM* = OSErrorCode(1) + # Operation not permitted + ENOENT* = OSErrorCode(2) + # No such file or directory + ESRCH* = OSErrorCode(3) + # No such process + EINTR* = OSErrorCode(4) + # Interrupted system call + EIO* = OSErrorCode(5) + # Input/output error + ENXIO* = OSErrorCode(6) + # Device not configured + E2BIG* = OSErrorCode(7) + # Argument list too long + ENOEXEC* = OSErrorCode(8) + # Exec format error + EBADF* = OSErrorCode(9) + # Bad file descriptor + ECHILD* = OSErrorCode(10) + # No child processes + EDEADLK* = OSErrorCode(11) + # Resource deadlock avoided + ENOMEM* = OSErrorCode(12) + # Cannot allocate memory + EACCES* = OSErrorCode(13) + # Permission denied + EFAULT* = OSErrorCode(14) + # Bad address + ENOTBLK* = OSErrorCode(15) + # Block device required + EBUSY* = OSErrorCode(16) + # Device busy + EEXIST* = OSErrorCode(17) + # File exists + EXDEV* = OSErrorCode(18) + # Cross-device link + ENODEV* = OSErrorCode(19) + # Operation not supported by device + ENOTDIR* = OSErrorCode(20) + # Not a directory + EISDIR* = OSErrorCode(21) + # Is a directory + EINVAL* = OSErrorCode(22) + # Invalid argument + ENFILE* = OSErrorCode(23) + # Too many open files in system + EMFILE* = OSErrorCode(24) + # Too many open files + ENOTTY* = OSErrorCode(25) + # Inappropriate ioctl for device + ETXTBSY* = OSErrorCode(26) + # Text file busy + EFBIG* = OSErrorCode(27) + # File too large + ENOSPC* = OSErrorCode(28) + # No space left on device + ESPIPE* = OSErrorCode(29) + # Illegal seek + EROFS* = OSErrorCode(30) + # Read-only file system + EMLINK* = 
OSErrorCode(31) + # Too many links + EPIPE* = OSErrorCode(32) + # Broken pipe + EDOM* = OSErrorCode(33) + # Numerical argument out of domain + ERANGE* = OSErrorCode(34) + # Result too large or too small + EAGAIN* = OSErrorCode(35) + # Resource temporarily unavailable + EWOULDBLOCK* = EAGAIN + # Operation would block + EINPROGRESS* = OSErrorCode(36) + # Operation now in progress + EALREADY* = OSErrorCode(37) + # Operation already in progress + ENOTSOCK* = OSErrorCode(38) + # Socket operation on non-socket + EDESTADDRREQ* = OSErrorCode(39) + # Destination address required + EMSGSIZE* = OSErrorCode(40) + # Message too long + EPROTOTYPE* = OSErrorCode(41) + # Protocol wrong type for socket + ENOPROTOOPT* = OSErrorCode(42) + # Protocol option not available + EPROTONOSUPPORT* = OSErrorCode(43) + # Protocol not supported + ESOCKTNOSUPPORT* = OSErrorCode(44) + # Socket type not supported + EOPNOTSUPP* = OSErrorCode(45) + # Operation not supported + EPFNOSUPPORT* = OSErrorCode(46) + # Protocol family not supported + EAFNOSUPPORT* = OSErrorCode(47) + # Address family not supported by protocol family + EADDRINUSE* = OSErrorCode(48) + # Address already in use + EADDRNOTAVAIL* = OSErrorCode(49) + # Can't assign requested address + ENETDOWN* = OSErrorCode(50) + # Network is down + ENETUNREACH* = OSErrorCode(51) + # Network is unreachable + ENETRESET* = OSErrorCode(52) + # Network dropped connection on reset + ECONNABORTED* = OSErrorCode(53) + # Software caused connection abort + ECONNRESET* = OSErrorCode(54) + # Connection reset by peer + ENOBUFS* = OSErrorCode(55) + # No buffer space available + EISCONN* = OSErrorCode(56) + # Socket is already connected + ENOTCONN* = OSErrorCode(57) + # Socket is not connected + ESHUTDOWN* = OSErrorCode(58) + # Can't send after socket shutdown + ETOOMANYREFS* = OSErrorCode(59) + # Too many references: can't splice + ETIMEDOUT* = OSErrorCode(60) + # Operation timed out + ECONNREFUSED* = OSErrorCode(61) + # Connection refused + ELOOP* = OSErrorCode(62) + # Too many levels of symbolic links + ENAMETOOLONG* = OSErrorCode(63) + # File name too long + EHOSTDOWN* = OSErrorCode(64) + # Host is down + EHOSTUNREACH* = OSErrorCode(65) + # No route to host + ENOTEMPTY* = OSErrorCode(66) + # Directory not empty + EPROCLIM* = OSErrorCode(67) + # Too many processes + EUSERS* = OSErrorCode(68) + # Too many users + EDQUOT* = OSErrorCode(69) + # Disc quota exceeded + ESTALE* = OSErrorCode(70) + # Stale NFS file handle + EREMOTE* = OSErrorCode(71) + # Too many levels of remote in path + EBADRPC* = OSErrorCode(72) + # RPC struct is bad + ERPCMISMATCH* = OSErrorCode(73) + # RPC version wrong + EPROGUNAVAIL* = OSErrorCode(74) + # RPC prog. 
not avail + EPROGMISMATCH* = OSErrorCode(75) + # Program version wrong + EPROCUNAVAIL* = OSErrorCode(76) + # Bad procedure for program + ENOLCK* = OSErrorCode(77) + # No locks available + ENOSYS* = OSErrorCode(78) + # Function not implemented + EFTYPE* = OSErrorCode(79) + # Inappropriate file type or format + EAUTH* = OSErrorCode(80) + # Authentication error + ENEEDAUTH* = OSErrorCode(81) + # Need authenticator + EIDRM* = OSErrorCode(82) + # Identifier removed + ENOMSG* = OSErrorCode(83) + # No message of desired type + EOVERFLOW* = OSErrorCode(84) + # Value too large to be stored in data type + EILSEQ* = OSErrorCode(85) + # Illegal byte sequence + ENOTSUP* = OSErrorCode(86) + # Not supported + ECANCELED* = OSErrorCode(87) + # Operation canceled + EBADMSG* = OSErrorCode(88) + # Bad or Corrupt message + ENODATA* = OSErrorCode(89) + # No message available + ENOSR* = OSErrorCode(90) + # No STREAM resources + ENOSTR* = OSErrorCode(91) + # Not a STREAM + ETIME* = OSErrorCode(92) + # STREAM ioctl timeout + ENOATTR* = OSErrorCode(93) + # Attribute not found + EMULTIHOP* = OSErrorCode(94) + # Multihop attempted + ENOLINK* = OSErrorCode(95) + # Link has been severed + EPROTO* = OSErrorCode(96) + # Protocol error + EOWNERDEAD* = OSErrorCode(97) + # Previous owner died + ENOTRECOVERABLE* = OSErrorCode(98) + # State not recoverable + ELAST* = OSErrorCode(98) + # Must equal largest errno + +elif defined(openbsd): + ## Source: https://github.com/openbsd/src/blob/master/sys/sys/errno.h + const + EPERM* = OSErrorCode(1) + # Operation not permitted + ENOENT* = OSErrorCode(2) + # No such file or directory + ESRCH* = OSErrorCode(3) + # No such process + EINTR* = OSErrorCode(4) + # Interrupted system call + EIO* = OSErrorCode(5) + # Input/output error + ENXIO* = OSErrorCode(6) + # Device not configured + E2BIG* = OSErrorCode(7) + # Argument list too long + ENOEXEC* = OSErrorCode(8) + # Exec format error + EBADF* = OSErrorCode(9) + # Bad file descriptor + ECHILD* = OSErrorCode(10) + # No child processes + EDEADLK* = OSErrorCode(11) + # Resource deadlock avoided + ENOMEM* = OSErrorCode(12) + # Cannot allocate memory + EACCES* = OSErrorCode(13) + # Permission denied + EFAULT* = OSErrorCode(14) + # Bad address + ENOTBLK* = OSErrorCode(15) + # Block device required + EBUSY* = OSErrorCode(16) + # Device busy + EEXIST* = OSErrorCode(17) + # File exists + EXDEV* = OSErrorCode(18) + # Cross-device link + ENODEV* = OSErrorCode(19) + # Operation not supported by device + ENOTDIR* = OSErrorCode(20) + # Not a directory + EISDIR* = OSErrorCode(21) + # Is a directory + EINVAL* = OSErrorCode(22) + # Invalid argument + ENFILE* = OSErrorCode(23) + # Too many open files in system + EMFILE* = OSErrorCode(24) + # Too many open files + ENOTTY* = OSErrorCode(25) + # Inappropriate ioctl for device + ETXTBSY* = OSErrorCode(26) + # Text file busy + EFBIG* = OSErrorCode(27) + # File too large + ENOSPC* = OSErrorCode(28) + # No space left on device + ESPIPE* = OSErrorCode(29) + # Illegal seek + EROFS* = OSErrorCode(30) + # Read-only file system + EMLINK* = OSErrorCode(31) + # Too many links + EPIPE* = OSErrorCode(32) + # Broken pipe + EDOM* = OSErrorCode(33) + # Numerical argument out of domain + ERANGE* = OSErrorCode(34) + # Result too large + EAGAIN* = OSErrorCode(35) + # Resource temporarily unavailable + EWOULDBLOCK* = EAGAIN + # Operation would block + EINPROGRESS* = OSErrorCode(36) + # Operation now in progress + EALREADY* = OSErrorCode(37) + # Operation already in progress + ENOTSOCK* = OSErrorCode(38) + # Socket operation on 
non-socket + EDESTADDRREQ* = OSErrorCode(39) + # Destination address required + EMSGSIZE* = OSErrorCode(40) + # Message too long + EPROTOTYPE* = OSErrorCode(41) + # Protocol wrong type for socket + ENOPROTOOPT* = OSErrorCode(42) + # Protocol not available + EPROTONOSUPPORT* = OSErrorCode(43) + # Protocol not supported + ESOCKTNOSUPPORT* = OSErrorCode(44) + # Socket type not supported + EOPNOTSUPP* = OSErrorCode(45) + # Operation not supported + EPFNOSUPPORT* = OSErrorCode(46) + # Protocol family not supported + EAFNOSUPPORT* = OSErrorCode(47) + # Address family not supported by protocol family + EADDRINUSE* = OSErrorCode(48) + # Address already in use + EADDRNOTAVAIL* = OSErrorCode(49) + # Can't assign requested address + ENETDOWN* = OSErrorCode(50) + # Network is down + ENETUNREACH* = OSErrorCode(51) + # Network is unreachable + ENETRESET* = OSErrorCode(52) + # Network dropped connection on reset + ECONNABORTED* = OSErrorCode(53) + # Software caused connection abort + ECONNRESET* = OSErrorCode(54) + # Connection reset by peer + ENOBUFS* = OSErrorCode(55) + # No buffer space available + EISCONN* = OSErrorCode(56) + # Socket is already connected + ENOTCONN* = OSErrorCode(57) + # Socket is not connected + ESHUTDOWN* = OSErrorCode(58) + # Can't send after socket shutdown + ETOOMANYREFS* = OSErrorCode(59) + # Too many references: can't splice + ETIMEDOUT* = OSErrorCode(60) + # Operation timed out + ECONNREFUSED* = OSErrorCode(61) + # Connection refused + ELOOP* = OSErrorCode(62) + # Too many levels of symbolic links + ENAMETOOLONG* = OSErrorCode(63) + # File name too long + EHOSTDOWN* = OSErrorCode(64) + # Host is down + EHOSTUNREACH* = OSErrorCode(65) + # No route to host + ENOTEMPTY* = OSErrorCode(66) + # Directory not empty + EPROCLIM* = OSErrorCode(67) + # Too many processes + EUSERS* = OSErrorCode(68) + # Too many users + EDQUOT* = OSErrorCode(69) + # Disk quota exceeded + ESTALE* = OSErrorCode(70) + # Stale NFS file handle + EREMOTE* = OSErrorCode(71) + # Too many levels of remote in path + EBADRPC* = OSErrorCode(72) + # RPC struct is bad + ERPCMISMATCH* = OSErrorCode(73) + # RPC version wrong + EPROGUNAVAIL* = OSErrorCode(74) + # RPC program not available + EPROGMISMATCH* = OSErrorCode(75) + # Program version wrong + EPROCUNAVAIL* = OSErrorCode(76) + # Bad procedure for program + ENOLCK* = OSErrorCode(77) + # No locks available + ENOSYS* = OSErrorCode(78) + # Function not implemented + EFTYPE* = OSErrorCode(79) + # Inappropriate file type or format + EAUTH* = OSErrorCode(80) + # Authentication error + ENEEDAUTH* = OSErrorCode(81) + # Need authenticator + EIPSEC* = OSErrorCode(82) + # IPsec processing failure + ENOATTR* = OSErrorCode(83) + # Attribute not found + EILSEQ* = OSErrorCode(84) + # Illegal byte sequence + ENOMEDIUM* = OSErrorCode(85) + # No medium found + EMEDIUMTYPE* = OSErrorCode(86) + # Wrong medium type + EOVERFLOW* = OSErrorCode(87) + # Value too large to be stored in data type + ECANCELED* = OSErrorCode(88) + # Operation canceled + EIDRM* = OSErrorCode(89) + # Identifier removed + ENOMSG* = OSErrorCode(90) + # No message of desired type + ENOTSUP* = OSErrorCode(91) + # Not supported + EBADMSG* = OSErrorCode(92) + # Bad message + ENOTRECOVERABLE* = OSErrorCode(93) + # State not recoverable + EOWNERDEAD* = OSErrorCode(94) + # Previous owner died + EPROTO* = OSErrorCode(95) + # Protocol error + ELAST* = OSErrorCode(95) + # Must be equal largest errno + +elif defined(freebsd): + ## Source: https://github.com/freebsd/freebsd-src/blob/main/sys/sys/errno.h + const + EPERM* = 
OSErrorCode(1) + # Operation not permitted + ENOENT* = OSErrorCode(2) + # No such file or directory + ESRCH* = OSErrorCode(3) + # No such process + EINTR* = OSErrorCode(4) + # Interrupted system call + EIO* = OSErrorCode(5) + # Input/output error + ENXIO* = OSErrorCode(6) + # Device not configured + E2BIG* = OSErrorCode(7) + # Argument list too long + ENOEXEC* = OSErrorCode(8) + # Exec format error + EBADF* = OSErrorCode(9) + # Bad file descriptor + ECHILD* = OSErrorCode(10) + # No child processes + EDEADLK* = OSErrorCode(11) + # Resource deadlock avoided + ENOMEM* = OSErrorCode(12) + # Cannot allocate memory + EACCES* = OSErrorCode(13) + # Permission denied + EFAULT* = OSErrorCode(14) + # Bad address + ENOTBLK* = OSErrorCode(15) + # Block device required + EBUSY* = OSErrorCode(16) + # Device busy + EEXIST* = OSErrorCode(17) + # File exists + EXDEV* = OSErrorCode(18) + # Cross-device link + ENODEV* = OSErrorCode(19) + # Operation not supported by device + ENOTDIR* = OSErrorCode(20) + # Not a directory + EISDIR* = OSErrorCode(21) + # Is a directory + EINVAL* = OSErrorCode(22) + # Invalid argument + ENFILE* = OSErrorCode(23) + # Too many open files in system + EMFILE* = OSErrorCode(24) + # Too many open files + ENOTTY* = OSErrorCode(25) + # Inappropriate ioctl for device + ETXTBSY* = OSErrorCode(26) + # Text file busy + EFBIG* = OSErrorCode(27) + # File too large + ENOSPC* = OSErrorCode(28) + # No space left on device + ESPIPE* = OSErrorCode(29) + # Illegal seek + EROFS* = OSErrorCode(30) + # Read-only filesystem + EMLINK* = OSErrorCode(31) + # Too many links + EPIPE* = OSErrorCode(32) + # Broken pipe + EDOM* = OSErrorCode(33) + # Numerical argument out of domain + ERANGE* = OSErrorCode(34) + # Result too large + EAGAIN* = OSErrorCode(35) + # Resource temporarily unavailable + EWOULDBLOCK* = EAGAIN + # Operation would block + EINPROGRESS* = OSErrorCode(36) + # Operation now in progress + EALREADY* = OSErrorCode(37) + # Operation already in progress + ENOTSOCK* = OSErrorCode(38) + # Socket operation on non-socket + EDESTADDRREQ* = OSErrorCode(39) + # Destination address required + EMSGSIZE* = OSErrorCode(40) + # Message too long + EPROTOTYPE* = OSErrorCode(41) + # Protocol wrong type for socket + ENOPROTOOPT* = OSErrorCode(42) + # Protocol not available + EPROTONOSUPPORT* = OSErrorCode(43) + # Protocol not supported + ESOCKTNOSUPPORT* = OSErrorCode(44) + # Socket type not supported + EOPNOTSUPP* = OSErrorCode(45) + # Operation not supported + ENOTSUP* = EOPNOTSUPP + # Operation not supported + EPFNOSUPPORT* = OSErrorCode(46) + # Protocol family not supported + EAFNOSUPPORT* = OSErrorCode(47) + # Address family not supported by protocol family + EADDRINUSE* = OSErrorCode(48) + # Address already in use + EADDRNOTAVAIL* = OSErrorCode(49) + # Can't assign requested address + ENETDOWN* = OSErrorCode(50) + # Network is down + ENETUNREACH* = OSErrorCode(51) + # Network is unreachable + ENETRESET* = OSErrorCode(52) + # Network dropped connection on reset + ECONNABORTED* = OSErrorCode(53) + # Software caused connection abort + ECONNRESET* = OSErrorCode(54) + # Connection reset by peer + ENOBUFS* = OSErrorCode(55) + # No buffer space available + EISCONN* = OSErrorCode(56) + # Socket is already connected + ENOTCONN* = OSErrorCode(57) + # Socket is not connected + ESHUTDOWN* = OSErrorCode(58) + # Can't send after socket shutdown + ETOOMANYREFS* = OSErrorCode(59) + # Too many references: can't splice + ETIMEDOUT* = OSErrorCode(60) + # Operation timed out + ECONNREFUSED* = OSErrorCode(61) + # Connection 
refused + ELOOP* = OSErrorCode(62) + # Too many levels of symbolic links + ENAMETOOLONG* = OSErrorCode(63) + # File name too long + EHOSTDOWN* = OSErrorCode(64) + # Host is down + EHOSTUNREACH* = OSErrorCode(65) + # No route to host + ENOTEMPTY* = OSErrorCode(66) + # Directory not empty + EPROCLIM* = OSErrorCode(67) + # Too many processes + EUSERS* = OSErrorCode(68) + # Too many users + EDQUOT* = OSErrorCode(69) + # Disc quota exceeded + ESTALE* = OSErrorCode(70) + # Stale NFS file handle + EREMOTE* = OSErrorCode(71) + # Too many levels of remote in path + EBADRPC* = OSErrorCode(72) + # RPC struct is bad + ERPCMISMATCH* = OSErrorCode(73) + # RPC version wrong + EPROGUNAVAIL* = OSErrorCode(74) + # RPC prog. not avail + EPROGMISMATCH* = OSErrorCode(75) + # Program version wrong + EPROCUNAVAIL* = OSErrorCode(76) + # Bad procedure for program + ENOLCK* = OSErrorCode(77) + # No locks available + ENOSYS* = OSErrorCode(78) + # Function not implemented + EFTYPE* = OSErrorCode(79) + # Inappropriate file type or format + EAUTH* = OSErrorCode(80) + # Authentication error + ENEEDAUTH* = OSErrorCode(81) + # Need authenticator + EIDRM* = OSErrorCode(82) + # Identifier removed + ENOMSG* = OSErrorCode(83) + # No message of desired type + EOVERFLOW* = OSErrorCode(84) + # Value too large to be stored in data type + ECANCELED* = OSErrorCode(85) + # Operation canceled + EILSEQ* = OSErrorCode(86) + # Illegal byte sequence + ENOATTR* = OSErrorCode(87) + # Attribute not found + EDOOFUS* = OSErrorCode(88) + # Programming error + EBADMSG* = OSErrorCode(89) + # Bad message + EMULTIHOP* = OSErrorCode(90) + # Multihop attempted + ENOLINK* = OSErrorCode(91) + # Link has been severed + EPROTO* = OSErrorCode(92) + # Protocol error + ENOTCAPABLE* = OSErrorCode(93) + # Capabilities insufficient + ECAPMODE* = OSErrorCode(94) + # Not permitted in capability mode + ENOTRECOVERABLE* = OSErrorCode(95) + # State not recoverable + EOWNERDEAD* = OSErrorCode(96) + # Previous owner died + EINTEGRITY* = OSErrorCode(97) + # Integrity check failed + ELAST* = OSErrorCode(97) + # Must be equal largest errno + +elif defined(dragonfly) or defined(dragonflybsd): + ## Source: https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/errno.h + const + EPERM* = OSErrorCode(1) + # Operation not permitted + ENOENT* = OSErrorCode(2) + # No such file or directory + ESRCH* = OSErrorCode(3) + # No such process + EINTR* = OSErrorCode(4) + # Interrupted system call + EIO* = OSErrorCode(5) + # Input/output error + ENXIO* = OSErrorCode(6) + # Device not configured + E2BIG* = OSErrorCode(7) + # Argument list too long + ENOEXEC* = OSErrorCode(8) + # Exec format error + EBADF* = OSErrorCode(9) + # Bad file descriptor + ECHILD* = OSErrorCode(10) + # No child processes + EDEADLK* = OSErrorCode(11) + # Resource deadlock avoided + ENOMEM* = OSErrorCode(12) + # Cannot allocate memory + EACCES* = OSErrorCode(13) + # Permission denied + EFAULT* = OSErrorCode(14) + # Bad address + ENOTBLK* = OSErrorCode(15) + # Block device required + EBUSY* = OSErrorCode(16) + # Device busy + EEXIST* = OSErrorCode(17) + # File exists + EXDEV* = OSErrorCode(18) + # Cross-device link + ENODEV* = OSErrorCode(19) + # Operation not supported by device + ENOTDIR* = OSErrorCode(20) + # Not a directory + EISDIR* = OSErrorCode(21) + # Is a directory + EINVAL* = OSErrorCode(22) + # Invalid argument + ENFILE* = OSErrorCode(23) + # Too many open files in system + EMFILE* = OSErrorCode(24) + # Too many open files + ENOTTY* = OSErrorCode(25) + # Inappropriate ioctl for device + 
ETXTBSY* = OSErrorCode(26) + # Text file busy + EFBIG* = OSErrorCode(27) + # File too large + ENOSPC* = OSErrorCode(28) + # No space left on device + ESPIPE* = OSErrorCode(29) + # Illegal seek + EROFS* = OSErrorCode(30) + # Read-only filesystem + EMLINK* = OSErrorCode(31) + # Too many links + EPIPE* = OSErrorCode(32) + # Broken pipe + EDOM* = OSErrorCode(33) + # Numerical argument out of domain + ERANGE* = OSErrorCode(34) + # Result too large + EAGAIN* = OSErrorCode(35) + # Resource temporarily unavailable + EWOULDBLOCK* = EAGAIN + # Operation would block + EINPROGRESS* = OSErrorCode(36) + # Operation now in progress + EALREADY* = OSErrorCode(37) + # Operation already in progress + ENOTSOCK* = OSErrorCode(38) + # Socket operation on non-socket + EDESTADDRREQ* = OSErrorCode(39) + # Destination address required + EMSGSIZE* = OSErrorCode(40) + # Message too long + EPROTOTYPE* = OSErrorCode(41) + # Protocol wrong type for socket + ENOPROTOOPT* = OSErrorCode(42) + # Protocol not available + EPROTONOSUPPORT* = OSErrorCode(43) + # Protocol not supported + ESOCKTNOSUPPORT* = OSErrorCode(44) + # Socket type not supported + EOPNOTSUPP* = OSErrorCode(45) + # Operation not supported + ENOTSUP* = EOPNOTSUPP + # Operation not supported + EPFNOSUPPORT* = OSErrorCode(46) + # Protocol family not supported + EAFNOSUPPORT* = OSErrorCode(47) + # Address family not supported by protocol family + EADDRINUSE* = OSErrorCode(48) + # Address already in use + EADDRNOTAVAIL* = OSErrorCode(49) + # Can't assign requested address + ENETDOWN* = OSErrorCode(50) + # Network is down + ENETUNREACH* = OSErrorCode(51) + # Network is unreachable + ENETRESET* = OSErrorCode(52) + # Network dropped connection on reset + ECONNABORTED* = OSErrorCode(53) + # Software caused connection abort + ECONNRESET* = OSErrorCode(54) + # Connection reset by peer + ENOBUFS* = OSErrorCode(55) + # No buffer space available + EISCONN* = OSErrorCode(56) + # Socket is already connected + ENOTCONN* = OSErrorCode(57) + # Socket is not connected + ESHUTDOWN* = OSErrorCode(58) + # Can't send after socket shutdown + ETOOMANYREFS* = OSErrorCode(59) + # Too many references: can't splice + ETIMEDOUT* = OSErrorCode(60) + # Operation timed out + ECONNREFUSED* = OSErrorCode(61) + # Connection refused + ELOOP* = OSErrorCode(62) + # Too many levels of symbolic links + ENAMETOOLONG* = OSErrorCode(63) + # File name too long + EHOSTDOWN* = OSErrorCode(64) + # Host is down + EHOSTUNREACH* = OSErrorCode(65) + # No route to host + ENOTEMPTY* = OSErrorCode(66) + # Directory not empty + EPROCLIM* = OSErrorCode(67) + # Too many processes + EUSERS* = OSErrorCode(68) + # Too many users + EDQUOT* = OSErrorCode(69) + # Disc quota exceeded + ESTALE* = OSErrorCode(70) + # Stale NFS file handle + EREMOTE* = OSErrorCode(71) + # Too many levels of remote in path + EBADRPC* = OSErrorCode(72) + # RPC struct is bad + ERPCMISMATCH* = OSErrorCode(73) + # RPC version wrong + EPROGUNAVAIL* = OSErrorCode(74) + # RPC prog. 
not avail + EPROGMISMATCH* = OSErrorCode(75) + # Program version wrong + EPROCUNAVAIL* = OSErrorCode(76) + # Bad procedure for program + ENOLCK* = OSErrorCode(77) + # No locks available + ENOSYS* = OSErrorCode(78) + # Function not implemented + EFTYPE* = OSErrorCode(79) + # Inappropriate file type or format + EAUTH* = OSErrorCode(80) + # Authentication error + ENEEDAUTH* = OSErrorCode(81) + # Need authenticator + EIDRM* = OSErrorCode(82) + # Identifier removed + ENOMSG* = OSErrorCode(83) + # No message of desired type + EOVERFLOW* = OSErrorCode(84) + # Value too large to be stored in data type + ECANCELED* = OSErrorCode(85) + # Operation canceled + EILSEQ* = OSErrorCode(86) + # Illegal byte sequence + ENOATTR* = OSErrorCode(87) + # Attribute not found + EDOOFUS* = OSErrorCode(88) + # Programming error + EBADMSG* = OSErrorCode(89) + # Bad message + EMULTIHOP* = OSErrorCode(90) + # Multihop attempted + ENOLINK* = OSErrorCode(91) + # Link has been severed + EPROTO* = OSErrorCode(92) + # Protocol error + ENOMEDIUM* = OSErrorCode(93) + # linux + ENOTRECOVERABLE* = OSErrorCode(94) + # State not recoverable + EOWNERDEAD* = OSErrorCode(95) + # Previous owner died + EASYNC* = OSErrorCode(99) + # XXX + ELAST* = OSErrorCode(99) + # Must be equal largest errno + +elif defined(macos) or defined(macosx): + ## Source: https://github.com/apple/darwin-xnu/blob/main/bsd/sys/errno.h + const + EPERM* = OSErrorCode(1) + # Operation not permitted + ENOENT* = OSErrorCode(2) + # No such file or directory + ESRCH* = OSErrorCode(3) + # No such process + EINTR* = OSErrorCode(4) + # Interrupted system call + EIO* = OSErrorCode(5) + # Input/output error + ENXIO* = OSErrorCode(6) + # Device not configured + E2BIG* = OSErrorCode(7) + # Argument list too long + ENOEXEC* = OSErrorCode(8) + # Exec format error + EBADF* = OSErrorCode(9) + # Bad file descriptor + ECHILD* = OSErrorCode(10) + # No child processes + EDEADLK* = OSErrorCode(11) + # Resource deadlock avoided + ENOMEM* = OSErrorCode(12) + # Cannot allocate memory + EACCES* = OSErrorCode(13) + # Permission denied + EFAULT* = OSErrorCode(14) + # Bad address + ENOTBLK* = OSErrorCode(15) + # Block device required + EBUSY* = OSErrorCode(16) + # Device / Resource busy + EEXIST* = OSErrorCode(17) + # File exists + EXDEV* = OSErrorCode(18) + # Cross-device link + ENODEV* = OSErrorCode(19) + # Operation not supported by device + ENOTDIR* = OSErrorCode(20) + # Not a directory + EISDIR* = OSErrorCode(21) + # Is a directory + EINVAL* = OSErrorCode(22) + # Invalid argument + ENFILE* = OSErrorCode(23) + # Too many open files in system + EMFILE* = OSErrorCode(24) + # Too many open files + ENOTTY* = OSErrorCode(25) + # Inappropriate ioctl for device + ETXTBSY* = OSErrorCode(26) + # Text file busy + EFBIG* = OSErrorCode(27) + # File too large + ENOSPC* = OSErrorCode(28) + # No space left on device + ESPIPE* = OSErrorCode(29) + # Illegal seek + EROFS* = OSErrorCode(30) + # Read-only file system + EMLINK* = OSErrorCode(31) + # Too many links + EPIPE* = OSErrorCode(32) + # Broken pipe + EDOM* = OSErrorCode(33) + # Numerical argument out of domain + ERANGE* = OSErrorCode(34) + # Result too large + EAGAIN* = OSErrorCode(35) + # Resource temporarily unavailable + EWOULDBLOCK* = EAGAIN + # Operation would block + EINPROGRESS* = OSErrorCode(36) + # Operation now in progress + EALREADY* = OSErrorCode(37) + # Operation already in progress + ENOTSOCK* = OSErrorCode(38) + # Socket operation on non-socket + EDESTADDRREQ* = OSErrorCode(39) + # Destination address required + EMSGSIZE* = 
OSErrorCode(40) + # Message too long + EPROTOTYPE* = OSErrorCode(41) + # Protocol wrong type for socket + ENOPROTOOPT* = OSErrorCode(42) + # Protocol not available + EPROTONOSUPPORT* = OSErrorCode(43) + # Protocol not supported + ESOCKTNOSUPPORT* = OSErrorCode(44) + # Socket type not supported + ENOTSUP* = OSErrorCode(45) + # Operation not supported + EPFNOSUPPORT* = OSErrorCode(46) + # Protocol family not supported + EAFNOSUPPORT* = OSErrorCode(47) + # Address family not supported by protocol family + EADDRINUSE* = OSErrorCode(48) + # Address already in use + EADDRNOTAVAIL* = OSErrorCode(49) + # Can't assign requested address + ENETDOWN* = OSErrorCode(50) + # Network is down + ENETUNREACH* = OSErrorCode(51) + # Network is unreachable + ENETRESET* = OSErrorCode(52) + # Network dropped connection on reset + ECONNABORTED* = OSErrorCode(53) + # Software caused connection abort + ECONNRESET* = OSErrorCode(54) + # Connection reset by peer + ENOBUFS* = OSErrorCode(55) + # No buffer space available + EISCONN* = OSErrorCode(56) + # Socket is already connected + ENOTCONN* = OSErrorCode(57) + # Socket is not connected + ESHUTDOWN* = OSErrorCode(58) + # Can't send after socket shutdown + ETOOMANYREFS* = OSErrorCode(59) + # Too many references: can't splice + ETIMEDOUT* = OSErrorCode(60) + # Operation timed out + ECONNREFUSED* = OSErrorCode(61) + # Connection refused + ELOOP* = OSErrorCode(62) + # Too many levels of symbolic links + ENAMETOOLONG* = OSErrorCode(63) + # File name too long + EHOSTDOWN* = OSErrorCode(64) + # Host is down + EHOSTUNREACH* = OSErrorCode(65) + # No route to host + ENOTEMPTY* = OSErrorCode(66) + # Directory not empty + EPROCLIM* = OSErrorCode(67) + # Too many processes + EUSERS* = OSErrorCode(68) + # Too many users + EDQUOT* = OSErrorCode(69) + # Disc quota exceeded + ESTALE* = OSErrorCode(70) + # Stale NFS file handle + EREMOTE* = OSErrorCode(71) + # Too many levels of remote in path + EBADRPC* = OSErrorCode(72) + # RPC struct is bad + ERPCMISMATCH* = OSErrorCode(73) + # RPC version wrong + EPROGUNAVAIL* = OSErrorCode(74) + # RPC prog. not avail + EPROGMISMATCH* = OSErrorCode(75) + # Program version wrong + EPROCUNAVAIL* = OSErrorCode(76) + # Bad procedure for program + ENOLCK* = OSErrorCode(77) + # No locks available + ENOSYS* = OSErrorCode(78) + # Function not implemented + EFTYPE* = OSErrorCode(79) + # Inappropriate file type or format + EAUTH* = OSErrorCode(80) + # Authentication error + ENEEDAUTH* = OSErrorCode(81) + # Need authenticator + EPWROFF* = OSErrorCode(82) + # Device power is off + EDEVERR* = OSErrorCode(83) + # Device error, e.g. 
paper out + EOVERFLOW* = OSErrorCode(84) + # Value too large to be stored in data type + EBADEXEC* = OSErrorCode(85) + # Bad executable + EBADARCH* = OSErrorCode(86) + # Bad CPU type in executable + ESHLIBVERS* = OSErrorCode(87) + # Shared library version mismatch + EBADMACHO* = OSErrorCode(88) + # Malformed Macho file + ECANCELED* = OSErrorCode(89) + # Operation canceled + EIDRM* = OSErrorCode(90) + # Identifier removed + ENOMSG* = OSErrorCode(91) + # No message of desired type + EILSEQ* = OSErrorCode(92) + # Illegal byte sequence + ENOATTR* = OSErrorCode(93) + # Attribute not found + EBADMSG* = OSErrorCode(94) + # Bad message + EMULTIHOP* = OSErrorCode(95) + # Reserved + ENODATA* = OSErrorCode(96) + # No message available on STREAM + ENOLINK* = OSErrorCode(97) + # Reserved + ENOSR* = OSErrorCode(98) + # No STREAM resources + ENOSTR* = OSErrorCode(99) + # Not a STREAM + EPROTO* = OSErrorCode(100) + # Protocol error + ETIME* = OSErrorCode(101) + # STREAM ioctl timeout + EOPNOTSUPP* = OSErrorCode(102) + # Operation not supported on socket + ENOPOLICY* = OSErrorCode(103) + # No such policy registered + ENOTRECOVERABLE* = OSErrorCode(104) + # State not recoverable + EOWNERDEAD* = OSErrorCode(105) + # Previous owner died + EQFULL* = OSErrorCode(106) + # Interface output queue is full + ELAST* = OSErrorCode(106) + # Must be equal largest errno + +elif defined(linux): + ## Source: https://github.com/torvalds/linux/blob/master/include/uapi/asm-generic/errno-base.h + ## https://github.com/torvalds/linux/blob/master/include/uapi/asm-generic/errno.h + const + EPERM* = OSErrorCode(1) + # Operation not permitted + ENOENT* = OSErrorCode(2) + # No such file or directory + ESRCH* = OSErrorCode(3) + # No such process + EINTR* = OSErrorCode(4) + # Interrupted system call + EIO* = OSErrorCode(5) + # I/O error + ENXIO* = OSErrorCode(6) + # No such device or address + E2BIG* = OSErrorCode(7) + # Argument list too long + ENOEXEC* = OSErrorCode(8) + # Exec format error + EBADF* = OSErrorCode(9) + # Bad file number + ECHILD* = OSErrorCode(10) + # No child processes + EAGAIN* = OSErrorCode(11) + # Try again + ENOMEM* = OSErrorCode(12) + # Out of memory + EACCES* = OSErrorCode(13) + # Permission denied + EFAULT* = OSErrorCode(14) + # Bad address + ENOTBLK* = OSErrorCode(15) + # Block device required + EBUSY* = OSErrorCode(16) + # Device or resource busy + EEXIST* = OSErrorCode(17) + # File exists + EXDEV* = OSErrorCode(18) + # Cross-device link + ENODEV* = OSErrorCode(19) + # No such device + ENOTDIR* = OSErrorCode(20) + # Not a directory + EISDIR* = OSErrorCode(21) + # Is a directory + EINVAL* = OSErrorCode(22) + # Invalid argument + ENFILE* = OSErrorCode(23) + # File table overflow + EMFILE* = OSErrorCode(24) + # Too many open files + ENOTTY* = OSErrorCode(25) + # Not a typewriter + ETXTBSY* = OSErrorCode(26) + # Text file busy + EFBIG* = OSErrorCode(27) + # File too large + ENOSPC* = OSErrorCode(28) + # No space left on device + ESPIPE* = OSErrorCode(29) + # Illegal seek + EROFS* = OSErrorCode(30) + # Read-only file system + EMLINK* = OSErrorCode(31) + # Too many links + EPIPE* = OSErrorCode(32) + # Broken pipe + EDOM* = OSErrorCode(33) + # Math argument out of domain of func + ERANGE* = OSErrorCode(34) + # Math result not representable + EDEADLK* = OSErrorCode(35) + # Resource deadlock would occur + ENAMETOOLONG* = OSErrorCode(36) + # File name too long + ENOLCK* = OSErrorCode(37) + # No record locks available + ENOSYS* = OSErrorCode(38) + # Invalid system call number + ENOTEMPTY* = OSErrorCode(39) + # 
Directory not empty + ELOOP* = OSErrorCode(40) + # Too many symbolic links encountered + EWOULDBLOCK* = EAGAIN + # Operation would block + ENOMSG* = OSErrorCode(42) + # No message of desired type + EIDRM* = OSErrorCode(43) + # Identifier removed + ECHRNG* = OSErrorCode(44) + # Channel number out of range + EL2NSYNC* = OSErrorCode(45) + # Level 2 not synchronized + EL3HLT* = OSErrorCode(46) + # Level 3 halted + EL3RST* = OSErrorCode(47) + # Level 3 reset + ELNRNG* = OSErrorCode(48) + # Link number out of range + EUNATCH* = OSErrorCode(49) + # Protocol driver not attached + ENOCSI* = OSErrorCode(50) + # No CSI structure available + EL2HLT* = OSErrorCode(51) + # Level 2 halted + EBADE* = OSErrorCode(52) + # Invalid exchange + EBADR* = OSErrorCode(53) + # Invalid request descriptor + EXFULL* = OSErrorCode(54) + # Exchange full + ENOANO* = OSErrorCode(55) + # No anode + EBADRQC* = OSErrorCode(56) + # Invalid request code + EBADSLT* = OSErrorCode(57) + # Invalid slot + EDEADLOCK* = EDEADLK + # Resource deadlock would occur + EBFONT* = OSErrorCode(59) + # Bad font file format + ENOSTR* = OSErrorCode(60) + # Device not a stream + ENODATA* = OSErrorCode(61) + # No data available + ETIME* = OSErrorCode(62) + # Timer expired + ENOSR* = OSErrorCode(63) + # Out of streams resources + ENONET* = OSErrorCode(64) + # Machine is not on the network + ENOPKG* = OSErrorCode(65) + # Package not installed + EREMOTE* = OSErrorCode(66) + # Object is remote + ENOLINK* = OSErrorCode(67) + # Link has been severed + EADV* = OSErrorCode(68) + # Advertise error + ESRMNT* = OSErrorCode(69) + # Srmount error + ECOMM* = OSErrorCode(70) + # Communication error on send + EPROTO* = OSErrorCode(71) + # Protocol error + EMULTIHOP* = OSErrorCode(72) + # Multihop attempted + EDOTDOT* = OSErrorCode(73) + # RFS specific error + EBADMSG* = OSErrorCode(74) + # Not a data message + EOVERFLOW* = OSErrorCode(75) + # Value too large for defined data type + ENOTUNIQ* = OSErrorCode(76) + # Name not unique on network + EBADFD* = OSErrorCode(77) + # File descriptor in bad state + EREMCHG* = OSErrorCode(78) + # Remote address changed + ELIBACC* = OSErrorCode(79) + # Can not access a needed shared library + ELIBBAD* = OSErrorCode(80) + # Accessing a corrupted shared library + ELIBSCN* = OSErrorCode(81) + # .lib section in a.out corrupted + ELIBMAX* = OSErrorCode(82) + # Attempting to link in too many shared libraries + ELIBEXEC* = OSErrorCode(83) + # Cannot exec a shared library directly + EILSEQ* = OSErrorCode(84) + # Illegal byte sequence + ERESTART* = OSErrorCode(85) + # Interrupted system call should be restarted + ESTRPIPE* = OSErrorCode(86) + # Streams pipe error + EUSERS* = OSErrorCode(87) + # Too many users + ENOTSOCK* = OSErrorCode(88) + # Socket operation on non-socket + EDESTADDRREQ* = OSErrorCode(89) + # Destination address required + EMSGSIZE* = OSErrorCode(90) + # Message too long + EPROTOTYPE* = OSErrorCode(91) + # Protocol wrong type for socket + ENOPROTOOPT* = OSErrorCode(92) + # Protocol not available + EPROTONOSUPPORT* = OSErrorCode(93) + # Protocol not supported + ESOCKTNOSUPPORT* = OSErrorCode(94) + # Socket type not supported + EOPNOTSUPP* = OSErrorCode(95) + # Operation not supported on transport endpoint + EPFNOSUPPORT* = OSErrorCode(96) + # Protocol family not supported + EAFNOSUPPORT* = OSErrorCode(97) + # Address family not supported by protocol + EADDRINUSE* = OSErrorCode(98) + # Address already in use + EADDRNOTAVAIL* = OSErrorCode(99) + # Cannot assign requested address + ENETDOWN* = OSErrorCode(100) + # Network 
is down + ENETUNREACH* = OSErrorCode(101) + # Network is unreachable + ENETRESET* = OSErrorCode(102) + # Network dropped connection because of reset + ECONNABORTED* = OSErrorCode(103) + # Software caused connection abort + ECONNRESET* = OSErrorCode(104) + # Connection reset by peer + ENOBUFS* = OSErrorCode(105) + # No buffer space available + EISCONN* = OSErrorCode(106) + # Transport endpoint is already connected + ENOTCONN* = OSErrorCode(107) + # Transport endpoint is not connected + ESHUTDOWN* = OSErrorCode(108) + # Cannot send after transport endpoint shutdown + ETOOMANYREFS* = OSErrorCode(109) + # Too many references: cannot splice + ETIMEDOUT* = OSErrorCode(110) + # Connection timed out + ECONNREFUSED* = OSErrorCode(111) + # Connection refused + EHOSTDOWN* = OSErrorCode(112) + # Host is down + EHOSTUNREACH* = OSErrorCode(113) + # No route to host + EALREADY* = OSErrorCode(114) + # Operation already in progress + EINPROGRESS* = OSErrorCode(115) + # Operation now in progress + ESTALE* = OSErrorCode(116) + # Stale file handle + EUCLEAN* = OSErrorCode(117) + # Structure needs cleaning + ENOTNAM* = OSErrorCode(118) + # Not a XENIX named type file + ENAVAIL* = OSErrorCode(119) + # No XENIX semaphores available + EISNAM* = OSErrorCode(120) + # Is a named type file + EREMOTEIO* = OSErrorCode(121) + # Remote I/O error + EDQUOT* = OSErrorCode(122) + # Quota exceeded + ENOMEDIUM* = OSErrorCode(123) + # No medium found + EMEDIUMTYPE* = OSErrorCode(124) + # Wrong medium type + ECANCELED* = OSErrorCode(125) + # Operation Canceled + ENOKEY* = OSErrorCode(126) + # Required key not available + EKEYEXPIRED* = OSErrorCode(127) + # Key has expired + EKEYREVOKED* = OSErrorCode(128) + # Key has been revoked + EKEYREJECTED* = OSErrorCode(129) + # Key was rejected by service + EOWNERDEAD* = OSErrorCode(130) + # Owner died + ENOTRECOVERABLE* = OSErrorCode(131) + # State not recoverable + ERFKILL* = OSErrorCode(132) + # Operation not possible due to RF-kill + EHWPOISON* = OSErrorCode(133) + # Memory page has hardware error +elif defined(windows): + const + ERROR_SUCCESS* = OSErrorCode(0) + ERROR_FILE_NOT_FOUND* = OSErrorCode(2) + ERROR_TOO_MANY_OPEN_FILES* = OSErrorCode(4) + ERROR_ACCESS_DENIED* = OSErrorCode(5) + ERROR_BROKEN_PIPE* = OSErrorCode(109) + ERROR_BUFFER_OVERFLOW* = OSErrorCode(111) + ERROR_PIPE_BUSY* = OSErrorCode(231) + ERROR_NO_DATA* = OSErrorCode(232) + ERROR_PIPE_NOT_CONNECTED* = OSErrorCode(233) + ERROR_PIPE_CONNECTED* = OSErrorCode(535) + ERROR_OPERATION_ABORTED* = OSErrorCode(995) + ERROR_IO_PENDING* = OSErrorCode(997) + ERROR_CONNECTION_REFUSED* = OSErrorCode(1225) + ERROR_CONNECTION_ABORTED* = OSErrorCode(1236) + WSAEMFILE* = OSErrorCode(10024) + WSAENETDOWN* = OSErrorCode(10050) + WSAENETRESET* = OSErrorCode(10052) + WSAECONNABORTED* = OSErrorCode(10053) + WSAECONNRESET* = OSErrorCode(10054) + WSAENOBUFS* = OSErrorCode(10055) + WSAETIMEDOUT* = OSErrorCode(10060) + WSAEADDRINUSE* = OSErrorCode(10048) + WSAEDISCON* = OSErrorCode(10101) + WSANOTINITIALISED* = OSErrorCode(10093) + WSAENOTSOCK* = OSErrorCode(10038) + WSAEINPROGRESS* = OSErrorCode(10036) + WSAEINTR* = OSErrorCode(10004) + WSAEWOULDBLOCK* = OSErrorCode(10035) + ERROR_NETNAME_DELETED* = OSErrorCode(64) + STATUS_PENDING* = OSErrorCode(0x103) + +else: + {.fatal: "Operation system is not yet supported!".} diff --git a/chronos/osutils.nim b/chronos/osutils.nim index 235946934..2ff1072aa 100644 --- a/chronos/osutils.nim +++ b/chronos/osutils.nim @@ -7,9 +7,9 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license 
(LICENSE-MIT) import stew/results -import osdefs +import osdefs, oserrno -export results +export results, osdefs, oserrno when (NimMajor, NimMinor) < (1, 4): {.push raises: [Defect].} @@ -187,10 +187,10 @@ when defined(windows): let cleanupFlag = block: let errorCode = osLastError() - case int(errorCode) - of osdefs.ERROR_PIPE_CONNECTED: + case errorCode + of ERROR_PIPE_CONNECTED: false - of osdefs.ERROR_IO_PENDING: + of ERROR_IO_PENDING: if DescriptorFlag.NonBlock in writeset: var bytesRead = 0.DWORD if getOverlappedResult(pipeIn, addr ovl, bytesRead, 1) == FALSE: @@ -215,7 +215,7 @@ else: var res = 0 while true: res = body - if not((res == -1) and (osLastError() == EINTR)): + if not((res == -1) and (osLastError() == oserrno.EINTR)): break res diff --git a/chronos/selectors2.nim b/chronos/selectors2.nim index bbb52a5d6..45c453304 100644 --- a/chronos/selectors2.nim +++ b/chronos/selectors2.nim @@ -32,8 +32,8 @@ # backwards-compatible. import stew/results -import osdefs, osutils -export results +import osdefs, osutils, oserrno +export results, oserrno const asyncEventsCount* {.intdefine.} = 64 diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index 8d75a5a67..0b3f431e5 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -12,10 +12,17 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import std/[strutils, nativesockets, net] +import std/[strutils] import stew/[base10, byteutils] -import ".."/[asyncloop, osdefs] -export net +import ".."/[asyncloop, osdefs, oserrno] + +from std/net import Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, + SockType, Protocol, Port, `$` +from std/nativesockets import toInt, `$` + +export Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, SockType, + Protocol, Port, toInt, `$` + const DefaultStreamBufferSize* = 4096 ## Default buffer size for stream @@ -600,35 +607,37 @@ proc isLiteral*[T](s: seq[T]): bool {.inline.} = else: (cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0 -template getTransportTooManyError*(code: int = 0): ref TransportTooManyError = +template getTransportTooManyError*( + code = OSErrorCode(0) + ): ref TransportTooManyError = let msg = when defined(posix): - if code == 0: + if code == OSErrorCode(0): "Too many open transports" - elif code == EMFILE: + elif code == oserrno.EMFILE: "[EMFILE] Too many open files in the process" - elif code == ENFILE: + elif code == oserrno.ENFILE: "[ENFILE] Too many open files in system" - elif code == ENOBUFS: + elif code == oserrno.ENOBUFS: "[ENOBUFS] No buffer space available" - elif code == ENOMEM: + elif code == oserrno.ENOMEM: "[ENOMEM] Not enough memory availble" else: - "[" & $code & "] Too many open transports" + "[" & $int(code) & "] Too many open transports" elif defined(windows): case code - of 0: + of OSErrorCode(0): "Too many open transports" - of osdefs.ERROR_TOO_MANY_OPEN_FILES: + of ERROR_TOO_MANY_OPEN_FILES: "[ERROR_TOO_MANY_OPEN_FILES] Too many open files" - of osdefs.WSAENOBUFS: + of WSAENOBUFS: "[WSAENOBUFS] No buffer space available" - of osdefs.WSAEMFILE: + of WSAEMFILE: "[WSAEMFILE] Too many open sockets" else: - "[" & $code & "] Too many open transports" + "[" & $int(code) & "] Too many open transports" else: - "[" & $code & "] Too many open transports" + "[" & $int(code) & "] Too many open transports" newException(TransportTooManyError, msg) template getConnectionAbortedError*(m: string = ""): ref TransportAbortedError = @@ -639,32 +648,34 @@ template 
getConnectionAbortedError*(m: string = ""): ref TransportAbortedError = "[ECONNABORTED] " & m newException(TransportAbortedError, msg) -template getConnectionAbortedError*(code: int): ref TransportAbortedError = +template getConnectionAbortedError*( + code: OSErrorCode + ): ref TransportAbortedError = let msg = when defined(posix): - if code == 0: + if code == OSErrorCode(0): "[ECONNABORTED] Connection has been aborted before being accepted" - elif code == EPERM: + elif code == oserrno.EPERM: "[EPERM] Firewall rules forbid connection" - elif code == ETIMEDOUT: + elif code == oserrno.ETIMEDOUT: "[ETIMEDOUT] Operation has been timed out" else: - "[" & $code & "] Connection has been aborted" + "[" & $int(code) & "] Connection has been aborted" elif defined(windows): case code - of 0, osdefs.WSAECONNABORTED: + of OSErrorCode(0), oserrno.WSAECONNABORTED: "[ECONNABORTED] Connection has been aborted before being accepted" - of osdefs.WSAENETDOWN: + of WSAENETDOWN: "[ENETDOWN] Network is down" - of osdefs.WSAENETRESET: + of oserrno.WSAENETRESET: "[ENETRESET] Network dropped connection on reset" - of osdefs.WSAECONNRESET: + of oserrno.WSAECONNRESET: "[ECONNRESET] Connection reset by peer" - of osdefs.WSAETIMEDOUT: + of WSAETIMEDOUT: "[ETIMEDOUT] Connection timed out" else: - "[" & $code & "] Connection has been aborted" + "[" & $int(code) & "] Connection has been aborted" else: - "[" & $code & "] Connection has been aborted" + "[" & $int(code) & "] Connection has been aborted" newException(TransportAbortedError, msg) diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index f372af2b1..a642b594c 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -14,7 +14,7 @@ else: import std/deques when not(defined(windows)): import ".."/selectors2 -import ".."/[asyncloop, osdefs, handles] +import ".."/[asyncloop, osdefs, oserrno, handles] import "."/common type @@ -139,10 +139,11 @@ when defined(windows): transp.state.excl(WritePending) let err = transp.wovl.data.errCode let vector = transp.queue.popFirst() - if err == OSErrorCode(-1): + case err + of OSErrorCode(-1): if not(vector.writer.finished()): vector.writer.complete() - elif int(err) == osdefs.ERROR_OPERATION_ABORTED: + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt transp.state.incl(WritePaused) if not(vector.writer.finished()): @@ -170,13 +171,14 @@ when defined(windows): DWORD(0), cast[POVERLAPPED](addr transp.wovl), nil) if ret != 0: let err = osLastError() - if int(err) == osdefs.ERROR_OPERATION_ABORTED: + case err + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt transp.state.excl(WritePending) transp.state.incl(WritePaused) if not(vector.writer.finished()): vector.writer.complete() - elif int(err) == osdefs.ERROR_IO_PENDING: + of ERROR_IO_PENDING: transp.queue.addFirst(vector) else: transp.state.excl(WritePending) @@ -201,14 +203,15 @@ when defined(windows): ## Continuation transp.state.excl(ReadPending) let err = transp.rovl.data.errCode - if err == OSErrorCode(-1): + case err + of OSErrorCode(-1): let bytesCount = transp.rovl.data.bytesCount if bytesCount == 0: transp.state.incl({ReadEof, ReadPaused}) fromSAddr(addr transp.raddr, transp.ralen, raddr) transp.buflen = int(bytesCount) asyncSpawn transp.function(transp, raddr) - elif int(err) == osdefs.ERROR_OPERATION_ABORTED: + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt or closeSocket() call. 
transp.state.incl(ReadPaused) if ReadClosed in transp.state and not(transp.future.finished()): @@ -237,15 +240,16 @@ when defined(windows): cast[POVERLAPPED](addr transp.rovl), nil) if ret != 0: let err = osLastError() - if int(err) == osdefs.ERROR_OPERATION_ABORTED: + case err + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt transp.state.excl(ReadPending) transp.state.incl(ReadPaused) - elif int(err) == osdefs.WSAECONNRESET: + of WSAECONNRESET: transp.state.excl(ReadPending) transp.state.incl({ReadPaused, ReadEof}) break - elif int(err) == osdefs.ERROR_IO_PENDING: + of ERROR_IO_PENDING: discard else: transp.state.excl(ReadPending) @@ -419,7 +423,8 @@ else: asyncSpawn transp.function(transp, raddr) else: let err = osLastError() - if int(err) == EINTR: + case err + of oserrno.EINTR: continue else: transp.buflen = 0 @@ -454,7 +459,8 @@ else: vector.writer.complete() else: let err = osLastError() - if int(err) == EINTR: + case err + of oserrno.EINTR: continue else: if not(vector.writer.finished()): diff --git a/chronos/transports/osnet.nim b/chronos/transports/osnet.nim index c5753d875..29b956268 100644 --- a/chronos/transports/osnet.nim +++ b/chronos/transports/osnet.nim @@ -838,17 +838,15 @@ elif defined(macosx) or defined(macos) or defined(bsd): if family == osdefs.AF_INET: fromSAddr(cast[ptr Sockaddr_storage](ifap.ifa_netmask), SockLen(sizeof(Sockaddr_in)), na) - if cint(ifaddress.host.family) == osdefs.AF_INET: + if ifaddress.host.family == AddressFamily.IPv4: ifaddress.net = IpNet.init(ifaddress.host, na) elif family == osdefs.AF_INET6: fromSAddr(cast[ptr Sockaddr_storage](ifap.ifa_netmask), SockLen(sizeof(Sockaddr_in6)), na) - if cint(ifaddress.host.family) == osdefs.AF_INET6: + if ifaddress.host.family == AddressFamily.IPv6: ifaddress.net = IpNet.init(ifaddress.host, na) if ifaddress.host.family != AddressFamily.None: - if len(res[i].addresses) == 0: - res[i].addresses = newSeq[InterfaceAddress]() res[i].addresses.add(ifaddress) ifap = ifap.ifa_next @@ -1047,10 +1045,11 @@ elif defined(windows): var addresses = cast[ptr IpAdapterAddressesXp](addr buffer[0]) gres = getAdaptersAddresses(osdefs.AF_UNSPEC, GAA_FLAG_INCLUDE_PREFIX, nil, addresses, addr size) - if gres == ERROR_SUCCESS: + case OSErrorCode(gres) + of ERROR_SUCCESS: buffer.setLen(size) break - elif gres == ERROR_BUFFER_OVERFLOW: + of ERROR_BUFFER_OVERFLOW: discard else: break @@ -1058,7 +1057,7 @@ elif defined(windows): if tries >= MaxTries: break - if gres == ERROR_SUCCESS: + if OSErrorCode(gres) == ERROR_SUCCESS: var slider = cast[ptr IpAdapterAddressesXp](addr buffer[0]) while not isNil(slider): var iface = NetworkInterface( diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 4264e77f9..1b28b1deb 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -13,7 +13,7 @@ else: {.push raises: [].} import std/deques -import ".."/[asyncloop, handles, osdefs, osutils] +import ".."/[asyncloop, handles, osdefs, osutils, oserrno] import common type @@ -322,10 +322,12 @@ when defined(windows): (t).wwsabuf.buf = cast[cstring](v.buf) (t).wwsabuf.len = cast[ULONG](v.buflen) - proc isConnResetError(err: OSErrorCode): bool {.inline.} = - result = (err == OSErrorCode(osdefs.WSAECONNRESET)) or - (err == OSErrorCode(osdefs.WSAECONNABORTED)) or - (err == OSErrorCode(osdefs.ERROR_PIPE_NOT_CONNECTED)) + func isConnResetError(err: OSErrorCode): bool {.inline.} = + case err + of WSAECONNRESET, WSAECONNABORTED, ERROR_PIPE_NOT_CONNECTED: + true + else: + false proc writeStreamLoop(udata: 
pointer) {.gcsafe, nimcall.} = var bytesCount: uint32 @@ -343,7 +345,8 @@ when defined(windows): ## Continuation transp.state.excl(WritePending) let err = transp.wovl.data.errCode - if err == OSErrorCode(-1): + case err + of OSErrorCode(-1): bytesCount = transp.wovl.data.bytesCount var vector = transp.queue.popFirst() if bytesCount == 0: @@ -377,7 +380,7 @@ when defined(windows): # This conversion to `int` safe, because its impossible # to call write() with size bigger than `int`. vector.writer.complete(int(transp.wwsabuf.len)) - elif int(err) == osdefs.ERROR_OPERATION_ABORTED: + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt transp.state.incl({WritePaused, WriteEof}) let vector = transp.queue.popFirst() @@ -416,7 +419,8 @@ when defined(windows): cast[POVERLAPPED](addr transp.wovl), nil) if ret != 0: let err = osLastError() - if int(err) == osdefs.ERROR_OPERATION_ABORTED: + case err + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt transp.state.excl(WritePending) transp.state.incl({WritePaused, WriteEof}) @@ -424,7 +428,7 @@ when defined(windows): vector.writer.complete(0) completePendingWriteQueue(transp.queue, 0) break - elif int(err) == osdefs.ERROR_IO_PENDING: + of ERROR_IO_PENDING: transp.queue.addFirst(vector) else: transp.state.excl(WritePending) @@ -456,7 +460,8 @@ when defined(windows): nil, 0'u32) if ret == 0: let err = osLastError() - if int(err) == osdefs.ERROR_OPERATION_ABORTED: + case err + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt transp.state.excl(WritePending) transp.state.incl({WritePaused, WriteEof}) @@ -464,7 +469,7 @@ when defined(windows): vector.writer.complete(0) completePendingWriteQueue(transp.queue, 0) break - elif int(err) == osdefs.ERROR_IO_PENDING: + of ERROR_IO_PENDING: transp.queue.addFirst(vector) else: transp.state.excl(WritePending) @@ -496,8 +501,8 @@ when defined(windows): cast[POVERLAPPED](addr transp.wovl)) if ret == 0: let err = osLastError() - if int(err) in {osdefs.ERROR_OPERATION_ABORTED, - osdefs.ERROR_NO_DATA}: + case err + of ERROR_OPERATION_ABORTED, ERROR_NO_DATA: # CancelIO() interrupt transp.state.excl(WritePending) transp.state.incl({WritePaused, WriteEof}) @@ -505,7 +510,7 @@ when defined(windows): vector.writer.complete(0) completePendingWriteQueue(transp.queue, 0) break - elif int(err) == osdefs.ERROR_IO_PENDING: + of ERROR_IO_PENDING: transp.queue.addFirst(vector) else: transp.state.excl(WritePending) @@ -539,7 +544,8 @@ when defined(windows): ## Continuation transp.state.excl(ReadPending) let err = transp.rovl.data.errCode - if err == OSErrorCode(-1): + case err + of OSErrorCode(-1): let bytesCount = transp.rovl.data.bytesCount if bytesCount == 0: transp.state.incl({ReadEof, ReadPaused}) @@ -552,19 +558,20 @@ when defined(windows): transp.roffset = transp.offset if transp.offset == len(transp.buffer): transp.state.incl(ReadPaused) - elif int(err) in {osdefs.ERROR_OPERATION_ABORTED, - osdefs.ERROR_CONNECTION_ABORTED, - osdefs.ERROR_BROKEN_PIPE, - osdefs.ERROR_NETNAME_DELETED}: + of ERROR_OPERATION_ABORTED, ERROR_CONNECTION_ABORTED, + ERROR_BROKEN_PIPE: # CancelIO() interrupt or closeSocket() call. 
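      # Illustrative sketch (hypothetical helper, not in this patch): where the
      # same group of codes is tested in several places, the new style also
      # allows factoring it into a predicate, as done for `isConnResetError`:
      #
      #   func isAbortError(err: OSErrorCode): bool =
      #     case err
      #     of ERROR_OPERATION_ABORTED, ERROR_CONNECTION_ABORTED,
      #        ERROR_BROKEN_PIPE:
      #       true
      #     else:
      #       false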
transp.state.incl(ReadPaused) - elif transp.kind == TransportKind.Socket and - (int(err) in {osdefs.ERROR_NETNAME_DELETED, - osdefs.WSAECONNABORTED}): - transp.state.incl({ReadEof, ReadPaused}) - elif transp.kind == TransportKind.Pipe and - (int(err) in {osdefs.ERROR_PIPE_NOT_CONNECTED}): - transp.state.incl({ReadEof, ReadPaused}) + of ERROR_NETNAME_DELETED, WSAECONNABORTED: + if transp.kind == TransportKind.Socket: + transp.state.incl({ReadEof, ReadPaused}) + else: + transp.setReadError(err) + of ERROR_PIPE_NOT_CONNECTED: + if transp.kind == TransportKind.Pipe: + transp.state.incl({ReadEof, ReadPaused}) + else: + transp.setReadError(err) else: transp.setReadError(err) @@ -593,16 +600,18 @@ when defined(windows): cast[POVERLAPPED](addr transp.rovl), nil) if ret != 0: let err = osLastError() - if int32(err) == osdefs.ERROR_OPERATION_ABORTED: + case err + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt transp.state.excl(ReadPending) transp.state.incl(ReadPaused) - elif int32(err) in {osdefs.WSAECONNRESET, osdefs.WSAENETRESET, - osdefs.WSAECONNABORTED}: + of WSAECONNRESET, WSAENETRESET, WSAECONNABORTED: transp.state.excl(ReadPending) transp.state.incl({ReadEof, ReadPaused}) transp.completeReader() - elif int32(err) != osdefs.ERROR_IO_PENDING: + of ERROR_IO_PENDING: + discard + else: transp.state.excl(ReadPending) transp.state.incl(ReadPaused) transp.setReadError(err) @@ -616,16 +625,18 @@ when defined(windows): cast[POVERLAPPED](addr transp.rovl)) if ret == 0: let err = osLastError() - if int32(err) == osdefs.ERROR_OPERATION_ABORTED: + case err + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt transp.state.excl(ReadPending) transp.state.incl(ReadPaused) - elif int32(err) in {osdefs.ERROR_BROKEN_PIPE, - osdefs.ERROR_PIPE_NOT_CONNECTED}: + of ERROR_BROKEN_PIPE, ERROR_PIPE_NOT_CONNECTED: transp.state.excl(ReadPending) transp.state.incl({ReadEof, ReadPaused}) transp.completeReader() - elif int32(err) != osdefs.ERROR_IO_PENDING: + of ERROR_IO_PENDING: + discard + else: transp.state.excl(ReadPending) transp.state.incl(ReadPaused) transp.setReadError(err) @@ -769,8 +780,8 @@ when defined(windows): var ovl = cast[RefCustomOverlapped](udata) if not(retFuture.finished()): if ovl.data.errCode == OSErrorCode(-1): - if setsockopt(SocketHandle(sock), cint(osdefs.SOL_SOCKET), - cint(osdefs.SO_UPDATE_CONNECT_CONTEXT), nil, + if setsockopt(SocketHandle(sock), cint(SOL_SOCKET), + cint(SO_UPDATE_CONNECT_CONTEXT), nil, SockLen(0)) != 0'i32: let err = wsaGetLastError() sock.closeSocket() @@ -796,9 +807,12 @@ when defined(windows): cint(slen), nil, 0, nil, cast[POVERLAPPED](povl)) # We will not process immediate completion, to avoid undefined behavior. - if res == osdefs.FALSE: + if res == FALSE: let err = osLastError() - if int32(err) != osdefs.ERROR_IO_PENDING: + case err + of ERROR_IO_PENDING: + discard + else: GC_unref(povl) sock.closeSocket() retFuture.fail(getTransportOsError(err)) @@ -809,7 +823,7 @@ when defined(windows): ## Unix domain socket emulation with Windows Named Pipes. # For some reason Nim compiler does not detect `pipeHandle` usage in # pipeContinuation() procedure, so we marking it as {.used.} here. 
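  # Note: `{.used.}` only suppresses the XDeclaredButNotUsed hint for a symbol
  # whose uses the compiler cannot see directly (here, the closure assigned
  # below); it has no runtime effect. Minimal illustration with a hypothetical
  # variable:
  #
  #   var scratch {.used.} = 0   # no "declared but not used" hint is emitted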
- var pipeHandle {.used.} = osdefs.INVALID_HANDLE_VALUE + var pipeHandle {.used.} = INVALID_HANDLE_VALUE var pipeContinuation: proc (udata: pointer) {.gcsafe, raises: [Defect].} pipeContinuation = proc (udata: pointer) {.gcsafe, raises: [Defect].} = @@ -821,15 +835,16 @@ when defined(windows): pipeName = toWideString(pipeAsciiName).valueOr: retFuture.fail(getTransportOsError(error)) return - genericFlags = osdefs.GENERIC_READ or osdefs.GENERIC_WRITE - shareFlags = osdefs.FILE_SHARE_READ or osdefs.FILE_SHARE_WRITE + genericFlags = GENERIC_READ or GENERIC_WRITE + shareFlags = FILE_SHARE_READ or FILE_SHARE_WRITE pipeHandle = createFile(pipeName, genericFlags, shareFlags, - nil, osdefs.OPEN_EXISTING, - osdefs.FILE_FLAG_OVERLAPPED, HANDLE(0)) + nil, OPEN_EXISTING, + FILE_FLAG_OVERLAPPED, HANDLE(0)) free(pipeName) - if pipeHandle == osdefs.INVALID_HANDLE_VALUE: + if pipeHandle == INVALID_HANDLE_VALUE: let err = osLastError() - if int32(err) == osdefs.ERROR_PIPE_BUSY: + case err + of ERROR_PIPE_BUSY: discard setTimer(Moment.fromNow(50.milliseconds), pipeContinuation, nil) else: @@ -856,23 +871,22 @@ when defined(windows): openMode = if FirstPipe notin server.flags: server.flags.incl(FirstPipe) - osdefs.PIPE_ACCESS_DUPLEX or osdefs.FILE_FLAG_OVERLAPPED or - osdefs.FILE_FLAG_FIRST_PIPE_INSTANCE + PIPE_ACCESS_DUPLEX or FILE_FLAG_OVERLAPPED or + FILE_FLAG_FIRST_PIPE_INSTANCE else: - osdefs.PIPE_ACCESS_DUPLEX or osdefs.FILE_FLAG_OVERLAPPED - pipeMode = osdefs.PIPE_TYPE_BYTE or osdefs.PIPE_READMODE_BYTE or - osdefs.PIPE_WAIT + PIPE_ACCESS_DUPLEX or FILE_FLAG_OVERLAPPED + pipeMode = PIPE_TYPE_BYTE or PIPE_READMODE_BYTE or PIPE_WAIT pipeHandle = createNamedPipe(pipeName, openMode, pipeMode, - osdefs.PIPE_UNLIMITED_INSTANCES, + PIPE_UNLIMITED_INSTANCES, DWORD(server.bufferSize), DWORD(server.bufferSize), DWORD(0), nil) free(pipeName) - if pipeHandle == osdefs.INVALID_HANDLE_VALUE: + if pipeHandle == INVALID_HANDLE_VALUE: return err(osLastError()) let res = register2(AsyncFD(pipeHandle)) if res.isErr(): - discard osdefs.closeHandle(pipeHandle) + discard closeHandle(pipeHandle) return err(res.error()) ok(AsyncFD(pipeHandle)) @@ -886,7 +900,8 @@ when defined(windows): ## Continuation server.apending = false if server.status notin {ServerStatus.Stopped, ServerStatus.Closed}: - if ovl.data.errCode == OSErrorCode(-1): + case ovl.data.errCode + of OSErrorCode(-1): var ntransp: StreamTransport var flags = {WinServerPipe} if NoPipeFlash in server.flags: @@ -901,7 +916,7 @@ when defined(windows): # Start tracking transport trackStream(ntransp) asyncSpawn server.function(server, ntransp) - elif int32(ovl.data.errCode) == osdefs.ERROR_OPERATION_ABORTED: + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt or close call. 
if server.status in {ServerStatus.Closed, ServerStatus.Stopped}: server.clean() @@ -931,19 +946,18 @@ when defined(windows): openMode = if FirstPipe notin server.flags: server.flags.incl(FirstPipe) - osdefs.PIPE_ACCESS_DUPLEX or osdefs.FILE_FLAG_OVERLAPPED or - osdefs.FILE_FLAG_FIRST_PIPE_INSTANCE + PIPE_ACCESS_DUPLEX or FILE_FLAG_OVERLAPPED or + FILE_FLAG_FIRST_PIPE_INSTANCE else: - osdefs.PIPE_ACCESS_DUPLEX or osdefs.FILE_FLAG_OVERLAPPED - pipeMode = osdefs.PIPE_TYPE_BYTE or osdefs.PIPE_READMODE_BYTE or - osdefs.PIPE_WAIT + PIPE_ACCESS_DUPLEX or FILE_FLAG_OVERLAPPED + pipeMode = PIPE_TYPE_BYTE or PIPE_READMODE_BYTE or PIPE_WAIT pipeHandle = createNamedPipe(pipeName, openMode, pipeMode, - osdefs.PIPE_UNLIMITED_INSTANCES, + PIPE_UNLIMITED_INSTANCES, DWORD(server.bufferSize), DWORD(server.bufferSize), DWORD(0), nil) free(pipeName) - if pipeHandle == osdefs.INVALID_HANDLE_VALUE: + if pipeHandle == INVALID_HANDLE_VALUE: raiseOsDefect(osLastError(), "acceptPipeLoop(): Unable to create " & "new pipe") server.sock = AsyncFD(pipeHandle) @@ -955,12 +969,12 @@ when defined(windows): cast[POVERLAPPED](addr server.aovl)) if res == 0: let errCode = osLastError() - if errCode == osdefs.ERROR_OPERATION_ABORTED: + if errCode == ERROR_OPERATION_ABORTED: server.apending = false break - elif errCode == osdefs.ERROR_IO_PENDING: + elif errCode == ERROR_IO_PENDING: discard - elif errCode == osdefs.ERROR_PIPE_CONNECTED: + elif errCode == ERROR_PIPE_CONNECTED: discard else: raiseOsDefect(errCode, "acceptPipeLoop(): Unable to establish " & @@ -983,9 +997,10 @@ when defined(windows): ## Continuation server.apending = false if server.status notin {ServerStatus.Stopped, ServerStatus.Closed}: - if ovl.data.errCode == OSErrorCode(-1): - if setsockopt(SocketHandle(server.asock), cint(osdefs.SOL_SOCKET), - cint(osdefs.SO_UPDATE_ACCEPT_CONTEXT), + case ovl.data.errCode + of OSErrorCode(-1): + if setsockopt(SocketHandle(server.asock), cint(SOL_SOCKET), + cint(SO_UPDATE_ACCEPT_CONTEXT), addr server.sock, SockLen(sizeof(SocketHandle))) != 0'i32: let errCode = OSErrorCode(wsaGetLastError()) @@ -1006,7 +1021,7 @@ when defined(windows): trackStream(ntransp) asyncSpawn server.function(server, ntransp) - elif int32(ovl.data.errCode) == osdefs.ERROR_OPERATION_ABORTED: + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt or close. server.asock.closeSocket() if server.status in {ServerStatus.Closed, ServerStatus.Stopped}: @@ -1049,12 +1064,12 @@ when defined(windows): dwReceiveDataLength, dwLocalAddressLength, dwRemoteAddressLength, addr dwBytesReceived, cast[POVERLAPPED](addr server.aovl)) - if res == osdefs.FALSE: + if res == FALSE: let errCode = osLastError() - if errCode == osdefs.ERROR_OPERATION_ABORTED: + if errCode == ERROR_OPERATION_ABORTED: server.apending = false break - elif errCode == osdefs.ERROR_IO_PENDING: + elif errCode == ERROR_IO_PENDING: discard else: raiseOsDefect(errCode, "acceptLoop(): Unable to accept " & @@ -1121,13 +1136,14 @@ when defined(windows): else: case ovl.data.errCode of OSErrorCode(-1): - if setsockopt(SocketHandle(server.asock), cint(osdefs.SOL_SOCKET), - cint(osdefs.SO_UPDATE_ACCEPT_CONTEXT), + if setsockopt(SocketHandle(server.asock), cint(SOL_SOCKET), + cint(SO_UPDATE_ACCEPT_CONTEXT), addr server.sock, SockLen(sizeof(SocketHandle))) != 0'i32: let err = osLastError() server.asock.closeSocket() - if err == osdefs.WSAENOTSOCK: + case err + of WSAENOTSOCK: # This can be happened when server get closed, but continuation # was already scheduled, so we failing it not with OS error. 
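            # Note: SO_UPDATE_ACCEPT_CONTEXT associates the AcceptEx()-accepted
            # socket with its listening socket; until it succeeds, calls such
            # as getpeername() or shutdown() on the accepted socket fail, which
            # is why a failure here aborts the whole accept.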
retFuture.fail(getServerUseClosedError()) @@ -1147,18 +1163,15 @@ when defined(windows): # Start tracking transport trackStream(ntransp) retFuture.complete(ntransp) - of OSErrorCode(osdefs.ERROR_OPERATION_ABORTED): + of ERROR_OPERATION_ABORTED: # CancelIO() interrupt or close. server.asock.closeSocket() retFuture.fail(getServerUseClosedError()) server.clean() - of OSErrorCode(osdefs.WSAENETDOWN), - OSErrorCode(osdefs.WSAENETRESET), - OSErrorCode(osdefs.WSAECONNABORTED), - OSErrorCode(osdefs.WSAECONNRESET), - OSErrorCode(osdefs.WSAETIMEDOUT): + of WSAENETDOWN, WSAENETRESET, WSAECONNABORTED, WSAECONNRESET, + WSAETIMEDOUT: server.asock.closeSocket() - retFuture.fail(getConnectionAbortedError(int(ovl.data.errCode))) + retFuture.fail(getConnectionAbortedError(ovl.data.errCode)) server.clean() else: server.asock.closeSocket() @@ -1189,7 +1202,8 @@ when defined(windows): server.sock.closeHandle() server.clean() else: - if ovl.data.errCode == OSErrorCode(-1): + case ovl.data.errCode + of OSErrorCode(-1): var ntransp: StreamTransport var flags = {WinServerPipe} if NoPipeFlash in server.flags: @@ -1210,8 +1224,7 @@ when defined(windows): trackStream(ntransp) retFuture.complete(ntransp) - elif int32(ovl.data.errCode) in {osdefs.ERROR_OPERATION_ABORTED, - osdefs.ERROR_PIPE_NOT_CONNECTED}: + of ERROR_OPERATION_ABORTED, ERROR_PIPE_NOT_CONNECTED: # CancelIO() interrupt or close call. retFuture.fail(getServerUseClosedError()) server.clean() @@ -1236,10 +1249,9 @@ when defined(windows): Protocol.IPPROTO_TCP) if server.asock == asyncInvalidSocket: let err = osLastError() - case int(err) - of osdefs.ERROR_TOO_MANY_OPEN_FILES, - osdefs.WSAENOBUFS, osdefs.WSAEMFILE: - retFuture.fail(getTransportTooManyError(int(err))) + case err + of ERROR_TOO_MANY_OPEN_FILES, WSAENOBUFS, WSAEMFILE: + retFuture.fail(getTransportTooManyError(err)) else: retFuture.fail(getTransportOsError(err)) return retFuture @@ -1258,19 +1270,19 @@ when defined(windows): dwReceiveDataLength, dwLocalAddressLength, dwRemoteAddressLength, addr dwBytesReceived, cast[POVERLAPPED](addr server.aovl)) - if res == osdefs.FALSE: + if res == FALSE: let err = osLastError() - case int(err) - of osdefs.ERROR_OPERATION_ABORTED: + case err + of ERROR_OPERATION_ABORTED: server.apending = false retFuture.fail(getServerUseClosedError()) return retFuture - of osdefs.ERROR_IO_PENDING: + of ERROR_IO_PENDING: discard - of osdefs.WSAECONNRESET, osdefs.WSAECONNABORTED, osdefs.WSAENETDOWN, - osdefs.WSAENETRESET, osdefs.WSAETIMEDOUT: + of WSAECONNRESET, WSAECONNABORTED, WSAENETDOWN, + WSAENETRESET, WSAETIMEDOUT: server.apending = false - retFuture.fail(getConnectionAbortedError(int(err))) + retFuture.fail(getConnectionAbortedError(err)) return retFuture else: server.apending = false @@ -1284,7 +1296,8 @@ when defined(windows): server.apending = true if server.sock == asyncInvalidPipe: let err = server.errorCode - if int32(err) == osdefs.ERROR_TOO_MANY_OPEN_FILES: + case err + of ERROR_TOO_MANY_OPEN_FILES: retFuture.fail(getTransportTooManyError()) else: retFuture.fail(getTransportOsError(err)) @@ -1297,12 +1310,12 @@ when defined(windows): cast[POVERLAPPED](addr server.aovl)) if res == 0: let err = osLastError() - if int32(err) == osdefs.ERROR_OPERATION_ABORTED: + case err + of ERROR_OPERATION_ABORTED: server.apending = false retFuture.fail(getServerUseClosedError()) return retFuture - elif int32(err) in {osdefs.ERROR_IO_PENDING, - osdefs.ERROR_PIPE_CONNECTED}: + of ERROR_IO_PENDING, ERROR_PIPE_CONNECTED: discard else: server.apending = false @@ -1317,7 +1330,7 @@ 
else: import ../sendfile proc isConnResetError(err: OSErrorCode): bool {.inline.} = - (err == OSErrorCode(ECONNRESET)) or (err == OSErrorCode(EPIPE)) + (err == oserrno.ECONNRESET) or (err == oserrno.EPIPE) proc writeStreamLoop(udata: pointer) = if isNil(udata): @@ -1345,33 +1358,32 @@ else: while len(transp.queue) > 0: template handleError() = let err = osLastError() - - if cint(err) == EINTR: + case err + of oserrno.EINTR: # Signal happened while writing - try again with all data transp.queue.addFirst(vector) continue - - if cint(err) in [EWOULDBLOCK, EAGAIN]: + of oserrno.EWOULDBLOCK: # Socket buffer is full - wait until next write notification - in # particular, ensure removeWriter is not called transp.queue.addFirst(vector) return - - # The errors below will clear the write queue, meaning we'll exit the - # loop - if isConnResetError(err): - # Soft error happens which indicates that remote peer got - # disconnected, complete all pending writes in queue with 0. - transp.state.incl({WriteEof}) - if not(vector.writer.finished()): - vector.writer.complete(0) - completePendingWriteQueue(transp.queue, 0) else: - transp.state.incl({WriteError}) - let error = getTransportOsError(err) - if not(vector.writer.finished()): - vector.writer.fail(error) - failPendingWriteQueue(transp.queue, error) + # The errors below will clear the write queue, meaning we'll exit the + # loop + if isConnResetError(err): + # Soft error happens which indicates that remote peer got + # disconnected, complete all pending writes in queue with 0. + transp.state.incl({WriteEof}) + if not(vector.writer.finished()): + vector.writer.complete(0) + completePendingWriteQueue(transp.queue, 0) + else: + transp.state.incl({WriteError}) + let error = getTransportOsError(err) + if not(vector.writer.finished()): + vector.writer.fail(error) + failPendingWriteQueue(transp.queue, error) var vector = transp.queue.popFirst() case vector.kind @@ -1443,7 +1455,8 @@ else: len(transp.buffer) - transp.offset, cint(0))) if res < 0: let err = osLastError() - if int(err) == ECONNRESET: + case err + of oserrno.ECONNRESET: transp.state.incl({ReadEof, ReadPaused}) let rres = removeReader2(transp.fd) if rres.isErr(): @@ -1553,7 +1566,8 @@ else: proto) if sock == asyncInvalidSocket: let err = osLastError() - if int(err) == EMFILE: + case err + of oserrno.EMFILE: retFuture.fail(getTransportTooManyError()) else: retFuture.fail(getTransportOsError(err)) @@ -1641,7 +1655,8 @@ else: # and the connection shall be established asynchronously. # # http://www.madore.org/~david/computers/connect-intr.html - if (errorCode == EINPROGRESS) or (errorCode == EINTR): + case errorCode + of oserrno.EINPROGRESS, oserrno.EINTR: let res = addWriter2(sock, continuation) if res.isErr(): discard unregisterAndCloseFd(sock) @@ -1692,7 +1707,7 @@ else: discard closeFd(cint(sock)) else: let errorCode = sres.error() - if errorCode != EAGAIN: + if errorCode != oserrno.EAGAIN: # This EAGAIN error appears only when server get closed, while # acceptLoop() reader callback is already scheduled. raiseOsDefect(errorCode, "acceptLoop(): Unable to accept connection") @@ -1745,14 +1760,15 @@ else: addr slen, flags) if sres.isErr(): let errorCode = sres.error() - if errorCode == EAGAIN: + case errorCode + of oserrno.EAGAIN: # This error appears only when server get closed, while accept() # continuation is already scheduled. 
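          # Illustrative caller-side sketch (not part of this patch): the error
          # mapping below lets users of `accept()` match on typed transport
          # errors instead of raw errno values:
          #
          #   try:
          #     let transp = await server.accept()
          #   except TransportTooManyError:
          #     discard   # EMFILE / ENFILE / ENOBUFS / ENOMEM
          #   except TransportAbortedError:
          #     discard   # peer aborted before the accept completed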
retFuture.fail(getServerUseClosedError()) - elif cint(errorCode) in {EMFILE, ENFILE, ENOBUFS, ENOMEM}: - retFuture.fail(getTransportTooManyError(cint(errorCode))) - elif cint(errorCode) in {ECONNABORTED, EPERM, ETIMEDOUT}: - retFuture.fail(getConnectionAbortedError(cint(errorCode))) + of oserrno.EMFILE, oserrno.ENFILE, oserrno.ENOBUFS, oserrno.ENOMEM: + retFuture.fail(getTransportTooManyError(errorCode)) + of oserrno.ECONNABORTED, oserrno.EPERM, oserrno.ETIMEDOUT: + retFuture.fail(getConnectionAbortedError(errorCode)) else: retFuture.fail(getTransportOsError(errorCode)) # Error is already happened so we ignore removeReader2() errors. @@ -1933,15 +1949,13 @@ proc createStreamServer*(host: TransportAddress, serverSocket = sock # SO_REUSEADDR is not useful for Unix domain sockets. if ServerFlags.ReuseAddr in flags: - if not(setSockOpt(serverSocket, osdefs.SOL_SOCKET, - osdefs.SO_REUSEADDR, 1)): + if not(setSockOpt(serverSocket, SOL_SOCKET, SO_REUSEADDR, 1)): let err = osLastError() if sock == asyncInvalidSocket: discard closeFd(SocketHandle(serverSocket)) raiseTransportOsError(err) if ServerFlags.ReusePort in flags: - if not(setSockOpt(serverSocket, osdefs.SOL_SOCKET, - osdefs.SO_REUSEPORT, 1)): + if not(setSockOpt(serverSocket, SOL_SOCKET, SO_REUSEPORT, 1)): let err = osLastError() if sock == asyncInvalidSocket: discard closeFd(SocketHandle(serverSocket)) @@ -1955,8 +1969,8 @@ proc createStreamServer*(host: TransportAddress, discard closeFd(SocketHandle(serverSocket)) raiseTransportOsError(err) host.toSAddr(saddr, slen) - if osdefs.bindSocket(SocketHandle(serverSocket), - cast[ptr SockAddr](addr saddr), slen) != 0: + if bindSocket(SocketHandle(serverSocket), + cast[ptr SockAddr](addr saddr), slen) != 0: let err = osLastError() if sock == asyncInvalidSocket: discard closeFd(SocketHandle(serverSocket)) @@ -2165,21 +2179,21 @@ template fastWrite(transp: auto, pbytes: var ptr byte, rbytes: var int, # Not all bytes written - keep going else: let err = osLastError() - if cint(err) in [EAGAIN, EWOULDBLOCK]: + case err + of oserrno.EWOULDBLOCK: break # No bytes written, add to queue - - if cint(err) == EINTR: + of oserrno.EINTR: continue - - if isConnResetError(err): - transp.state.incl({WriteEof}) - retFuture.complete(0) - return retFuture else: - transp.state.incl({WriteError}) - let error = getTransportOsError(err) - retFuture.fail(error) - return retFuture + if isConnResetError(err): + transp.state.incl({WriteEof}) + retFuture.complete(0) + return retFuture + else: + transp.state.incl({WriteError}) + let error = getTransportOsError(err) + retFuture.fail(error) + return retFuture proc write*(transp: StreamTransport, pbytes: pointer, nbytes: int): Future[int] = @@ -2340,7 +2354,7 @@ template readLoop(name, body: untyped): untyped = raiseOsDefect(errorCode, "readLoop(): Unable to resume reading") else: transp.reader.complete() - if errorCode == ESRCH: + if errorCode == oserrno.ESRCH: # ESRCH 3 "No such process" # This error could be happened on pipes only, when process which # owns and communicates through this pipe (stdin, stdout, stderr) is diff --git a/tests/teststream.nim b/tests/teststream.nim index c76ccf6fa..ea1de43d2 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -7,7 +7,7 @@ # MIT license (LICENSE-MIT) import std/[strutils, os] import unittest2 -import ".."/chronos, ".."/chronos/osdefs +import ".."/chronos, ".."/chronos/[osdefs, oserrno] {.used.} @@ -639,12 +639,11 @@ suite "Stream Transport test suite": var transp = await connect(address) doAssert(isNil(transp)) except 
TransportOsError as e: - let ecode = int(e.code) when defined(windows): - result = (ecode == ERROR_FILE_NOT_FOUND) or - (ecode == ERROR_CONNECTION_REFUSED) + return (e.code == ERROR_FILE_NOT_FOUND) or + (e.code == ERROR_CONNECTION_REFUSED) else: - result = (ecode == ECONNREFUSED) or (ecode == ENOENT) + return (e.code == oserrno.ECONNREFUSED) or (e.code == oserrno.ENOENT) proc serveClient16(server: StreamServer, transp: StreamTransport) {.async.} = var res = await transp.write(BigMessagePattern) From 9ba418cd3be9593b967bf74d2110fa71640806fa Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sun, 30 Apr 2023 20:09:36 +0300 Subject: [PATCH 009/146] Eliminate warnings on Windows. (#382) --- chronos/asyncfutures2.nim | 2 +- tests/testbugs.nim | 6 +++--- tests/testhttpclient.nim | 2 +- tests/testhttpserver.nim | 2 +- tests/testshttpserver.nim | 2 +- tests/teststream.nim | 5 +++-- 6 files changed, 10 insertions(+), 9 deletions(-) diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index bac4ba113..546aecb8d 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -10,7 +10,7 @@ import std/sequtils import stew/base10 -import "."/[config, srcloc] +import "."/srcloc export srcloc when defined(nimHasStacktracesModule): diff --git a/tests/testbugs.nim b/tests/testbugs.nim index 19a8edbac..ba1e6df24 100644 --- a/tests/testbugs.nim +++ b/tests/testbugs.nim @@ -117,14 +117,14 @@ suite "Asynchronous issues test suite": let inpTransp = await afut let bytesSent = await outTransp.write(msg) check bytesSent == messageSize - var rfut = inpTransp.readExactly(addr buffer[0], messageSize) + var rfut {.used.} = inpTransp.readExactly(addr buffer[0], messageSize) proc waiterProc(udata: pointer) {.raises: [Defect], gcsafe.} = try: waitFor(sleepAsync(0.milliseconds)) - except CatchableError as exc: + except CatchableError: raiseAssert "Unexpected exception happened" - let timer = setTimer(Moment.fromNow(0.seconds), waiterProc, nil) + let timer {.used.} = setTimer(Moment.fromNow(0.seconds), waiterProc, nil) await sleepAsync(100.milliseconds) await inpTransp.closeWait() diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index e04e2ab8c..16d1dc7fa 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -5,7 +5,7 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import std/[strutils, strutils, sha1] +import std/[strutils, sha1] import unittest2 import ../chronos, ../chronos/apps/http/[httpserver, shttpserver, httpclient] import stew/base10 diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index 8b71ca2e8..acf8b20b8 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -5,7 +5,7 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import std/[strutils, algorithm, strutils] +import std/[strutils, algorithm] import unittest2 import ../chronos, ../chronos/apps/http/httpserver, ../chronos/apps/http/httpcommon diff --git a/tests/testshttpserver.nim b/tests/testshttpserver.nim index 4b55b2701..a258cc953 100644 --- a/tests/testshttpserver.nim +++ b/tests/testshttpserver.nim @@ -5,7 +5,7 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import std/[strutils, strutils] +import std/strutils import unittest2 import ../chronos, ../chronos/apps/http/shttpserver import stew/base10 diff --git a/tests/teststream.nim b/tests/teststream.nim index ea1de43d2..416e0a9c1 100644 
--- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -1286,10 +1286,11 @@ suite "Stream Transport test suite": var transp3 = await connect(dst2, localAddress = dst3, flags={SocketFlags.ReusePort}) expect(TransportOsError): - var transp2 = await connect(dst3, localAddress = ta) + var transp2 {.used.} = await connect(dst3, localAddress = ta) expect(TransportOsError): - var transp3 = await connect(dst3, localAddress = initTAddress(":::35000")) + var transp3 {.used.} = + await connect(dst3, localAddress = initTAddress(":::35000")) await transp1.closeWait() await transp2.closeWait() From 5755d79b67ff3aea1f626561d3407f7da8eda1a6 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 1 May 2023 00:46:15 +0300 Subject: [PATCH 010/146] Eliminate warnings (#383) * Eliminate warnings on Windows. * Deprecated toHex() in osnet. --- chronos/transports/osnet.nim | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/chronos/transports/osnet.nim b/chronos/transports/osnet.nim index 29b956268..02c0c64f0 100644 --- a/chronos/transports/osnet.nim +++ b/chronos/transports/osnet.nim @@ -16,7 +16,6 @@ else: {.push raises: [].} import std/algorithm -from std/strutils import toHex import ".."/osdefs import "."/ipnet export ipnet @@ -293,6 +292,9 @@ proc `$`*(ifa: InterfaceAddress): string = else: "Unknown" +proc hexDigit(x: uint8, lowercase: bool = false): char = + char(0x30'u8 + x + (uint32(7) and not((uint32(x) - 10) shr 8))) + proc `$`*(iface: NetworkInterface): string = ## Return string representation of network interface ``iface``. var res = $iface.ifIndex @@ -316,7 +318,8 @@ proc `$`*(iface: NetworkInterface): string = res.add(" ") if iface.maclen > 0: for i in 0 ..< iface.maclen: - res.add(toHex(iface.mac[i])) + res.add(hexDigit(iface.mac[i] shr 4)) + res.add(hexDigit(iface.mac[i] and 15)) if i < iface.maclen - 1: res.add(":") for item in iface.addresses: From ef94d75d612642b1df3144f5a18dd66ef9705c29 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 1 May 2023 16:02:59 +0300 Subject: [PATCH 011/146] Eliminate last 2 macos warnings. (#384) --- chronos/transports/osnet.nim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chronos/transports/osnet.nim b/chronos/transports/osnet.nim index 02c0c64f0..973a2dd89 100644 --- a/chronos/transports/osnet.nim +++ b/chronos/transports/osnet.nim @@ -823,12 +823,12 @@ elif defined(macosx) or defined(macos) or defined(bsd): res[i].ifIndex = int(link.sdl_index) let nlen = int(link.sdl_nlen) if nlen < len(link.sdl_data): - let minsize = min(cast[int](link.sdl_alen), len(res[i].mac)) + let minsize = min(int(link.sdl_alen), len(res[i].mac)) copyMem(addr res[i].mac[0], addr link.sdl_data[nlen], minsize) res[i].maclen = int(link.sdl_alen) res[i].ifType = toInterfaceType(data.ifi_type) res[i].state = toInterfaceState(ifap.ifa_flags) - res[i].mtu = cast[int](data.ifi_mtu) + res[i].mtu = int64(data.ifi_mtu) elif family == osdefs.AF_INET: fromSAddr(cast[ptr Sockaddr_storage](ifap.ifa_addr), SockLen(sizeof(Sockaddr_in)), ifaddress.host) From 8563c936733a28bc1e773542e9741b5b55dfcaeb Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 12 May 2023 07:11:05 +0300 Subject: [PATCH 012/146] Reexport osdefs where Sockaddr_storage was re-declared. 
(#385) --- chronos/transports/common.nim | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index 0b3f431e5..66eef1b11 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -21,8 +21,7 @@ from std/net import Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, from std/nativesockets import toInt, `$` export Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, SockType, - Protocol, Port, toInt, `$` - + Protocol, Port, toInt, `$`, osdefs const DefaultStreamBufferSize* = 4096 ## Default buffer size for stream From 8aa8ee8822dac0e7a724fde99a91ac98932318d6 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 12 May 2023 17:00:37 +0300 Subject: [PATCH 013/146] Add flag to completely disable HTTP/1.1 pipelining support. (#387) * Add flag for HTTP/1.1 pipeline disable. * Switch from NoHttp11Pipeline to Http11Pipeline. --- chronos/apps/http/httpclient.nim | 83 +++++++++++++++---------- tests/testhttpclient.nim | 103 ++++++++++++++++++++++++++++--- 2 files changed, 144 insertions(+), 42 deletions(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 2997d0429..90539b9be 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -183,7 +183,8 @@ type NoInet4Resolution, ## Do not resolve server hostname to IPv4 addresses NoInet6Resolution, ## Do not resolve server hostname to IPv6 addresses NoAutomaticRedirect, ## Do not handle HTTP redirection automatically - NewConnectionAlways ## Always create new connection to HTTP server + NewConnectionAlways, ## Always create new connection to HTTP server + Http11Pipeline ## Enable HTTP/1.1 pipelining HttpClientFlags* = set[HttpClientFlag] @@ -365,7 +366,11 @@ proc new*(t: typedesc[HttpSessionRef], idlePeriod: idlePeriod, connections: initTable[string, seq[HttpClientConnectionRef]](), ) - res.watcherFut = sessionWatcher(res) + res.watcherFut = + if HttpClientFlag.Http11Pipeline in flags: + sessionWatcher(res) + else: + newFuture[void]("session.watcher.placeholder") res proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] {.raises: [Defect] .} = @@ -658,6 +663,12 @@ proc removeConnection(session: HttpSessionRef, dec(session.connectionsCount) await conn.closeWait() +func connectionPoolEnabled(session: HttpSessionRef, + flags: set[HttpClientRequestFlag]): bool = + (HttpClientFlag.NewConnectionAlways notin session.flags) and + (HttpClientRequestFlag.DedicatedConnection notin flags) and + (HttpClientFlag.Http11Pipeline in session.flags) + proc acquireConnection( session: HttpSessionRef, ha: HttpAddress, @@ -665,8 +676,7 @@ proc acquireConnection( ): Future[HttpClientConnectionRef] {.async.} = ## Obtain connection from ``session`` or establish a new one. var default: seq[HttpClientConnectionRef] - if (HttpClientFlag.NewConnectionAlways notin session.flags) and - (HttpClientRequestFlag.DedicatedConnection notin flags): + if session.connectionPoolEnabled(flags): # Trying to reuse existing connection from our connection's pool. let timestamp = Moment.now() # We looking for non-idle connection at `Ready` state, all idle connections @@ -691,29 +701,32 @@ proc releaseConnection(session: HttpSessionRef, connection: HttpClientConnectionRef) {.async.} = ## Return connection back to the ``session``. 
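  # Illustrative sketch (hypothetical helper, not in this patch): the decision
  # below keeps a pooled connection only when the session allows pipelining and
  # the response both advertised keep-alive and was fully consumed, roughly:
  #
  #   func canKeep(c: HttpClientConnectionRef): bool =
  #     (HttpClientConnectionFlag.KeepAlive in c.flags) and
  #       ((c.state == HttpClientConnectionState.ResponseBodyReceived) or
  #        ((c.state == HttpClientConnectionState.ResponseHeadersReceived) and
  #         (HttpClientConnectionFlag.NoBody in c.flags)))
  #
  #   # removeConnection == not(pipelining enabled) or not(canKeep(connection))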
let removeConnection = - case connection.state - of HttpClientConnectionState.ResponseBodyReceived: - if HttpClientConnectionFlag.KeepAlive in connection.flags: - # HTTP response body has been received and "Connection: keep-alive" is - # present in response headers. - false - else: - # HTTP response body has been received, but "Connection: keep-alive" is - # not present or not supported. - true - of HttpClientConnectionState.ResponseHeadersReceived: - if (HttpClientConnectionFlag.NoBody in connection.flags) and - (HttpClientConnectionFlag.KeepAlive in connection.flags): - # HTTP response headers received with an empty response body and - # "Connection: keep-alive" is present in response headers. - false + if HttpClientFlag.Http11Pipeline notin session.flags: + true + else: + case connection.state + of HttpClientConnectionState.ResponseBodyReceived: + if HttpClientConnectionFlag.KeepAlive in connection.flags: + # HTTP response body has been received and "Connection: keep-alive" is + # present in response headers. + false + else: + # HTTP response body has been received, but "Connection: keep-alive" + # is not present or not supported. + true + of HttpClientConnectionState.ResponseHeadersReceived: + if (HttpClientConnectionFlag.NoBody in connection.flags) and + (HttpClientConnectionFlag.KeepAlive in connection.flags): + # HTTP response headers received with an empty response body and + # "Connection: keep-alive" is present in response headers. + false + else: + # HTTP response body is not received or "Connection: keep-alive" is + # not present or not supported. + true else: - # HTTP response body is not received or "Connection: keep-alive" is not - # present or not supported. + # Connection not in proper state. true - else: - # Connection not in proper state. - true if removeConnection: await session.removeConnection(connection) @@ -753,7 +766,8 @@ proc closeWait*(session: HttpSessionRef) {.async.} = ## This closes all the connections opened to remote servers. var pending: seq[Future[void]] # Closing sessionWatcher to avoid race condition. - await cancelAndWait(session.watcherFut) + if not(isNil(session.watcherFut)): + await cancelAndWait(session.watcherFut) for connections in session.connections.values(): for conn in connections: pending.add(closeWait(conn)) @@ -924,11 +938,15 @@ proc prepareResponse(request: HttpClientRequestRef, data: openArray[byte] res.connection.state = HttpClientConnectionState.ResponseHeadersReceived if nobodyFlag: res.connection.flags.incl(HttpClientConnectionFlag.NoBody) - let newConnectionAlways = - HttpClientFlag.NewConnectionAlways in request.session.flags - let closeConnection = - HttpClientRequestFlag.CloseConnection in request.flags - if connectionFlag and not(newConnectionAlways) and not(closeConnection): + let + newConnectionAlways = + HttpClientFlag.NewConnectionAlways in request.session.flags + httpPipeline = + HttpClientFlag.Http11Pipeline in request.session.flags + closeConnection = + HttpClientRequestFlag.CloseConnection in request.flags + if connectionFlag and not(newConnectionAlways) and not(closeConnection) and + httpPipeline: res.connection.flags.incl(HttpClientConnectionFlag.KeepAlive) res.connection.flags.incl(HttpClientConnectionFlag.Response) trackHttpClientResponse(res) @@ -1049,7 +1067,8 @@ proc prepareRequest(request: HttpClientRequestRef): string {. discard request.headers.hasKeyOrPut(HostHeader, request.address.hostname) # We set `Connection` to value according to flags if its not set. 
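  # Illustrative note (only names introduced in this patch): pipelining is now
  # opt-in, so only sessions created like
  #
  #   let session = HttpSessionRef.new({HttpClientFlag.Http11Pipeline})
  #
  # send `Connection: keep-alive` and reuse pooled connections; by default the
  # client now sends `Connection: close` (unless the request already carries an
  # explicit Connection header).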
if ConnectionHeader notin request.headers: - if HttpClientRequestFlag.CloseConnection in request.flags: + if (HttpClientFlag.Http11Pipeline notin request.session.flags) or + (HttpClientRequestFlag.CloseConnection in request.flags): request.headers.add(ConnectionHeader, "close") else: request.headers.add(ConnectionHeader, "keep-alive") diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index 16d1dc7fa..ccd8eeffa 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -831,21 +831,30 @@ suite "HTTP client testing suite": d8 == @[(200, "ok", 0), (200, "ok", 0)] let - n1 = await test1(keepHa, HttpVersion11, {}, {}) - n2 = await test2(keepHa, keepHa, HttpVersion11, {}, {}) - n3 = await test1(dropHa, HttpVersion11, {}, {}) - n4 = await test2(dropHa, dropHa, HttpVersion11, {}, {}) + n1 = await test1(keepHa, HttpVersion11, + {HttpClientFlag.Http11Pipeline}, {}) + n2 = await test2(keepHa, keepHa, HttpVersion11, + {HttpClientFlag.Http11Pipeline}, {}) + n3 = await test1(dropHa, HttpVersion11, + {HttpClientFlag.Http11Pipeline}, {}) + n4 = await test2(dropHa, dropHa, HttpVersion11, + {HttpClientFlag.Http11Pipeline}, {}) n5 = await test1(keepHa, HttpVersion11, - {HttpClientFlag.NewConnectionAlways}, {}) - n6 = await test1(keepHa, HttpVersion11, {}, + {HttpClientFlag.NewConnectionAlways, + HttpClientFlag.Http11Pipeline}, {}) + n6 = await test1(keepHa, HttpVersion11, + {HttpClientFlag.Http11Pipeline}, {HttpClientRequestFlag.DedicatedConnection}) - n7 = await test1(keepHa, HttpVersion11, {}, + n7 = await test1(keepHa, HttpVersion11, + {HttpClientFlag.Http11Pipeline}, {HttpClientRequestFlag.DedicatedConnection, HttpClientRequestFlag.CloseConnection}) - n8 = await test1(keepHa, HttpVersion11, {}, + n8 = await test1(keepHa, HttpVersion11, + {HttpClientFlag.Http11Pipeline}, {HttpClientRequestFlag.CloseConnection}) n9 = await test1(keepHa, HttpVersion11, - {HttpClientFlag.NewConnectionAlways}, + {HttpClientFlag.NewConnectionAlways, + HttpClientFlag.Http11Pipeline}, {HttpClientRequestFlag.CloseConnection}) check: n1 == (200, "ok", 1) @@ -895,7 +904,8 @@ suite "HTTP client testing suite": var server = createServer(address, process, false) server.start() - let session = HttpSessionRef.new(idleTimeout = 1.seconds, + let session = HttpSessionRef.new({HttpClientFlag.Http11Pipeline}, + idleTimeout = 1.seconds, idlePeriod = 200.milliseconds) try: var f1 = test(session, ha) @@ -922,6 +932,75 @@ suite "HTTP client testing suite": return true + proc testNoPipeline(address: TransportAddress): Future[bool] {. 
+ async.} = + let + ha = getAddress(address, HttpClientScheme.NonSecure, "/test") + hb = getAddress(address, HttpClientScheme.NonSecure, "/keep-test") + + proc test( + session: HttpSessionRef, + a: HttpAddress + ): Future[TestResponseTuple] {.async.} = + + var + data: HttpResponseTuple + request = HttpClientRequestRef.new(session, a, version = HttpVersion11) + try: + data = await request.fetch() + finally: + await request.closeWait() + return (data.status, data.data.bytesToString(), 0) + + proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + if r.isOk(): + let request = r.get() + case request.uri.path + of "/test": + return await request.respond(Http200, "ok") + of "/keep-test": + let headers = HttpTable.init([("Connection", "keep-alive")]) + return await request.respond(Http200, "not-alive", headers) + else: + return await request.respond(Http404, "Page not found") + else: + return dumbResponse() + + var server = createServer(address, process, false) + server.start() + let session = HttpSessionRef.new(idleTimeout = 100.seconds, + idlePeriod = 10.milliseconds) + try: + var f1 = test(session, ha) + var f2 = test(session, ha) + await allFutures(f1, f2) + check: + f1.finished() + f1.done() + f2.finished() + f2.done() + f1.read() == (200, "ok", 0) + f2.read() == (200, "ok", 0) + session.connectionsCount == 0 + + await sleepAsync(100.milliseconds) + block: + let resp = await test(session, ha) + check: + resp == (200, "ok", 0) + session.connectionsCount == 0 + block: + let resp = await test(session, hb) + check: + resp == (200, "not-alive", 0) + session.connectionsCount == 0 + finally: + await session.closeWait() + await server.stop() + await server.closeWait() + + return true + test "HTTP all request methods test": let address = initTAddress("127.0.0.1:30080") check waitFor(testMethods(address, false)) == 18 @@ -997,6 +1076,10 @@ suite "HTTP client testing suite": let address = initTAddress("127.0.0.1:30080") check waitFor(testIdleConnection(address)) == true + test "HTTP client no-pipeline test": + let address = initTAddress("127.0.0.1:30080") + check waitFor(testNoPipeline(address)) == true + test "Leaks test": proc getTrackerLeaks(tracker: string): bool = let tracker = getTracker(tracker) From 956ae5af557de8e5201db43a425d95c122857dca Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 15 May 2023 19:45:26 +0300 Subject: [PATCH 014/146] Fix import poison regression. (#388) * Initial commit. * One more linux fix. * Eliminate posix from asyncloop. * Fix Linux warnings. * Fix MacOS issues. * Fix BSD issues. * Fix comma. * Fix Windows issues. 
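A note on the name: the "import poison" being fixed is chronos importing
`std/posix` wholesale and re-exporting it, which leaked the entire POSIX
namespace into user code. The change narrows this to explicit symbol lists.
A minimal sketch of the shape of the change (illustrative only; the real
symbol lists in the diff below are much longer):

    # before: every std/posix symbol becomes visible to chronos users
    import std/posix
    export posix

    # after: only the symbols chronos actually needs are imported, re-exported
    from std/posix import close, socket, SockAddr, SockLen
    export close, socket, SockAddr, SockLen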
--- chronos/asyncloop.nim | 23 +- chronos/ioselects/ioselectors_kqueue.nim | 2 +- chronos/osdefs.nim | 453 +++++++++++++---------- chronos/sendfile.nim | 15 +- chronos/timer.nim | 18 - chronos/transports/common.nim | 2 +- 6 files changed, 279 insertions(+), 234 deletions(-) diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index 6e3bd9acd..5314933b5 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -16,7 +16,7 @@ else: from nativesockets import Port import std/[tables, strutils, heapqueue, options, deques] import stew/results -import "."/[config, osdefs, osutils, timer] +import "."/[config, osdefs, oserrno, osutils, timer] export Port export timer, results @@ -152,11 +152,6 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or defined(openbsd) or defined(dragonfly) or defined(macos) or defined(linux) or defined(android) or defined(solaris): import "."/selectors2 - import "."/oserrno - from posix import MSG_PEEK, MSG_NOSIGNAL, - SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, - SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, - SIGPIPE, SIGALRM, SIGTERM, SIGPIPE export SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE @@ -575,7 +570,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or proc globalInit() = # We are ignoring SIGPIPE signal, because we are working with EPIPE. - posix.signal(cint(SIGPIPE), SIG_IGN) + signal(cint(SIGPIPE), SIG_IGN) proc initAPI(disp: PDispatcher) = discard @@ -635,7 +630,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or if not(isNil(adata.writer.function)): newEvents.incl(Event.Write) do: - return err(OSErrorCode(osdefs.EBADF)) + return err(osdefs.EBADF) loop.selector.updateHandle2(cint(fd), newEvents) proc removeReader2*(fd: AsyncFD): Result[void, OSErrorCode] = @@ -648,7 +643,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or if not(isNil(adata.writer.function)): newEvents.incl(Event.Write) do: - return err(OSErrorCode(osdefs.EBADF)) + return err(osdefs.EBADF) loop.selector.updateHandle2(cint(fd), newEvents) proc addWriter2*(fd: AsyncFD, cb: CallbackFunc, @@ -663,7 +658,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or if not(isNil(adata.reader.function)): newEvents.incl(Event.Read) do: - return err(OSErrorCode(osdefs.EBADF)) + return err(osdefs.EBADF) loop.selector.updateHandle2(cint(fd), newEvents) proc removeWriter2*(fd: AsyncFD): Result[void, OSErrorCode] = @@ -676,7 +671,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or if not(isNil(adata.reader.function)): newEvents.incl(Event.Read) do: - return err(OSErrorCode(osdefs.EBADF)) + return err(osdefs.EBADF) loop.selector.updateHandle2(cint(fd), newEvents) proc register*(fd: AsyncFD) {.raises: [Defect, OSError].} = @@ -743,7 +738,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or else: OSErrorCode(0) else: - OSErrorCode(osdefs.EBADF) + osdefs.EBADF ) if not(isNil(aftercb)): aftercb(param) @@ -789,7 +784,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or withData(loop.selector, sigfd, adata) do: adata.reader = AsyncCallback(function: cb, udata: udata) do: - return err(OSErrorCode(osdefs.EBADF)) + return err(osdefs.EBADF) ok(sigfd) proc addProcess2*(pid: int, cb: CallbackFunc, @@ -803,7 +798,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or withData(loop.selector, procfd, adata) do: adata.reader = AsyncCallback(function: cb, udata: udata) do: - 
return err(OSErrorCode(osdefs.EBADF)) + return err(osdefs.EBADF) ok(procfd) proc removeSignal2*(sigfd: int): Result[void, OSErrorCode] = diff --git a/chronos/ioselects/ioselectors_kqueue.nim b/chronos/ioselects/ioselectors_kqueue.nim index dc9567114..eadb5cd39 100644 --- a/chronos/ioselects/ioselectors_kqueue.nim +++ b/chronos/ioselects/ioselectors_kqueue.nim @@ -40,7 +40,7 @@ proc getVirtualId[T](s: Selector[T]): SelectResult[int32] = ok(s.virtualHoles.popLast()) else: if s.virtualId == low(int32): - err(OSErrorCode(EMFILE)) + err(EMFILE) else: dec(s.virtualId) ok(s.virtualId) diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 0d7bfabe6..836529700 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -10,6 +10,11 @@ import oserrno export oserrno when defined(windows): + from std/winlean import Sockaddr_storage, InAddr, In6Addr, Sockaddr_in, + Sockaddr_in6, SockLen, SockAddr, AddrInfo, + SocketHandle + export Sockaddr_storage, InAddr, In6Addr, Sockaddr_in, Sockaddr_in6, SockLen, + SockAddr, AddrInfo, SocketHandle # Prerequisites for constants template WSAIORW*(x, y): untyped = (IOC_INOUT or x or y) template WSAIOW*(x, y): untyped = @@ -17,49 +22,6 @@ when defined(windows): ((clong(sizeof(int32)) and clong(IOCPARM_MASK)) shl 16) or (x shl 8) or y type - Sockaddr_storage* {.final, pure.} = object - ss_family*: uint16 - ss_pad1: array[6, byte] - ss_align: int64 - ss_pad2: array[112, byte] - - InAddr* {.final, pure, union.} = object - s_addr*: uint32 - - In6Addr* {.final, pure, union.} = object - s_addr*: array[16, byte] - - Sockaddr_in* {.final, pure.} = object - sin_family*: uint16 - sin_port*: uint16 - sin_addr*: InAddr - sin_zero*: array[0..7, char] - - Sockaddr_in6* {.final, pure.} = object - sin6_family*: uint16 - sin6_port*: uint16 - sin6_flowinfo*: uint32 - sin6_addr*: In6Addr - sin6_scope_id*: uint32 - - SockLen* = cuint - - SockAddr* {.final, pure.} = object - sa_family*: uint16 - sa_data*: array[14, char] - - AddrInfo* {.final, pure.} = object - ai_flags*: cint ## Input flags. - ai_family*: cint ## Address family of socket. - ai_socktype*: cint ## Socket type. - ai_protocol*: cint ## Protocol of socket. - ai_addrlen*: csize_t ## Length of socket address. - ai_canonname*: pointer ## Canonical name of service location. - ai_addr*: ptr SockAddr ## Socket address of socket. - ai_next*: ptr AddrInfo ## Pointer to next in list. 
- - SocketHandle* = distinct int - HANDLE* = distinct uint GUID* {.final, pure.} = object D1*: uint32 @@ -860,8 +822,43 @@ when defined(windows): ) elif defined(macos) or defined(macosx): - import std/posix - export posix + from std/posix import close, shutdown, socket, getpeername, getsockname, + recvfrom, sendto, send, bindSocket, recv, connect, + unlink, listen, getaddrinfo, gai_strerror, getrlimit, + setrlimit, getpid, pthread_sigmask, sigemptyset, + sigaddset, sigismember, fcntl, accept, pipe, write, + signal, read, setsockopt, getsockopt, + Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, + SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, + Sockaddr_un, SocketHandle, AddrInfo, RLimit, + F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, + O_NONBLOCK, SOL_SOCKET, SOCK_RAW, MSG_NOSIGNAL, + AF_INET, AF_INET6, SO_ERROR, SO_REUSEADDR, + SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, + IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, + SIG_BLOCK, SIG_UNBLOCK, + SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, + SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD + + export close, shutdown, socket, getpeername, getsockname, + recvfrom, sendto, send, bindSocket, recv, connect, + unlink, listen, getaddrinfo, gai_strerror, getrlimit, + setrlimit, getpid, pthread_sigmask, sigemptyset, + sigaddset, sigismember, fcntl, accept, pipe, write, + signal, read, setsockopt, getsockopt, + Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, + SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, + Sockaddr_un, SocketHandle, AddrInfo, RLimit, + F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, + O_NONBLOCK, SOL_SOCKET, SOCK_RAW, MSG_NOSIGNAL, + AF_INET, AF_INET6, SO_ERROR, SO_REUSEADDR, + SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, + IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, + SIG_BLOCK, SIG_UNBLOCK, + SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, + SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD type MachTimebaseInfo* {.importc: "struct mach_timebase_info", @@ -879,8 +876,47 @@ elif defined(macos) or defined(macosx): importc, header: "".} elif defined(linux): - import std/[posix] - export posix + from std/posix import close, shutdown, sigemptyset, sigaddset, sigismember, + sigdelset, write, read, waitid, getaddrinfo, + gai_strerror, setsockopt, getsockopt, socket, + getrlimit, setrlimit, getpeername, getsockname, + recvfrom, sendto, send, bindSocket, recv, connect, + unlink, listen, sendmsg, recvmsg, getpid, fcntl, + pthread_sigmask, clock_gettime, signal, + ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, + SigInfo, Id, Tmsghdr, IOVec, RLimit, + SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, + Sockaddr_in6, Sockaddr_un, AddrInfo, SocketHandle, + CLOCK_MONOTONIC, F_GETFL, F_SETFL, F_GETFD, F_SETFD, + FD_CLOEXEC, O_NONBLOCK, SIG_BLOCK, SIG_UNBLOCK, + SOL_SOCKET, SO_ERROR, RLIMIT_NOFILE, MSG_NOSIGNAL, + AF_INET, AF_INET6, SO_REUSEADDR, SO_REUSEPORT, + SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, + SOCK_DGRAM, + SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, + SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD + + export close, shutdown, sigemptyset, sigaddset, sigismember, + sigdelset, write, read, waitid, getaddrinfo, + gai_strerror, setsockopt, getsockopt, socket, + getrlimit, setrlimit, getpeername, getsockname, + recvfrom, sendto, send, bindSocket, recv, connect, + unlink, listen, sendmsg, recvmsg, getpid, fcntl, + pthread_sigmask, 
clock_gettime, signal, + ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, + SigInfo, Id, Tmsghdr, IOVec, RLimit, + SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, + Sockaddr_in6, Sockaddr_un, AddrInfo, SocketHandle, + CLOCK_MONOTONIC, F_GETFL, F_SETFL, F_GETFD, F_SETFD, + FD_CLOEXEC, O_NONBLOCK, SIG_BLOCK, SIG_UNBLOCK, + SOL_SOCKET, SO_ERROR, RLIMIT_NOFILE, MSG_NOSIGNAL, + AF_INET, AF_INET6, SO_REUSEADDR, SO_REUSEPORT, + SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, + SOCK_DGRAM, + SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, + SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD when not defined(android) and defined(amd64): const IP_MULTICAST_TTL*: cint = 33 @@ -968,9 +1004,45 @@ elif defined(linux): proc signalfd*(fd: cint, mask: var Sigset, flags: cint): cint {. cdecl, importc: "signalfd", header: "".} -else: - import std/posix - export posix +elif defined(freebsd) or defined(openbsd) or defined(netbsd) or + defined(dragonfly): + from std/posix import close, shutdown, socket, getpeername, getsockname, + recvfrom, sendto, send, bindSocket, recv, connect, + unlink, listen, getaddrinfo, gai_strerror, getrlimit, + setrlimit, getpid, pthread_sigmask, sigemptyset, + sigaddset, sigismember, fcntl, accept, pipe, write, + signal, read, setsockopt, getsockopt, clock_gettime, + Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, + SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, + Sockaddr_un, SocketHandle, AddrInfo, RLimit, + F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, + O_NONBLOCK, SOL_SOCKET, SOCK_RAW, MSG_NOSIGNAL, + AF_INET, AF_INET6, SO_ERROR, SO_REUSEADDR, + SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, + IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, + SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, + SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, + SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD + + export close, shutdown, socket, getpeername, getsockname, + recvfrom, sendto, send, bindSocket, recv, connect, + unlink, listen, getaddrinfo, gai_strerror, getrlimit, + setrlimit, getpid, pthread_sigmask, sigemptyset, + sigaddset, sigismember, fcntl, accept, pipe, write, + signal, read, setsockopt, getsockopt, clock_gettime, + Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, + SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, + Sockaddr_un, SocketHandle, AddrInfo, RLimit, + F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, + O_NONBLOCK, SOL_SOCKET, SOCK_RAW, MSG_NOSIGNAL, + AF_INET, AF_INET6, SO_ERROR, SO_REUSEADDR, + SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, + IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, + SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, + SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, + SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD var IP_MULTICAST_TTL* {.importc: "IP_MULTICAST_TTL", header: "".}: cint @@ -1021,9 +1093,6 @@ elif defined(dragonfly): when defined(linux) or defined(macos) or defined(macosx) or defined(freebsd) or defined(openbsd) or defined(netbsd) or defined(dragonfly): - export SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, - SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, - SIGPIPE, SIGALRM, SIGTERM, SIGPIPE const POSIX_SPAWN_RESETIDS* = 0x01 @@ -1144,152 +1213,150 @@ when defined(linux) or defined(macos) or defined(macosx) or defined(freebsd) or importc: "posix_spawnattr_setsigmask", header: "", sideEffect.} -when defined(posix): - when defined(linux): - const - P_PID* = cint(1) - 
WNOHANG* = cint(1) - WSTOPPED* = cint(2) - WEXITTED* = cint(4) - WNOWAIT* = cint(0x01000000) - template WSTATUS(s: cint): cint = - s and 0x7F - template WAITEXITSTATUS*(s: cint): cint = - (s and 0xFF00) shr 8 - template WAITTERMSIG*(s: cint): cint = - WSTATUS(s) - template WAITSTOPSIG*(s: cint): cint = - WAITEXITSTATUS(s) - template WAITIFEXITED*(s: cint): bool = - WSTATUS(s) == 0 - template WAITIFSIGNALED*(s: cint): bool = - (cast[int8](WSTATUS(s) + 1) shr 1) > 0 - template WAITIFSTOPPED*(s: cint): bool = - (s and 0xFF) == 0x7F - template WAITIFCONTINUED*(s: cint): bool = - s == 0xFFFF - elif defined(openbsd): - const WNOHANG* = 1 - template WSTATUS(s: cint): cint = - s and 0x7F - template WAITEXITSTATUS*(s: cint): cint = - (s shr 8) and 0xFF - template WAITTERMSIG*(s: cint): cint = - WSTATUS(s) - template WAITSTOPSIG*(s: cint): cint = - WAITEXITSTATUS(s) - template WAITIFEXITED*(s: cint): bool = - WSTATUS(s) == 0 - template WAITIFSIGNALED*(s: cint): bool = - (WAITTERMSIG(s) != 0x7F) and (WSTATUS(s) != 0) - template WAITIFSTOPPED*(s: cint): bool = - WSTATUS(s) == 0x7F - template WAITIFCONTINUED*(s: cint): bool = - s == 0xFFFF - elif defined(dragonfly): - const WNOHANG* = 1 - template WSTATUS(s: cint): cint = - s and 0x7F - template WAITEXITSTATUS*(s: cint): cint = - (s shr 8) - template WAITTERMSIG*(s: cint): cint = - WSTATUS(s) - template WAITSTOPSIG*(s: cint): cint = - WAITEXITSTATUS(s) - template WAITIFEXITED*(s: cint): bool = - WSTATUS(s) == 0 - template WAITIFSIGNALED*(s: cint): bool = - (WAITTERMSIG(s) != 0x7F) and (WSTATUS(s) != 0) - template WAITIFSTOPPED*(s: cint): bool = - WSTATUS(s) == 0x7F - template WAITIFCONTINUED*(s: cint): bool = - s == 19 - elif defined(netbsd): - const WNOHANG* = 1 - template WSTATUS(s: cint): cint = - s and 0x7F - template WAITEXITSTATUS*(s: cint): cint = - (s shr 8) and 0xFF - template WAITTERMSIG*(s: cint): cint = - WSTATUS(s) - template WAITSTOPSIG*(s: cint): cint = - WAITEXITSTATUS(s) - template WAITIFEXITED*(s: cint): bool = - WSTATUS(s) == 0 - template WAITIFSIGNALED*(s: cint): bool = - not(WAITIFSTOPPED(s)) and not(WAITIFCONTINUED(s)) and not(WAITIFEXITED(s)) - template WAITIFSTOPPED*(s: cint): bool = - (WSTATUS(s) == 0x7F) and not(WAITIFCONTINUED(s)) - template WAITIFCONTINUED*(s: cint): bool = - s == 0xFFFF - elif defined(freebsd): - const WNOHANG* = 1 - template WSTATUS(s: cint): cint = - s and 0x7F - template WAITEXITSTATUS*(s: cint): cint = - s shr 8 - template WAITTERMSIG*(s: cint): cint = - WSTATUS(s) - template WAITSTOPSIG*(s: cint): cint = - s shr 8 - template WAITIFEXITED*(s: cint): bool = - WSTATUS(s) == 0 - template WAITIFSIGNALED*(s: cint): bool = - let wstatus = WSTATUS(s) - (wstatus != 0x7F) and (wstatus != 0) and (s != 0x13) - template WAITIFSTOPPED*(s: cint): bool = - WSTATUS(s) == 0x7F - template WAITIFCONTINUED*(s: cint): bool = - x == 0x13 - elif defined(macos) or defined(macosx): - const WNOHANG* = 1 - template WSTATUS(s: cint): cint = - s and 0x7F - template WAITEXITSTATUS*(s: cint): cint = - (s shr 8) and 0xFF - template WAITTERMSIG*(s: cint): cint = - WSTATUS(s) - template WAITSTOPSIG*(s: cint): cint = - s shr 8 - template WAITIFEXITED*(s: cint): bool = - WSTATUS(s) == 0 - template WAITIFSIGNALED*(s: cint): bool = - let wstatus = WSTATUS(s) - (wstatus != 0x7F) and (wstatus != 0) - template WAITIFSTOPPED*(s: cint): bool = - (WSTATUS(s) == 0x7F) and (WAITSTOPSIG(s) != 0x13) - template WAITIFCONTINUED*(s: cint): bool = - (WSTATUS(s) == 0x7F) and (WAITSTOPSIG(s) == 0x13) - else: - proc WAITEXITSTATUS*(s: cint): cint {. 
- importc: "WEXITSTATUS", header: "".} - ## Exit code, iff WIFEXITED(s) - proc WAITTERMSIG*(s: cint): cint {. - importc: "WTERMSIG", header: "".} - ## Termination signal, iff WIFSIGNALED(s) - proc WAITSTOPSIG*(s: cint): cint {. - importc: "WSTOPSIG", header: "".} - ## Stop signal, iff WIFSTOPPED(s) - proc WAITIFEXITED*(s: cint): bool {. - importc: "WIFEXITED", header: "".} - ## True if child exited normally. - proc WAITIFSIGNALED*(s: cint): bool {. - importc: "WIFSIGNALED", header: "".} - ## True if child exited due to uncaught signal. - proc WAITIFSTOPPED*(s: cint): bool {. - importc: "WIFSTOPPED", header: "".} - ## True if child is currently stopped. - proc WAITIFCONTINUED*(s: cint): bool {. - importc: "WIFCONTINUED", header: "".} - ## True if child has been continued. +when defined(linux): + const + P_PID* = cint(1) + WNOHANG* = cint(1) + WSTOPPED* = cint(2) + WEXITED* = cint(4) + WNOWAIT* = cint(0x01000000) + template WSTATUS(s: cint): cint = + s and 0x7F + template WAITEXITSTATUS*(s: cint): cint = + (s and 0xFF00) shr 8 + template WAITTERMSIG*(s: cint): cint = + WSTATUS(s) + template WAITSTOPSIG*(s: cint): cint = + WAITEXITSTATUS(s) + template WAITIFEXITED*(s: cint): bool = + WSTATUS(s) == 0 + template WAITIFSIGNALED*(s: cint): bool = + (cast[int8](WSTATUS(s) + 1) shr 1) > 0 + template WAITIFSTOPPED*(s: cint): bool = + (s and 0xFF) == 0x7F + template WAITIFCONTINUED*(s: cint): bool = + s == 0xFFFF +elif defined(openbsd): + const WNOHANG* = 1 + template WSTATUS(s: cint): cint = + s and 0x7F + template WAITEXITSTATUS*(s: cint): cint = + (s shr 8) and 0xFF + template WAITTERMSIG*(s: cint): cint = + WSTATUS(s) + template WAITSTOPSIG*(s: cint): cint = + WAITEXITSTATUS(s) + template WAITIFEXITED*(s: cint): bool = + WSTATUS(s) == 0 + template WAITIFSIGNALED*(s: cint): bool = + (WAITTERMSIG(s) != 0x7F) and (WSTATUS(s) != 0) + template WAITIFSTOPPED*(s: cint): bool = + WSTATUS(s) == 0x7F + template WAITIFCONTINUED*(s: cint): bool = + s == 0xFFFF +elif defined(dragonfly): + const WNOHANG* = 1 + template WSTATUS(s: cint): cint = + s and 0x7F + template WAITEXITSTATUS*(s: cint): cint = + (s shr 8) + template WAITTERMSIG*(s: cint): cint = + WSTATUS(s) + template WAITSTOPSIG*(s: cint): cint = + WAITEXITSTATUS(s) + template WAITIFEXITED*(s: cint): bool = + WSTATUS(s) == 0 + template WAITIFSIGNALED*(s: cint): bool = + (WAITTERMSIG(s) != 0x7F) and (WSTATUS(s) != 0) + template WAITIFSTOPPED*(s: cint): bool = + WSTATUS(s) == 0x7F + template WAITIFCONTINUED*(s: cint): bool = + s == 19 +elif defined(netbsd): + const WNOHANG* = 1 + template WSTATUS(s: cint): cint = + s and 0x7F + template WAITEXITSTATUS*(s: cint): cint = + (s shr 8) and 0xFF + template WAITTERMSIG*(s: cint): cint = + WSTATUS(s) + template WAITSTOPSIG*(s: cint): cint = + WAITEXITSTATUS(s) + template WAITIFEXITED*(s: cint): bool = + WSTATUS(s) == 0 + template WAITIFSIGNALED*(s: cint): bool = + not(WAITIFSTOPPED(s)) and not(WAITIFCONTINUED(s)) and not(WAITIFEXITED(s)) + template WAITIFSTOPPED*(s: cint): bool = + (WSTATUS(s) == 0x7F) and not(WAITIFCONTINUED(s)) + template WAITIFCONTINUED*(s: cint): bool = + s == 0xFFFF +elif defined(freebsd): + const WNOHANG* = 1 + template WSTATUS(s: cint): cint = + s and 0x7F + template WAITEXITSTATUS*(s: cint): cint = + s shr 8 + template WAITTERMSIG*(s: cint): cint = + WSTATUS(s) + template WAITSTOPSIG*(s: cint): cint = + s shr 8 + template WAITIFEXITED*(s: cint): bool = + WSTATUS(s) == 0 + template WAITIFSIGNALED*(s: cint): bool = + let wstatus = WSTATUS(s) + (wstatus != 0x7F) and (wstatus != 0) and (s != 
0x13) + template WAITIFSTOPPED*(s: cint): bool = + WSTATUS(s) == 0x7F + template WAITIFCONTINUED*(s: cint): bool = + x == 0x13 +elif defined(macos) or defined(macosx): + const WNOHANG* = 1 + template WSTATUS(s: cint): cint = + s and 0x7F + template WAITEXITSTATUS*(s: cint): cint = + (s shr 8) and 0xFF + template WAITTERMSIG*(s: cint): cint = + WSTATUS(s) + template WAITSTOPSIG*(s: cint): cint = + s shr 8 + template WAITIFEXITED*(s: cint): bool = + WSTATUS(s) == 0 + template WAITIFSIGNALED*(s: cint): bool = + let wstatus = WSTATUS(s) + (wstatus != 0x7F) and (wstatus != 0) + template WAITIFSTOPPED*(s: cint): bool = + (WSTATUS(s) == 0x7F) and (WAITSTOPSIG(s) != 0x13) + template WAITIFCONTINUED*(s: cint): bool = + (WSTATUS(s) == 0x7F) and (WAITSTOPSIG(s) == 0x13) +elif defined(posix): + proc WAITEXITSTATUS*(s: cint): cint {. + importc: "WEXITSTATUS", header: "".} + ## Exit code, iff WIFEXITED(s) + proc WAITTERMSIG*(s: cint): cint {. + importc: "WTERMSIG", header: "".} + ## Termination signal, iff WIFSIGNALED(s) + proc WAITSTOPSIG*(s: cint): cint {. + importc: "WSTOPSIG", header: "".} + ## Stop signal, iff WIFSTOPPED(s) + proc WAITIFEXITED*(s: cint): bool {. + importc: "WIFEXITED", header: "".} + ## True if child exited normally. + proc WAITIFSIGNALED*(s: cint): bool {. + importc: "WIFSIGNALED", header: "".} + ## True if child exited due to uncaught signal. + proc WAITIFSTOPPED*(s: cint): bool {. + importc: "WIFSTOPPED", header: "".} + ## True if child is currently stopped. + proc WAITIFCONTINUED*(s: cint): bool {. + importc: "WIFCONTINUED", header: "".} + ## True if child has been continued. when defined(posix): const INVALID_SOCKET* = SocketHandle(-1) INVALID_HANDLE_VALUE* = cint(-1) -proc `==`*(x: SocketHandle, y: int): bool = - x == SocketHandle(y) +proc `==`*(x: SocketHandle, y: int): bool = int(x) == y when defined(macosx) or defined(macos) or defined(bsd): const diff --git a/chronos/sendfile.nim b/chronos/sendfile.nim index 6993cb7ab..e1e14b2f9 100644 --- a/chronos/sendfile.nim +++ b/chronos/sendfile.nim @@ -59,9 +59,9 @@ elif defined(linux) or defined(android): elif defined(freebsd) or defined(openbsd) or defined(netbsd) or defined(dragonflybsd): - import posix, os + import oserrno type - SendfileHeader* {.importc: "sf_hdtr", + SendfileHeader* {.importc: "struct sf_hdtr", header: """#include #include #include """, @@ -76,20 +76,21 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or proc sendfile*(outfd, infd: int, offset: int, count: var int): int = var o = 0'u - let res = osSendFile(cint(infd), cint(outfd), uint(offset), uint(count), nil, - addr o, 0) + let res = osSendFile(cint(infd), cint(outfd), uint(offset), uint(count), + nil, addr o, 0) if res >= 0: count = int(o) 0 else: let err = osLastError() count = - if int(err) == EAGAIN: int(o) + if err == EAGAIN: int(o) else: 0 -1 elif defined(macosx): - import posix, os + import oserrno + type SendfileHeader* {.importc: "struct sf_hdtr", header: """#include @@ -113,6 +114,6 @@ elif defined(macosx): else: let err = osLastError() count = - if int(err) == EAGAIN: int(o) + if err == EAGAIN: int(o) else: 0 -1 diff --git a/chronos/timer.nim b/chronos/timer.nim index c469c8367..8e7cb8fa4 100644 --- a/chronos/timer.nim +++ b/chronos/timer.nim @@ -454,21 +454,3 @@ func epochNanoSeconds*(moment: Moment): int64 = proc fromNow*(t: typedesc[Moment], a: Duration): Moment {.inline.} = ## Returns moment in time which is equal to current moment + Duration. 
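# A minimal sketch of how the WAIT*() helpers defined above (in chronos/osdefs)
# might be used to decode a raw process status word, e.g. one produced by
# waitpid(); the proc name `describeWaitStatus` is illustrative and not part
# of this patch.
when defined(posix):
  import chronos/osdefs

  proc describeWaitStatus(status: cint): string =
    # Interpret the packed status word the same way the per-platform
    # templates above do.
    if WAITIFEXITED(status):
      "exited with code " & $WAITEXITSTATUS(status)
    elif WAITIFSIGNALED(status):
      "terminated by signal " & $WAITTERMSIG(status)
    elif WAITIFSTOPPED(status):
      "stopped by signal " & $WAITSTOPSIG(status)
    elif WAITIFCONTINUED(status):
      "continued"
    else:
      "no status change"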
Moment.now() + a - -when defined(posix): - from posix import Time, Suseconds, Timeval, Timespec - - func toTimeval*(a: Duration): Timeval = - ## Convert Duration ``a`` to ``Timeval`` object. - let m = a.value mod Second.value - Timeval( - tv_sec: Time(a.value div Second.value), - tv_usec: Suseconds(m div Microsecond.value) - ) - - func toTimespec*(a: Duration): Timespec = - ## Convert Duration ``a`` to ``Timespec`` object. - Timespec( - tv_sec: Time(a.value div Second.value), - tv_nsec: int(a.value mod Second.value) - ) diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index 66eef1b11..8255d2621 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -21,7 +21,7 @@ from std/net import Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, from std/nativesockets import toInt, `$` export Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, SockType, - Protocol, Port, toInt, `$`, osdefs + Protocol, Port, toInt, `$` const DefaultStreamBufferSize* = 4096 ## Default buffer size for stream From 61d52b1ef8f837ec77678f459ec7b9860590b8fc Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 18 May 2023 10:10:01 +0300 Subject: [PATCH 015/146] Export SocketFlags for HTTP client. (#389) --- chronos/apps/http/httpclient.nim | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 90539b9be..30a05e2e8 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -14,6 +14,7 @@ import httptable, httpcommon, httpagent, httpbodyrw, multipart export results, asyncloop, asyncsync, asyncstream, tlsstream, chunkstream, boundstream, httptable, httpcommon, httpagent, httpbodyrw, multipart, httputils +export SocketFlags const HttpMaxHeadersSize* = 8192 @@ -122,6 +123,7 @@ type connectionBufferSize*: int maxConnections*: int connectionsCount*: int + socketFlags*: set[SocketFlags] flags*: HttpClientFlags HttpAddress* = object @@ -345,7 +347,8 @@ proc new*(t: typedesc[HttpSessionRef], connectionBufferSize = DefaultStreamBufferSize, maxConnections = -1, idleTimeout = HttpConnectionIdleTimeout, - idlePeriod = HttpConnectionCheckPeriod): HttpSessionRef {. + idlePeriod = HttpConnectionCheckPeriod, + socketFlags: set[SocketFlags] = {}): HttpSessionRef {. raises: [Defect] .} = ## Create new HTTP session object. 
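# A minimal usage sketch for the `socketFlags` parameter introduced in this
# patch: the flags are handed to `connect()` for every transport the session's
# connection pool opens. `SocketFlags.TcpNoDelay` is assumed to be a member of
# the exported enum, and `status`/`data` are assumed to be the fields of
# `HttpResponseTuple`; adjust names if the actual API differs.
import std/uri
import chronos, chronos/apps/http/httpclient

proc fetchExample() {.async.} =
  let session = HttpSessionRef.new(socketFlags = {SocketFlags.TcpNoDelay})
  try:
    let resp = await session.fetch(parseUri("http://example.com/"))
    echo "status: ", resp.status, " body bytes: ", len(resp.data)
  finally:
    await session.closeWait()

when isMainModule:
  waitFor fetchExample()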
## @@ -365,6 +368,7 @@ proc new*(t: typedesc[HttpSessionRef], idleTimeout: idleTimeout, idlePeriod: idlePeriod, connections: initTable[string, seq[HttpClientConnectionRef]](), + socketFlags: socketFlags ) res.watcherFut = if HttpClientFlag.Http11Pipeline in flags: @@ -619,7 +623,8 @@ proc connect(session: HttpSessionRef, for address in ha.addresses: let transp = try: - await connect(address, bufferSize = session.connectionBufferSize) + await connect(address, bufferSize = session.connectionBufferSize, + flags = session.socketFlags) except CancelledError as exc: raise exc except CatchableError: From 38cc233700dbd1601bf44ec5884ccc4d48b44bf9 Mon Sep 17 00:00:00 2001 From: Bung Date: Fri, 19 May 2023 18:43:33 +0800 Subject: [PATCH 016/146] fix missing sigprocmask import (#390) --- chronos/osdefs.nim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 836529700..d7bb868fa 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -825,7 +825,7 @@ elif defined(macos) or defined(macosx): from std/posix import close, shutdown, socket, getpeername, getsockname, recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, getaddrinfo, gai_strerror, getrlimit, - setrlimit, getpid, pthread_sigmask, sigemptyset, + setrlimit, getpid, pthread_sigmask, sigprocmask, sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, @@ -844,7 +844,7 @@ elif defined(macos) or defined(macosx): export close, shutdown, socket, getpeername, getsockname, recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, getaddrinfo, gai_strerror, getrlimit, - setrlimit, getpid, pthread_sigmask, sigemptyset, + setrlimit, getpid, pthread_sigmask, sigprocmask, sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, From f748387462b2472a941e916285f04fc14fd1180a Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 19 May 2023 17:25:22 +0300 Subject: [PATCH 017/146] Add HTTP client helper to read `text/event-stream` streaming response. (#375) --- chronos/apps/http/httpclient.nim | 105 ++++++++++++++++- chronos/apps/http/httpcommon.nim | 1 + tests/testhttpclient.nim | 186 +++++++++++++++++++++++++++++++ 3 files changed, 290 insertions(+), 2 deletions(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 30a05e2e8..1462c53ae 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -6,8 +6,8 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import std/[uri, tables, strutils, sequtils] -import stew/[results, base10, base64], httputils +import std/[uri, tables, sequtils] +import stew/[results, base10, base64, byteutils], httputils import ../../asyncloop, ../../asyncsync import ../../streams/[asyncstream, tlsstream, chunkstream, boundstream] import httptable, httpcommon, httpagent, httpbodyrw, multipart @@ -194,6 +194,10 @@ type opened*: int64 closed*: int64 + ServerSentEvent* = object + name*: string + data*: string + # HttpClientRequestRef valid states are: # Ready -> Open -> (Finished, Error) -> (Closing, Closed) # @@ -1511,3 +1515,100 @@ proc fetch*(session: HttpSessionRef, url: Uri): Future[HttpResponseTuple] {. 
if not(isNil(request)): await closeWait(request) if not(isNil(redirect)): await closeWait(redirect) raise exc + +proc getServerSentEvents*( + response: HttpClientResponseRef, + maxEventSize: int = -1 + ): Future[seq[ServerSentEvent]] {.async.} = + ## Read number of server-sent events (SSE) from HTTP response ``response``. + ## + ## ``maxEventSize`` - maximum size of events chunk in one message, use + ## `-1` or `0` to set size to unlimited. + ## + ## Server-sent events parsing is done according to: + ## https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream + ## + ## Note: Server-sent event comments are ignored and silently skipped. + const + CR = byte(0x0D) + LF = byte(0x0A) + COLON = byte(':') + SPACE = byte(' ') + + let reader = response.getBodyReader() + + var + error: ref HttpReadError = nil + res: seq[ServerSentEvent] + buffer: seq[byte] + + proc consumeBuffer() = + if len(buffer) == 0: return + + let pos = buffer.find(COLON) + if pos == 0: + # comment line + discard + elif pos > 0: + # field_name: field_value + let + name = string.fromBytes(buffer.toOpenArray(0, pos - 1)) + value = + if (pos + 1) < len(buffer): + let spos = if buffer[pos + 1] == SPACE: pos + 2 else: pos + 1 + string.fromBytes(buffer.toOpenArray(spos, len(buffer) - 1)) + else: + "" + res.add(ServerSentEvent(name: name, data: value)) + else: + # field_name only + let name = string.fromBytes(buffer.toOpenArray(0, len(buffer) - 1)) + res.add(ServerSentEvent(name: name, data: "")) + + # Reset internal buffer to zero length. + buffer.setLen(0) + + proc discardBuffer() = + if len(buffer) == 0: return + # Reset internal buffer to 1 byte length to keep comment sign. + buffer.setLen(1) + + proc predicate(data: openArray[byte]): tuple[consumed: int, done: bool] = + var i = 0 + while i < len(data): + if data[i] in {CR, LF}: + # CR or LF encountered + inc(i) + if (data[i - 1] == CR) and ((i < len(data)) and data[i] == LF): + # We trying to check for CRLF + inc(i) + + if len(buffer) == 0: + if len(res) == 0: + res.add(ServerSentEvent(name: "", data: "")) + return (i, true) + consumeBuffer() + else: + buffer.add(data[i]) + if (maxEventSize >= 0) and (len(buffer) > maxEventSize): + if buffer[0] != COLON: + # We only check limits for events and ignore comments size. 
+ error = newException(HttpReadLimitError, + "Size of event exceeded maximum size") + return (0, true) + discardBuffer() + + inc(i) + + if len(data) == 0: + # Stream is at EOF + consumeBuffer() + return (0, true) + + (i, false) + + await reader.readMessage(predicate) + if not isNil(error): + raise error + else: + return res diff --git a/chronos/apps/http/httpcommon.nim b/chronos/apps/http/httpcommon.nim index 860a7a7ee..515920e86 100644 --- a/chronos/apps/http/httpcommon.nim +++ b/chronos/apps/http/httpcommon.nim @@ -54,6 +54,7 @@ type HttpRedirectError* = object of HttpError HttpAddressError* = object of HttpError HttpUseClosedError* = object of HttpError + HttpReadLimitError* = object of HttpReadError KeyValueTuple* = tuple key: string diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index ccd8eeffa..07128d9d7 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -1001,6 +1001,188 @@ suite "HTTP client testing suite": return true + proc testServerSentEvents(address: TransportAddress, + secure: bool): Future[bool] {.async.} = + const + SingleGoodTests = [ + ("/test/single/1", "a:b\r\nc: d\re:f\n:comment\r\ng:\n h: j \n\n", + @[("a", "b"), ("c", "d"), ("e", "f"), ("g", ""), (" h", "j ")]), + ("/test/single/2", ":comment\r:\nfield1\r\nfield2:\n\n", + @[("field1", ""), ("field2", "")]), + ("/test/single/3", ":c1\r:c2\nfield1:value1", @[("field1", "value1")]), + ("/test/single/4", ":c1\r:c2\nfield1:", @[("field1", "")]), + ("/test/single/5", ":c1\r:c2\nfield1", @[("field1", "")]), + ("/test/single/6", "a", @[("a", "")]), + ("/test/single/7", "b:", @[("b", "")]), + ("/test/single/8", "c:d", @[("c", "d")]), + ("/test/single/9", ":", @[]), + ("/test/single/10", "", @[]), + ("/test/single/11", ":c1\n", @[]), + ("/test/single/12", ":c1\n:c2\n", @[]), + ("/test/single/13", ":c1\n:c2\n:c3\n", @[]), + ("/test/single/14", ":c1\n:c2\n:c3\n:c4", @[]), + ("/test/single/15", "\r\r", @[("", "")]), + ("/test/single/15", "\n\n", @[("", "")]), + ("/test/single/17", "\r\n\r\n", @[("", "")]), + ("/test/single/18", "\r\n", @[("", "")]), + ("/test/single/19", "\r", @[("", "")]), + ("/test/single/20", "\n", @[("", "")]) + ] + MultipleGoodTests = [ + ("/test/multiple/1", "a:b\nc:d\n\ne:f\rg:h\r\ri:j\r\nk:l\r\n\r\n", 3, + @[@[("a", "b"), ("c", "d")], @[("e", "f"), ("g", "h")], + @[("i", "j"), ("k", "l")]]), + ("/test/multiple/2", "a:b\nc:d\n\ne:f\rg:h\r\ri:j\r\nk:l\r\n\r\n\r\n", + 4, @[@[("a", "b"), ("c", "d")], @[("e", "f"), ("g", "h")], + @[("i", "j"), ("k", "l")], @[("", "")]]), + ] + OverflowTests = [ + ("/test/overflow/1", ":verylongcomment", 1, false), + ("/test/overflow/2", ":verylongcomment\n:anotherone", 1, false), + ("/test/overflow/3", "aa\n", 1, true), + ("/test/overflow/4", "a:b\n", 2, true) + ] + + proc `==`(a: ServerSentEvent, b: tuple[name: string, value: string]): bool = + a.name == b.name and a.data == b.value + + proc `==`(a: seq[ServerSentEvent], + b: seq[tuple[name: string, value: string]]): bool = + if len(a) != len(b): + return false + for index, value in a.pairs(): + if value != b[index]: + return false + true + + proc `==`(a: seq[seq[ServerSentEvent]], + b: seq[seq[tuple[name: string, value: string]]]): bool = + if len(a) != len(b): + return false + for index, value in a.pairs(): + if value != b[index]: + return false + true + + proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + if r.isOk(): + let request = r.get() + if request.uri.path.startsWith("/test/single/"): + let index = + block: + var res = -1 + for index, value in 
SingleGoodTests.pairs(): + if value[0] == request.uri.path: + res = index + break + res + if index < 0: + return await request.respond(Http404, "Page not found") + var response = request.getResponse() + response.status = Http200 + await response.sendBody(SingleGoodTests[index][1]) + return response + elif request.uri.path.startsWith("/test/multiple/"): + let index = + block: + var res = -1 + for index, value in MultipleGoodTests.pairs(): + if value[0] == request.uri.path: + res = index + break + res + if index < 0: + return await request.respond(Http404, "Page not found") + var response = request.getResponse() + response.status = Http200 + await response.sendBody(MultipleGoodTests[index][1]) + return response + elif request.uri.path.startsWith("/test/overflow/"): + let index = + block: + var res = -1 + for index, value in OverflowTests.pairs(): + if value[0] == request.uri.path: + res = index + break + res + if index < 0: + return await request.respond(Http404, "Page not found") + var response = request.getResponse() + response.status = Http200 + await response.sendBody(OverflowTests[index][1]) + return response + else: + return await request.respond(Http404, "Page not found") + else: + return dumbResponse() + + var server = createServer(address, process, secure) + server.start() + + var session = createSession(secure) + + try: + for item in SingleGoodTests: + let ha = + if secure: + getAddress(address, HttpClientScheme.Secure, item[0]) + else: + getAddress(address, HttpClientScheme.NonSecure, item[0]) + let + req = HttpClientRequestRef.new(session, ha, HttpMethod.MethodGet) + response = await req.send() + events = await response.getServerSentEvents() + check events == item[2] + await response.closeWait() + await req.closeWait() + + for item in MultipleGoodTests: + let ha = + if secure: + getAddress(address, HttpClientScheme.Secure, item[0]) + else: + getAddress(address, HttpClientScheme.NonSecure, item[0]) + var req = HttpClientRequestRef.new(session, ha, HttpMethod.MethodGet) + var response = await send(req) + let events = + block: + var res: seq[seq[ServerSentEvent]] + for i in 0 ..< item[2]: + let ires = await response.getServerSentEvents() + res.add(ires) + res + check events == item[3] + await closeWait(response) + await closeWait(req) + + for item in OverflowTests: + let ha = + if secure: + getAddress(address, HttpClientScheme.Secure, item[0]) + else: + getAddress(address, HttpClientScheme.NonSecure, item[0]) + var req = HttpClientRequestRef.new(session, ha, HttpMethod.MethodGet) + var response = await send(req) + let error = + try: + let events {.used.} = await response.getServerSentEvents(item[2]) + false + except HttpReadLimitError: + true + except CatchableError: + false + check error == item[3] + await closeWait(response) + await closeWait(req) + + finally: + await closeWait(session) + await server.stop() + await server.closeWait() + + return true + test "HTTP all request methods test": let address = initTAddress("127.0.0.1:30080") check waitFor(testMethods(address, false)) == 18 @@ -1079,6 +1261,10 @@ suite "HTTP client testing suite": test "HTTP client no-pipeline test": let address = initTAddress("127.0.0.1:30080") check waitFor(testNoPipeline(address)) == true + + test "HTTP client server-sent events test": + let address = initTAddress("127.0.0.1:30080") + check waitFor(testServerSentEvents(address, false)) == true test "Leaks test": proc getTrackerLeaks(tracker: string): bool = From 148ddf49c2ff51a223ca0df237893afe5dbaec07 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov 
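# A minimal sketch of consuming a `text/event-stream` response with the
# `getServerSentEvents()` helper added in the patch above. The URL, the
# 16384-byte per-event limit and the "stop on an empty batch" convention are
# illustrative assumptions, and the address/request construction mirrors the
# tests above; exact helper names may differ slightly.
import std/uri
import chronos, chronos/apps/http/httpclient

proc watchEvents() {.async.} =
  let
    session = HttpSessionRef.new()
    address = session.getAddress(parseUri("http://example.com/events")).get()
    request = HttpClientRequestRef.new(session, address, HttpMethod.MethodGet)
    response = await request.send()
  try:
    while true:
      # Each call returns the fields of the next event block (blocks are
      # separated by a blank line on the wire); an empty batch is treated as
      # end-of-stream in this sketch.
      let events = await response.getServerSentEvents(maxEventSize = 16384)
      if len(events) == 0:
        break
      for event in events:
        echo event.name, ": ", event.data
  finally:
    await response.closeWait()
    await request.closeWait()
    await session.closeWait()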
Date: Tue, 23 May 2023 13:39:35 +0300 Subject: [PATCH 018/146] Asyncproc (Part 3/3) (#374) * Initial commit. * Some Linux fixes. * Address review comments on Windows. * Fix issues on Linux. * Fix 1.2 issue and Windows warnings. * Fix posix compilation issues. --- chronos.nim | 5 +- chronos/asyncloop.nim | 282 ++++++++- chronos/asyncproc.nim | 1311 +++++++++++++++++++++++++++++++++++++++++ chronos/config.nim | 32 +- chronos/osdefs.nim | 54 +- tests/testall.nim | 2 +- tests/testproc.bat | 36 ++ tests/testproc.nim | 425 +++++++++++++ tests/testproc.sh | 20 + tests/testsignal.nim | 2 +- 10 files changed, 2119 insertions(+), 50 deletions(-) create mode 100644 chronos/asyncproc.nim create mode 100644 tests/testproc.bat create mode 100644 tests/testproc.nim create mode 100755 tests/testproc.sh diff --git a/chronos.nim b/chronos.nim index 8295924dd..6801b2894 100644 --- a/chronos.nim +++ b/chronos.nim @@ -5,5 +5,6 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import chronos/[asyncloop, asyncsync, handles, transport, timer, debugutils] -export asyncloop, asyncsync, handles, transport, timer, debugutils +import chronos/[asyncloop, asyncsync, handles, transport, timer, + asyncproc, debugutils] +export asyncloop, asyncsync, handles, transport, timer, asyncproc, debugutils diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index 5314933b5..ff2f07946 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -14,7 +14,7 @@ else: {.push raises: [].} from nativesockets import Port -import std/[tables, strutils, heapqueue, options, deques] +import std/[tables, strutils, heapqueue, deques] import stew/results import "."/[config, osdefs, oserrno, osutils, timer] @@ -320,6 +320,20 @@ when defined(windows): RefCustomOverlapped* = ref CustomOverlapped + PostCallbackData = object + ioPort: HANDLE + handleFd: AsyncFD + waitFd: HANDLE + udata: pointer + ovlref: RefCustomOverlapped + ovl: pointer + + WaitableHandle* = ref PostCallbackData + ProcessHandle* = distinct WaitableHandle + + WaitableResult* {.pure.} = enum + Ok, Timeout + AsyncFD* = distinct int proc hash(x: AsyncFD): Hash {.borrow.} @@ -328,9 +342,9 @@ when defined(windows): proc getFunc(s: SocketHandle, fun: var pointer, guid: GUID): bool = var bytesRet: DWORD fun = nil - result = wsaIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, unsafeAddr(guid), - sizeof(GUID).DWORD, addr fun, sizeof(pointer).DWORD, - addr(bytesRet), nil, nil) == 0 + wsaIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, unsafeAddr(guid), + DWORD(sizeof(GUID)), addr fun, DWORD(sizeof(pointer)), + addr(bytesRet), nil, nil) == 0 proc globalInit() = var wsa = WSAData() @@ -428,6 +442,141 @@ when defined(windows): ## Unregisters ``fd``. getThreadDispatcher().handles.excl(fd) + {.push stackTrace: off.} + proc waitableCallback(param: pointer, timerOrWaitFired: WINBOOL) {. + stdcall, gcsafe.} = + # This procedure will be executed in `wait thread`, so it must not use + # GC related objects. + # We going to ignore callbacks which was spawned when `isNil(param) == true` + # because we unable to indicate this error. + if isNil(param): return + var wh = cast[ptr PostCallbackData](param) + # We ignore result of postQueueCompletionStatus() call because we unable to + # indicate error. 
+ discard postQueuedCompletionStatus(wh[].ioPort, DWORD(timerOrWaitFired), + ULONG_PTR(wh[].handleFd), + wh[].ovl) + {.pop.} + + proc registerWaitable( + handle: HANDLE, + flags: ULONG, + timeout: Duration, + cb: CallbackFunc, + udata: pointer + ): Result[WaitableHandle, OSErrorCode] = + ## Register handle of (Change notification, Console input, Event, + ## Memory resource notification, Mutex, Process, Semaphore, Thread, + ## Waitable timer) for waiting, using specific Windows' ``flags`` and + ## ``timeout`` value. + ## + ## Callback ``cb`` will be scheduled with ``udata`` parameter when + ## ``handle`` become signaled. + ## + ## Result of this procedure call ``WaitableHandle`` should be closed using + ## closeWaitable() call. + ## + ## NOTE: This is private procedure, not supposed to be publicly available, + ## please use ``waitForSingleObject()``. + let loop = getThreadDispatcher() + var ovl = RefCustomOverlapped(data: CompletionData(cb: cb)) + + var whandle = (ref PostCallbackData)( + ioPort: loop.getIoHandler(), + handleFd: AsyncFD(handle), + udata: udata, + ovlref: ovl, + ovl: cast[pointer](ovl) + ) + + ovl.data.udata = cast[pointer](whandle) + + let dwordTimeout = + if timeout == InfiniteDuration: + DWORD(INFINITE) + else: + DWORD(timeout.milliseconds) + + if registerWaitForSingleObject(addr(whandle[].waitFd), handle, + cast[WAITORTIMERCALLBACK](waitableCallback), + cast[pointer](whandle), + dwordTimeout, + flags) == WINBOOL(0): + ovl.data.udata = nil + whandle.ovlref = nil + whandle.ovl = nil + return err(osLastError()) + + ok(WaitableHandle(whandle)) + + proc closeWaitable(wh: WaitableHandle): Result[void, OSErrorCode] = + ## Close waitable handle ``wh`` and clear all the resources. It is safe + ## to close this handle, even if wait operation is pending. + ## + ## NOTE: This is private procedure, not supposed to be publicly available, + ## please use ``waitForSingleObject()``. + doAssert(not(isNil(wh))) + + let pdata = (ref PostCallbackData)(wh) + # We are not going to clear `ref` fields in PostCallbackData object because + # it possible that callback is already scheduled. + if unregisterWait(pdata.waitFd) == 0: + let res = osLastError() + if res != ERROR_IO_PENDING: + return err(res) + ok() + + proc addProcess2*(pid: int, cb: CallbackFunc, + udata: pointer = nil): Result[ProcessHandle, OSErrorCode] = + ## Registers callback ``cb`` to be called when process with process + ## identifier ``pid`` exited. Returns process identifier, which can be + ## used to clear process callback via ``removeProcess``. + doAssert(pid > 0, "Process identifier must be positive integer") + let + hProcess = openProcess(SYNCHRONIZE, WINBOOL(0), DWORD(pid)) + flags = WT_EXECUTEINWAITTHREAD or WT_EXECUTEONLYONCE + + var wh: WaitableHandle = nil + + if hProcess == HANDLE(0): + return err(osLastError()) + + proc continuation(udata: pointer) {.gcsafe.} = + doAssert(not(isNil(udata))) + doAssert(not(isNil(wh))) + discard closeFd(hProcess) + cb(wh[].udata) + + wh = + block: + let res = registerWaitable(hProcess, flags, InfiniteDuration, + continuation, udata) + if res.isErr(): + discard closeFd(hProcess) + return err(res.error()) + res.get() + ok(ProcessHandle(wh)) + + proc removeProcess2*(procHandle: ProcessHandle): Result[void, OSErrorCode] = + ## Remove process' watching using process' descriptor ``procHandle``. + let waitableHandle = WaitableHandle(procHandle) + doAssert(not(isNil(waitableHandle))) + ? 
closeWaitable(waitableHandle) + ok() + + proc addProcess*(pid: int, cb: CallbackFunc, + udata: pointer = nil): ProcessHandle {. + raises: [Defect, OSError].} = + ## Registers callback ``cb`` to be called when process with process + ## identifier ``pid`` exited. Returns process identifier, which can be + ## used to clear process callback via ``removeProcess``. + addProcess2(pid, cb, udata).tryGet() + + proc removeProcess*(procHandle: ProcessHandle) {. + raises: [Defect, OSError].} = + ## Remove process' watching using process' descriptor ``procHandle``. + removeProcess2(procHandle).tryGet() + proc poll*() = ## Perform single asynchronous step, processing timers and completing ## tasks. Blocks until at least one event has completed. @@ -772,8 +921,15 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or closeSocket(fd, aftercb) when asyncEventEngine in ["epoll", "kqueue"]: - proc addSignal2*(signal: int, cb: CallbackFunc, - udata: pointer = nil): Result[int, OSErrorCode] = + type + ProcessHandle* = distinct int + SignalHandle* = distinct int + + proc addSignal2*( + signal: int, + cb: CallbackFunc, + udata: pointer = nil + ): Result[SignalHandle, OSErrorCode] = ## Start watching signal ``signal``, and when signal appears, call the ## callback ``cb`` with specified argument ``udata``. Returns signal ## identifier code, which can be used to remove signal callback @@ -785,10 +941,13 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or adata.reader = AsyncCallback(function: cb, udata: udata) do: return err(osdefs.EBADF) - ok(sigfd) + ok(SignalHandle(sigfd)) - proc addProcess2*(pid: int, cb: CallbackFunc, - udata: pointer = nil): Result[int, OSErrorCode] = + proc addProcess2*( + pid: int, + cb: CallbackFunc, + udata: pointer = nil + ): Result[ProcessHandle, OSErrorCode] = ## Registers callback ``cb`` to be called when process with process ## identifier ``pid`` exited. Returns process' descriptor, which can be ## used to clear process callback via ``removeProcess``. @@ -799,31 +958,42 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or adata.reader = AsyncCallback(function: cb, udata: udata) do: return err(osdefs.EBADF) - ok(procfd) + ok(ProcessHandle(procfd)) - proc removeSignal2*(sigfd: int): Result[void, OSErrorCode] = + proc removeSignal2*(signalHandle: SignalHandle): Result[void, OSErrorCode] = ## Remove watching signal ``signal``. - getThreadDispatcher().selector.unregister2(cint(sigfd)) + getThreadDispatcher().selector.unregister2(cint(signalHandle)) - proc removeProcess2*(procfd: int): Result[void, OSErrorCode] = + proc removeProcess2*(procHandle: ProcessHandle): Result[void, OSErrorCode] = ## Remove process' watching using process' descriptor ``procfd``. - getThreadDispatcher().selector.unregister2(cint(procfd)) + getThreadDispatcher().selector.unregister2(cint(procHandle)) proc addSignal*(signal: int, cb: CallbackFunc, - udata: pointer = nil): int {.raises: [Defect, OSError].} = + udata: pointer = nil): SignalHandle {. + raises: [Defect, OSError].} = ## Start watching signal ``signal``, and when signal appears, call the ## callback ``cb`` with specified argument ``udata``. Returns signal ## identifier code, which can be used to remove signal callback ## via ``removeSignal``. addSignal2(signal, cb, udata).tryGet() - proc removeSignal*(sigfd: int) {.raises: [Defect, OSError].} = + proc removeSignal*(signalHandle: SignalHandle) {. + raises: [Defect, OSError].} = ## Remove watching signal ``signal``. 
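# A minimal sketch of the unified process-watching API: addProcess() registers
# a callback that fires when the given pid exits and returns a ProcessHandle
# which removeProcess()/removeProcess2() can later use to drop the
# registration. Bridging the callback to a Future, as below, is an
# illustrative pattern, not code from this patch.
import chronos

proc processExited(pid: int): Future[void] =
  var retFuture = newFuture[void]("example.processExited")

  proc onExit(udata: pointer) {.gcsafe.} =
    if not(retFuture.finished()):
      retFuture.complete()

  # Keep the returned ProcessHandle if the registration may need to be
  # dropped early, e.g. on cancellation, via removeProcess(handle).
  discard addProcess(pid, onExit)
  retFuture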
- removeSignal2(sigfd).tryGet() + removeSignal2(signalHandle).tryGet() - proc removeProcess*(procfd: int) {.raises: [Defect, OSError].} = - ## Remove process' watching using process' descriptor ``procfd``. - removeProcess2(procfd).tryGet() + proc addProcess*(pid: int, cb: CallbackFunc, + udata: pointer = nil): ProcessHandle {. + raises: [Defect, OSError].} = + ## Registers callback ``cb`` to be called when process with process + ## identifier ``pid`` exited. Returns process identifier, which can be + ## used to clear process callback via ``removeProcess``. + addProcess2(pid, cb, udata).tryGet() + + proc removeProcess*(procHandle: ProcessHandle) {. + raises: [Defect, OSError].} = + ## Remove process' watching using process' descriptor ``procHandle``. + removeProcess2(procHandle).tryGet() proc poll*() {.gcsafe.} = ## Perform single asynchronous step. @@ -1002,7 +1172,7 @@ when not(defined(windows)): when asyncEventEngine in ["epoll", "kqueue"]: proc waitSignal*(signal: int): Future[void] {.raises: [Defect].} = var retFuture = newFuture[void]("chronos.waitSignal()") - var sigfd: int = -1 + var signalHandle: Opt[SignalHandle] template getSignalException(e: OSErrorCode): untyped = newException(AsyncError, "Could not manipulate signal handler, " & @@ -1010,8 +1180,8 @@ when not(defined(windows)): proc continuation(udata: pointer) {.gcsafe.} = if not(retFuture.finished()): - if sigfd != -1: - let res = removeSignal2(sigfd) + if signalHandle.isSome(): + let res = removeSignal2(signalHandle.get()) if res.isErr(): retFuture.fail(getSignalException(res.error())) else: @@ -1019,17 +1189,17 @@ when not(defined(windows)): proc cancellation(udata: pointer) {.gcsafe.} = if not(retFuture.finished()): - if sigfd != -1: - let res = removeSignal2(sigfd) + if signalHandle.isSome(): + let res = removeSignal2(signalHandle.get()) if res.isErr(): retFuture.fail(getSignalException(res.error())) - sigfd = + signalHandle = block: let res = addSignal2(signal, continuation) if res.isErr(): retFuture.fail(getSignalException(res.error())) - res.get() + Opt.some(res.get()) retFuture.cancelCallback = cancellation retFuture @@ -1283,5 +1453,63 @@ when chronosFutureTracking: ## completed, cancelled or failed). futureList.count +when defined(windows): + proc waitForSingleObject*(handle: HANDLE, + timeout: Duration): Future[WaitableResult] {. + raises: [Defect].} = + ## Waits until the specified object is in the signaled state or the + ## time-out interval elapses. WaitForSingleObject() for asynchronous world. 
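# A minimal sketch of awaiting an arbitrary waitable Windows handle (process,
# event, timer, ...) with the asynchronous waitForSingleObject() defined
# below; the proc name and the 5-second timeout are illustrative.
when defined(windows):
  import chronos, chronos/osdefs

  proc waitOnHandle(handle: HANDLE) {.async.} =
    let res = await waitForSingleObject(handle, 5.seconds)
    case res
    of WaitableResult.Ok:
      echo "handle was signalled"
    of WaitableResult.Timeout:
      echo "timed out after 5 seconds"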
+ let flags = WT_EXECUTEONLYONCE + + var + retFuture = newFuture[WaitableResult]("chronos.waitForSingleObject()") + waitHandle: WaitableHandle = nil + + proc continuation(udata: pointer) {.gcsafe.} = + doAssert(not(isNil(waitHandle))) + if not(retFuture.finished()): + let + ovl = cast[PtrCustomOverlapped](udata) + returnFlag = WINBOOL(ovl.data.bytesCount) + res = closeWaitable(waitHandle) + if res.isErr(): + retFuture.fail(newException(AsyncError, osErrorMsg(res.error()))) + else: + if returnFlag == TRUE: + retFuture.complete(WaitableResult.Timeout) + else: + retFuture.complete(WaitableResult.Ok) + + proc cancellation(udata: pointer) {.gcsafe.} = + doAssert(not(isNil(waitHandle))) + if not(retFuture.finished()): + discard closeWaitable(waitHandle) + + let wres = uint32(waitForSingleObject(handle, DWORD(0))) + if wres == WAIT_OBJECT_0: + retFuture.complete(WaitableResult.Ok) + return retFuture + elif wres == WAIT_ABANDONED: + retFuture.fail(newException(AsyncError, "Handle was abandoned")) + return retFuture + elif wres == WAIT_FAILED: + retFuture.fail(newException(AsyncError, osErrorMsg(osLastError()))) + return retFuture + + if timeout == ZeroDuration: + retFuture.complete(WaitableResult.Timeout) + return retFuture + + waitHandle = + block: + let res = registerWaitable(handle, flags, timeout, continuation, nil) + if res.isErr(): + retFuture.fail(newException(AsyncError, osErrorMsg(res.error()))) + return retFuture + res.get() + + retFuture.cancelCallback = cancellation + return retFuture + # Perform global per-module initialization. globalInit() diff --git a/chronos/asyncproc.nim b/chronos/asyncproc.nim new file mode 100644 index 000000000..6e9858f9c --- /dev/null +++ b/chronos/asyncproc.nim @@ -0,0 +1,1311 @@ +# +# Chronos' asynchronous process management +# +# (c) Copyright 2022-Present Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} + {.pragma: apforward, gcsafe, raises: [Defect].} +else: + {.push raises: [].} + {.pragma: apforward, gcsafe, raises: [].} + +import std/strtabs +import "."/[config, asyncloop, handles, osdefs, osutils, oserrno], + streams/asyncstream +import stew/[results, byteutils] +from std/os import quoteShell, quoteShellWindows, quoteShellPosix, envPairs + +export strtabs, results +export quoteShell, quoteShellWindows, quoteShellPosix, envPairs + +const + AsyncProcessTrackerName* = "async.process" + ## AsyncProcess leaks tracker name + + + +type + AsyncProcessError* = object of CatchableError + + AsyncProcessResult*[T] = Result[T, OSErrorCode] + + AsyncProcessOption* {.pure.} = enum + UsePath, + EvalCommand, + StdErrToStdOut, + ProcessGroup + + StandardKind {.pure.} = enum + Stdin, Stdout, Stderr + + ProcessFlag {.pure.} = enum + UserStdin, UserStdout, UserStderr, + AutoStdin, AutoStdout, AutoStderr, + NoStdin, NoStdout, NoStderr, + CopyStdout + + ProcessStreamHandleKind {.pure.} = enum + None, Auto, ProcHandle, Transport, StreamReader, StreamWriter + + ProcessStreamHandle* = object + case kind: ProcessStreamHandleKind + of ProcessStreamHandleKind.None: + discard + of ProcessStreamHandleKind.Auto: + discard + of ProcessStreamHandleKind.ProcHandle: + handle: AsyncFD + of ProcessStreamHandleKind.Transport: + transp: StreamTransport + of ProcessStreamHandleKind.StreamReader: + reader: AsyncStreamReader + of ProcessStreamHandleKind.StreamWriter: + writer: AsyncStreamWriter + + StreamHolderFlag {.pure.} = enum 
+ Transport, Stream + + StreamKind {.pure.} = enum + None, Reader, Writer + + AsyncStreamHolder = object + case kind: StreamKind + of StreamKind.Reader: + reader: AsyncStreamReader + of StreamKind.Writer: + writer: AsyncStreamWriter + of StreamKind.None: + discard + flags: set[StreamHolderFlag] + + AsyncProcessPipes = object + flags: set[ProcessFlag] + stdinHolder: AsyncStreamHolder + stdoutHolder: AsyncStreamHolder + stderrHolder: AsyncStreamHolder + stdinHandle: AsyncFD + stdoutHandle: AsyncFD + stderrHandle: AsyncFD + + AsyncProcess* = object + + AsyncProcessImpl = object of RootObj + when defined(windows): + processHandle: HANDLE + threadHandle: HANDLE + processId: DWORD + else: + processId: Pid + pipes: AsyncProcessPipes + exitStatus: Opt[int] + flags: set[ProcessFlag] + options: set[AsyncProcessOption] + + AsyncProcessRef* = ref AsyncProcessImpl + + CommandExResponse* = object + stdOutput*: string + stdError*: string + status*: int + + AsyncProcessTracker* = ref object of TrackerBase + opened*: int64 + closed*: int64 + +template Pipe*(t: typedesc[AsyncProcess]): ProcessStreamHandle = + ProcessStreamHandle(kind: ProcessStreamHandleKind.Auto) + +proc setupAsyncProcessTracker(): AsyncProcessTracker {.gcsafe.} + +proc getAsyncProcessTracker(): AsyncProcessTracker {.inline.} = + var res = cast[AsyncProcessTracker](getTracker(AsyncProcessTrackerName)) + if isNil(res): + res = setupAsyncProcessTracker() + res + +proc dumpAsyncProcessTracking(): string {.gcsafe.} = + var tracker = getAsyncProcessTracker() + let res = "Started async processes: " & $tracker.opened & "\n" & + "Closed async processes: " & $tracker.closed + res + +proc leakAsyncProccessTracker(): bool {.gcsafe.} = + var tracker = getAsyncProcessTracker() + tracker.opened != tracker.closed + +proc trackAsyncProccess(t: AsyncProcessRef) {.inline.} = + var tracker = getAsyncProcessTracker() + inc(tracker.opened) + +proc untrackAsyncProcess(t: AsyncProcessRef) {.inline.} = + var tracker = getAsyncProcessTracker() + inc(tracker.closed) + +proc setupAsyncProcessTracker(): AsyncProcessTracker {.gcsafe.} = + var res = AsyncProcessTracker( + opened: 0, + closed: 0, + dump: dumpAsyncProcessTracking, + isLeaked: leakAsyncProccessTracker + ) + addTracker(AsyncProcessTrackerName, res) + res + +proc init*(t: typedesc[AsyncFD], handle: ProcessStreamHandle): AsyncFD = + case handle.kind + of ProcessStreamHandleKind.ProcHandle: + handle.handle + of ProcessStreamHandleKind.Transport: + handle.transp.fd + of ProcessStreamHandleKind.StreamReader: + doAssert(not(isNil(handle.reader.tsource))) + handle.reader.tsource.fd + of ProcessStreamHandleKind.StreamWriter: + doAssert(not(isNil(handle.writer.tsource))) + handle.writer.tsource.fd + of ProcessStreamHandleKind.Auto: + raiseAssert "ProcessStreamHandle could not be auto at this moment" + of ProcessStreamHandleKind.None: + raiseAssert "ProcessStreamHandle could not be empty at this moment" + +proc init*(t: typedesc[AsyncStreamHolder], handle: AsyncStreamReader, + baseFlags: set[StreamHolderFlag] = {}): AsyncStreamHolder = + AsyncStreamHolder(kind: StreamKind.Reader, reader: handle, flags: baseFlags) + +proc init*(t: typedesc[AsyncStreamHolder], handle: AsyncStreamWriter, + baseFlags: set[StreamHolderFlag] = {}): AsyncStreamHolder = + AsyncStreamHolder(kind: StreamKind.Writer, writer: handle, flags: baseFlags) + +proc init*(t: typedesc[AsyncStreamHolder]): AsyncStreamHolder = + AsyncStreamHolder(kind: StreamKind.None) + +proc init*(t: typedesc[AsyncStreamHolder], handle: ProcessStreamHandle, + kind: 
StreamKind, baseFlags: set[StreamHolderFlag] = {} + ): AsyncProcessResult[AsyncStreamHolder] = + case handle.kind + of ProcessStreamHandleKind.ProcHandle: + case kind + of StreamKind.Reader: + let + transp = ? fromPipe2(handle.handle) + reader = newAsyncStreamReader(transp) + flags = baseFlags + {StreamHolderFlag.Stream, + StreamHolderFlag.Transport} + ok(AsyncStreamHolder(kind: StreamKind.Reader, reader: reader, + flags: flags)) + of StreamKind.Writer: + let + transp = ? fromPipe2(handle.handle) + writer = newAsyncStreamWriter(transp) + flags = baseFlags + {StreamHolderFlag.Stream, + StreamHolderFlag.Transport} + ok(AsyncStreamHolder(kind: StreamKind.Writer, writer: writer, + flags: flags)) + of StreamKind.None: + ok(AsyncStreamHolder(kind: StreamKind.None)) + of ProcessStreamHandleKind.Transport: + case kind + of StreamKind.Reader: + let + reader = newAsyncStreamReader(handle.transp) + flags = baseFlags + {StreamHolderFlag.Stream} + ok(AsyncStreamHolder(kind: StreamKind.Reader, reader: reader, + flags: flags)) + of StreamKind.Writer: + let + writer = newAsyncStreamWriter(handle.transp) + flags = baseFlags + {StreamHolderFlag.Stream} + ok(AsyncStreamHolder(kind: StreamKind.Writer, writer: writer, + flags: flags)) + of StreamKind.None: + ok(AsyncStreamHolder(kind: StreamKind.None)) + of ProcessStreamHandleKind.StreamReader: + ok(AsyncStreamHolder(kind: StreamKind.Reader, reader: handle.reader, + flags: baseFlags)) + of ProcessStreamHandleKind.StreamWriter: + ok(AsyncStreamHolder(kind: StreamKind.Writer, writer: handle.writer, + flags: baseFlags)) + of ProcessStreamHandleKind.None, ProcessStreamHandleKind.Auto: + ok(AsyncStreamHolder(kind: StreamKind.None)) + +proc init*(t: typedesc[ProcessStreamHandle]): ProcessStreamHandle = + ProcessStreamHandle(kind: ProcessStreamHandleKind.None) + +proc init*(t: typedesc[ProcessStreamHandle], + handle: AsyncFD): ProcessStreamHandle = + ProcessStreamHandle(kind: ProcessStreamHandleKind.ProcHandle, handle: handle) + +proc init*(t: typedesc[ProcessStreamHandle], + transp: StreamTransport): ProcessStreamHandle = + doAssert(transp.kind == TransportKind.Pipe, + "Only pipe transports can be used as process streams") + ProcessStreamHandle(kind: ProcessStreamHandleKind.ProcHandle, transp: transp) + +proc init*(t: typedesc[ProcessStreamHandle], + reader: AsyncStreamReader): ProcessStreamHandle = + ProcessStreamHandle(kind: ProcessStreamHandleKind.StreamReader, + reader: reader) + +proc init*(t: typedesc[ProcessStreamHandle], + writer: AsyncStreamWriter): ProcessStreamHandle = + ProcessStreamHandle(kind: ProcessStreamHandleKind.StreamWriter, + writer: writer) + +proc isEmpty*(handle: ProcessStreamHandle): bool = + handle.kind == ProcessStreamHandleKind.None + +proc suspend*(p: AsyncProcessRef): AsyncProcessResult[void] {.apforward.} +proc resume*(p: AsyncProcessRef): AsyncProcessResult[void] {.apforward.} +proc terminate*(p: AsyncProcessRef): AsyncProcessResult[void] {.apforward.} +proc kill*(p: AsyncProcessRef): AsyncProcessResult[void] {.apforward.} +proc running*(p: AsyncProcessRef): AsyncProcessResult[bool] {.apforward.} +proc peekExitCode*(p: AsyncProcessRef): AsyncProcessResult[int] {.apforward.} +proc preparePipes(options: set[AsyncProcessOption], + stdinHandle, stdoutHandle, stderrHandle: ProcessStreamHandle + ): AsyncProcessResult[AsyncProcessPipes] {.apforward.} +proc closeProcessHandles(pipes: var AsyncProcessPipes, + options: set[AsyncProcessOption], + lastError: OSErrorCode): OSErrorCode {.apforward.} +proc closeProcessStreams(pipes: 
AsyncProcessPipes, + options: set[AsyncProcessOption]): Future[void] {. + apforward.} +proc closeWait(holder: AsyncStreamHolder): Future[void] {.apforward.} + +template isOk(code: OSErrorCode): bool = + when defined(windows): + code == ERROR_SUCCESS + else: + code == OSErrorCode(0) + +template closePipe(handle: AsyncFD): bool = + let fd = + when defined(windows): + osdefs.HANDLE(handle) + else: + cint(handle) + closeFd(fd) != -1 + +proc closeProcessHandles(pipes: var AsyncProcessPipes, + options: set[AsyncProcessOption], + lastError: OSErrorCode): OSErrorCode = + # We trying to preserve error code of last failed operation. + var currentError = lastError + + if ProcessFlag.AutoStdin in pipes.flags: + if pipes.stdinHandle != asyncInvalidPipe: + if currentError.isOk(): + if not(closePipe(pipes.stdinHandle)): + currentError = osLastError() + else: + discard closePipe(pipes.stdinHandle) + pipes.stdinHandle = asyncInvalidPipe + + if ProcessFlag.AutoStdout in pipes.flags: + if pipes.stdoutHandle != asyncInvalidPipe: + if currentError.isOk(): + if not(closePipe(pipes.stdoutHandle)): + currentError = osLastError() + else: + discard closePipe(pipes.stdoutHandle) + pipes.stdoutHandle = asyncInvalidPipe + + if ProcessFlag.AutoStderr in pipes.flags: + if pipes.stderrHandle != asyncInvalidPipe: + if currentError.isOk(): + if not(closePipe(pipes.stderrHandle)): + currentError = osLastError() + else: + discard closePipe(pipes.stderrHandle) + pipes.stderrHandle = asyncInvalidPipe + + currentError + +template pipesPresent*(pipes: AsyncProcessPipes): bool = + let mask = {ProcessFlag.AutoStdin, ProcessFlag.AutoStdout, + ProcessFlag.AutoStderr,ProcessFlag.UserStdin, + ProcessFlag.UserStdout, ProcessFlag.UserStderr} + pipes.flags * mask != {} + +proc raiseAsyncProcessError(msg: string, exc: ref CatchableError = nil) {. + noreturn, noinit, noinline, raises: [AsyncProcessError].} = + let message = + if isNil(exc): + msg + else: + msg & " ([" & $exc.name & "]: " & $exc.msg & ")" + raise newException(AsyncProcessError, message) + +proc raiseAsyncProcessError(msg: string, error: OSErrorCode|cint) {. 
+ noreturn, noinit, noinline, raises: [AsyncProcessError].} = + when error is OSErrorCode: + let message = msg & " ([OSError]: " & osErrorMsg(error) & ")" + else: + let message = msg & " ([OSError]: " & osErrorMsg(OSErrorCode(error)) & ")" + raise newException(AsyncProcessError, message) + +when defined(windows): + + proc getStdinHandle(pipes: AsyncProcessPipes): HANDLE = + if pipes.flags * {ProcessFlag.AutoStdin, ProcessFlag.UserStdin} != {}: + HANDLE(pipes.stdinHandle) + else: + getStdHandle(STD_INPUT_HANDLE) + + proc getStdoutHandle(pipes: AsyncProcessPipes): HANDLE = + if pipes.flags * {ProcessFlag.AutoStdout, ProcessFlag.UserStdout} != {}: + HANDLE(pipes.stdoutHandle) + else: + getStdHandle(STD_OUTPUT_HANDLE) + + proc getStderrHandle(pipes: AsyncProcessPipes): HANDLE = + if pipes.flags * {ProcessFlag.AutoStderr, ProcessFlag.UserStderr, + ProcessFlag.CopyStdout} != {}: + HANDLE(pipes.stderrHandle) + else: + getStdHandle(STD_ERROR_HANDLE) + + proc getProcessEnvironment*(): StringTableRef = + var res = newStringTable(modeCaseInsensitive) + var env = getEnvironmentStringsW() + if isNil(env): + return res + var slider = env + while int(slider[]) != 0: + let pos = wcschr(slider, WCHAR(0x0000)) + let line = slider.toString().valueOr("") + slider = cast[LPWSTR](cast[uint](pos) + uint(sizeof(WCHAR))) + if len(line) > 0: + let delim = line.find('=') + if delim > 0: + res[substr(line, 0, delim - 1)] = substr(line, delim + 1) + discard freeEnvironmentStringsW(env) + res + + proc buildCommandLine(a: string, args: openArray[string]): string = + # TODO: Procedures quoteShell/(Windows, Posix)() needs security and bug review + # or reimplementation, for example quoteShellWindows() do not handle `\` + # properly. + # https://docs.microsoft.com/en-us/cpp/cpp/main-function-command-line-args?redirectedfrom=MSDN&view=msvc-170#parsing-c-command-line-arguments + var res = quoteShell(a) + for i in 0 ..< len(args): + res.add(' ') + res.add(quoteShell(args[i])) + res + + proc buildEnvironment(env: StringTableRef): Result[LPWSTR, OSErrorCode] = + var str: string + for key, value in pairs(env): + doAssert('=' notin key, "`=` must not be present in key name") + str.add(key) + str.add('=') + str.add(value) + str.add('\x00') + str.add("\x00\x00") + toWideString(str) + + proc closeThreadAndProcessHandle(p: AsyncProcessRef + ): AsyncProcessResult[void] = + if p.threadHandle != HANDLE(0): + if closeHandle(p.threadHandle) == FALSE: + discard closeHandle(p.processHandle) + return err(osLastError()) + p.threadHandle = HANDLE(0) + + if p.processHandle != HANDLE(0): + if closeHandle(p.processHandle) == FALSE: + return err(osLastError()) + p.processHandle = HANDLE(0) + + proc startProcess*(command: string, workingDir: string = "", + arguments: seq[string] = @[], + environment: StringTableRef = nil, + options: set[AsyncProcessOption] = {}, + stdinHandle = ProcessStreamHandle(), + stdoutHandle = ProcessStreamHandle(), + stderrHandle = ProcessStreamHandle(), + ): Future[AsyncProcessRef] {.async.} = + var + pipes = preparePipes(options, stdinHandle, stdoutHandle, + stderrHandle).valueOr: + raiseAsyncProcessError("Unable to initialze process pipes", error) + + let + commandLine = + if AsyncProcessOption.EvalCommand in options: + chronosProcShell & " /C " & command + else: + buildCommandLine(command, arguments) + workingDirectory = + if len(workingDir) > 0: + workingDir.toWideString().valueOr: + raiseAsyncProcessError("Unable to proceed working directory path", + error) + else: + nil + environment = + if not(isNil(environment)): + 
buildEnvironment(environment).valueOr: + raiseAsyncProcessError("Unable to build child process environment", + error) + else: + nil + flags = CREATE_UNICODE_ENVIRONMENT + var + psa = getSecurityAttributes(false) + tsa = getSecurityAttributes(false) + startupInfo = + block: + var res = STARTUPINFO(cb: DWORD(sizeof(STARTUPINFO))) + if pipes.pipesPresent(): + res.dwFlags = STARTF_USESTDHANDLES + res.hStdInput = pipes.getStdinHandle() + res.hStdOutput = pipes.getStdoutHandle() + res.hStdError = pipes.getStderrHandle() + res + procInfo = PROCESS_INFORMATION() + + let wideCommandLine = commandLine.toWideString().valueOr: + raiseAsyncProcessError("Unable to proceed command line", error) + + let res = createProcess( + nil, + wideCommandLine, + addr psa, addr tsa, + TRUE, # NOTE: This is very important flag and MUST not be modified. + # All overloaded pipe handles will not work if this flag will be + # set to FALSE. + flags, + environment, + workingDirectory, + startupInfo, procInfo + ) + + if(not(isNil(environment))): + free(environment) + free(wideCommandLine) + + var currentError = osLastError() + if res == FALSE: + await pipes.closeProcessStreams(options) + currentError = closeProcessHandles(pipes, options, currentError) + + if res == FALSE: + raiseAsyncProcessError("Unable to spawn process", currentError) + + let process = AsyncProcessRef( + processHandle: procInfo.hProcess, + threadHandle: procInfo.hThread, + processId: procInfo.dwProcessId, + pipes: pipes, + options: options, + flags: pipes.flags + ) + + trackAsyncProccess(process) + return process + + proc peekProcessExitCode(p: AsyncProcessRef): AsyncProcessResult[int] = + var wstatus: DWORD = 0 + if p.exitStatus.isSome(): + return ok(p.exitStatus.get()) + + let res = getExitCodeProcess(p.processHandle, wstatus) + if res == TRUE: + if wstatus != STILL_ACTIVE: + let status = int(wstatus) + p.exitStatus = Opt.some(status) + ok(status) + else: + ok(-1) + else: + err(osLastError()) + + proc suspend(p: AsyncProcessRef): AsyncProcessResult[void] = + if suspendThread(p.threadHandle) != 0xFFFF_FFFF'u32: + ok() + else: + err(osLastError()) + + proc resume(p: AsyncProcessRef): AsyncProcessResult[void] = + if resumeThread(p.threadHandle) != 0xFFFF_FFFF'u32: + ok() + else: + err(osLastError()) + + proc terminate(p: AsyncProcessRef): AsyncProcessResult[void] = + if terminateProcess(p.processHandle, 0) != 0'u32: + ok() + else: + err(osLastError()) + + proc kill(p: AsyncProcessRef): AsyncProcessResult[void] = + p.terminate() + + proc running(p: AsyncProcessRef): AsyncProcessResult[bool] = + let res = ? p.peekExitCode() + if res == -1: + ok(true) + else: + ok(false) + + proc waitForExit*(p: AsyncProcessRef, + timeout = InfiniteDuration): Future[int] {.async.} = + if p.exitStatus.isSome(): + return p.exitStatus.get() + + let wres = + try: + await waitForSingleObject(p.processHandle, timeout) + except ValueError as exc: + raiseAsyncProcessError("Unable to wait for process handle", exc) + + if wres == WaitableResult.Timeout: + let res = p.kill() + if res.isErr(): + raiseAsyncProcessError("Unable to terminate process", res.error()) + + let exitCode = p.peekProcessExitCode().valueOr: + raiseAsyncProcessError("Unable to peek process exit code", error) + + if exitCode >= 0: + p.exitStatus = Opt.some(exitCode) + return exitCode + + proc peekExitCode(p: AsyncProcessRef): AsyncProcessResult[int] = + if p.exitStatus.isSome(): + return ok(p.exitStatus.get()) + let res = waitForSingleObject(p.processHandle, DWORD(0)) + if res != WAIT_TIMEOUT: + let exitCode = ? 
p.peekProcessExitCode() + ok(exitCode) + else: + ok(-1) +else: + import std/strutils + + type + SpawnAttr = object + attrs: PosixSpawnAttr + actions: PosixSpawnFileActions + + proc fd(h: AsyncStreamHolder): cint = + case h.kind + of StreamKind.Reader: + cint(h.reader.tsource.fd) + of StreamKind.Writer: + cint(h.writer.tsource.fd) + of StreamKind.None: + raiseAssert "Incorrect stream holder" + + proc isEmpty(h: AsyncStreamHolder): bool = + h.kind == StreamKind.None + + proc initSpawn(pipes: AsyncProcessPipes, options: set[AsyncProcessOption] + ): Result[SpawnAttr, OSErrorCode] = + template doCheck(body: untyped): untyped = + let res = body + if res != 0: + return err(OSErrorCode(res)) + + var + attrs = + block: + var value: PosixSpawnAttr + let res = posixSpawnAttrInit(value) + if res != 0: + return err(OSErrorCode(res)) + value + actions = + block: + var value: PosixSpawnFileActions + let res = posixSpawnFileActionsInit(value) + if res != 0: + discard posixSpawnAttrDestroy(attrs) + return err(OSErrorCode(res)) + value + mask = + block: + var res: Sigset + discard sigemptyset(res) + res + + doCheck(posixSpawnAttrSetSigMask(attrs, mask)) + if AsyncProcessOption.ProcessGroup in options: + doCheck(posixSpawnAttrSetPgroup(attrs, 0)) + doCheck(posixSpawnAttrSetFlags(attrs, osdefs.POSIX_SPAWN_USEVFORK or + osdefs.POSIX_SPAWN_SETSIGMASK or + osdefs.POSIX_SPAWN_SETPGROUP)) + else: + doCheck(posixSpawnAttrSetFlags(attrs, osdefs.POSIX_SPAWN_USEVFORK or + osdefs.POSIX_SPAWN_SETSIGMASK)) + + if pipes.flags * {ProcessFlag.AutoStdin, ProcessFlag.UserStdin} != {}: + # Close child process STDIN. + doCheck(posixSpawnFileActionsAddClose(actions, cint(0))) + # Make a duplicate of `stdinHandle` as child process STDIN. + doCheck(posixSpawnFileActionsAddDup2(actions, cint(pipes.stdinHandle), + cint(0))) + # Close child process side of `stdinHandle`. + doCheck(posixSpawnFileActionsAddClose(actions, + cint(pipes.stdinHandle))) + # Close parent process side of `stdinHandle`. + if not(pipes.stdinHolder.isEmpty()): + let fd = cint(pipes.stdinHolder.fd()) + doCheck(posixSpawnFileActionsAddClose(actions, fd)) + + if pipes.flags * {ProcessFlag.AutoStdout, ProcessFlag.UserStdout} != {}: + # Close child process STDOUT. + doCheck(posixSpawnFileActionsAddClose(actions, cint(1))) + # Make a duplicate of `stdoutHandle` as child process STDOUT. + doCheck(posixSpawnFileActionsAddDup2(actions, cint(pipes.stdoutHandle), + cint(1))) + if AsyncProcessOption.StdErrToStdOut notin options: + # Close child process side of `stdoutHandle`. + doCheck(posixSpawnFileActionsAddClose(actions, + cint(pipes.stdoutHandle))) + # Close parent process side of `stdoutHandle`. + if not(pipes.stdoutHolder.isEmpty()): + let fd = cint(pipes.stdoutHolder.fd()) + doCheck(posixSpawnFileActionsAddClose(actions, fd)) + + if pipes.flags * {ProcessFlag.AutoStderr, ProcessFlag.UserStderr} != {}: + # Close child process STDERR. + doCheck(posixSpawnFileActionsAddClose(actions, cint(2))) + # Make a duplicate of `stderrHandle` as child process STDERR. + doCheck(posixSpawnFileActionsAddDup2(actions, cint(pipes.stderrHandle), + cint(2))) + # Close child process side of `stderrHandle`. + doCheck(posixSpawnFileActionsAddClose(actions, + cint(pipes.stderrHandle))) + # Close parent process side of `stderrHandle`. + if not(pipes.stderrHolder.isEmpty()): + let fd = cint(pipes.stderrHolder.fd()) + doCheck(posixSpawnFileActionsAddClose(actions, fd)) + else: + if AsyncProcessOption.StdErrToStdOut in options: + # Close child process STDERR. 
+ doCheck(posixSpawnFileActionsAddClose(actions, cint(2))) + # Make a duplicate of `stdoutHandle` as child process STDERR. + doCheck(posixSpawnFileActionsAddDup2(actions, cint(pipes.stdoutHandle), + cint(2))) + # Close child process side of `stdoutHandle`. + doCheck(posixSpawnFileActionsAddClose(actions, + cint(pipes.stdoutHandle))) + # Close parent process side of `stdoutHandle`. + if not(pipes.stdoutHolder.isEmpty()): + let fd = cint(pipes.stdoutHolder.fd()) + doCheck(posixSpawnFileActionsAddClose(actions, fd)) + ok(SpawnAttr(attrs: attrs, actions: actions)) + + proc free(v: var SpawnAttr): Result[void, OSErrorCode] = + block: + let res = posixSpawnAttrDestroy(v.attrs) + if res != 0: + discard posixSpawnFileActionsDestroy(v.actions) + return err(OSErrorCode(res)) + block: + let res = posixSpawnFileActionsDestroy(v.actions) + if res != 0: + return err(OSErrorCode(res)) + ok() + + proc getKeyValueItem(key: string, value: string): cstring = + var p = cast[cstring](alloc(len(key) + len(value) + 1 + 1)) + var offset = 0 + if len(key) > 0: + copyMem(addr p[offset], unsafeAddr(key[0]), len(key)) + inc(offset, len(key)) + p[offset] = '=' + inc(offset) + if len(value) > 0: + copyMem(addr p[offset], unsafeAddr(value[0]), len(value)) + inc(offset, len(value)) + p[offset] = '\x00' + p + + proc envToCStringArray(t: StringTableRef): cstringArray = + let itemsCount = len(t) + var + res = cast[cstringArray](alloc((itemsCount + 1) * sizeof(cstring))) + i = 0 + for key, value in pairs(t): + res[i] = getKeyValueItem(key, value) + inc(i) + res[i] = nil # Last item in CStringArray should be `nil`. + res + + proc envToCStringArray(): cstringArray = + let itemsCount = + block: + var res = 0 + for key, value in envPairs(): inc(res) + res + var + res = cast[cstringArray](alloc((itemsCount + 1) * sizeof(cstring))) + i = 0 + for key, value in envPairs(): + res[i] = getKeyValueItem(key, value) + inc(i) + res[i] = nil # Last item in CStringArray should be `nil`. + res + + when defined(macosx) or defined(macos) or defined(ios): + proc getEnvironment(): ptr cstringArray {. + importc: "_NSGetEnviron", header: "".} + else: + var globalEnv {.importc: "environ", header: "".}: cstringArray + + proc getProcessEnvironment*(): StringTableRef = + var res = newStringTable(modeCaseInsensitive) + let env = + when defined(macosx) or defined(macos) or defined(ios): + getEnvironment()[] + else: + globalEnv + var i = 0 + while not(isNil(env[i])): + let line = $env[i] + if len(line) > 0: + let delim = line.find('=') + if delim > 0: + res[substr(line, 0, delim - 1)] = substr(line, delim + 1) + inc(i) + res + + func exitStatusLikeShell(status: int): int = + if WAITIFSIGNALED(cint(status)): + # like the shell! 
+      128 + WAITTERMSIG(cint(status))
+    else:
+      WAITEXITSTATUS(cint(status))
+
+  proc getCurrentDirectory(): AsyncProcessResult[string] =
+    var bufsize = 1024
+    var res = newString(bufsize)
+
+    proc strLength(a: string): int {.nimcall.} =
+      for i in 0 ..< len(a):
+        if a[i] == '\x00':
+          return i
+      len(a)
+
+    while true:
+      if osdefs.getcwd(cstring(res), bufsize) != nil:
+        setLen(res, strLength(res))
+        return ok(res)
+      else:
+        let errorCode = osLastError()
+        if errorCode == oserrno.ERANGE:
+          bufsize = bufsize shl 1
+          doAssert(bufsize >= 0)
+          res = newString(bufsize)
+        else:
+          return err(errorCode)
+
+  proc setCurrentDirectory(dir: string): AsyncProcessResult[void] =
+    let res = osdefs.chdir(cstring(dir))
+    if res == -1:
+      return err(osLastError())
+    ok()
+
+  proc closeThreadAndProcessHandle(p: AsyncProcessRef
+                                  ): AsyncProcessResult[void] =
+    discard
+
+  proc startProcess*(command: string, workingDir: string = "",
+                     arguments: seq[string] = @[],
+                     environment: StringTableRef = nil,
+                     options: set[AsyncProcessOption] = {},
+                     stdinHandle = ProcessStreamHandle(),
+                     stdoutHandle = ProcessStreamHandle(),
+                     stderrHandle = ProcessStreamHandle(),
+                    ): Future[AsyncProcessRef] {.async.} =
+    var
+      pid: Pid
+      pipes = preparePipes(options, stdinHandle, stdoutHandle,
+                           stderrHandle).valueOr:
+        raiseAsyncProcessError("Unable to initialize process pipes",
+                               error)
+      sa = pipes.initSpawn(options).valueOr:
+        discard closeProcessHandles(pipes, options, OSErrorCode(0))
+        await pipes.closeProcessStreams(options)
+        raiseAsyncProcessError("Unable to initialize spawn attributes", 0)
+
+    let
+      (commandLine, commandArguments) =
+        if AsyncProcessOption.EvalCommand in options:
+          let args = @[chronosProcShell, "-c", command]
+          (chronosProcShell, allocCStringArray(args))
+        else:
+          var res = @[command]
+          for arg in arguments.items():
+            res.add(arg)
+          (command, allocCStringArray(res))
+      commandEnv =
+        if isNil(environment):
+          envToCStringArray()
+        else:
+          envToCStringArray(environment)
+
+    var currentError: OSErrorCode
+    var currentDir: string
+
+    try:
+      currentDir =
+        if len(workingDir) > 0:
+          # Save current working directory and change it to `workingDir`.
+          let cres = getCurrentDirectory()
+          if cres.isErr():
+            raiseAsyncProcessError("Unable to obtain current directory",
+                                   cres.error())
+          let sres = setCurrentDirectory(workingDir)
+          if sres.isErr():
+            raiseAsyncProcessError("Unable to change current directory",
+                                   sres.error())
+          cres.get()
+        else:
+          ""
+
+      let res =
+        if AsyncProcessOption.UsePath in options:
+          posixSpawnp(pid, cstring(commandLine), sa.actions, sa.attrs,
+                      commandArguments, commandEnv)
+        else:
+          posixSpawn(pid, cstring(commandLine), sa.actions, sa.attrs,
+                     commandArguments, commandEnv)
+
+      if res != 0:
+        await pipes.closeProcessStreams(options)
+        currentError = closeProcessHandles(pipes, options, OSErrorCode(res))
+
+    finally:
+      # Restore working directory
+      if (len(workingDir) > 0) and (len(currentDir) > 0):
+        # Restore working directory.
+        let cres = getCurrentDirectory()
+        if cres.isErr():
+          # On error we still try to restore the original working directory.
+ if currentError.isOk(): + currentError = cres.error() + discard setCurrentDirectory(currentDir) + else: + if cres.get() != currentDir: + let sres = setCurrentDirectory(currentDir) + if sres.isErr(): + if currentError.isOk(): + currentError = sres.error() + + # Cleanup allocated memory + deallocCStringArray(commandArguments) + deallocCStringArray(commandEnv) + + # Cleanup posix_spawn attributes and file operations + if not(currentError.isOk()): + discard sa.free() + else: + let res = sa.free() + if res.isErr(): + currentError = res.error() + + # If currentError has been set, raising an exception. + if not(currentError.isOk()): + raiseAsyncProcessError("Unable to spawn process", currentError) + + let process = AsyncProcessRef( + processId: pid, + pipes: pipes, + options: options, + flags: pipes.flags + ) + + trackAsyncProccess(process) + return process + + proc peekProcessExitCode(p: AsyncProcessRef, + reap = false): AsyncProcessResult[int] = + var wstatus: cint = 0 + if p.exitStatus.isSome(): + return ok(p.exitStatus.get()) + let + flags = if reap: cint(0) else: osdefs.WNOHANG + waitRes = + block: + var res: cint = 0 + while true: + res = osdefs.waitpid(p.processId, wstatus, flags) + if not((res == -1) and (osLastError() == oserrno.EINTR)): + break + res + if waitRes == p.processId: + if WAITIFEXITED(wstatus) or WAITIFSIGNALED(wstatus): + let status = int(wstatus) + p.exitStatus = Opt.some(status) + ok(status) + else: + ok(-1) + elif waitRes == 0: + ok(-1) + else: + err(osLastError()) + + proc suspend(p: AsyncProcessRef): AsyncProcessResult[void] = + if osdefs.kill(p.processId, osdefs.SIGSTOP) == 0: + ok() + else: + err(osLastError()) + + proc resume(p: AsyncProcessRef): AsyncProcessResult[void] = + if osdefs.kill(p.processId, osdefs.SIGCONT) == 0: + ok() + else: + err(osLastError()) + + proc terminate(p: AsyncProcessRef): AsyncProcessResult[void] = + if osdefs.kill(p.processId, osdefs.SIGTERM) == 0: + ok() + else: + err(osLastError()) + + proc kill(p: AsyncProcessRef): AsyncProcessResult[void] = + if osdefs.kill(p.processId, osdefs.SIGKILL) == 0: + ok() + else: + err(osLastError()) + + proc running(p: AsyncProcessRef): AsyncProcessResult[bool] = + let res = ? p.peekProcessExitCode() + if res == -1: + ok(true) + else: + ok(false) + + proc waitForExit*(p: AsyncProcessRef, + timeout = InfiniteDuration): Future[int] = + var + retFuture = newFuture[int]("chronos.waitForExit()") + processHandle: ProcessHandle + timer: TimerCallback = nil + + if p.exitStatus.isSome(): + retFuture.complete(p.exitStatus.get()) + return retFuture + + if timeout == ZeroDuration: + let res = p.kill() + if res.isErr(): + retFuture.fail(newException(AsyncProcessError, osErrorMsg(res.error()))) + return retFuture + + block: + let exitCode = p.peekProcessExitCode().valueOr: + retFuture.fail(newException(AsyncProcessError, osErrorMsg(error))) + return retFuture + if exitCode != -1: + retFuture.complete(exitStatusLikeShell(exitCode)) + return retFuture + + if timeout == ZeroDuration: + retFuture.complete(-1) + return retFuture + + proc continuation(udata: pointer) {.gcsafe.} = + let source = cast[int](udata) + if not(retFuture.finished()): + if source == 1: + # Process exited. 
+          let res = removeProcess2(processHandle)
+          if res.isErr():
+            retFuture.fail(newException(AsyncProcessError,
+                                        osErrorMsg(res.error())))
+            return
+          if not(isNil(timer)):
+            clearTimer(timer)
+          let exitCode = p.peekProcessExitCode().valueOr:
+            retFuture.fail(newException(AsyncProcessError, osErrorMsg(error)))
+            return
+          if exitCode == -1:
+            retFuture.complete(-1)
+          else:
+            retFuture.complete(exitStatusLikeShell(exitCode))
+        else:
+          # Timeout exceeded.
+          let res = p.kill()
+          if res.isErr():
+            retFuture.fail(newException(AsyncProcessError,
+                                        osErrorMsg(res.error())))
+
+    proc cancellation(udata: pointer) {.gcsafe.} =
+      if not(retFuture.finished()):
+        if not(isNil(timer)):
+          clearTimer(timer)
+        # Ignore any errors because of cancellation.
+        discard removeProcess2(processHandle)
+
+    if timeout != InfiniteDuration:
+      timer = setTimer(Moment.fromNow(timeout), continuation, cast[pointer](2))
+
+    processHandle = addProcess2(int(p.processId), continuation,
+                                cast[pointer](1)).valueOr:
+      if error == oserrno.ESRCH:
+        # "Zombie death race" problem: if the process exited right after
+        # `waitpid()`, the `kqueue` call could return an ESRCH error, so we
+        # need to handle it properly and try to reap the exit code from the
+        # exiting process.
+        let exitCode = p.peekProcessExitCode(true).valueOr:
+          retFuture.fail(newException(AsyncProcessError, osErrorMsg(error)))
+          return retFuture
+        if exitCode == -1:
+          # This should not happen a second time, so we just report the
+          # original error.
+          retFuture.fail(newException(AsyncProcessError,
+                                      osErrorMsg(oserrno.ESRCH)))
+        else:
+          retFuture.complete(exitStatusLikeShell(exitCode))
+      else:
+        retFuture.fail(newException(AsyncProcessError, osErrorMsg(error)))
+      return retFuture
+
+    # addProcess2() has a race condition inside: it is possible that the child
+    # process we are going to wait for sends SIGCHLD right after addProcess2()
+    # blocks signals and before it starts monitoring for the signal
+    # (`signalfd` or `kqueue`). To avoid this problem we check the process for
+    # completion one more time.
+    block:
+      let exitCode = p.peekProcessExitCode().valueOr:
+        discard removeProcess2(processHandle)
+        retFuture.fail(newException(AsyncProcessError, osErrorMsg(error)))
+        return retFuture
+      if exitCode != -1:
+        discard removeProcess2(processHandle)
+        retFuture.complete(exitStatusLikeShell(exitCode))
+        return retFuture
+
+    # Process is still running, so we are going to wait for SIGCHLD.
+    retFuture.cancelCallback = cancellation
+    return retFuture
+
+  proc peekExitCode(p: AsyncProcessRef): AsyncProcessResult[int] =
+    let res = ? p.peekProcessExitCode()
+    ok(exitStatusLikeShell(res))
+
+proc createPipe(kind: StandardKind
+               ): Result[tuple[read: AsyncFD, write: AsyncFD], OSErrorCode] =
+  case kind
+  of StandardKind.Stdin:
+    let pipes =
+      when defined(windows):
+        let
+          readFlags: set[DescriptorFlag] = {DescriptorFlag.NonBlock}
+          writeFlags: set[DescriptorFlag] = {DescriptorFlag.NonBlock}
+        ? createOsPipe(readFlags, writeFlags)
+      else:
+        let
+          readFlags: set[DescriptorFlag] = {}
+          writeFlags: set[DescriptorFlag] = {DescriptorFlag.NonBlock}
+        ? createOsPipe(readFlags, writeFlags)
+    ok((read: AsyncFD(pipes.read), write: AsyncFD(pipes.write)))
+  of StandardKind.Stdout, StandardKind.Stderr:
+    let pipes =
+      when defined(windows):
+        let
+          readFlags: set[DescriptorFlag] = {DescriptorFlag.NonBlock}
+          writeFlags: set[DescriptorFlag] = {DescriptorFlag.NonBlock}
+        ? createOsPipe(readFlags, writeFlags)
+      else:
+        let
+          readFlags: set[DescriptorFlag] = {DescriptorFlag.NonBlock}
+          writeFlags: set[DescriptorFlag] = {}
+        ? createOsPipe(readFlags, writeFlags)
+    ok((read: AsyncFD(pipes.read), write: AsyncFD(pipes.write)))
+
+proc preparePipes(options: set[AsyncProcessOption],
+                  stdinHandle, stdoutHandle,
+                  stderrHandle: ProcessStreamHandle
+                 ): AsyncProcessResult[AsyncProcessPipes] =
+
+  let
+    (stdinFlags, localStdin, remoteStdin) =
+      case stdinHandle.kind
+      of ProcessStreamHandleKind.None:
+        ({ProcessFlag.NoStdin}, AsyncStreamHolder.init(),
+         asyncInvalidPipe)
+      of ProcessStreamHandleKind.Auto:
+        let (pipeIn, pipeOut) = ? createPipe(StandardKind.Stdin)
+        let holder = ? AsyncStreamHolder.init(
+          ProcessStreamHandle.init(pipeOut), StreamKind.Writer, {})
+        ({ProcessFlag.AutoStdin}, holder, pipeIn)
+      else:
+        ({ProcessFlag.UserStdin},
+         AsyncStreamHolder.init(), AsyncFD.init(stdinHandle))
+    (stdoutFlags, localStdout, remoteStdout) =
+      case stdoutHandle.kind
+      of ProcessStreamHandleKind.None:
+        ({ProcessFlag.NoStdout}, AsyncStreamHolder.init(),
+         asyncInvalidPipe)
+      of ProcessStreamHandleKind.Auto:
+        let (pipeIn, pipeOut) = ? createPipe(StandardKind.Stdout)
+        let holder = ? AsyncStreamHolder.init(
+          ProcessStreamHandle.init(pipeIn), StreamKind.Reader, {})
+        ({ProcessFlag.AutoStdout}, holder, pipeOut)
+      else:
+        ({ProcessFlag.UserStdout},
+         AsyncStreamHolder.init(), AsyncFD.init(stdoutHandle))
+    (stderrFlags, localStderr, remoteStderr) =
+      if AsyncProcessOption.StdErrToStdOut in options:
+        doAssert(stderrHandle.isEmpty(),
+                 "`stderrHandle` argument must not be set when " &
+                 "`AsyncProcessOption.StdErrToStdOut` flag is used")
+        case stdoutHandle.kind
+        of ProcessStreamHandleKind.None:
+          raiseAssert "`stdoutHandle` argument must be present when " &
+                      "`AsyncProcessOption.StdErrToStdOut` flag is used"
+        of ProcessStreamHandleKind.Auto:
+          ({ProcessFlag.CopyStdout}, localStdout, remoteStdout)
+        else:
+          ({ProcessFlag.CopyStdout}, localStdout, remoteStdout)
+      else:
+        case stderrHandle.kind
+        of ProcessStreamHandleKind.None:
+          ({ProcessFlag.NoStderr}, AsyncStreamHolder.init(),
+           asyncInvalidPipe)
+        of ProcessStreamHandleKind.Auto:
+          let (pipeIn, pipeOut) = ? createPipe(StandardKind.Stderr)
+          let holder = ? AsyncStreamHolder.init(
+            ProcessStreamHandle.init(pipeIn), StreamKind.Reader, {})
+          ({ProcessFlag.AutoStderr}, holder, pipeOut)
+        else:
+          ({ProcessFlag.UserStderr},
+           AsyncStreamHolder.init(), AsyncFD.init(stderrHandle))
+
+  ok(AsyncProcessPipes(
+    flags: stdinFlags + stdoutFlags + stderrFlags,
+    stdinHolder: localStdin,
+    stdoutHolder: localStdout,
+    stderrHolder: localStderr,
+    stdinHandle: remoteStdin,
+    stdoutHandle: remoteStdout,
+    stderrHandle: remoteStderr
+  ))
+
+proc closeWait(holder: AsyncStreamHolder) {.async.} =
+  let (future, transp) =
+    case holder.kind
+    of StreamKind.None:
+      (nil, nil)
+    of StreamKind.Reader:
+      if StreamHolderFlag.Stream in holder.flags:
+        (holder.reader.closeWait(), holder.reader.tsource)
+      else:
+        (nil, holder.reader.tsource)
+    of StreamKind.Writer:
+      if StreamHolderFlag.Stream in holder.flags:
+        (holder.writer.closeWait(), holder.writer.tsource)
+      else:
+        (nil, holder.writer.tsource)
+
+  let pending =
+    block:
+      var res: seq[Future[void]]
+      if not(isNil(future)):
+        res.add(future)
+      if not(isNil(transp)):
+        if StreamHolderFlag.Transport in holder.flags:
+          res.add(transp.closeWait())
+      res
+
+  if len(pending) > 0:
+    await allFutures(pending)
+
+proc closeProcessStreams(pipes: AsyncProcessPipes,
+                         options: set[AsyncProcessOption]): Future[void] =
+  let pending =
+    block:
+      var res: seq[Future[void]]
+      if ProcessFlag.AutoStdin in pipes.flags:
+        res.add(pipes.stdinHolder.closeWait())
+      if ProcessFlag.AutoStdout in pipes.flags:
+        res.add(pipes.stdoutHolder.closeWait())
+      if ProcessFlag.AutoStderr in pipes.flags:
+        res.add(pipes.stderrHolder.closeWait())
+      res
+  allFutures(pending)
+
+proc closeWait*(p: AsyncProcessRef) {.async.} =
+  # Here we ignore all possible errors, because we do not want to raise
+  # exceptions.
+ discard closeProcessHandles(p.pipes, p.options, OSErrorCode(0)) + await p.pipes.closeProcessStreams(p.options) + discard p.closeThreadAndProcessHandle() + untrackAsyncProcess(p) + +proc stdinStream*(p: AsyncProcessRef): AsyncStreamWriter = + doAssert(p.pipes.stdinHolder.kind == StreamKind.Writer, + "StdinStreamWriter is not available") + p.pipes.stdinHolder.writer + +proc stdoutStream*(p: AsyncProcessRef): AsyncStreamReader = + doAssert(p.pipes.stdoutHolder.kind == StreamKind.Reader, + "StdoutStreamReader is not available") + p.pipes.stdoutHolder.reader + +proc stderrStream*(p: AsyncProcessRef): AsyncStreamReader = + doAssert(p.pipes.stderrHolder.kind == StreamKind.Reader, + "StderrStreamReader is not available") + p.pipes.stderrHolder.reader + +proc execCommand*(command: string, + options = {AsyncProcessOption.EvalCommand}, + timeout = InfiniteDuration + ): Future[int] {.async.} = + let poptions = options + {AsyncProcessOption.EvalCommand} + let process = await startProcess(command, options = poptions) + let res = + try: + await process.waitForExit(timeout) + finally: + await process.closeWait() + return res + +proc execCommandEx*(command: string, + options = {AsyncProcessOption.EvalCommand}, + timeout = InfiniteDuration + ): Future[CommandExResponse] {.async.} = + let + process = await startProcess(command, options = options, + stdoutHandle = AsyncProcess.Pipe, + stderrHandle = AsyncProcess.Pipe) + outputReader = process.stdoutStream.read() + errorReader = process.stderrStream.read() + res = + try: + await allFutures(outputReader, errorReader) + let + status = await process.waitForExit(timeout) + output = + try: + string.fromBytes(outputReader.read()) + except AsyncStreamError as exc: + raiseAsyncProcessError("Unable to read process' stdout channel", + exc) + error = + try: + string.fromBytes(errorReader.read()) + except AsyncStreamError as exc: + raiseAsyncProcessError("Unable to read process' stderr channel", + exc) + CommandExResponse(status: status, stdOutput: output, stdError: error) + finally: + await process.closeWait() + + return res + +proc pid*(p: AsyncProcessRef): int = + ## Returns process ``p`` identifier. + int(p.processId) + +template processId*(p: AsyncProcessRef): int = pid(p) diff --git a/chronos/config.nim b/chronos/config.nim index abc9c375f..cef8a63dc 100644 --- a/chronos/config.nim +++ b/chronos/config.nim @@ -14,10 +14,10 @@ when (NimMajor, NimMinor) >= (1, 4): const chronosStrictException* {.booldefine.}: bool = defined(chronosPreviewV4) - ## Require that `async` code raises only derivatives of `CatchableError` and - ## not `Exception` - forward declarations, methods and `proc` types used - ## from within `async` code may need to be be explicitly annotated with - ## `raises: [CatchableError]` when this mode is enabled. + ## Require that `async` code raises only derivatives of `CatchableError` + ## and not `Exception` - forward declarations, methods and `proc` types + ## used from within `async` code may need to be be explicitly annotated + ## with `raises: [CatchableError]` when this mode is enabled. 
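A minimal sketch (not part of the patch above) of what the `chronosStrictException` note means in practice, assuming chronos is built with `-d:chronosStrictException`; the `IntHandler` type and `double` proc are hypothetical names made up for the illustration:

import chronos

type
  # An explicit raises list keeps the callback usable from async code
  # under strict-exception mode.
  IntHandler = proc(x: int): int {.gcsafe, raises: [CatchableError].}

proc double(h: IntHandler): Future[int] {.async.} =
  # This call is accepted because `h` cannot raise anything outside of
  # CatchableError; an unannotated callback type would be rejected here.
  return h(21) * 2

echo waitFor double(proc(x: int): int = x + 1)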
chronosStackTrace* {.booldefine.}: bool = defined(chronosDebug) ## Include stack traces in futures for creation and completion points @@ -32,6 +32,21 @@ when (NimMajor, NimMinor) >= (1, 4): chronosDumpAsync* {.booldefine.}: bool = defined(nimDumpAsync) ## Print code generated by {.async.} transformation + + chronosProcShell* {.strdefine.}: string = + when defined(windows): + "cmd.exe" + else: + when defined(android): + "/system/bin/sh" + else: + "/bin/sh" + ## Default shell binary path. + ## + ## The shell is used as command for command line when process started + ## using `AsyncProcessOption.EvalCommand` and API calls such as + ## ``execCommand(command)`` and ``execCommandEx(command)``. + else: # 1.2 doesn't support `booldefine` in `when` properly const @@ -42,6 +57,14 @@ else: chronosFutureTracking*: bool = defined(chronosDebug) or defined(chronosFutureTracking) chronosDumpAsync*: bool = defined(nimDumpAsync) + chronosProcShell* {.strdefine.}: string = + when defined(windows): + "cmd.exe" + else: + when defined(android): + "/system/bin/sh" + else: + "/bin/sh" when defined(debug) or defined(chronosConfig): import std/macros @@ -55,3 +78,4 @@ when defined(debug) or defined(chronosConfig): printOption("chronosFutureId", chronosFutureId) printOption("chronosFutureTracking", chronosFutureTracking) printOption("chronosDumpAsync", chronosDumpAsync) + printOption("chronosProcShell", chronosProcShell) diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index d7bb868fa..971a9a931 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -827,7 +827,8 @@ elif defined(macos) or defined(macosx): unlink, listen, getaddrinfo, gai_strerror, getrlimit, setrlimit, getpid, pthread_sigmask, sigprocmask, sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, - signal, read, setsockopt, getsockopt, + signal, read, setsockopt, getsockopt, getcwd, chdir, + waitpid, kill, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, @@ -839,14 +840,16 @@ elif defined(macos) or defined(macosx): SIG_BLOCK, SIG_UNBLOCK, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, - SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, + SIGCONT export close, shutdown, socket, getpeername, getsockname, recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, getaddrinfo, gai_strerror, getrlimit, setrlimit, getpid, pthread_sigmask, sigprocmask, sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, - signal, read, setsockopt, getsockopt, + signal, read, setsockopt, getsockopt, getcwd, chdir, + waitpid, kill, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, @@ -858,7 +861,8 @@ elif defined(macos) or defined(macosx): SIG_BLOCK, SIG_UNBLOCK, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, - SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, + SIGCONT type MachTimebaseInfo* {.importc: "struct mach_timebase_info", @@ -882,7 +886,8 @@ elif defined(linux): getrlimit, setrlimit, getpeername, getsockname, recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, sendmsg, recvmsg, getpid, fcntl, - pthread_sigmask, clock_gettime, signal, + pthread_sigmask, clock_gettime, signal, getcwd, chdir, + waitpid, 
kill, ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, SigInfo, Id, Tmsghdr, IOVec, RLimit, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, @@ -895,7 +900,8 @@ elif defined(linux): SOCK_DGRAM, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, - SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, + SIGCONT export close, shutdown, sigemptyset, sigaddset, sigismember, sigdelset, write, read, waitid, getaddrinfo, @@ -903,7 +909,8 @@ elif defined(linux): getrlimit, setrlimit, getpeername, getsockname, recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, sendmsg, recvmsg, getpid, fcntl, - pthread_sigmask, clock_gettime, signal, + pthread_sigmask, clock_gettime, signal, getcwd, chdir, + waitpid, kill, ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, SigInfo, Id, Tmsghdr, IOVec, RLimit, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, @@ -916,7 +923,8 @@ elif defined(linux): SOCK_DGRAM, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, - SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, + SIGCONT when not defined(android) and defined(amd64): const IP_MULTICAST_TTL*: cint = 33 @@ -1012,6 +1020,7 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or setrlimit, getpid, pthread_sigmask, sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, clock_gettime, + getcwd, chdir, waitpid, kill, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, @@ -1023,7 +1032,8 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, - SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, + SIGCONT export close, shutdown, socket, getpeername, getsockname, recvfrom, sendto, send, bindSocket, recv, connect, @@ -1042,7 +1052,8 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, - SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, + SIGCONT var IP_MULTICAST_TTL* {.importc: "IP_MULTICAST_TTL", header: "".}: cint @@ -1081,15 +1092,28 @@ elif defined(macos) or defined(macosx): IPPROTO_TCP* = 6 when defined(linux): - const O_CLOEXEC* = 0x80000 + const + O_CLOEXEC* = 0x80000 + POSIX_SPAWN_USEVFORK* = 0x40 elif defined(freebsd): - const O_CLOEXEC* = 0x00100000 + const + O_CLOEXEC* = 0x00100000 + POSIX_SPAWN_USEVFORK* = 0x00 elif defined(openbsd): - const O_CLOEXEC* = 0x10000 + const + O_CLOEXEC* = 0x10000 + POSIX_SPAWN_USEVFORK* = 0x00 elif defined(netbsd): - const O_CLOEXEC* = 0x00400000 + const + O_CLOEXEC* = 0x00400000 + POSIX_SPAWN_USEVFORK* = 0x00 elif defined(dragonfly): - const O_CLOEXEC* = 0x00020000 + const + O_CLOEXEC* = 0x00020000 + POSIX_SPAWN_USEVFORK* = 0x00 +elif defined(macos) or defined(macosx): + const + POSIX_SPAWN_USEVFORK* = 0x00 when defined(linux) or defined(macos) or defined(macosx) or defined(freebsd) or defined(openbsd) or defined(netbsd) or defined(dragonfly): diff --git a/tests/testall.nim b/tests/testall.nim index 
c0713bf0d..eabe0a586 100644 --- a/tests/testall.nim +++ b/tests/testall.nim @@ -8,7 +8,7 @@ import testmacro, testsync, testsoon, testtime, testfut, testsignal, testaddress, testdatagram, teststream, testserver, testbugs, testnet, testasyncstream, testhttpserver, testshttpserver, testhttpclient, - testratelimit + testproc, testratelimit # Must be imported last to check for Pending futures import testutils diff --git a/tests/testproc.bat b/tests/testproc.bat new file mode 100644 index 000000000..314bea731 --- /dev/null +++ b/tests/testproc.bat @@ -0,0 +1,36 @@ +@ECHO OFF + +IF /I "%1" == "STDIN" ( + GOTO :STDINTEST +) ELSE IF /I "%1" == "TIMEOUT2" ( + GOTO :TIMEOUTTEST2 +) ELSE IF /I "%1" == "TIMEOUT10" ( + GOTO :TIMEOUTTEST10 +) ELSE IF /I "%1" == "BIGDATA" ( + GOTO :BIGDATA +) ELSE IF /I "%1" == "ENVTEST" ( + GOTO :ENVTEST +) + +EXIT 0 + +:STDINTEST +SET /P "INPUTDATA=" +ECHO STDIN DATA: %INPUTDATA% +EXIT 0 + +:TIMEOUTTEST2 +ping -n 2 127.0.0.1 > NUL +EXIT 2 + +:TIMEOUTTEST10 +ping -n 10 127.0.0.1 > NUL +EXIT 0 + +:BIGDATA +FOR /L %%G IN (1, 1, 400000) DO ECHO ALICEWASBEGINNINGTOGETVERYTIREDOFSITTINGBYHERSISTERONTHEBANKANDO +EXIT 0 + +:ENVTEST +ECHO %CHRONOSASYNC% +EXIT 0 diff --git a/tests/testproc.nim b/tests/testproc.nim new file mode 100644 index 000000000..05f793db8 --- /dev/null +++ b/tests/testproc.nim @@ -0,0 +1,425 @@ +# Chronos Test Suite +# (c) Copyright 2022-Present +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +import std/os +import unittest2, stew/[base10, byteutils] +import ".."/chronos/unittest2/asynctests + +when defined(posix): + from ".."/chronos/osdefs import SIGKILL + +when defined(nimHasUsed): {.used.} + +suite "Asynchronous process management test suite": + const OutputTests = + when defined(windows): + [ + ("ECHO TESTOUT", "TESTOUT\r\n", ""), + ("ECHO TESTERR 1>&2", "", "TESTERR \r\n"), + ("ECHO TESTBOTH && ECHO TESTBOTH 1>&2", "TESTBOTH \r\n", + "TESTBOTH \r\n") + ] + else: + [ + ("echo TESTOUT", "TESTOUT\n", ""), + ("echo TESTERR 1>&2", "", "TESTERR\n"), + ("echo TESTBOTH && echo TESTBOTH 1>&2", "TESTBOTH\n", "TESTBOTH\n") + ] + + const ExitCodes = [5, 13, 64, 100, 126, 127, 128, 130, 255] + + proc createBigMessage(size: int): seq[byte] = + var message = "MESSAGE" + result = newSeq[byte](size) + for i in 0 ..< len(result): + result[i] = byte(message[i mod len(message)]) + + when not(defined(windows)): + proc getCurrentFD(): int = + let local = initTAddress("127.0.0.1:34334") + let sock = createAsyncSocket(local.getDomain(), SockType.SOCK_DGRAM, + Protocol.IPPROTO_UDP) + closeSocket(sock) + return int(sock) + var markFD = getCurrentFD() + + asyncTest "execCommand() exit codes test": + for item in ExitCodes: + let command = "exit " & Base10.toString(uint64(item)) + let res = await execCommand(command) + check res == item + + asyncTest "execCommandEx() exit codes and outputs test": + for test in OutputTests: + let response = await execCommandEx(test[0]) + check: + response.stdOutput == test[1] + response.stdError == test[2] + response.status == 0 + + asyncTest "waitForExit() & peekExitCode() exit codes test": + let options = {AsyncProcessOption.EvalCommand} + for item in ExitCodes: + let command = "exit " & Base10.toString(uint64(item)) + let process = await startProcess(command, options = options) + try: + let res = await process.waitForExit(InfiniteDuration) + check: + res == item + process.peekExitCode().tryGet() == item + process.running().tryGet() == false + 
finally: + await process.closeWait() + + asyncTest "addProcess() test": + var + handlerFut = newFuture[void]("process.handler.future") + pidFd: ProcessHandle + processCounter = 0 + processExitCode = 0 + process: AsyncProcessRef + + proc processHandler(udata: pointer) {.gcsafe.} = + processCounter = cast[int](udata) + processExitCode = process.peekExitCode().valueOr: + handlerFut.fail(newException(ValueError, osErrorMsg(error))) + return + let res = removeProcess2(pidFd) + if res.isErr(): + handlerFut.fail(newException(ValueError, osErrorMsg(res.error()))) + else: + handlerFut.complete() + + let + options = {AsyncProcessOption.EvalCommand} + command = "exit 1" + + process = await startProcess(command, options = options) + + try: + pidFd = + block: + let res = addProcess2(process.pid(), processHandler, + cast[pointer](31337)) + if res.isErr(): + raiseAssert osErrorMsg(res.error()) + res.get() + await handlerFut.wait(5.seconds) + check: + processExitCode == 1 + processCounter == 31337 + finally: + await process.closeWait() + + asyncTest "STDIN stream test": + let + command = + when defined(windows): + "tests\\testproc.bat stdin" + else: + "tests/testproc.sh stdin" + options = {AsyncProcessOption.EvalCommand} + shellHeader = "STDIN DATA: ".toBytes() + smallTest = + when defined(windows): + "SMALL AMOUNT\r\n".toBytes() + else: + "SMALL AMOUNT\n".toBytes() + + let bigTest = + when defined(windows): + var res = createBigMessage(256) + res.add(byte(0x0D)) + res.add(byte(0x0A)) + res + else: + var res = createBigMessage(256) + res.add(byte(0x0A)) + res + + for item in [smallTest, bigTest]: + let process = await startProcess(command, options = options, + stdinHandle = AsyncProcess.Pipe, + stdoutHandle = AsyncProcess.Pipe) + try: + await process.stdinStream.write(item) + let stdoutDataFut = process.stdoutStream.read() + let res = await process.waitForExit(InfiniteDuration) + await allFutures(stdoutDataFut) + check: + res == 0 + stdoutDataFut.read() == shellHeader & item + finally: + await process.closeWait() + + asyncTest "STDOUT and STDERR streams test": + let options = {AsyncProcessOption.EvalCommand} + + for test in OutputTests: + let process = await startProcess(test[0], options = options, + stdoutHandle = AsyncProcess.Pipe, + stderrHandle = AsyncProcess.Pipe) + try: + let outBytesFut = process.stdoutStream.read() + let errBytesFut = process.stderrStream.read() + let res = await process.waitForExit(InfiniteDuration) + await allFutures(outBytesFut, errBytesFut) + check: + string.fromBytes(outBytesFut.read()) == test[1] + string.fromBytes(errBytesFut.read()) == test[2] + res == 0 + finally: + await process.closeWait() + + asyncTest "STDERR to STDOUT streams test": + let options = {AsyncProcessOption.EvalCommand, + AsyncProcessOption.StdErrToStdOut} + let command = + when defined(windows): + "ECHO TESTSTDOUT && ECHO TESTSTDERR 1>&2" + else: + "echo TESTSTDOUT && echo TESTSTDERR 1>&2" + let expect = + when defined(windows): + "TESTSTDOUT \r\nTESTSTDERR \r\n" + else: + "TESTSTDOUT\nTESTSTDERR\n" + let process = await startProcess(command, options = options, + stdoutHandle = AsyncProcess.Pipe) + try: + let outBytesFut = process.stdoutStream.read() + let res = await process.waitForExit(InfiniteDuration) + await allFutures(outBytesFut) + check: + string.fromBytes(outBytesFut.read()) == expect + res == 0 + finally: + await process.closeWait() + + asyncTest "Capture big amount of bytes from STDOUT stream test": + let options = {AsyncProcessOption.EvalCommand} + let command = + when defined(windows): + 
"tests\\testproc.bat bigdata" + else: + "tests/testproc.sh bigdata" + let expect = + when defined(windows): + 400_000 * (64 + 2) + else: + 400_000 * (64 + 1) + let process = await startProcess(command, options = options, + stdoutHandle = AsyncProcess.Pipe, + stderrHandle = AsyncProcess.Pipe) + try: + let outBytesFut = process.stdoutStream.read() + let errBytesFut = process.stderrStream.read() + let res = await process.waitForExit(InfiniteDuration) + await allFutures(outBytesFut, errBytesFut) + check: + res == 0 + len(outBytesFut.read()) == expect + len(errBytesFut.read()) == 0 + finally: + await process.closeWait() + + asyncTest "Long-waiting waitForExit() test": + let command = + when defined(windows): + ("tests\\testproc.bat", "timeout2") + else: + ("tests/testproc.sh", "timeout2") + let process = await startProcess(command[0], arguments = @[command[1]]) + try: + let res = await process.waitForExit(InfiniteDuration) + check res == 2 + finally: + await process.closeWait() + + asyncTest "waitForExit(duration) test": + let command = + when defined(windows): + ("tests\\testproc.bat", "timeout10") + else: + ("tests/testproc.sh", "timeout10") + let expect = + when defined(windows): + 0 + else: + 128 + int(SIGKILL) + let process = await startProcess(command[0], arguments = @[command[1]]) + try: + let res = await process.waitForExit(1.seconds) + check res == expect + finally: + await process.closeWait() + + asyncTest "Child process environment test": + let command = + when defined(windows): + ("tests\\testproc.bat", "envtest", 0, "CHILDPROCESSTEST\r\n") + else: + ("tests/testproc.sh", "envtest", 0, "CHILDPROCESSTEST\n") + + let env = getProcessEnvironment() + env["CHRONOSASYNC"] = "CHILDPROCESSTEST" + let process = await startProcess(command[0], arguments = @[command[1]], + environment = env, + stdoutHandle = AsyncProcess.Pipe) + try: + let outBytesFut = process.stdoutStream.read() + let res = await process.waitForExit(InfiniteDuration) + let outBytes = await outBytesFut + check: + res == command[2] + string.fromBytes(outBytes) == command[3] + finally: + await process.closeWait() + + test "getProcessEnvironment() test": + let env = getProcessEnvironment() + when defined(windows): + check len(env["SYSTEMROOT"]) > 0 + else: + check len(env["USER"]) > 0 + + asyncTest "Multiple processes waiting test": + const ProcessesCount = 50 + + let command = + when defined(windows): + ("tests\\testproc.bat", "timeout2", 2) + else: + ("tests/testproc.sh", "timeout2", 2) + + var processes: seq[AsyncProcessRef] + for n in 0 ..< ProcessesCount: + let process = await startProcess(command[0], arguments = @[command[1]]) + processes.add(process) + try: + var pending: seq[Future[int]] + for process in processes: + pending.add(process.waitForExit(10.seconds)) + await allFutures(pending) + for index in 0 ..< ProcessesCount: + check pending[index].read() == command[2] + finally: + var pending: seq[Future[void]] + for process in processes: + pending.add(process.closeWait()) + await allFutures(pending) + + asyncTest "Multiple processes exit codes test": + const ProcessesCount = 50 + + let options = {AsyncProcessOption.EvalCommand} + + var processes: seq[AsyncProcessRef] + for n in 0 ..< ProcessesCount: + let + command = "exit " & Base10.toString(uint64(n)) + process = await startProcess(command, options = options) + processes.add(process) + try: + var pending: seq[Future[int]] + for process in processes: + pending.add(process.waitForExit(10.seconds)) + await allFutures(pending) + for index in 0 ..< ProcessesCount: + check 
pending[index].read() == index + finally: + var pending: seq[Future[void]] + for process in processes: + pending.add(process.closeWait()) + await allFutures(pending) + + asyncTest "Multiple processes data capture test": + const ProcessesCount = 50 + + let options = {AsyncProcessOption.EvalCommand} + + var processes: seq[AsyncProcessRef] + for n in 0 ..< ProcessesCount: + let command = + when defined(windows): + "ECHO TEST" & $n + else: + "echo TEST" & $n + + let process = await startProcess(command, options = options, + stdoutHandle = AsyncProcess.Pipe) + processes.add(process) + + try: + var pendingReaders: seq[Future[seq[byte]]] + var pendingWaiters: seq[Future[int]] + for process in processes: + pendingReaders.add(process.stdoutStream.read()) + pendingWaiters.add(process.waitForExit(10.seconds)) + await allFutures(pendingReaders) + await allFutures(pendingWaiters) + + for index in 0 ..< ProcessesCount: + let expect = + when defined(windows): + "TEST" & $index & "\r\n" + else: + "TEST" & $index & "\n" + check string.fromBytes(pendingReaders[index].read()) == expect + check pendingWaiters[index].read() == 0 + finally: + var pending: seq[Future[void]] + for process in processes: + pending.add(process.closeWait()) + await allFutures(pending) + + asyncTest "terminate() test": + let command = + when defined(windows): + ("tests\\testproc.bat", "timeout10", 0) + else: + ("tests/testproc.sh", "timeout10", 143) # 128 + SIGTERM + let process = await startProcess(command[0], arguments = @[command[1]]) + try: + let resFut = process.waitForExit(InfiniteDuration) + check process.terminate().isOk() + let res = await resFut + check res == command[2] + finally: + await process.closeWait() + + asyncTest "kill() test": + let command = + when defined(windows): + ("tests\\testproc.bat", "timeout10", 0) + else: + ("tests/testproc.sh", "timeout10", 137) # 128 + SIGKILL + let process = await startProcess(command[0], arguments = @[command[1]]) + try: + let resFut = process.waitForExit(InfiniteDuration) + check process.kill().isOk() + let res = await resFut + check res == command[2] + finally: + await process.closeWait() + + test "File descriptors leaks test": + when defined(windows): + skip() + else: + check getCurrentFD() == markFD + + test "Leaks test": + proc getTrackerLeaks(tracker: string): bool = + let tracker = getTracker(tracker) + if isNil(tracker): false else: tracker.isLeaked() + + check: + getTrackerLeaks("async.process") == false + getTrackerLeaks("async.stream.reader") == false + getTrackerLeaks("async.stream.writer") == false + getTrackerLeaks("stream.transport") == false diff --git a/tests/testproc.sh b/tests/testproc.sh new file mode 100755 index 000000000..1725d49d3 --- /dev/null +++ b/tests/testproc.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +if [ "$1" == "stdin" ]; then + read -r inputdata + echo "STDIN DATA: $inputdata" +elif [ "$1" == "timeout2" ]; then + sleep 2 + exit 2 +elif [ "$1" == "timeout10" ]; then + sleep 10 +elif [ "$1" == "bigdata" ]; then + for i in {1..400000} + do + echo "ALICEWASBEGINNINGTOGETVERYTIREDOFSITTINGBYHERSISTERONTHEBANKANDO" + done +elif [ "$1" == "envtest" ]; then + echo "$CHRONOSASYNC" +else + echo "arguments missing" +fi diff --git a/tests/testsignal.nim b/tests/testsignal.nim index 5eca5a94f..0bcf79333 100644 --- a/tests/testsignal.nim +++ b/tests/testsignal.nim @@ -17,7 +17,7 @@ suite "Signal handling test suite": when not defined(windows): var signalCounter = 0 - sigfd = -1 + sigfd: SignalHandle proc signalProc(udata: pointer) = signalCounter = cast[int](udata) From 
4c07da6abb4e66c0a8e4b7bd239dabf2f0c620c6 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 23 May 2023 18:40:52 +0200 Subject: [PATCH 019/146] add test for raise-only patterns (#391) * add test for raise-only patterns https://github.com/status-im/nim-chronos/issues/56 * fix --- tests/testmacro.nim | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/testmacro.nim b/tests/testmacro.nim index f50015fb1..680a2ba62 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -139,3 +139,20 @@ suite "Closure iterator's exception transformation issues": answer.inc(10) waitFor(a()) check answer == 42 + + test "raise-only": + # https://github.com/status-im/nim-chronos/issues/56 + proc trySync() {.async.} = + return + + proc x() {.async.} = + try: + await trySync() + return + except ValueError: + discard + + raiseAssert "shouldn't reach" + + waitFor(x()) + From 2fa6df08808e945967d7e9c3107c84fc278ec069 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 23 May 2023 19:45:12 +0200 Subject: [PATCH 020/146] asyncmacro: code cleanups (#392) * prefer `let` * prefer expressions * renames --- chronos/asyncmacro2.nim | 109 ++++++++++++++++++++-------------------- 1 file changed, 55 insertions(+), 54 deletions(-) diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim index 3fb0bb719..9da84b31c 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/asyncmacro2.nim @@ -11,12 +11,13 @@ import std/[macros] proc skipUntilStmtList(node: NimNode): NimNode {.compileTime.} = # Skips a nest of StmtList's. - result = node if node[0].kind == nnkStmtList: - result = skipUntilStmtList(node[0]) + skipUntilStmtList(node[0]) + else: + node proc processBody(node, retFutureSym: NimNode, - subTypeIsVoid: bool): NimNode {.compileTime.} = + baseTypeIsVoid: bool): NimNode {.compileTime.} = #echo(node.treeRepr) result = node case node.kind @@ -25,13 +26,13 @@ proc processBody(node, retFutureSym: NimNode, # As I've painfully found out, the order here really DOES matter. if node[0].kind == nnkEmpty: - if not subTypeIsVoid: + if not baseTypeIsVoid: result.add newCall(newIdentNode("complete"), retFutureSym, newIdentNode("result")) else: result.add newCall(newIdentNode("complete"), retFutureSym) else: - let x = node[0].processBody(retFutureSym, subTypeIsVoid) + let x = node[0].processBody(retFutureSym, baseTypeIsVoid) if x.kind == nnkYieldStmt: result.add x else: result.add newCall(newIdentNode("complete"), retFutureSym, x) @@ -47,7 +48,7 @@ proc processBody(node, retFutureSym: NimNode, # We must not transform nested procedures of any form, otherwise # `retFutureSym` will be used for all nested procedures as their own # `retFuture`. 
- result[i] = processBody(result[i], retFutureSym, subTypeIsVoid) + result[i] = processBody(result[i], retFutureSym, baseTypeIsVoid) proc getName(node: NimNode): string {.compileTime.} = case node.kind @@ -62,11 +63,8 @@ proc getName(node: NimNode): string {.compileTime.} = else: error("Unknown name.") -proc isInvalidReturnType(typeName: string): bool = - return typeName notin ["Future"] #, "FutureStream"] - proc verifyReturnType(typeName: string) {.compileTime.} = - if typeName.isInvalidReturnType: + if typeName != "Future": error("Expected return type of 'Future' got '" & typeName & "'") macro unsupported(s: static[string]): untyped = @@ -112,7 +110,7 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = else: raiseAssert("Unhandled async return type: " & $prc.kind) - let subtypeIsVoid = baseType.eqIdent("void") + let baseTypeIsVoid = baseType.eqIdent("void") if prc.kind in {nnkProcDef, nnkLambda, nnkMethodDef, nnkDo}: let @@ -123,57 +121,64 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = if prc.body.len > 0 and prc.body[0].kind == nnkCommentStmt: outerProcBody.add(prc.body[0]) - # -> iterator nameIter(chronosInternalRetFuture: Future[T]): FutureBase {.closure.} = - # -> {.push warning[resultshadowed]: off.} - # -> var result: T - # -> {.pop.} - # -> - # -> complete(chronosInternalRetFuture, result) let internalFutureSym = ident "chronosInternalRetFuture" - iteratorNameSym = genSym(nskIterator, $prcName) - var - procBody = prc.body.processBody(internalFutureSym, subtypeIsVoid) + procBody = prc.body.processBody(internalFutureSym, baseTypeIsVoid) # don't do anything with forward bodies (empty) if procBody.kind != nnkEmpty: - if subtypeIsVoid: + # fix #13899, `defer` should not escape its original scope + let procBodyBlck = + newStmtList(newTree(nnkBlockStmt, newEmptyNode(), procBody)) + + # Avoid too much quote do to not lose original line numbers + let closureBody = if baseTypeIsVoid: let resultTemplate = quote do: template result: auto {.used.} = {.fatal: "You should not reference the `result` variable inside" & " a void async proc".} - procBody = newStmtList(resultTemplate, procBody) - - # fix #13899, `defer` should not escape its original scope - procBody = newStmtList(newTree(nnkBlockStmt, newEmptyNode(), procBody)) - - if not subtypeIsVoid: - procBody.insert(0, newNimNode(nnkPragma).add(newIdentNode("push"), - newNimNode(nnkExprColonExpr).add(newNimNode(nnkBracketExpr).add( - newIdentNode("warning"), newIdentNode("resultshadowed")), - newIdentNode("off")))) # -> {.push warning[resultshadowed]: off.} - - procBody.insert(1, newNimNode(nnkVarSection, prc.body).add( - newIdentDefs(newIdentNode("result"), baseType))) # -> var result: T - - procBody.insert(2, newNimNode(nnkPragma).add( - newIdentNode("pop"))) # -> {.pop.}) + # -> complete(chronosInternalRetFuture) + let complete = + newCall(newIdentNode("complete"), internalFutureSym) - procBody.add( - newCall(newIdentNode("complete"), - internalFutureSym, newIdentNode("result"))) # -> complete(chronosInternalRetFuture, result) + newStmtList(resultTemplate, procBodyBlck, complete) else: - # -> complete(chronosInternalRetFuture) - procBody.add(newCall(newIdentNode("complete"), internalFutureSym)) + # -> iterator nameIter(chronosInternalRetFuture: Future[T]): FutureBase {.closure.} = + # -> {.push warning[resultshadowed]: off.} + # -> var result: T + # -> {.pop.} + # -> + # -> complete(chronosInternalRetFuture, result) + newStmtList( + # -> {.push warning[resultshadowed]: off.} + 
newNimNode(nnkPragma).add(newIdentNode("push"), + newNimNode(nnkExprColonExpr).add(newNimNode(nnkBracketExpr).add( + newIdentNode("warning"), newIdentNode("resultshadowed")), + newIdentNode("off"))), + + # -> var result: T + newNimNode(nnkVarSection, prc.body).add( + newIdentDefs(newIdentNode("result"), baseType)), + + # -> {.pop.}) + newNimNode(nnkPragma).add( + newIdentNode("pop")), + + procBodyBlck, + + # -> complete(chronosInternalRetFuture, result) + newCall(newIdentNode("complete"), + internalFutureSym, newIdentNode("result"))) let internalFutureType = - if subtypeIsVoid: + if baseTypeIsVoid: newNimNode(nnkBracketExpr, prc).add(newIdentNode("Future")).add(newIdentNode("void")) else: returnType internalFutureParameter = nnkIdentDefs.newTree(internalFutureSym, internalFutureType, newEmptyNode()) + iteratorNameSym = genSym(nskIterator, $prcName) closureIterator = newProc(iteratorNameSym, [newIdentNode("FutureBase"), internalFutureParameter], - procBody, nnkIteratorDef) + closureBody, nnkIteratorDef) closureIterator.pragma = newNimNode(nnkPragma, lineInfoFrom=prc.body) closureIterator.addPragma(newIdentNode("closure")) @@ -211,21 +216,17 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = closureIterator.addPragma(newIdentNode("gcsafe")) outerProcBody.add(closureIterator) - # -> var resultFuture = newFuture[T]() + # -> let resultFuture = newFuture[T]() # declared at the end to be sure that the closure # doesn't reference it, avoid cyclic ref (#203) - var retFutureSym = ident "resultFuture" - var subRetType = - if returnType.kind == nnkEmpty: - newIdentNode("void") - else: - baseType + let + retFutureSym = ident "resultFuture" # Do not change this code to `quote do` version because `instantiationInfo` # will be broken for `newFuture()` call. outerProcBody.add( - newVarStmt( + newLetStmt( retFutureSym, - newCall(newTree(nnkBracketExpr, ident "newFuture", subRetType), + newCall(newTree(nnkBracketExpr, ident "newFuture", baseType), newLit(prcName)) ) ) @@ -262,7 +263,7 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = raises )) - if subtypeIsVoid: + if baseTypeIsVoid: # Add discardable pragma. 
if returnType.kind == nnkEmpty: # Add Future[void] From b65b85533a187d4ccea2c95790e175e8a57054bd Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 25 May 2023 17:31:35 +0200 Subject: [PATCH 021/146] Future cleanup (#393) * FutureState.Finished -> FutureState.Completed (to avoid name clash with `proc finished` which means not-pending) * deprecate `done` - to avoid additional confusion over completed vs finished * remove ad leftovers in stack trace formatting * avoid some generic bloat * avoid unnecessary allocations in `race`/`one` --- chronos/asyncfutures2.nim | 302 +++++++++++++++++++------------------- chronos/debugutils.nim | 6 +- tests/testfut.nim | 8 +- tests/testutils.nim | 4 +- 4 files changed, 157 insertions(+), 163 deletions(-) diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index 546aecb8d..b575a1f94 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -2,7 +2,7 @@ # Chronos # # (c) Copyright 2015 Dominik Picheta -# (c) Copyright 2018-2021 Status Research & Development GmbH +# (c) Copyright 2018-2023 Status Research & Development GmbH # # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) @@ -13,23 +13,26 @@ import stew/base10 import "."/srcloc export srcloc -when defined(nimHasStacktracesModule): - import system/stacktraces -else: - const - reraisedFromBegin = -10 - reraisedFromEnd = -100 +when chronosStackTrace: + when defined(nimHasStacktracesModule): + import system/stacktraces + else: + const + reraisedFromBegin = -10 + reraisedFromEnd = -100 + + type StackTrace = string const LocCreateIndex* = 0 - LocCompleteIndex* = 1 + LocFinishIndex* = 1 -when chronosStackTrace: - type StackTrace = string +template LocCompleteIndex*: untyped {.deprecated: "LocFinishIndex".} = + LocFinishIndex type FutureState* {.pure.} = enum - Pending, Finished, Cancelled, Failed + Pending, Completed, Cancelled, Failed FutureBase* = ref object of RootObj ## Untyped future. location*: array[2, ptr SrcLoc] @@ -39,7 +42,9 @@ type state*: FutureState error*: ref CatchableError ## Stored exception mustCancel*: bool - id*: uint + + when chronosFutureId: + id*: uint when chronosStackTrace: errorStackTrace*: StackTrace @@ -55,10 +60,15 @@ type # Obviously, it will still be allocated on the heap when necessary. Future*[T] = ref object of FutureBase ## Typed future. 
when chronosStrictException: - closure*: iterator(f: Future[T]): FutureBase {.raises: [Defect, CatchableError], gcsafe.} + when (NimMajor, NimMinor) < (1, 4): + closure*: iterator(f: Future[T]): FutureBase {.raises: [Defect, CatchableError], gcsafe.} + else: + closure*: iterator(f: Future[T]): FutureBase {.raises: [CatchableError], gcsafe.} else: - closure*: iterator(f: Future[T]): FutureBase {.raises: [Defect, CatchableError, Exception], gcsafe.} - value: T ## Stored value + closure*: iterator(f: Future[T]): FutureBase {.raises: [Exception], gcsafe.} + + when T isnot void: + value*: T ## Stored value FutureStr*[T] = ref object of Future[T] ## Future to hold GC strings @@ -80,6 +90,10 @@ type tail*: FutureBase count*: uint +# Backwards compatibility for old FutureState name +template Finished* {.deprecated: "Use Completed instead".} = Completed +template Finished*(T: type FutureState): FutureState {.deprecated: "Use FutureState.Completed instead".} = FutureState.Completed + when chronosFutureId: var currentID* {.threadvar.}: uint else: @@ -88,7 +102,6 @@ else: when chronosFutureTracking: var futureList* {.threadvar.}: FutureList - futureList = FutureList() template setupFutureBase(loc: ptr SrcLoc) = new(result) @@ -143,30 +156,30 @@ template newFutureStr*[T](fromProc: static[string] = ""): FutureStr[T] = newFutureStrImpl[T](getSrcLocation(fromProc)) proc finished*(future: FutureBase): bool {.inline.} = - ## Determines whether ``future`` has completed, i.e. ``future`` state changed + ## Determines whether ``future`` has finished, i.e. ``future`` state changed ## from state ``Pending`` to one of the states (``Finished``, ``Cancelled``, ## ``Failed``). - result = (future.state != FutureState.Pending) + (future.state != FutureState.Pending) proc cancelled*(future: FutureBase): bool {.inline.} = ## Determines whether ``future`` has cancelled. (future.state == FutureState.Cancelled) proc failed*(future: FutureBase): bool {.inline.} = - ## Determines whether ``future`` completed with an error. + ## Determines whether ``future`` finished with an error. (future.state == FutureState.Failed) proc completed*(future: FutureBase): bool {.inline.} = - ## Determines whether ``future`` completed without an error. - (future.state == FutureState.Finished) + ## Determines whether ``future`` finished with a value. + (future.state == FutureState.Completed) -proc done*(future: FutureBase): bool {.inline.} = +proc done*(future: FutureBase): bool {.deprecated: "Use `completed` instead".} = ## This is an alias for ``completed(future)`` procedure. completed(future) when chronosFutureTracking: proc futureDestructor(udata: pointer) = - ## This procedure will be called when Future[T] got finished, cancelled or + ## This procedure will be called when Future[T] got completed, cancelled or ## failed and all Future[T].callbacks are already scheduled and processed. 
let future = cast[FutureBase](udata) if future == futureList.tail: futureList.tail = future.prev @@ -189,7 +202,7 @@ proc checkFinished(future: FutureBase, loc: ptr SrcLoc) = msg.add("\n Creation location:") msg.add("\n " & $future.location[LocCreateIndex]) msg.add("\n First completion location:") - msg.add("\n " & $future.location[LocCompleteIndex]) + msg.add("\n " & $future.location[LocFinishIndex]) msg.add("\n Second completion location:") msg.add("\n " & $loc) when chronosStackTrace: @@ -202,7 +215,7 @@ proc checkFinished(future: FutureBase, loc: ptr SrcLoc) = err.cause = future raise err else: - future.location[LocCompleteIndex] = loc + future.location[LocFinishIndex] = loc proc finish(fut: FutureBase, state: FutureState) = # We do not perform any checks here, because: @@ -224,7 +237,7 @@ proc complete[T](future: Future[T], val: T, loc: ptr SrcLoc) = checkFinished(FutureBase(future), loc) doAssert(isNil(future.error)) future.value = val - future.finish(FutureState.Finished) + future.finish(FutureState.Completed) template complete*[T](future: Future[T], val: T) = ## Completes ``future`` with value ``val``. @@ -234,13 +247,13 @@ proc complete(future: Future[void], loc: ptr SrcLoc) = if not(future.cancelled()): checkFinished(FutureBase(future), loc) doAssert(isNil(future.error)) - future.finish(FutureState.Finished) + future.finish(FutureState.Completed) template complete*(future: Future[void]) = ## Completes a void ``future``. complete(future, getSrcLocation()) -proc fail[T](future: Future[T], error: ref CatchableError, loc: ptr SrcLoc) = +proc fail(future: FutureBase, error: ref CatchableError, loc: ptr SrcLoc) = if not(future.cancelled()): checkFinished(FutureBase(future), loc) future.error = error @@ -251,7 +264,7 @@ proc fail[T](future: Future[T], error: ref CatchableError, loc: ptr SrcLoc) = getStackTrace(error) future.finish(FutureState.Failed) -template fail*[T](future: Future[T], error: ref CatchableError) = +template fail*(future: FutureBase, error: ref CatchableError) = ## Completes ``future`` with ``error``. fail(future, error, getSrcLocation()) @@ -266,7 +279,7 @@ proc cancelAndSchedule(future: FutureBase, loc: ptr SrcLoc) = future.errorStackTrace = getStackTrace() future.finish(FutureState.Cancelled) -template cancelAndSchedule*[T](future: Future[T]) = +template cancelAndSchedule*(future: FutureBase) = cancelAndSchedule(FutureBase(future), getSrcLocation()) proc cancel(future: FutureBase, loc: ptr SrcLoc): bool = @@ -303,14 +316,10 @@ template cancel*(future: FutureBase) = ## Cancel ``future``. discard cancel(future, getSrcLocation()) -template cancel*[T](future: Future[T]) = - ## Cancel ``future``. - discard cancel(FutureBase(future), getSrcLocation()) - proc clearCallbacks(future: FutureBase) = future.callbacks = default(seq[AsyncCallback]) -proc addCallback*(future: FutureBase, cb: CallbackFunc, udata: pointer = nil) = +proc addCallback*(future: FutureBase, cb: CallbackFunc, udata: pointer) = ## Adds the callbacks proc to be called when the future completes. ## ## If future has already completed then ``cb`` will be called immediately. @@ -321,14 +330,14 @@ proc addCallback*(future: FutureBase, cb: CallbackFunc, udata: pointer = nil) = let acb = AsyncCallback(function: cb, udata: udata) future.callbacks.add acb -proc addCallback*[T](future: Future[T], cb: CallbackFunc) = +proc addCallback*(future: FutureBase, cb: CallbackFunc) = ## Adds the callbacks proc to be called when the future completes. ## ## If future has already completed then ``cb`` will be called immediately. 
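At call sites, dropping the defaulted `udata` plays out as below (a sketch with an illustrative `onDone`, assuming `import chronos`); the single-argument overload, whose forwarding body follows, keeps supplying the future itself as `udata`:

    import chronos

    proc onDone(udata: pointer) {.gcsafe.} =
      # With the single-argument overload, `udata` is the future itself.
      doAssert cast[FutureBase](udata).finished()

    let fut = sleepAsync(10.milliseconds)
    fut.addCallback(onDone)              # udata = cast[pointer](fut)
    # fut.addCallback(onDone, someData)  # explicit udata, no default any more
    waitFor fut
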
future.addCallback(cb, cast[pointer](future)) proc removeCallback*(future: FutureBase, cb: CallbackFunc, - udata: pointer = nil) = + udata: pointer) = ## Remove future from list of callbacks - this operation may be slow if there ## are many registered callbacks! doAssert(not isNil(cb)) @@ -337,10 +346,10 @@ proc removeCallback*(future: FutureBase, cb: CallbackFunc, future.callbacks.keepItIf: it.function != cb or it.udata != udata -proc removeCallback*[T](future: Future[T], cb: CallbackFunc) = +proc removeCallback*(future: FutureBase, cb: CallbackFunc) = future.removeCallback(cb, cast[pointer](future)) -proc `callback=`*(future: FutureBase, cb: CallbackFunc, udata: pointer = nil) = +proc `callback=`*(future: FutureBase, cb: CallbackFunc, udata: pointer) = ## Clears the list of callbacks and sets the callback proc to be called when ## the future completes. ## @@ -351,13 +360,13 @@ proc `callback=`*(future: FutureBase, cb: CallbackFunc, udata: pointer = nil) = future.clearCallbacks future.addCallback(cb, udata) -proc `callback=`*[T](future: Future[T], cb: CallbackFunc) = +proc `callback=`*(future: FutureBase, cb: CallbackFunc) = ## Sets the callback proc to be called when the future completes. ## ## If future has already completed then ``cb`` will be called immediately. `callback=`(future, cb, cast[pointer](future)) -proc `cancelCallback=`*[T](future: Future[T], cb: CallbackFunc) = +proc `cancelCallback=`*(future: FutureBase, cb: CallbackFunc) = ## Sets the callback procedure to be called when the future is cancelled. ## ## This callback will be called immediately as ``future.cancel()`` invoked. @@ -403,84 +412,66 @@ proc internalContinue[T](fut: pointer) {.gcsafe, raises: [Defect].} = {.pop.} -template getFilenameProcname(entry: StackTraceEntry): (string, string) = - when compiles(entry.filenameStr) and compiles(entry.procnameStr): - # We can't rely on "entry.filename" and "entry.procname" still being valid - # cstring pointers, because the "string.data" buffers they pointed to might - # be already garbage collected (this entry being a non-shallow copy, - # "entry.filename" no longer points to "entry.filenameStr.data", but to the - # buffer of the original object). - (entry.filenameStr, entry.procnameStr) - else: - ($entry.filename, $entry.procname) - -proc getHint(entry: StackTraceEntry): string = - ## We try to provide some hints about stack trace entries that the user - ## may not be familiar with, in particular calls inside the stdlib. 
- - let (filename, procname) = getFilenameProcname(entry) - - if procname == "processPendingCallbacks": - if cmpIgnoreStyle(filename, "asyncdispatch.nim") == 0: - return "Executes pending callbacks" - elif procname == "poll": - if cmpIgnoreStyle(filename, "asyncdispatch.nim") == 0: - return "Processes asynchronous completion events" - - if procname == "internalContinue": - if cmpIgnoreStyle(filename, "asyncfutures.nim") == 0: - return "Resumes an async procedure" - -proc `$`(stackTraceEntries: seq[StackTraceEntry]): string = - try: - when defined(nimStackTraceOverride) and declared(addDebuggingInfo): - let entries = addDebuggingInfo(stackTraceEntries) +when chronosStackTrace: + import std/strutils + + template getFilenameProcname(entry: StackTraceEntry): (string, string) = + when compiles(entry.filenameStr) and compiles(entry.procnameStr): + # We can't rely on "entry.filename" and "entry.procname" still being valid + # cstring pointers, because the "string.data" buffers they pointed to might + # be already garbage collected (this entry being a non-shallow copy, + # "entry.filename" no longer points to "entry.filenameStr.data", but to the + # buffer of the original object). + (entry.filenameStr, entry.procnameStr) else: - let entries = stackTraceEntries - - # Find longest filename & line number combo for alignment purposes. - var longestLeft = 0 - for entry in entries: - let (filename, procname) = getFilenameProcname(entry) - - if procname == "": continue - - let leftLen = filename.len + len($entry.line) - if leftLen > longestLeft: - longestLeft = leftLen - - var indent = 2 - # Format the entries. - for entry in entries: - let (filename, procname) = getFilenameProcname(entry) - - if procname == "": - if entry.line == reraisedFromBegin: - result.add(spaces(indent) & "#[\n") - indent.inc(2) - elif entry.line == reraisedFromEnd: - indent.dec(2) - result.add(spaces(indent) & "]#\n") - continue - - let left = "$#($#)" % [filename, $entry.line] - result.add((spaces(indent) & "$#$# $#\n") % [ - left, - spaces(longestLeft - left.len + 2), - procname - ]) - let hint = getHint(entry) - if hint.len > 0: - result.add(spaces(indent+2) & "## " & hint & "\n") - except ValueError as exc: - return exc.msg # Shouldn't actually happen since we set the formatting - # string + ($entry.filename, $entry.procname) -when chronosStackTrace: - proc injectStacktrace(future: FutureBase) = + proc `$`(stackTraceEntries: seq[StackTraceEntry]): string = + try: + when defined(nimStackTraceOverride) and declared(addDebuggingInfo): + let entries = addDebuggingInfo(stackTraceEntries) + else: + let entries = stackTraceEntries + + # Find longest filename & line number combo for alignment purposes. + var longestLeft = 0 + for entry in entries: + let (filename, procname) = getFilenameProcname(entry) + + if procname == "": continue + + let leftLen = filename.len + len($entry.line) + if leftLen > longestLeft: + longestLeft = leftLen + + var indent = 2 + # Format the entries. 
+ for entry in entries: + let (filename, procname) = getFilenameProcname(entry) + + if procname == "": + if entry.line == reraisedFromBegin: + result.add(spaces(indent) & "#[\n") + indent.inc(2) + elif entry.line == reraisedFromEnd: + indent.dec(2) + result.add(spaces(indent) & "]#\n") + continue + + let left = "$#($#)" % [filename, $entry.line] + result.add((spaces(indent) & "$#$# $#\n") % [ + left, + spaces(longestLeft - left.len + 2), + procname + ]) + except ValueError as exc: + return exc.msg # Shouldn't actually happen since we set the formatting + # string + + proc injectStacktrace(error: ref Exception) = const header = "\nAsync traceback:\n" - var exceptionMsg = future.error.msg + var exceptionMsg = error.msg if header in exceptionMsg: # This is messy: extract the original exception message from the msg # containing the async traceback. @@ -489,7 +480,7 @@ when chronosStackTrace: var newMsg = exceptionMsg & header - let entries = getStackTraceEntries(future.error) + let entries = getStackTraceEntries(error) newMsg.add($entries) newMsg.add("Exception message: " & exceptionMsg & "\n") @@ -498,14 +489,14 @@ when chronosStackTrace: # newMsg.add("Exception type:") # for entry in getStackTraceEntries(future.error): # newMsg.add "\n" & $entry - future.error.msg = newMsg + error.msg = newMsg proc internalCheckComplete*(fut: FutureBase) {. raises: [Defect, CatchableError].} = # For internal use only. Used in asyncmacro if not(isNil(fut.error)): when chronosStackTrace: - injectStacktrace(fut) + injectStacktrace(fut.error) raise fut.error proc internalRead*[T](fut: Future[T]): T {.inline.} = @@ -526,7 +517,7 @@ proc read*[T](future: Future[T] ): T {. # TODO: Make a custom exception type for this? raise newException(ValueError, "Future still in progress.") -proc readError*[T](future: Future[T]): ref CatchableError {. +proc readError*(future: FutureBase): ref CatchableError {. raises: [Defect, ValueError].} = ## Retrieves the exception stored in ``future``. ## @@ -610,7 +601,7 @@ proc asyncDiscard*[T](future: Future[T]) {. proc `and`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] {. deprecated: "Use allFutures[T](varargs[Future[T]])".} = ## Returns a future which will complete once both ``fut1`` and ``fut2`` - ## complete. + ## finish. ## ## If cancelled, ``fut1`` and ``fut2`` futures WILL NOT BE cancelled. var retFuture = newFuture[void]("chronos.`and`") @@ -642,7 +633,7 @@ proc `and`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] {. proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] = ## Returns a future which will complete once either ``fut1`` or ``fut2`` - ## complete. + ## finish. ## ## If ``fut1`` or ``fut2`` future is failed, the result future will also be ## failed with an error stored in ``fut1`` or ``fut2`` respectively. @@ -696,7 +687,7 @@ proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] = proc all*[T](futs: varargs[Future[T]]): auto {. deprecated: "Use allFutures(varargs[Future[T]])".} = - ## Returns a future which will complete once all futures in ``futs`` complete. + ## Returns a future which will complete once all futures in ``futs`` finish. ## If the argument is empty, the returned future completes immediately. ## ## If the awaited futures are not ``Future[void]``, the returned future @@ -796,8 +787,8 @@ proc oneIndex*[T](futs: varargs[Future[T]]): Future[int] {. proc oneValue*[T](futs: varargs[Future[T]]): Future[T] {. 
deprecated: "Use one[T](varargs[Future[T]])".} = - ## Returns a future which will complete once one of the futures in ``futs`` - ## complete. + ## Returns a future which will finish once one of the futures in ``futs`` + ## finish. ## ## If the argument is empty, returned future FAILS immediately. ## @@ -865,15 +856,15 @@ proc allFutures*(futs: varargs[FutureBase]): Future[void] = ## On cancel all the awaited futures ``futs`` WILL NOT BE cancelled. var retFuture = newFuture[void]("chronos.allFutures()") let totalFutures = len(futs) - var completedFutures = 0 + var finishedFutures = 0 # Because we can't capture varargs[T] in closures we need to create copy. var nfuts = @futs proc cb(udata: pointer) = if not(retFuture.finished()): - inc(completedFutures) - if completedFutures == totalFutures: + inc(finishedFutures) + if finishedFutures == totalFutures: retFuture.complete() proc cancellation(udata: pointer) = @@ -886,10 +877,10 @@ proc allFutures*(futs: varargs[FutureBase]): Future[void] = if not(fut.finished()): fut.addCallback(cb) else: - inc(completedFutures) + inc(finishedFutures) retFuture.cancelCallback = cancellation - if len(nfuts) == 0 or len(nfuts) == completedFutures: + if len(nfuts) == 0 or len(nfuts) == finishedFutures: retFuture.complete() return retFuture @@ -912,21 +903,21 @@ proc allFinished*[T](futs: varargs[Future[T]]): Future[seq[Future[T]]] = ## will be completed, failed or canceled. ## ## Returned sequence will hold all the Future[T] objects passed to - ## ``allCompleted`` with the order preserved. + ## ``allFinished`` with the order preserved. ## ## If the argument is empty, the returned future COMPLETES immediately. ## ## On cancel all the awaited futures ``futs`` WILL NOT BE cancelled. var retFuture = newFuture[seq[Future[T]]]("chronos.allFinished()") let totalFutures = len(futs) - var completedFutures = 0 + var finishedFutures = 0 var nfuts = @futs proc cb(udata: pointer) = if not(retFuture.finished()): - inc(completedFutures) - if completedFutures == totalFutures: + inc(finishedFutures) + if finishedFutures == totalFutures: retFuture.complete(nfuts) proc cancellation(udata: pointer) = @@ -939,10 +930,10 @@ proc allFinished*[T](futs: varargs[Future[T]]): Future[seq[Future[T]]] = if not(fut.finished()): fut.addCallback(cb) else: - inc(completedFutures) + inc(finishedFutures) retFuture.cancelCallback = cancellation - if len(nfuts) == 0 or len(nfuts) == completedFutures: + if len(nfuts) == 0 or len(nfuts) == finishedFutures: retFuture.complete(nfuts) return retFuture @@ -958,6 +949,16 @@ proc one*[T](futs: varargs[Future[T]]): Future[Future[T]] = ## On cancel futures in ``futs`` WILL NOT BE cancelled. var retFuture = newFuture[Future[T]]("chronos.one()") + if len(futs) == 0: + retFuture.fail(newException(ValueError, "Empty Future[T] list")) + return retFuture + + # If one of the Future[T] already finished we return it as result + for fut in futs: + if fut.finished(): + retFuture.complete(fut) + return retFuture + # Because we can't capture varargs[T] in closures we need to create copy. 
var nfuts = @futs @@ -979,18 +980,9 @@ proc one*[T](futs: varargs[Future[T]]): Future[Future[T]] = if not(nfuts[i].finished()): nfuts[i].removeCallback(cb) - # If one of the Future[T] already finished we return it as result - for fut in nfuts: - if fut.finished(): - retFuture.complete(fut) - return retFuture - for fut in nfuts: fut.addCallback(cb) - if len(nfuts) == 0: - retFuture.fail(newException(ValueError, "Empty Future[T] list")) - retFuture.cancelCallback = cancellation return retFuture @@ -1003,7 +995,17 @@ proc race*(futs: varargs[FutureBase]): Future[FutureBase] = ## On success returned Future will hold finished FutureBase. ## ## On cancel futures in ``futs`` WILL NOT BE cancelled. - var retFuture = newFuture[FutureBase]("chronos.race()") + let retFuture = newFuture[FutureBase]("chronos.race()") + + if len(futs) == 0: + retFuture.fail(newException(ValueError, "Empty Future[T] list")) + return retFuture + + # If one of the Future[T] already finished we return it as result + for fut in futs: + if fut.finished(): + retFuture.complete(fut) + return retFuture # Because we can't capture varargs[T] in closures we need to create copy. var nfuts = @futs @@ -1026,17 +1028,9 @@ proc race*(futs: varargs[FutureBase]): Future[FutureBase] = if not(nfuts[i].finished()): nfuts[i].removeCallback(cb) - # If one of the Future[T] already finished we return it as result - for fut in nfuts: - if fut.finished(): - retFuture.complete(fut) - return retFuture - for fut in nfuts: fut.addCallback(cb, cast[pointer](fut)) - if len(nfuts) == 0: - retFuture.fail(newException(ValueError, "Empty Future[T] list")) - retFuture.cancelCallback = cancellation + return retFuture diff --git a/chronos/debugutils.nim b/chronos/debugutils.nim index 17d6412bc..de1aee4ea 100644 --- a/chronos/debugutils.nim +++ b/chronos/debugutils.nim @@ -20,11 +20,11 @@ when chronosFutureTracking: const AllFutureStates* = {FutureState.Pending, FutureState.Cancelled, - FutureState.Finished, FutureState.Failed} - WithoutFinished* = {FutureState.Pending, FutureState.Cancelled, + FutureState.Completed, FutureState.Failed} + WithoutCompleted* = {FutureState.Pending, FutureState.Cancelled, FutureState.Failed} OnlyPending* = {FutureState.Pending} - OnlyFinished* = {FutureState.Finished} + OnlyCompleted* = {FutureState.Completed} proc dumpPendingFutures*(filter = AllFutureStates): string = ## Dump all `pending` Future[T] objects. 
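The net effect of the `one`/`race` reshuffling above is that the empty-argument failure and the already-finished fast path now run before the varargs copy, so nothing is allocated when the answer is already known; call sites are unchanged. A minimal usage sketch (timer durations are illustrative, assuming `import chronos`):

    import chronos

    proc demo() {.async.} =
      let
        fast = sleepAsync(10.milliseconds)
        slow = sleepAsync(20.milliseconds)
        first = await race(fast, slow)    # FutureBase of whichever finished first
      doAssert first == FutureBase(fast)  # the 10 ms timer wins in this sketch
      await slow                          # race does not cancel the other future

    waitFor demo()
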
diff --git a/tests/testfut.nim b/tests/testfut.nim index fa250a19b..af92354b5 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -1098,7 +1098,7 @@ suite "Future[T] behavior test suite": var fut = waitProc() await cancelAndWait(fut) check: - fut.state == FutureState.Finished + fut.state == FutureState.Completed neverFlag1 and neverFlag2 and neverFlag3 and waitProc1 and waitProc2 asyncTest "Cancellation withTimeout() test": @@ -1129,7 +1129,7 @@ suite "Future[T] behavior test suite": var fut = withTimeoutProc() await cancelAndWait(fut) check: - fut.state == FutureState.Finished + fut.state == FutureState.Completed neverFlag1 and neverFlag2 and neverFlag3 and waitProc1 and waitProc2 asyncTest "Cancellation race test": @@ -1462,8 +1462,8 @@ suite "Future[T] behavior test suite": var fut2 = race(f31, f21, f11) check: - fut1.done() and fut1.read() == FutureBase(f10) - fut2.done() and fut2.read() == FutureBase(f21) + fut1.completed() and fut1.read() == FutureBase(f10) + fut2.completed() and fut2.read() == FutureBase(f21) await allFutures(f20, f30, f11, f31) diff --git a/tests/testutils.nim b/tests/testutils.nim index f45819007..e5890372a 100644 --- a/tests/testutils.nim +++ b/tests/testutils.nim @@ -22,8 +22,8 @@ suite "Asynchronous utilities test suite": test "Future clean and leaks test": when chronosFutureTracking: - if pendingFuturesCount(WithoutFinished) == 0'u: - if pendingFuturesCount(OnlyFinished) > 0'u: + if pendingFuturesCount(WithoutCompleted) == 0'u: + if pendingFuturesCount(OnlyCompleted) > 0'u: poll() check pendingFuturesCount() == 0'u else: From 36ab9c8e4aab4ff210514ede60b8f2a64695067d Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 30 May 2023 08:57:57 +0300 Subject: [PATCH 022/146] Fix sigprocmask declaration missing when threads are disabled. 
(#396) --- chronos/osdefs.nim | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 971a9a931..39bd947da 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -825,10 +825,10 @@ elif defined(macos) or defined(macosx): from std/posix import close, shutdown, socket, getpeername, getsockname, recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, getaddrinfo, gai_strerror, getrlimit, - setrlimit, getpid, pthread_sigmask, sigprocmask, sigemptyset, - sigaddset, sigismember, fcntl, accept, pipe, write, - signal, read, setsockopt, getsockopt, getcwd, chdir, - waitpid, kill, + setrlimit, getpid, pthread_sigmask, sigprocmask, + sigemptyset, sigaddset, sigismember, fcntl, accept, + pipe, write, signal, read, setsockopt, getsockopt, + getcwd, chdir, waitpid, kill, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, @@ -846,10 +846,10 @@ elif defined(macos) or defined(macosx): export close, shutdown, socket, getpeername, getsockname, recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, getaddrinfo, gai_strerror, getrlimit, - setrlimit, getpid, pthread_sigmask, sigprocmask, sigemptyset, - sigaddset, sigismember, fcntl, accept, pipe, write, - signal, read, setsockopt, getsockopt, getcwd, chdir, - waitpid, kill, + setrlimit, getpid, pthread_sigmask, sigprocmask, + sigemptyset, sigaddset, sigismember, fcntl, accept, + pipe, write, signal, read, setsockopt, getsockopt, + getcwd, chdir, waitpid, kill, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, @@ -886,8 +886,8 @@ elif defined(linux): getrlimit, setrlimit, getpeername, getsockname, recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, sendmsg, recvmsg, getpid, fcntl, - pthread_sigmask, clock_gettime, signal, getcwd, chdir, - waitpid, kill, + pthread_sigmask, sigprocmask, clock_gettime, signal, + getcwd, chdir, waitpid, kill, ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, SigInfo, Id, Tmsghdr, IOVec, RLimit, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, @@ -909,8 +909,8 @@ elif defined(linux): getrlimit, setrlimit, getpeername, getsockname, recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, sendmsg, recvmsg, getpid, fcntl, - pthread_sigmask, clock_gettime, signal, getcwd, chdir, - waitpid, kill, + pthread_sigmask, sigprocmask, clock_gettime, signal, + getcwd, chdir, waitpid, kill, ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, SigInfo, Id, Tmsghdr, IOVec, RLimit, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, From 6523f741a65b27d3973df9a68771447e53bf8ee2 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 30 May 2023 18:45:16 +0300 Subject: [PATCH 023/146] Assertion and bugfix. (#397) --- chronos/asyncfutures2.nim | 1 + chronos/streams/asyncstream.nim | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index b575a1f94..9a9879c76 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -222,6 +222,7 @@ proc finish(fut: FutureBase, state: FutureState) = # 1. `finish()` is a private procedure and `state` is under our control. # 2. `fut.state` is checked by `checkFinished()`. 
fut.state = state + doAssert fut.cancelcb == nil or state != FutureState.Cancelled fut.cancelcb = nil # release cancellation callback memory for item in fut.callbacks.mitems(): if not(isNil(item.function)): diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 6abb019c9..931453d4f 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -961,7 +961,7 @@ proc join*(rw: AsyncStreamRW): Future[void] = if not(rw.future.finished()): rw.future.addCallback(continuation, cast[pointer](retFuture)) - rw.future.cancelCallback = cancellation + retFuture.cancelCallback = cancellation else: retFuture.complete() From e436f20b33d2dfd8e341b316f30bd8d11360f8f7 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 31 May 2023 07:24:25 +0200 Subject: [PATCH 024/146] Memory cleanups (#395) * Avoid `FutureContinue` bloat by moving type punning into iterator (which is typed anyway) * clear closure iterator from future when iteration is done also on cancellation / exception * remove some redundant local variables in `await` * document `futureContinue` flow --- chronos/asyncfutures2.nim | 108 ++++++++++++++++++++++---------------- chronos/asyncmacro2.nim | 78 +++++++++++---------------- 2 files changed, 94 insertions(+), 92 deletions(-) diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index 9a9879c76..37d205142 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -30,6 +30,14 @@ const template LocCompleteIndex*: untyped {.deprecated: "LocFinishIndex".} = LocFinishIndex +when chronosStrictException: + when (NimMajor, NimMinor) < (1, 4): + {.pragma: closureIter, raises: [Defect, CatchableError], gcsafe.} + else: + {.pragma: closureIter, raises: [CatchableError], gcsafe.} +else: + {.pragma: closureIter, raises: [Exception], gcsafe.} + type FutureState* {.pure.} = enum Pending, Completed, Cancelled, Failed @@ -42,6 +50,7 @@ type state*: FutureState error*: ref CatchableError ## Stored exception mustCancel*: bool + closure*: iterator(f: FutureBase): FutureBase {.closureIter.} when chronosFutureId: id*: uint @@ -54,19 +63,7 @@ type next*: FutureBase prev*: FutureBase - # ZAH: we have discussed some possible optimizations where - # the future can be stored within the caller's stack frame. - # How much refactoring is needed to make this a regular non-ref type? - # Obviously, it will still be allocated on the heap when necessary. Future*[T] = ref object of FutureBase ## Typed future. 
- when chronosStrictException: - when (NimMajor, NimMinor) < (1, 4): - closure*: iterator(f: Future[T]): FutureBase {.raises: [Defect, CatchableError], gcsafe.} - else: - closure*: iterator(f: Future[T]): FutureBase {.raises: [CatchableError], gcsafe.} - else: - closure*: iterator(f: Future[T]): FutureBase {.raises: [Exception], gcsafe.} - when T isnot void: value*: T ## Stored value @@ -235,7 +232,7 @@ proc finish(fut: FutureBase, state: FutureState) = proc complete[T](future: Future[T], val: T, loc: ptr SrcLoc) = if not(future.cancelled()): - checkFinished(FutureBase(future), loc) + checkFinished(future, loc) doAssert(isNil(future.error)) future.value = val future.finish(FutureState.Completed) @@ -246,7 +243,7 @@ template complete*[T](future: Future[T], val: T) = proc complete(future: Future[void], loc: ptr SrcLoc) = if not(future.cancelled()): - checkFinished(FutureBase(future), loc) + checkFinished(future, loc) doAssert(isNil(future.error)) future.finish(FutureState.Completed) @@ -256,7 +253,7 @@ template complete*(future: Future[void]) = proc fail(future: FutureBase, error: ref CatchableError, loc: ptr SrcLoc) = if not(future.cancelled()): - checkFinished(FutureBase(future), loc) + checkFinished(future, loc) future.error = error when chronosStackTrace: future.errorStackTrace = if getStackTrace(error) == "": @@ -281,7 +278,7 @@ proc cancelAndSchedule(future: FutureBase, loc: ptr SrcLoc) = future.finish(FutureState.Cancelled) template cancelAndSchedule*(future: FutureBase) = - cancelAndSchedule(FutureBase(future), getSrcLocation()) + cancelAndSchedule(future, getSrcLocation()) proc cancel(future: FutureBase, loc: ptr SrcLoc): bool = ## Request that Future ``future`` cancel itself. @@ -302,8 +299,14 @@ proc cancel(future: FutureBase, loc: ptr SrcLoc): bool = return false if not(isNil(future.child)): + # If you hit this assertion, you should have used the `CancelledError` + # mechanism and/or use a regular `addCallback` + doAssert future.cancelcb.isNil, + "futures returned from `{.async.}` functions must not use `cancelCallback`" + if cancel(future.child, getSrcLocation()): return true + else: if not(isNil(future.cancelcb)): future.cancelcb(cast[pointer](future)) @@ -328,8 +331,7 @@ proc addCallback*(future: FutureBase, cb: CallbackFunc, udata: pointer) = if future.finished(): callSoon(cb, udata) else: - let acb = AsyncCallback(function: cb, udata: udata) - future.callbacks.add acb + future.callbacks.add AsyncCallback(function: cb, udata: udata) proc addCallback*(future: FutureBase, cb: CallbackFunc) = ## Adds the callbacks proc to be called when the future completes. @@ -370,32 +372,51 @@ proc `callback=`*(future: FutureBase, cb: CallbackFunc) = proc `cancelCallback=`*(future: FutureBase, cb: CallbackFunc) = ## Sets the callback procedure to be called when the future is cancelled. ## - ## This callback will be called immediately as ``future.cancel()`` invoked. + ## This callback will be called immediately as ``future.cancel()`` invoked and + ## must be set before future is finished. 
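Two usage rules follow from the assertion in `cancel()` and the tightened `cancelCallback=` contract above, sketched here with illustrative names (not part of the patch): inside an `{.async.}` procedure, cancellation is observed through `CancelledError`, never by assigning `cancelCallback` to the returned future; `cancelCallback` belongs to manually constructed futures and must be installed while the future is still pending:

    import chronos

    # Rule 1: async procs react to cancellation via CancelledError.
    proc worker(): Future[string] {.async.} =
      try:
        await sleepAsync(100.milliseconds)   # stand-in for real work
        return "done"
      except CancelledError as exc:
        # release resources here, then re-raise so the future ends up Cancelled
        raise exc

    # Rule 2: manually built futures install cancelCallback before returning.
    proc manualWait(): Future[void] =
      let retFuture = newFuture[void]("example.manualWait")
      proc cancellation(udata: pointer) {.gcsafe.} =
        discard   # undo whatever registration would have completed retFuture
      retFuture.cancelCallback = cancellation  # set while the future is pending
      # ... hand retFuture to whatever will complete it later ...
      retFuture
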
+ + doAssert not future.finished(), + "cancellation callback must be set before finishing the future" future.cancelcb = cb {.push stackTrace: off.} -proc internalContinue[T](fut: pointer) {.gcsafe, raises: [Defect].} +proc futureContinue*(fut: FutureBase) {.raises: [Defect], gcsafe.} -proc futureContinue*[T](fut: Future[T]) {.gcsafe, raises: [Defect].} = - # Used internally by async transformation +proc internalContinue(fut: pointer) {.raises: [Defect], gcsafe.} = + let asFut = cast[FutureBase](fut) + GC_unref(asFut) + futureContinue(asFut) + +proc futureContinue*(fut: FutureBase) {.raises: [Defect], gcsafe.} = + # This function is responsible for calling the closure iterator generated by + # the `{.async.}` transformation either until it has completed its iteration + # or raised and error / been cancelled. + # + # Every call to an `{.async.}` proc is redirected to call this function + # instead with its original body captured in `fut.closure`. + var next: FutureBase try: - if not(fut.closure.finished()): - var next = fut.closure(fut) - # Continue while the yielded future is already finished. - while (not next.isNil()) and next.finished(): - next = fut.closure(fut) - if fut.closure.finished(): - break + while true: + # Call closure to make progress on `fut` until it reaches `yield` (inside + # `await` typically) or completes / fails / is cancelled + next = fut.closure(fut) + if fut.closure.finished(): # Reached the end of the transformed proc + break - if fut.closure.finished(): - fut.closure = nil if next == nil: - if not(fut.finished()): - raiseAssert "Async procedure (" & ($fut.location[LocCreateIndex]) & ") yielded `nil`, " & - "are you await'ing a `nil` Future?" - else: + raiseAssert "Async procedure (" & ($fut.location[LocCreateIndex]) & + ") yielded `nil`, are you await'ing a `nil` Future?" + + if not next.finished(): + # We cannot make progress on `fut` until `next` has finished - schedule + # `fut` to continue running when that happens GC_ref(fut) - next.addCallback(internalContinue[T], cast[pointer](fut)) + next.addCallback(CallbackFunc(internalContinue), cast[pointer](fut)) + + # return here so that we don't remove the closure below + return + + # Continue while the yielded future is already finished. except CancelledError: fut.cancelAndSchedule() except CatchableError as exc: @@ -405,11 +426,13 @@ proc futureContinue*[T](fut: Future[T]) {.gcsafe, raises: [Defect].} = raise (ref Defect)(exc) fut.fail((ref ValueError)(msg: exc.msg, parent: exc)) + finally: + next = nil # GC hygiene -proc internalContinue[T](fut: pointer) {.gcsafe, raises: [Defect].} = - let asFut = cast[Future[T]](fut) - GC_unref(asFut) - futureContinue(asFut) + # `futureContinue` will not be called any more for this future so we can + # clean it up + fut.closure = nil + fut.child = nil {.pop.} @@ -845,9 +868,6 @@ proc cancelAndWait*(fut: FutureBase): Future[void] = fut.cancel() return retFuture -proc cancelAndWait*[T](fut: Future[T]): Future[void] = - cancelAndWait(FutureBase(fut)) - proc allFutures*(futs: varargs[FutureBase]): Future[void] = ## Returns a future which will complete only when all futures in ``futs`` ## will be completed, failed or canceled. @@ -896,7 +916,7 @@ proc allFutures*[T](futs: varargs[Future[T]]): Future[void] = # Because we can't capture varargs[T] in closures we need to create copy. 
var nfuts: seq[FutureBase] for future in futs: - nfuts.add(FutureBase(future)) + nfuts.add(future) allFutures(nfuts) proc allFinished*[T](futs: varargs[Future[T]]): Future[seq[Future[T]]] = diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim index 9da84b31c..f5b3570ce 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/asyncmacro2.nim @@ -123,7 +123,13 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = let internalFutureSym = ident "chronosInternalRetFuture" - procBody = prc.body.processBody(internalFutureSym, baseTypeIsVoid) + internalFutureType = + if baseTypeIsVoid: + newNimNode(nnkBracketExpr, prc).add(newIdentNode("Future")).add(newIdentNode("void")) + else: returnType + castFutureSym = quote do: + cast[`internalFutureType`](`internalFutureSym`) + procBody = prc.body.processBody(castFutureSym, baseTypeIsVoid) # don't do anything with forward bodies (empty) if procBody.kind != nnkEmpty: @@ -139,7 +145,7 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = " a void async proc".} # -> complete(chronosInternalRetFuture) let complete = - newCall(newIdentNode("complete"), internalFutureSym) + newCall(newIdentNode("complete"), castFutureSym) newStmtList(resultTemplate, procBodyBlck, complete) else: @@ -168,28 +174,20 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = # -> complete(chronosInternalRetFuture, result) newCall(newIdentNode("complete"), - internalFutureSym, newIdentNode("result"))) + castFutureSym, newIdentNode("result"))) let - internalFutureType = - if baseTypeIsVoid: - newNimNode(nnkBracketExpr, prc).add(newIdentNode("Future")).add(newIdentNode("void")) - else: returnType - internalFutureParameter = nnkIdentDefs.newTree(internalFutureSym, internalFutureType, newEmptyNode()) + internalFutureParameter = nnkIdentDefs.newTree(internalFutureSym, newIdentNode("FutureBase"), newEmptyNode()) iteratorNameSym = genSym(nskIterator, $prcName) closureIterator = newProc(iteratorNameSym, [newIdentNode("FutureBase"), internalFutureParameter], closureBody, nnkIteratorDef) + iteratorNameSym.copyLineInfo(prc) + closureIterator.pragma = newNimNode(nnkPragma, lineInfoFrom=prc.body) closureIterator.addPragma(newIdentNode("closure")) - # **Remark 435**: We generate a proc with an inner iterator which call each other - # recursively. The current Nim compiler is not smart enough to infer - # the `gcsafe`-ty aspect of this setup, so we always annotate it explicitly - # with `gcsafe`. This means that the client code is always enforced to be - # `gcsafe`. This is still **safe**, the compiler still checks for `gcsafe`-ty - # regardless, it is only helping the compiler's inference algorithm. See - # https://github.com/nim-lang/RFCs/issues/435 - # for more details. + + # `async` code must be gcsafe closureIterator.addPragma(newIdentNode("gcsafe")) # TODO when push raises is active in a module, the iterator here inherits @@ -211,9 +209,11 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = )) # If proc has an explicit gcsafe pragma, we add it to iterator as well. + # TODO if these lines are not here, srcloc tests fail (!) if prc.pragma.findChild(it.kind in {nnkSym, nnkIdent} and it.strVal == "gcsafe") != nil: closureIterator.addPragma(newIdentNode("gcsafe")) + outerProcBody.add(closureIterator) # -> let resultFuture = newFuture[T]() @@ -264,7 +264,6 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = )) if baseTypeIsVoid: - # Add discardable pragma. 
if returnType.kind == nnkEmpty: # Add Future[void] prc.params2[0] = @@ -276,47 +275,30 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = template await*[T](f: Future[T]): untyped = when declared(chronosInternalRetFuture): - #work around https://github.com/nim-lang/Nim/issues/19193 - when not declaredInScope(chronosInternalTmpFuture): - var chronosInternalTmpFuture {.inject.}: FutureBase = f - else: - chronosInternalTmpFuture = f - chronosInternalRetFuture.child = chronosInternalTmpFuture - - # This "yield" is meant for a closure iterator in the caller. - yield chronosInternalTmpFuture - - # By the time we get control back here, we're guaranteed that the Future we - # just yielded has been completed (success, failure or cancellation), - # through a very complicated mechanism in which the caller proc (a regular - # closure) adds itself as a callback to chronosInternalTmpFuture. - # - # Callbacks are called only after completion and a copy of the closure - # iterator that calls this template is still in that callback's closure - # environment. That's where control actually gets back to us. - - chronosInternalRetFuture.child = nil + chronosInternalRetFuture.child = f + # `futureContinue` calls the iterator generated by the `async` + # transformation - `yield` gives control back to `futureContinue` which is + # responsible for resuming execution once the yielded future is finished + yield chronosInternalRetFuture.child + + # `child` is guaranteed to have been `finished` after the yield if chronosInternalRetFuture.mustCancel: raise newCancelledError() - chronosInternalTmpFuture.internalCheckComplete() + + # `child` released by `futureContinue` + chronosInternalRetFuture.child.internalCheckComplete() when T isnot void: - cast[type(f)](chronosInternalTmpFuture).internalRead() + cast[type(f)](chronosInternalRetFuture.child).internalRead() else: unsupported "await is only available within {.async.}" template awaitne*[T](f: Future[T]): Future[T] = when declared(chronosInternalRetFuture): - #work around https://github.com/nim-lang/Nim/issues/19193 - when not declaredInScope(chronosInternalTmpFuture): - var chronosInternalTmpFuture {.inject.}: FutureBase = f - else: - chronosInternalTmpFuture = f - chronosInternalRetFuture.child = chronosInternalTmpFuture - yield chronosInternalTmpFuture - chronosInternalRetFuture.child = nil + chronosInternalRetFuture.child = f + yield chronosInternalRetFuture.child if chronosInternalRetFuture.mustCancel: raise newCancelledError() - cast[type(f)](chronosInternalTmpFuture) + cast[type(f)](chronosInternalRetFuture.child) else: unsupported "awaitne is only available within {.async.}" From 5be223e92c441ea30b96f928ac13fcae78e4bb90 Mon Sep 17 00:00:00 2001 From: gpicron Date: Thu, 1 Jun 2023 16:02:33 +0200 Subject: [PATCH 025/146] allow reuse of port for UDP (#379) --- .gitignore | 1 + chronos/transports/datagram.nim | 14 ++++++++++++++ 2 files changed, 15 insertions(+) diff --git a/.gitignore b/.gitignore index db41c852c..c63155181 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ nimcache/ nimble.develop nimble.paths /build/ +nimbledeps diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index a642b594c..0fa1a0e65 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -318,6 +318,13 @@ when defined(windows): closeSocket(localSock) raiseTransportOsError(err) + if ServerFlags.ReusePort in flags: + if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_REUSEPORT, 1): + let err = osLastError() 
+ if sock == asyncInvalidSocket: + closeSocket(localSock) + raiseTransportOsError(err) + if ServerFlags.Broadcast in flags: if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_BROADCAST, 1): let err = osLastError() @@ -524,6 +531,13 @@ else: closeSocket(localSock) raiseTransportOsError(err) + if ServerFlags.ReusePort in flags: + if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_REUSEPORT, 1): + let err = osLastError() + if sock == asyncInvalidSocket: + closeSocket(localSock) + raiseTransportOsError(err) + if ServerFlags.Broadcast in flags: if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_BROADCAST, 1): let err = osLastError() From 315a27236c8ea34fb8f96d725892610ddcdf206d Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 1 Jun 2023 18:09:49 +0300 Subject: [PATCH 026/146] Recover cancellation cleanup for AsyncEvent wait(). (#398) * Recover cancellation cleanup for AsyncEvent wait(). * Address review comments. --- chronos/asyncsync.nim | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/chronos/asyncsync.nim b/chronos/asyncsync.nim index 4a0a13ec7..a48002c4e 100644 --- a/chronos/asyncsync.nim +++ b/chronos/asyncsync.nim @@ -223,12 +223,15 @@ proc wait*(event: AsyncEvent): Future[void] = ## If the internal flag is `true` on entry, return immediately. Otherwise, ## block until another task calls `fire()` to set the flag to `true`, ## then return. - var w = newFuture[void]("AsyncEvent.wait") + let retFuture = newFuture[void]("AsyncEvent.wait") + proc cancellation(udata: pointer) {.gcsafe, raises: [Defect].} = + event.waiters.keepItIf(it != retFuture) if not(event.flag): - event.waiters.add(w) + retFuture.cancelCallback = cancellation + event.waiters.add(retFuture) else: - w.complete() - w + retFuture.complete() + retFuture proc fire*(event: AsyncEvent) = ## Set the internal flag of ``event`` to `true`. All tasks waiting for it From 02fda01bf260a16d70e5b827819b5314542d5ecd Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 2 Jun 2023 01:53:20 +0300 Subject: [PATCH 027/146] Add facility to emulate signals on Windows. (#357) * Initial Windows asyncproc part. Deprecate usage of net/nativesockets in handles, asyncloop. Introduce primitives with inheritable flag. * Some posix fixes. * More duplicate fixes. * Fix AsyncProcessOption.EchoCommand option. Remove isMainModule code. Fix execCommand to use AsyncProcessOption.EvalCommand. * Initial asyncproc tests commit. * Some Posix fixes. * Fix Posix crash. * Add testsuite. Fix osnet.nim to use proper declarations. Fix timer.nim to use proper declarations. Add Windows environment handling procedures. Fix createAsyncPipe. Add leaks tracking for AsyncProcessRef. * Fix O_CLOEXEC constant value. * Add file descriptors leak test. * Remove commented code. Refactor exceptions. Fix compilation warnings. * No exception ioselectors_kqueue initial commit. * Some BSD fixes. Linux refactoring of selectors.nim. * Some fixes to move further. * Last Linux fixes. * Fixes for asyncloop to use 2nd version of selectors api. * Add osutils.nim. * Some fixes. * Hardening resumeRead(), resumeWrite() and consumers. Add ESRCH handling. Introduce no-exception fromPipe2. * Make Windows part exception-free and fix zombie race issue. * createStreamServer() fixes. * Upgrade asyncproc to use non-exception primitives. Fix ioselectors_kqueue to use closeFd(). * Refactor accept() and acceptLoop() to be exception free. * Deprecated some `result` usage. Last fixes to make stream.nim exception free. Use closeFd(). 
Refactor some loops to use handleEintr(). * Fix connect() forgot to unregister socket on error. * All createAsyncSocket() sockets should be closed with unregisterAndCloseFd(). * Attempt to fix posix bug with incomplete output. * Change number of runs in big test. * Refactoring pipes creation. Attempt to fix "write error: Resource temporarily unavailable". * Fix waitForExit(duration) code. Fix test exit code. * Fix Windows missing SIGTERM. * Fix mistype. * Fix compilation error. * Attempt to fix Nim 1.6 issue. * Eliminate Nim's WideCString usage to avoid any problems in future. * Deprecate posix usage in osnet. * Eliminate unused imports. * Some debugging statements for investigation. * Remove all the debugging statements. * Remove testhelpers in favor of unittest2/asynctests. * Fix flaky test. * Make test more resilient to timings. * Add memory size dump to CI. * Refactor some blocks to use valueOr. Make waitForExit to use kill() instead of terminate(). * Remove memory dumps. * Fix peekProcessExitCode() blocks issue. * Fix Windows issue. * Add some debugging echoes. * Fix compilation error. * Add some debugging echoes. * Add more helpers. * Fix compilation error. * waitForExit() is primary suspect. * Remove fast-path for waitForExit. Remove loop for reading signal events. * Remove all debugging echoes. * Return one debugging echo. * Fix futures tests. * Add test for multiple processes waiting to attempt stress test. * Refactor ioselectors_epoll for better signalfd and process monitoring. Add more race condition fixes to waitForExit. Fix some Nim 1.6 warnings. * Fix after rebase issues and warnings. * Fix style issues. Fix different Nim versions issues. Workaround `signalfd` style issues. * Add one more Linux signals workaround. Add one more multiple processes test. * Windows fixes. * Remove unixPlatform define. Fix WSAECONNABORTED for devel. * Temporarily disable rate limit tests. Fix more devel issues. * Deprecate `hasThreadSupport` for ioselectors_kqueue. Fix verifySelectParams issue. Add exit codes test for multiple processes. Fix osnet PtrToCstringConv warning. * ioselectors_kqueue refactoring. * Initial commit. * Fix 1.2-1.4 compilation issue. * Fix unused warning for testCtrlC() test. * Post-rebase fixes. * Restore master files. * More fixes. * Remove duplicates. * Fix style mistake. * Add more flexible pragmas. --- chronos/asyncloop.nim | 158 ++++++++++++++++++++++++++-------- chronos/handles.nim | 2 +- chronos/osdefs.nim | 46 +++++++++- chronos/oserrno.nim | 2 + chronos/osutils.nim | 117 ++++++++++++++++++++++++- chronos/transports/stream.nim | 6 +- tests/testsignal.nim | 95 ++++++++++++-------- 7 files changed, 344 insertions(+), 82 deletions(-) diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index ff2f07946..8c9b62612 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -10,8 +10,10 @@ when (NimMajor, NimMinor) < (1, 4): {.push raises: [Defect].} + {.pragma: callbackFunc, stdcall, gcsafe, raises: [Defect].} else: {.push raises: [].} + {.pragma: callbackFunc, stdcall, gcsafe, raises: [].} from nativesockets import Port import std/[tables, strutils, heapqueue, deques] @@ -289,6 +291,7 @@ func toException*(v: OSErrorCode): ref OSError = newOSError(v) # Result[T, OSErrorCode] values. 
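Before the platform plumbing below, this is roughly what the change buys at the user level: `waitSignal` (and `addSignal`) now also resolve on Windows for SIGINT, SIGTERM and SIGQUIT, so a portable shutdown wait can be written as in the sketch below, modeled on the updated testsignal.nim (proc name and message are illustrative):

    import chronos
    when not defined(windows):
      from posix import SIGINT   # on Windows the dispatcher now exports SIGINT itself

    proc main() {.async.} =
      await waitSignal(SIGINT)   # fires on Ctrl-C / CTRL_C_EVENT
      echo "got SIGINT, shutting down"

    waitFor main()
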
when defined(windows): + export SIGINT, SIGQUIT, SIGTERM type CompletionKey = ULONG_PTR @@ -301,11 +304,8 @@ when defined(windows): CustomOverlapped* = object of OVERLAPPED data*: CompletionData - OVERLAPPED_ENTRY* = object - lpCompletionKey*: ULONG_PTR - lpOverlapped*: ptr CustomOverlapped - internal: ULONG_PTR - dwNumberOfBytesTransferred: DWORD + DispatcherFlag* = enum + SignalHandlerInstalled PDispatcher* = ref object of PDispatcherBase ioPort: HANDLE @@ -315,6 +315,7 @@ when defined(windows): getAcceptExSockAddrs*: WSAPROC_GETACCEPTEXSOCKADDRS transmitFile*: WSAPROC_TRANSMITFILE getQueuedCompletionStatusEx*: LPFN_GETQUEUEDCOMPLETIONSTATUSEX + flags: set[DispatcherFlag] PtrCustomOverlapped* = ptr CustomOverlapped @@ -330,6 +331,7 @@ when defined(windows): WaitableHandle* = ref PostCallbackData ProcessHandle* = distinct WaitableHandle + SignalHandle* = distinct WaitableHandle WaitableResult* {.pure.} = enum Ok, Timeout @@ -444,7 +446,7 @@ when defined(windows): {.push stackTrace: off.} proc waitableCallback(param: pointer, timerOrWaitFired: WINBOOL) {. - stdcall, gcsafe.} = + callbackFunc.} = # This procedure will be executed in `wait thread`, so it must not use # GC related objects. # We going to ignore callbacks which was spawned when `isNil(param) == true` @@ -577,6 +579,86 @@ when defined(windows): ## Remove process' watching using process' descriptor ``procHandle``. removeProcess2(procHandle).tryGet() + {.push stackTrace: off.} + proc consoleCtrlEventHandler(dwCtrlType: DWORD): uint32 {.callbackFunc.} = + ## This procedure will be executed in different thread, so it MUST not use + ## any GC related features (strings, seqs, echo etc.). + case dwCtrlType + of CTRL_C_EVENT: + return + (if raiseSignal(SIGINT).valueOr(false): TRUE else: FALSE) + of CTRL_BREAK_EVENT: + return + (if raiseSignal(SIGINT).valueOr(false): TRUE else: FALSE) + of CTRL_CLOSE_EVENT: + return + (if raiseSignal(SIGTERM).valueOr(false): TRUE else: FALSE) + of CTRL_LOGOFF_EVENT: + return + (if raiseSignal(SIGQUIT).valueOr(false): TRUE else: FALSE) + else: + FALSE + {.pop.} + + proc addSignal2*(signal: int, cb: CallbackFunc, + udata: pointer = nil): Result[SignalHandle, OSErrorCode] = + ## Start watching signal ``signal``, and when signal appears, call the + ## callback ``cb`` with specified argument ``udata``. Returns signal + ## identifier code, which can be used to remove signal callback + ## via ``removeSignal``. + ## + ## NOTE: On Windows only subset of signals are supported: SIGINT, SIGTERM, + ## SIGQUIT + const supportedSignals = [SIGINT, SIGTERM, SIGQUIT] + doAssert(cint(signal) in supportedSignals, "Signal is not supported") + let loop = getThreadDispatcher() + var hWait: WaitableHandle = nil + + proc continuation(ucdata: pointer) {.gcsafe.} = + doAssert(not(isNil(ucdata))) + doAssert(not(isNil(hWait))) + cb(hWait[].udata) + + if SignalHandlerInstalled notin loop.flags: + if getConsoleCP() != 0'u32: + # Console application, we going to cleanup Nim default signal handlers. + if setConsoleCtrlHandler(consoleCtrlEventHandler, TRUE) == FALSE: + return err(osLastError()) + loop.flags.incl(SignalHandlerInstalled) + else: + return err(ERROR_NOT_SUPPORTED) + + let + flags = WT_EXECUTEINWAITTHREAD + hEvent = ? 
openEvent($getSignalName(signal)) + + hWait = registerWaitable(hEvent, flags, InfiniteDuration, + continuation, udata).valueOr: + discard closeFd(hEvent) + return err(error) + ok(SignalHandle(hWait)) + + proc removeSignal2*(signalHandle: SignalHandle): Result[void, OSErrorCode] = + ## Remove watching signal ``signal``. + ? closeWaitable(WaitableHandle(signalHandle)) + ok() + + proc addSignal*(signal: int, cb: CallbackFunc, + udata: pointer = nil): SignalHandle {. + raises: [Defect, ValueError].} = + ## Registers callback ``cb`` to be called when signal ``signal`` will be + ## raised. Returns signal identifier, which can be used to clear signal + ## callback via ``removeSignal``. + addSignal2(signal, cb, udata).valueOr: + raise newException(ValueError, osErrorMsg(error)) + + proc removeSignal*(signalHandle: SignalHandle) {. + raises: [Defect, ValueError].} = + ## Remove signal's watching using signal descriptor ``signalfd``. + let res = removeSignal2(signalHandle) + if res.isErr(): + raise newException(ValueError, osErrorMsg(res.error())) + proc poll*() = ## Perform single asynchronous step, processing timers and completing ## tasks. Blocks until at least one event has completed. @@ -870,7 +952,6 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or ## Please note, that socket is not closed immediately. To avoid bugs with ## closing socket, while operation pending, socket will be closed as ## soon as all pending operations will be notified. - ## You can execute ``aftercb`` before actual socket close operation. let loop = getThreadDispatcher() proc continuation(udata: pointer) = @@ -1168,41 +1249,44 @@ proc callIdle*(cbproc: CallbackFunc) = include asyncfutures2 -when not(defined(windows)): - when asyncEventEngine in ["epoll", "kqueue"]: - proc waitSignal*(signal: int): Future[void] {.raises: [Defect].} = - var retFuture = newFuture[void]("chronos.waitSignal()") - var signalHandle: Opt[SignalHandle] - - template getSignalException(e: OSErrorCode): untyped = - newException(AsyncError, "Could not manipulate signal handler, " & - "reason [" & $int(e) & "]: " & osErrorMsg(e)) - - proc continuation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): - if signalHandle.isSome(): - let res = removeSignal2(signalHandle.get()) - if res.isErr(): - retFuture.fail(getSignalException(res.error())) - else: - retFuture.complete() - proc cancellation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): - if signalHandle.isSome(): - let res = removeSignal2(signalHandle.get()) - if res.isErr(): - retFuture.fail(getSignalException(res.error())) +when defined(macosx) or defined(macos) or defined(freebsd) or + defined(netbsd) or defined(openbsd) or defined(dragonfly) or + defined(linux) or defined(windows): - signalHandle = - block: - let res = addSignal2(signal, continuation) + proc waitSignal*(signal: int): Future[void] {.raises: [Defect].} = + var retFuture = newFuture[void]("chronos.waitSignal()") + var signalHandle: Opt[SignalHandle] + + template getSignalException(e: OSErrorCode): untyped = + newException(AsyncError, "Could not manipulate signal handler, " & + "reason [" & $int(e) & "]: " & osErrorMsg(e)) + + proc continuation(udata: pointer) {.gcsafe.} = + if not(retFuture.finished()): + if signalHandle.isSome(): + let res = removeSignal2(signalHandle.get()) if res.isErr(): retFuture.fail(getSignalException(res.error())) - Opt.some(res.get()) + else: + retFuture.complete() - retFuture.cancelCallback = cancellation - retFuture + proc cancellation(udata: pointer) {.gcsafe.} = + 
if not(retFuture.finished()): + if signalHandle.isSome(): + let res = removeSignal2(signalHandle.get()) + if res.isErr(): + retFuture.fail(getSignalException(res.error())) + + signalHandle = + block: + let res = addSignal2(signal, continuation) + if res.isErr(): + retFuture.fail(getSignalException(res.error())) + Opt.some(res.get()) + + retFuture.cancelCallback = cancellation + retFuture proc sleepAsync*(duration: Duration): Future[void] = ## Suspends the execution of the current async procedure for the next diff --git a/chronos/handles.nim b/chronos/handles.nim index ee12ab1fe..65b31a439 100644 --- a/chronos/handles.nim +++ b/chronos/handles.nim @@ -15,7 +15,7 @@ else: import "."/[asyncloop, osdefs, osutils] import stew/results from nativesockets import Domain, Protocol, SockType, toInt -export Domain, Protocol, SockType, results +export Domain, Protocol, SockType, results, osutils const asyncInvalidSocket* = AsyncFD(osdefs.INVALID_SOCKET) diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 39bd947da..92773d072 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -61,6 +61,12 @@ when defined(windows): INADDR_BROADCAST* = 0xffff_ffff'u32 INADDR_NONE* = 0xffff_ffff'u32 + CTRL_C_EVENT* = 0'u32 + CTRL_BREAK_EVENT* = 1'u32 + CTRL_CLOSE_EVENT* = 2'u32 + CTRL_LOGOFF_EVENT* = 5'u32 + CTRL_SHUTDOWN_EVENT* = 6'u32 + WAIT_ABANDONED* = 0x80'u32 WAIT_OBJECT_0* = 0x00'u32 WAIT_TIMEOUT* = 0x102'u32 @@ -299,7 +305,10 @@ when defined(windows): POVERLAPPED_COMPLETION_ROUTINE* = proc (para1: DWORD, para2: DWORD, para3: POVERLAPPED) {. - stdcall, gcsafe, raises: [].} + stdcall, gcsafe, raises: [].} + + PHANDLER_ROUTINE* = proc (dwCtrlType: DWORD): WINBOOL {. + stdcall, gcsafe, raises: [Defect].} OSVERSIONINFO* {.final, pure.} = object dwOSVersionInfoSize*: DWORD @@ -494,6 +503,8 @@ when defined(windows): dwMilliseconds: DWORD, fAlertable: WINBOOL): WINBOOL {. stdcall, gcsafe, raises: [].} + WindowsSigHandler = proc (a: cint) {.noconv, raises: [], gcsafe.} + proc getVersionEx*(lpVersionInfo: ptr OSVERSIONINFO): WINBOOL {. stdcall, dynlib: "kernel32", importc: "GetVersionExW", sideEffect.} @@ -593,6 +604,9 @@ when defined(windows): proc getCurrentProcess*(): HANDLE {. stdcall, dynlib: "kernel32", importc: "GetCurrentProcess", sideEffect.} + proc getCurrentProcessId*(): DWORD {. + stdcall, dynlib: "kernel32", importc: "GetCurrentProcessId", sideEffect.} + proc getSystemTimeAsFileTime*(lpSystemTimeAsFileTime: var FILETIME) {. stdcall, dynlib: "kernel32", importc: "GetSystemTimeAsFileTime", sideEffect.} @@ -710,7 +724,7 @@ when defined(windows): proc createEvent*(lpEventAttributes: ptr SECURITY_ATTRIBUTES, bManualReset: DWORD, bInitialState: DWORD, - lpName: LPWSTR): HANDLE {. + lpName: ptr WCHAR): HANDLE {. stdcall, dynlib: "kernel32", importc: "CreateEventW", sideEffect.} proc setEvent*(hEvent: HANDLE): WINBOOL {. @@ -811,9 +825,37 @@ when defined(windows): proc rtlNtStatusToDosError*(code: uint64): ULONG {. stdcall, dynlib: "ntdll", importc: "RtlNtStatusToDosError", sideEffect.} + proc getConsoleCP*(): UINT {. + stdcall, dynlib: "kernel32", importc: "GetConsoleCP", sideEffect.} + + proc setConsoleCtrlHandler*(handleRoutine: PHANDLER_ROUTINE, + add: WINBOOL): WINBOOL {. + stdcall, dynlib: "kernel32", importc: "SetConsoleCtrlHandler", + sideEffect.} + + proc generateConsoleCtrlEvent*(dwCtrlEvent: DWORD, + dwProcessGroupId: DWORD): WINBOOL {. 
+ stdcall, dynlib: "kernel32", importc: "GenerateConsoleCtrlEvent", + sideEffect.} + proc `==`*(x, y: SocketHandle): bool {.borrow.} proc `==`*(x, y: HANDLE): bool {.borrow.} + proc c_signal*(sign: cint, handler: WindowsSigHandler): WindowsSigHandler {. + importc: "signal", header: "", raises: [], sideEffect.} + + const + SIGABRT* = cint(22) + SIGINT* = cint(2) + SIGQUIT* = cint(3) + SIGTERM* = cint(15) + SIGFPE* = cint(8) + SIGILL* = cint(4) + SIGSEGV* = cint(11) + SIG_DFL* = cast[WindowsSigHandler](0) + SIG_IGN* = cast[WindowsSigHandler](1) + SIG_ERR* = cast[WindowsSigHandler](-1) + proc getSecurityAttributes*(inheritHandle = false): SECURITY_ATTRIBUTES = SECURITY_ATTRIBUTES( nLength: DWORD(sizeof(SECURITY_ATTRIBUTES)), diff --git a/chronos/oserrno.nim b/chronos/oserrno.nim index 5cacb223c..4f1c7658c 100644 --- a/chronos/oserrno.nim +++ b/chronos/oserrno.nim @@ -1315,6 +1315,8 @@ elif defined(windows): ERROR_FILE_NOT_FOUND* = OSErrorCode(2) ERROR_TOO_MANY_OPEN_FILES* = OSErrorCode(4) ERROR_ACCESS_DENIED* = OSErrorCode(5) + ERROR_ALREADY_EXISTS* = OSErrorCode(183) + ERROR_NOT_SUPPORTED* = OSErrorCode(50) ERROR_BROKEN_PIPE* = OSErrorCode(109) ERROR_BUFFER_OVERFLOW* = OSErrorCode(111) ERROR_PIPE_BUSY* = OSErrorCode(231) diff --git a/chronos/osutils.nim b/chronos/osutils.nim index 2ff1072aa..40f76a354 100644 --- a/chronos/osutils.nim +++ b/chronos/osutils.nim @@ -18,7 +18,12 @@ else: when defined(windows) or defined(nimdoc): import stew/base10 - const PipeHeaderName* = r"\\.\pipe\LOCAL\chronos\" + const + PipeHeaderName* = r"\\.\pipe\LOCAL\chronos\" + SignalPrefixName* = cstring(r"Local\chronos-events-") + MaxSignalEventLength* = 64 + MaxSignalSuffixLength* = MaxSignalEventLength - + (len(SignalPrefixName) + Base10.maxLen(uint64) + 2) type DescriptorFlag* {.pure.} = enum @@ -74,6 +79,26 @@ when defined(windows): proc closeFd*(s: HANDLE): int = if osdefs.closeHandle(s) == TRUE: 0 else: -1 + proc toWideBuffer*(s: openArray[char], + d: var openArray[WCHAR]): Result[int, OSErrorCode] = + if len(s) == 0: return ok(0) + let res = multiByteToWideChar(CP_UTF8, 0'u32, unsafeAddr s[0], cint(-1), + addr d[0], cint(len(d))) + if res == 0: + err(osLastError()) + else: + ok(res) + + proc toMultibyteBuffer*(s: openArray[WCHAR], + d: var openArray[char]): Result[int, OSErrorCode] = + if len(s) == 0: return ok(0) + let res = wideCharToMultiByte(CP_UTF8, 0'u32, unsafeAddr s[0], cint(-1), + addr d[0], cint(len(d)), nil, nil) + if res == 0: + err(osLastError()) + else: + ok(res) + proc toWideString*(s: string): Result[LPWSTR, OSErrorCode] = if len(s) == 0: ok(cast[LPWSTR](alloc0(sizeof(WCHAR)))) @@ -209,6 +234,96 @@ when defined(windows): return err(errorCode) ok((read: pipeIn, write: pipeOut)) + proc getSignalName*(signal: int): cstring = + ## Convert Windows SIGNAL identifier to string representation. + ## + ## This procedure supports only SIGINT, SIGTERM and SIGQUIT values. + case signal + of SIGINT: cstring("sigint") + of SIGTERM: cstring("sigterm") + of SIGQUIT: cstring("sigquit") + else: + raiseAssert "Signal is not supported" + + proc getEventPath*(suffix: cstring): array[MaxSignalEventLength, WCHAR] = + ## Create Windows' Event object name suffixed by ``suffix``. This name + ## is create in local session namespace with name like this: + ## ``Local\chronos-events--``. + ## + ## This procedure is GC-free, so it could be used in other threads. 
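Spelled out with ordinary string operations (illustration only, with chronos/osdefs and chronos/osutils in scope; the routine below writes into fixed buffers precisely to stay GC-free), the name assembled for, say, SIGINT is:

    # GC-using equivalent of what getEventPath(getSignalName(SIGINT)) produces:
    let name = $SignalPrefixName & $getCurrentProcessId() & "-" & $getSignalName(SIGINT)
    # e.g. r"Local\chronos-events-4242-sigint" for a (hypothetical) process id 4242
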
+ doAssert(len(suffix) < MaxSignalSuffixLength) + var + resMc: array[MaxSignalEventLength, char] + resWc: array[MaxSignalEventLength, WCHAR] + + var offset = 0 + let + pid = osdefs.getCurrentProcessId() + pid10 = Base10.toBytes(uint64(pid)) + copyMem(addr resMc[offset], SignalPrefixName, len(SignalPrefixName)) + offset += len(SignalPrefixName) + copyMem(addr resMc[offset], unsafeAddr pid10.data[0], pid10.len) + offset += pid10.len + resMc[offset] = '-' + offset += 1 + copyMem(addr resMc[offset], suffix, len(suffix)) + offset += len(suffix) + resMc[offset] = '\x00' + let res = toWideBuffer(resMc, resWc) + if res.isErr(): + raiseAssert "Invalid suffix value, got " & osErrorMsg(res.error()) + resWc + + proc raiseEvent(suffix: cstring): Result[bool, OSErrorCode] = + var sa = getSecurityAttributes() + let + eventName = getEventPath(suffix) + # We going to fire event, so we can try to create it already signaled. + event = createEvent(addr sa, FALSE, TRUE, unsafeAddr eventName[0]) + errorCode = osLastError() + + if event == HANDLE(0): + err(errorCode) + else: + if errorCode == ERROR_ALREADY_EXISTS: + let res = setEvent(event) + if res == FALSE: + err(osLastError()) + else: + ok(true) + else: + ok(false) + + proc raiseSignal*(signal: cint): Result[bool, OSErrorCode] = + ## This is helper procedure which could help to raise Unix signals in + ## Windows GUI / Service application. Console applications are handled + ## automatically. + ## + ## This procedure does not use Nim's GC, so it can be placed in any handler + ## of your application even in code which is running in different thread. + raiseEvent(getSignalName(signal)) + + proc raiseConsoleCtrlSignal*(groupId = 0'u32): Result[void, OSErrorCode] = + ## Raise CTRL+C event in current console. + if generateConsoleCtrlEvent(CTRL_C_EVENT, groupId) == FALSE: + err(osLastError()) + else: + ok() + + proc openEvent*(suffix: string): Result[HANDLE, OSErrorCode] = + ## Open or create Windows event object with suffix ``suffix``. + var sa = getSecurityAttributes() + let + # We going to wait for created event, so we don't need to create it in + # signaled state. + eventName = getEventPath(suffix) + event = createEvent(addr sa, FALSE, FALSE, unsafeAddr eventName[0]) + if event == HANDLE(0): + let errorCode = osLastError() + err(errorCode) + else: + ok(event) + else: template handleEintr*(body: untyped): untyped = diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 1b28b1deb..260d21eed 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -1896,7 +1896,7 @@ proc close*(server: StreamServer) = proc closeWait*(server: StreamServer): Future[void] = ## Close server ``server`` and release all resources. server.close() - result = server.join() + server.join() proc createStreamServer*(host: TransportAddress, cbproc: StreamCallback, @@ -2055,7 +2055,7 @@ proc createStreamServer*(host: TransportAddress, addr slen) != 0: let err = osLastError() if sock == asyncInvalidSocket: - serverSocket.closeSocket() + discard unregisterAndCloseFd(serverSocket) raiseTransportOsError(err) fromSAddr(addr saddr, slen, localAddress) @@ -2150,7 +2150,7 @@ proc createStreamServer*[T](host: TransportAddress, proc getUserData*[T](server: StreamServer): T {.inline.} = ## Obtain user data stored in ``server`` object. 
- result = cast[T](server.udata) + cast[T](server.udata) template fastWrite(transp: auto, pbytes: var ptr byte, rbytes: var int, nbytes: int) = diff --git a/tests/testsignal.nim b/tests/testsignal.nim index 0bcf79333..4114809ff 100644 --- a/tests/testsignal.nim +++ b/tests/testsignal.nim @@ -6,7 +6,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import unittest2 -import ../chronos +import ../chronos, ../chronos/oserrno {.used.} @@ -14,57 +14,76 @@ when not defined(windows): import posix suite "Signal handling test suite": - when not defined(windows): + proc testSignal(signal, value: int): Future[bool] {.async.} = var signalCounter = 0 - sigfd: SignalHandle + sigFd: SignalHandle + handlerFut = newFuture[void]("signal.handler") - proc signalProc(udata: pointer) = + proc signalHandler(udata: pointer) {.gcsafe.} = signalCounter = cast[int](udata) - try: - removeSignal(sigfd) - except Exception as exc: - raiseAssert exc.msg + let res = removeSignal2(sigFd) + if res.isErr(): + handlerFut.fail(newException(ValueError, osErrorMsg(res.error()))) + else: + handlerFut.complete() - proc asyncProc() {.async.} = - await sleepAsync(500.milliseconds) + sigFd = + block: + let res = addSignal2(signal, signalHandler, cast[pointer](value)) + if res.isErr(): + raiseAssert osErrorMsg(res.error()) + res.get() - proc test(signal, value: int): bool = - try: - sigfd = addSignal(signal, signalProc, cast[pointer](value)) - except Exception as exc: - raiseAssert exc.msg - var fut = asyncProc() + when defined(windows): + discard raiseSignal(cint(signal)) + else: discard posix.kill(posix.getpid(), cint(signal)) - waitFor(fut) - signalCounter == value - proc testWait(signal: int): bool = - var fut = waitSignal(signal) + await handlerFut.wait(5.seconds) + return signalCounter == value + + proc testWait(signal: int): Future[bool] {.async.} = + var fut = waitSignal(signal) + when defined(windows): + discard raiseSignal(cint(signal)) + else: discard posix.kill(posix.getpid(), cint(signal)) - waitFor(fut) - true + await fut.wait(5.seconds) + return true + + when defined(windows): + proc testCtrlC(): Future[bool] {.async, used.} = + var fut = waitSignal(SIGINT) + let res = raiseConsoleCtrlSignal() + if res.isErr(): + raiseAssert osErrorMsg(res.error()) + await fut.wait(5.seconds) + return true test "SIGINT test": - when not defined(windows): - check test(SIGINT, 31337) == true - else: - skip() + let res = waitFor testSignal(SIGINT, 31337) + check res == true test "SIGTERM test": - when defined(windows): - skip() - else: - check test(SIGTERM, 65537) == true + let res = waitFor testSignal(SIGTERM, 65537) + check res == true test "waitSignal(SIGINT) test": - when defined(windows): - skip() - else: - check testWait(SIGINT) == true + let res = waitFor testWait(SIGINT) + check res == true test "waitSignal(SIGTERM) test": - when defined(windows): - skip() - else: - check testWait(SIGTERM) == true + let res = waitFor testWait(SIGTERM) + check res == true + + # This test doesn't work well in test suite, because it generates CTRL+C + # event in Windows console, parent process receives this signal and stops + # test suite execution. + + # test "Windows [CTRL+C] test": + # when defined(windows): + # let res = waitFor testCtrlC() + # check res == true + # else: + # skip() From 6e80f16e0cd9dc4c4728f3996e616b75c3561ee2 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sat, 3 Jun 2023 19:36:06 +0300 Subject: [PATCH 028/146] Fix recently introduced imports poisoning. (#400) * Fix import poison introduced. 
* Move export to handles, and annotate it. --- chronos/handles.nim | 5 ++++- chronos/osutils.nim | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/chronos/handles.nim b/chronos/handles.nim index 65b31a439..1dcb29db0 100644 --- a/chronos/handles.nim +++ b/chronos/handles.nim @@ -15,7 +15,10 @@ else: import "."/[asyncloop, osdefs, osutils] import stew/results from nativesockets import Domain, Protocol, SockType, toInt -export Domain, Protocol, SockType, results, osutils +export Domain, Protocol, SockType, results + +when defined(windows): + export raiseSignal, raiseConsoleCtrlSignal const asyncInvalidSocket* = AsyncFD(osdefs.INVALID_SOCKET) diff --git a/chronos/osutils.nim b/chronos/osutils.nim index 40f76a354..2fd3928e3 100644 --- a/chronos/osutils.nim +++ b/chronos/osutils.nim @@ -9,7 +9,7 @@ import stew/results import osdefs, oserrno -export results, osdefs, oserrno +export results when (NimMajor, NimMinor) < (1, 4): {.push raises: [Defect].} From ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Sun, 4 Jun 2023 09:46:04 +0200 Subject: [PATCH 029/146] version 3.2.0 (#399) last version to support Nim 1.2, 1.4 - may receive critical backports but otherwise this is EOL for said Nim versions --- chronos.nimble | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos.nimble b/chronos.nimble index dfb343c70..6483d845f 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -1,7 +1,7 @@ mode = ScriptMode.Verbose packageName = "chronos" -version = "3.1.0" +version = "3.2.0" author = "Status Research & Development GmbH" description = "Networking framework with async/await support" license = "MIT or Apache License 2.0" From 2c7774d9828f3b889d63af2a277eecbf0b3134ab Mon Sep 17 00:00:00 2001 From: tersec Date: Sun, 4 Jun 2023 07:51:02 +0000 Subject: [PATCH 030/146] drop support for Nim 1.2, 1.4, add Nim 2.0 to CI testing matrix (#378) * add Nim 2.0 to CI testing matrix * Deprecate 1.2 and 1.4 testing branches. 
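Taken together, the signal emulation from the preceding patches and the re-export fix in PATCH 028 mean that a plain `import chronos` is enough for application code to raise and await POSIX-style signals on both platforms. A minimal usage sketch follows (hypothetical application code, not taken from these patches; it assumes `waitSignal`, `raiseSignal` and the exported `SIGTERM` constant behave as shown in the diffs and in tests/testsignal.nim above):

    import chronos
    when not defined(windows):
      import posix

    proc selfTerminate() =
      # Deliver SIGTERM to the current process. On Windows this goes through
      # the named-event emulation (raiseSignal); on POSIX it is a plain kill().
      when defined(windows):
        discard raiseSignal(SIGTERM)
      else:
        discard posix.kill(posix.getpid(), SIGTERM)

    proc main() {.async.} =
      let sigFut = waitSignal(SIGTERM)   # completes once SIGTERM is observed
      selfTerminate()
      await sigFut.wait(5.seconds)

    waitFor main()

This is the same pattern the reworked tests/testsignal.nim exercises; on Windows console applications, `raiseConsoleCtrlSignal()` is available as an alternative to `raiseSignal`.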
--------- Co-authored-by: cheatfate --- .github/workflows/ci.yml | 2 +- chronos.nimble | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 019f9954c..b78f2a121 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,7 +26,7 @@ jobs: cpu: amd64 #- os: windows #cpu: i386 - branch: [version-1-2, version-1-4, version-1-6, devel] + branch: [version-1-6, version-2-0, devel] include: - target: os: linux diff --git a/chronos.nimble b/chronos.nimble index 6483d845f..6b4ac58a8 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -36,8 +36,10 @@ task test, "Run all tests": "-d:debug -d:chronosPreviewV4", "-d:debug -d:chronosDebug -d:useSysAssert -d:useGcAssert", "-d:release", - "-d:release -d:chronosPreviewV4", - ]: run args, "tests/testall" + "-d:release -d:chronosPreviewV4"]: + run args, "tests/testall" + if (NimMajor, NimMinor) > (1, 6): + run args & " --mm:refc", "tests/testall" task test_libbacktrace, "test with libbacktrace": var allArgs = @[ From 157ca4fea57498933418f9b47b91a3a826f6204a Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 5 Jun 2023 13:02:13 +0200 Subject: [PATCH 031/146] Support implicit returns (#401) * based on https://github.com/nim-lang/Nim/pull/21898 * also fixes generic Future[T] where T ends up being `void` --- chronos/asyncmacro2.nim | 208 +++++++++++++++++++++------------------- tests/testmacro.nim | 72 +++++++++++++- 2 files changed, 180 insertions(+), 100 deletions(-) diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim index f5b3570ce..a7fc3ba72 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/asyncmacro2.nim @@ -9,46 +9,73 @@ import std/[macros] -proc skipUntilStmtList(node: NimNode): NimNode {.compileTime.} = - # Skips a nest of StmtList's. - if node[0].kind == nnkStmtList: - skipUntilStmtList(node[0]) +# `quote do` will ruin line numbers so we avoid it using these helpers +proc completeWithResult(fut, baseType: NimNode): NimNode {.compileTime.} = + # when `baseType` is void: + # complete(`fut`) + # else: + # complete(`fut`, result) + if baseType.eqIdent("void"): + # Shortcut if we know baseType at macro expansion time + newCall(ident "complete", fut) else: - node + # `baseType` might be generic and resolve to `void` + nnkWhenStmt.newTree( + nnkElifExpr.newTree( + nnkInfix.newTree(ident "is", baseType, ident "void"), + newCall(ident "complete", fut) + ), + nnkElseExpr.newTree( + newCall(ident "complete", fut, ident "result") + ) + ) + +proc completeWithNode(fut, baseType, node: NimNode): NimNode {.compileTime.} = + # when typeof(`node`) is void: + # `node` # statement / explicit return + # -> completeWithResult(fut, baseType) + # else: # expression / implicit return + # complete(`fut`, `node`) + if node.kind == nnkEmpty: # shortcut when known at macro expanstion time + completeWithResult(fut, baseType) + else: + # Handle both expressions and statements - since the type is not know at + # macro expansion time, we delegate this choice to a later compilation stage + # with `when`. 
+ nnkWhenStmt.newTree( + nnkElifExpr.newTree( + nnkInfix.newTree( + ident "is", nnkTypeOfExpr.newTree(node), ident "void"), + newStmtList( + node, + completeWithResult(fut, baseType) + ) + ), + nnkElseExpr.newTree( + newCall(ident "complete", fut, node) + ) + ) -proc processBody(node, retFutureSym: NimNode, - baseTypeIsVoid: bool): NimNode {.compileTime.} = +proc processBody(node, fut, baseType: NimNode): NimNode {.compileTime.} = #echo(node.treeRepr) - result = node case node.kind of nnkReturnStmt: - result = newNimNode(nnkStmtList, node) - - # As I've painfully found out, the order here really DOES matter. - if node[0].kind == nnkEmpty: - if not baseTypeIsVoid: - result.add newCall(newIdentNode("complete"), retFutureSym, - newIdentNode("result")) - else: - result.add newCall(newIdentNode("complete"), retFutureSym) - else: - let x = node[0].processBody(retFutureSym, baseTypeIsVoid) - if x.kind == nnkYieldStmt: result.add x - else: - result.add newCall(newIdentNode("complete"), retFutureSym, x) + let + res = newNimNode(nnkStmtList, node) + res.add completeWithNode(fut, baseType, processBody(node[0], fut, baseType)) + res.add newNimNode(nnkReturnStmt, node).add(newNilLit()) - result.add newNimNode(nnkReturnStmt, node).add(newNilLit()) - return # Don't process the children of this return stmt + res of RoutineNodes-{nnkTemplateDef}: # skip all the nested procedure definitions - return node - else: discard - - for i in 0 ..< result.len: - # We must not transform nested procedures of any form, otherwise - # `retFutureSym` will be used for all nested procedures as their own - # `retFuture`. - result[i] = processBody(result[i], retFutureSym, baseTypeIsVoid) + node + else: + for i in 0 ..< node.len: + # We must not transform nested procedures of any form, otherwise + # `fut` will be used for all nested procedures as their own + # `retFuture`. + node[i] = processBody(node[i], fut, baseType) + node proc getName(node: NimNode): string {.compileTime.} = case node.kind @@ -63,10 +90,6 @@ proc getName(node: NimNode): string {.compileTime.} = else: error("Unknown name.") -proc verifyReturnType(typeName: string) {.compileTime.} = - if typeName != "Future": - error("Expected return type of 'Future' got '" & typeName & "'") - macro unsupported(s: static[string]): untyped = error s @@ -95,22 +118,25 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = ## The ``async`` macro supports a stmtList holding multiple async procedures. if prc.kind notin {nnkProcTy, nnkProcDef, nnkLambda, nnkMethodDef, nnkDo}: error("Cannot transform " & $prc.kind & " into an async proc." 
& - " proc/method definition or lambda node expected.") + " proc/method definition or lambda node expected.", prc) let returnType = cleanupOpenSymChoice(prc.params2[0]) # Verify that the return type is a Future[T] let baseType = - if returnType.kind == nnkBracketExpr: - let fut = repr(returnType[0]) - verifyReturnType(fut) - returnType[1] - elif returnType.kind == nnkEmpty: - ident("void") + if returnType.kind == nnkEmpty: + ident "void" + elif not ( + returnType.kind == nnkBracketExpr and eqIdent(returnType[0], "Future")): + error( + "Expected return type of 'Future' got '" & repr(returnType) & "'", prc) + return else: - raiseAssert("Unhandled async return type: " & $prc.kind) + returnType[1] - let baseTypeIsVoid = baseType.eqIdent("void") + let + baseTypeIsVoid = baseType.eqIdent("void") + futureVoidType = nnkBracketExpr.newTree(ident "Future", ident "void") if prc.kind in {nnkProcDef, nnkLambda, nnkMethodDef, nnkDo}: let @@ -124,63 +150,50 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = let internalFutureSym = ident "chronosInternalRetFuture" internalFutureType = - if baseTypeIsVoid: - newNimNode(nnkBracketExpr, prc).add(newIdentNode("Future")).add(newIdentNode("void")) + if baseTypeIsVoid: futureVoidType else: returnType - castFutureSym = quote do: - cast[`internalFutureType`](`internalFutureSym`) - procBody = prc.body.processBody(castFutureSym, baseTypeIsVoid) + castFutureSym = nnkCast.newTree(internalFutureType, internalFutureSym) + + procBody = prc.body.processBody(castFutureSym, baseType) # don't do anything with forward bodies (empty) if procBody.kind != nnkEmpty: - # fix #13899, `defer` should not escape its original scope - let procBodyBlck = - newStmtList(newTree(nnkBlockStmt, newEmptyNode(), procBody)) - - # Avoid too much quote do to not lose original line numbers - let closureBody = if baseTypeIsVoid: - let resultTemplate = quote do: - template result: auto {.used.} = - {.fatal: "You should not reference the `result` variable inside" & - " a void async proc".} - # -> complete(chronosInternalRetFuture) - let complete = - newCall(newIdentNode("complete"), castFutureSym) - - newStmtList(resultTemplate, procBodyBlck, complete) - else: - # -> iterator nameIter(chronosInternalRetFuture: Future[T]): FutureBase {.closure.} = - # -> {.push warning[resultshadowed]: off.} - # -> var result: T - # -> {.pop.} - # -> - # -> complete(chronosInternalRetFuture, result) - newStmtList( - # -> {.push warning[resultshadowed]: off.} - newNimNode(nnkPragma).add(newIdentNode("push"), - newNimNode(nnkExprColonExpr).add(newNimNode(nnkBracketExpr).add( - newIdentNode("warning"), newIdentNode("resultshadowed")), - newIdentNode("off"))), - - # -> var result: T - newNimNode(nnkVarSection, prc.body).add( - newIdentDefs(newIdentNode("result"), baseType)), - - # -> {.pop.}) - newNimNode(nnkPragma).add( - newIdentNode("pop")), + let + # fix #13899, `defer` should not escape its original scope + procBodyBlck = nnkBlockStmt.newTree(newEmptyNode(), procBody) + + resultDecl = nnkWhenStmt.newTree( + # when `baseType` is void: + nnkElifExpr.newTree( + nnkInfix.newTree(ident "is", baseType, ident "void"), + quote do: + template result: auto {.used.} = + {.fatal: "You should not reference the `result` variable inside" & + " a void async proc".} + ), + # else: + nnkElseExpr.newTree( + newStmtList( + quote do: {.push warning[resultshadowed]: off.}, + # var result: `baseType` + nnkVarSection.newTree( + nnkIdentDefs.newTree(ident "result", baseType, newEmptyNode())), + quote do: {.pop.}, + ) + ) + ) - 
procBodyBlck, + completeDecl = completeWithNode(castFutureSym, baseType, procBodyBlck) - # -> complete(chronosInternalRetFuture, result) - newCall(newIdentNode("complete"), - castFutureSym, newIdentNode("result"))) + closureBody = newStmtList(resultDecl, completeDecl) - let - internalFutureParameter = nnkIdentDefs.newTree(internalFutureSym, newIdentNode("FutureBase"), newEmptyNode()) + internalFutureParameter = nnkIdentDefs.newTree( + internalFutureSym, newIdentNode("FutureBase"), newEmptyNode()) iteratorNameSym = genSym(nskIterator, $prcName) - closureIterator = newProc(iteratorNameSym, [newIdentNode("FutureBase"), internalFutureParameter], - closureBody, nnkIteratorDef) + closureIterator = newProc( + iteratorNameSym, + [newIdentNode("FutureBase"), internalFutureParameter], + closureBody, nnkIteratorDef) iteratorNameSym.copyLineInfo(prc) @@ -221,6 +234,7 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = # doesn't reference it, avoid cyclic ref (#203) let retFutureSym = ident "resultFuture" + retFutureSym.copyLineInfo(prc) # Do not change this code to `quote do` version because `instantiationInfo` # will be broken for `newFuture()` call. outerProcBody.add( @@ -230,7 +244,6 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = newLit(prcName)) ) ) - # -> resultFuture.closure = iterator outerProcBody.add( newAssignment( @@ -266,10 +279,7 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = if baseTypeIsVoid: if returnType.kind == nnkEmpty: # Add Future[void] - prc.params2[0] = - newNimNode(nnkBracketExpr, prc) - .add(newIdentNode("Future")) - .add(newIdentNode("void")) + prc.params2[0] = futureVoidType prc diff --git a/tests/testmacro.nim b/tests/testmacro.nim index 680a2ba62..2526c5dea 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -5,8 +5,8 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) +import std/[macros, strutils] import unittest2 -import macros import ../chronos {.used.} @@ -123,6 +123,76 @@ suite "Macro transformations test suite": macroAsync2(testMacro2, seq, Opt, Result, OpenObject, cstring) check waitFor(testMacro2()).len == 0 + test "Future with generics": + proc gen(T: typedesc): Future[T] {.async.} = + proc testproc(): Future[T] {.async.} = + when T is void: + return + else: + return default(T) + await testproc() + + waitFor gen(void) + check: + waitFor(gen(int)) == default(int) + + test "Implicit return": + proc implicit(): Future[int] {.async.} = + 42 + + proc implicit2(): Future[int] {.async.} = + block: + 42 + + proc implicit3(): Future[int] {.async.} = + try: + parseInt("error") + except ValueError: + 42 + + proc implicit4(v: bool): Future[int] {.async.} = + case v + of false: 5 + of true: 42 + + proc implicit5(v: bool): Future[int] {.async.} = + if v: 42 + else: 5 + + proc implicit6(v: ref int): Future[int] {.async.} = + try: + parseInt("error") + except ValueError: + 42 + finally: + v[] = 42 + + proc implicit7(v: bool): Future[int] {.async.} = + case v + of false: return 33 + of true: 42 + + proc implicit8(v: bool): Future[int] {.async.} = + case v + of false: await implicit7(v) + of true: 42 + + let fin = new int + check: + waitFor(implicit()) == 42 + waitFor(implicit2()) == 42 + waitFor(implicit3()) == 42 + waitFor(implicit4(true)) == 42 + waitFor(implicit5(true)) == 42 + waitFor(implicit5(false)) == 5 + waitFor(implicit6(fin)) == 42 + fin[] == 42 + waitFor(implicit7(true)) == 42 + waitFor(implicit7(false)) == 33 + + waitFor(implicit8(true)) == 42 + 
waitFor(implicit8(false)) == 33 + suite "Closure iterator's exception transformation issues": test "Nested defer/finally not called on return": # issue #288 From a6ac5f2213a0720e6d79db89d7e3f89619619769 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 5 Jun 2023 13:03:38 +0200 Subject: [PATCH 032/146] exception warning fixes (#403) --- chronos/asyncfutures2.nim | 37 ++++++++++++++++++++++++------------ chronos/asyncloop.nim | 2 +- chronos/transports/ipnet.nim | 6 +++--- tests/testdatagram.nim | 2 +- tests/testhttpclient.nim | 10 +++++----- tests/testnet.nim | 2 +- tests/teststream.nim | 4 ++-- 7 files changed, 38 insertions(+), 25 deletions(-) diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index 37d205142..805f40c20 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -395,7 +395,7 @@ proc futureContinue*(fut: FutureBase) {.raises: [Defect], gcsafe.} = # Every call to an `{.async.}` proc is redirected to call this function # instead with its original body captured in `fut.closure`. var next: FutureBase - try: + template iterate = while true: # Call closure to make progress on `fut` until it reaches `yield` (inside # `await` typically) or completes / fails / is cancelled @@ -417,17 +417,30 @@ proc futureContinue*(fut: FutureBase) {.raises: [Defect], gcsafe.} = return # Continue while the yielded future is already finished. - except CancelledError: - fut.cancelAndSchedule() - except CatchableError as exc: - fut.fail(exc) - except Exception as exc: - if exc of Defect: - raise (ref Defect)(exc) - - fut.fail((ref ValueError)(msg: exc.msg, parent: exc)) - finally: - next = nil # GC hygiene + + when chronosStrictException: + try: + iterate + except CancelledError: + fut.cancelAndSchedule() + except CatchableError as exc: + fut.fail(exc) + finally: + next = nil # GC hygiene + else: + try: + iterate + except CancelledError: + fut.cancelAndSchedule() + except CatchableError as exc: + fut.fail(exc) + except Exception as exc: + if exc of Defect: + raise (ref Defect)(exc) + + fut.fail((ref ValueError)(msg: exc.msg, parent: exc)) + finally: + next = nil # GC hygiene # `futureContinue` will not be called any more for this future so we can # clean it up diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index 8c9b62612..cb95d8f7e 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -1498,7 +1498,7 @@ proc wait*[T](fut: Future[T], timeout = -1): Future[T] {. include asyncmacro2 -proc runForever*() {.raises: [Defect, CatchableError].} = +proc runForever*() = ## Begins a never ending global dispatcher poll loop. ## Raises different exceptions depending on the platform. while true: diff --git a/chronos/transports/ipnet.nim b/chronos/transports/ipnet.nim index 1d61cb6df..130e8fa00 100644 --- a/chronos/transports/ipnet.nim +++ b/chronos/transports/ipnet.nim @@ -408,7 +408,7 @@ proc init*(t: typedesc[IpNet], network: string): IpNet {. if len(parts) > 1: try: prefix = parseInt(parts[1]) - except: + except ValueError: prefix = -1 if prefix == -1: ipaddr = parseIpAddress(parts[1]) @@ -434,8 +434,8 @@ proc init*(t: typedesc[IpNet], network: string): IpNet {. 
result = t.init(host, mask) else: result = t.init(host, prefix) - except: - raise newException(TransportAddressError, "Incorrect network address!") + except ValueError as exc: + raise newException(TransportAddressError, exc.msg) proc `==`*(n1, n2: IpNet): bool {.inline.} = ## Returns ``true`` if networks ``n1`` and ``n2`` are equal in IP family and diff --git a/tests/testdatagram.nim b/tests/testdatagram.nim index 1eea48958..17385a3ff 100644 --- a/tests/testdatagram.nim +++ b/tests/testdatagram.nim @@ -463,7 +463,7 @@ suite "Datagram Transport test suite": try: await wait(dgram.join(), 1.seconds) result = true - except: + except CatchableError: discard proc testBroadcast(): Future[int] {.async.} = diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index 07128d9d7..16f001711 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -913,9 +913,9 @@ suite "HTTP client testing suite": await allFutures(f1, f2) check: f1.finished() - f1.done() + f1.completed() f2.finished() - f2.done() + f2.completed() f1.read() == (200, "ok", 0) f2.read() == (200, "ok", 0) session.connectionsCount == 2 @@ -976,9 +976,9 @@ suite "HTTP client testing suite": await allFutures(f1, f2) check: f1.finished() - f1.done() + f1.completed() f2.finished() - f2.done() + f2.completed() f1.read() == (200, "ok", 0) f2.read() == (200, "ok", 0) session.connectionsCount == 0 @@ -1261,7 +1261,7 @@ suite "HTTP client testing suite": test "HTTP client no-pipeline test": let address = initTAddress("127.0.0.1:30080") check waitFor(testNoPipeline(address)) == true - + test "HTTP client server-sent events test": let address = initTAddress("127.0.0.1:30080") check waitFor(testServerSentEvents(address, false)) == true diff --git a/tests/testnet.nim b/tests/testnet.nim index 419195d83..c6355d4c0 100644 --- a/tests/testnet.nim +++ b/tests/testnet.nim @@ -575,7 +575,7 @@ suite "Network utilities test suite": try: inet = IpNet.init(item[0]) res = true - except: + except TransportAddressError: res = false check: $res == item[1] diff --git a/tests/teststream.nim b/tests/teststream.nim index 416e0a9c1..7601a3975 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -689,7 +689,7 @@ suite "Stream Transport test suite": try: await wait(server.join(), 10.seconds) result = 1 - except: + except CatchableError: discard proc testWriteConnReset(address: TransportAddress): Future[int] {.async.} = @@ -765,7 +765,7 @@ suite "Stream Transport test suite": try: transp = await connect(address) flag = true - except: + except CatchableError: server.stop() server.close() await server.join() From 2ef34c7339c5d8e59f212d7af72a06e0d3e8327c Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 5 Jun 2023 13:47:38 +0200 Subject: [PATCH 033/146] ratelimit: set cancellation earlier (#402) future may be completed by worker before cancellation is set --- chronos/ratelimit.nim | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/chronos/ratelimit.nim b/chronos/ratelimit.nim index 5bf8af9fb..caaf659fe 100644 --- a/chronos/ratelimit.nim +++ b/chronos/ratelimit.nim @@ -102,10 +102,6 @@ proc consume*(bucket: TokenBucket, tokens: int): Future[void] = retFuture.complete() return retFuture - bucket.pendingRequests.add(BucketWaiter(future: retFuture, value: tokens)) - if isNil(bucket.workFuture) or bucket.workFuture.finished(): - bucket.workFuture = worker(bucket) - proc cancellation(udata: pointer) = for index in 0.. 
Date: Mon, 5 Jun 2023 22:21:50 +0200 Subject: [PATCH 034/146] clean up `Defect` (#404) --- chronos/apps/http/httpbodyrw.nim | 8 +-- chronos/apps/http/httpclient.nim | 40 ++++++------ chronos/apps/http/httpcommon.nim | 8 +-- chronos/apps/http/httpserver.nim | 42 ++++++------- chronos/apps/http/httptable.nim | 5 +- chronos/apps/http/multipart.nim | 30 ++++----- chronos/apps/http/shttpserver.nim | 2 +- chronos/asyncfutures2.nim | 32 ++++------ chronos/asyncloop.nim | 77 ++++++++++++------------ chronos/asyncmacro2.nim | 7 +-- chronos/asyncproc.nim | 8 +-- chronos/asyncsync.nim | 53 ++++++++-------- chronos/debugutils.nim | 5 +- chronos/handles.nim | 11 ++-- chronos/ioselects/ioselectors_epoll.nim | 29 ++++----- chronos/ioselects/ioselectors_kqueue.nim | 29 ++++----- chronos/ioselects/ioselectors_poll.nim | 27 ++++----- chronos/osdefs.nim | 2 +- chronos/osutils.nim | 5 +- chronos/ratelimit.nim | 2 +- chronos/sendfile.nim | 5 +- chronos/srcloc.nim | 7 +-- chronos/streams/asyncstream.nim | 23 +++---- chronos/timer.nim | 5 +- chronos/transports/common.nim | 35 +++++------ chronos/transports/datagram.nim | 33 +++++----- chronos/transports/ipnet.nim | 9 +-- chronos/transports/osnet.nim | 17 +++--- chronos/transports/stream.nim | 37 ++++++------ tests/testbugs.nim | 2 +- tests/testsoon.nim | 4 +- 31 files changed, 268 insertions(+), 331 deletions(-) diff --git a/chronos/apps/http/httpbodyrw.nim b/chronos/apps/http/httpbodyrw.nim index ef9060fee..ba2b1d4fa 100644 --- a/chronos/apps/http/httpbodyrw.nim +++ b/chronos/apps/http/httpbodyrw.nim @@ -29,8 +29,8 @@ type opened*: int64 closed*: int64 -proc setupHttpBodyWriterTracker(): HttpBodyTracker {.gcsafe, raises: [Defect].} -proc setupHttpBodyReaderTracker(): HttpBodyTracker {.gcsafe, raises: [Defect].} +proc setupHttpBodyWriterTracker(): HttpBodyTracker {.gcsafe, raises: [].} +proc setupHttpBodyReaderTracker(): HttpBodyTracker {.gcsafe, raises: [].} proc getHttpBodyWriterTracker(): HttpBodyTracker {.inline.} = var res = cast[HttpBodyTracker](getTracker(HttpBodyWriterTrackerName)) @@ -138,7 +138,7 @@ proc closeWait*(bstream: HttpBodyWriter) {.async.} = bstream.bstate = HttpState.Closed untrackHttpBodyWriter(bstream) -proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [Defect].} = +proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [].} = if len(bstream.streams) == 1: # If HttpBodyReader has only one stream it has ``BoundedStreamReader``, in # such case its impossible to get more bytes then expected amount. @@ -155,5 +155,5 @@ proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [Defect].} = false proc closed*(bstream: HttpBodyReader | HttpBodyWriter): bool {. - raises: [Defect].} = + raises: [].} = bstream.bstate != HttpState.Alive diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 1462c53ae..311ff1bb0 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -205,11 +205,11 @@ type # Open -> (Finished, Error) -> (Closing, Closed) proc setupHttpClientConnectionTracker(): HttpClientTracker {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} proc setupHttpClientRequestTracker(): HttpClientTracker {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} proc setupHttpClientResponseTracker(): HttpClientTracker {. 
- gcsafe, raises: [Defect].} + gcsafe, raises: [].} proc getHttpClientConnectionTracker(): HttpClientTracker {.inline.} = var res = cast[HttpClientTracker](getTracker(HttpClientConnectionTrackerName)) @@ -353,7 +353,7 @@ proc new*(t: typedesc[HttpSessionRef], idleTimeout = HttpConnectionIdleTimeout, idlePeriod = HttpConnectionCheckPeriod, socketFlags: set[SocketFlags] = {}): HttpSessionRef {. - raises: [Defect] .} = + raises: [] .} = ## Create new HTTP session object. ## ## ``maxRedirections`` - maximum number of HTTP 3xx redirections @@ -381,7 +381,7 @@ proc new*(t: typedesc[HttpSessionRef], newFuture[void]("session.watcher.placeholder") res -proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] {.raises: [Defect] .} = +proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] {.raises: [] .} = var res: set[TLSFlags] if HttpClientFlag.NoVerifyHost in flags: res.incl(TLSFlags.NoVerifyHost) @@ -390,7 +390,7 @@ proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] {.raises: [Defect] .} = res proc getAddress*(session: HttpSessionRef, url: Uri): HttpResult[HttpAddress] {. - raises: [Defect] .} = + raises: [] .} = let scheme = if len(url.scheme) == 0: HttpClientScheme.NonSecure @@ -454,13 +454,13 @@ proc getAddress*(session: HttpSessionRef, url: Uri): HttpResult[HttpAddress] {. addresses: addresses)) proc getAddress*(session: HttpSessionRef, - url: string): HttpResult[HttpAddress] {.raises: [Defect].} = + url: string): HttpResult[HttpAddress] {.raises: [].} = ## Create new HTTP address using URL string ``url`` and . session.getAddress(parseUri(url)) proc getAddress*(address: TransportAddress, ctype: HttpClientScheme = HttpClientScheme.NonSecure, - queryString: string = "/"): HttpAddress {.raises: [Defect].} = + queryString: string = "/"): HttpAddress {.raises: [].} = ## Create new HTTP address using Transport address ``address``, connection ## type ``ctype`` and query string ``queryString``. let uri = parseUri(queryString) @@ -579,7 +579,7 @@ proc new(t: typedesc[HttpClientConnectionRef], session: HttpSessionRef, res proc setError(request: HttpClientRequestRef, error: ref HttpError) {. - raises: [Defect] .} = + raises: [] .} = request.error = error request.state = HttpReqRespState.Error if not(isNil(request.connection)): @@ -587,7 +587,7 @@ proc setError(request: HttpClientRequestRef, error: ref HttpError) {. request.connection.error = error proc setError(response: HttpClientResponseRef, error: ref HttpError) {. - raises: [Defect] .} = + raises: [] .} = response.error = error response.state = HttpReqRespState.Error if not(isNil(response.connection)): @@ -851,7 +851,7 @@ proc closeWait*(response: HttpClientResponseRef) {.async.} = untrackHttpClientResponse(response) proc prepareResponse(request: HttpClientRequestRef, data: openArray[byte] - ): HttpResult[HttpClientResponseRef] {.raises: [Defect] .} = + ): HttpResult[HttpClientResponseRef] {.raises: [] .} = ## Process response headers. let resp = parseResponse(data, false) if resp.failed(): @@ -990,7 +990,7 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = []): HttpClientRequestRef {. 
- raises: [Defect].} = + raises: [].} = let res = HttpClientRequestRef( state: HttpReqRespState.Ready, session: session, meth: meth, version: version, flags: flags, headers: HttpTable.init(headers), @@ -1005,7 +1005,7 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = []): HttpResult[HttpClientRequestRef] {. - raises: [Defect].} = + raises: [].} = let address = ? session.getAddress(parseUri(url)) let res = HttpClientRequestRef( state: HttpReqRespState.Ready, session: session, meth: meth, @@ -1019,14 +1019,14 @@ proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [] - ): HttpResult[HttpClientRequestRef] {.raises: [Defect].} = + ): HttpResult[HttpClientRequestRef] {.raises: [].} = HttpClientRequestRef.new(session, url, MethodGet, version, flags, headers) proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [] - ): HttpClientRequestRef {.raises: [Defect].} = + ): HttpClientRequestRef {.raises: [].} = HttpClientRequestRef.new(session, ha, MethodGet, version, flags, headers) proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, @@ -1034,7 +1034,7 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = [] - ): HttpResult[HttpClientRequestRef] {.raises: [Defect].} = + ): HttpResult[HttpClientRequestRef] {.raises: [].} = HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers, body) @@ -1043,7 +1043,7 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], body: openArray[char] = []): HttpResult[HttpClientRequestRef] {. - raises: [Defect].} = + raises: [].} = HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers, body.toOpenArrayByte(0, len(body) - 1)) @@ -1052,7 +1052,7 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = []): HttpClientRequestRef {. - raises: [Defect].} = + raises: [].} = HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers, body) @@ -1061,12 +1061,12 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], body: openArray[char] = []): HttpClientRequestRef {. - raises: [Defect].} = + raises: [].} = HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers, body.toOpenArrayByte(0, len(body) - 1)) proc prepareRequest(request: HttpClientRequestRef): string {. 
- raises: [Defect].} = + raises: [].} = template hasChunkedEncoding(request: HttpClientRequestRef): bool = toLowerAscii(request.headers.getString(TransferEncodingHeader)) == "chunked" diff --git a/chronos/apps/http/httpcommon.nim b/chronos/apps/http/httpcommon.nim index 515920e86..cc2478d47 100644 --- a/chronos/apps/http/httpcommon.nim +++ b/chronos/apps/http/httpcommon.nim @@ -118,7 +118,7 @@ template newHttpUseClosedError*(): ref HttpUseClosedError = iterator queryParams*(query: string, flags: set[QueryParamsFlag] = {}): KeyValueTuple {. - raises: [Defect].} = + raises: [].} = ## Iterate over url-encoded query string. for pair in query.split('&'): let items = pair.split('=', maxsplit = 1) @@ -133,7 +133,7 @@ iterator queryParams*(query: string, func getTransferEncoding*(ch: openArray[string]): HttpResult[ set[TransferEncodingFlags]] {. - raises: [Defect].} = + raises: [].} = ## Parse value of multiple HTTP headers ``Transfer-Encoding`` and return ## it as set of ``TransferEncodingFlags``. var res: set[TransferEncodingFlags] = {} @@ -164,7 +164,7 @@ func getTransferEncoding*(ch: openArray[string]): HttpResult[ func getContentEncoding*(ch: openArray[string]): HttpResult[ set[ContentEncodingFlags]] {. - raises: [Defect].} = + raises: [].} = ## Parse value of multiple HTTP headers ``Content-Encoding`` and return ## it as set of ``ContentEncodingFlags``. var res: set[ContentEncodingFlags] = {} @@ -194,7 +194,7 @@ func getContentEncoding*(ch: openArray[string]): HttpResult[ ok(res) func getContentType*(ch: openArray[string]): HttpResult[ContentTypeData] {. - raises: [Defect].} = + raises: [].} = ## Check and prepare value of ``Content-Type`` header. if len(ch) == 0: err("No Content-Type values found") diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index 26c62e6be..1da4b44cf 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -55,12 +55,12 @@ type HttpProcessCallback* = proc(req: RequestFence): Future[HttpResponseRef] {. - gcsafe, raises: [Defect, CatchableError].} + gcsafe, raises: [].} HttpConnectionCallback* = proc(server: HttpServerRef, transp: StreamTransport): Future[HttpConnectionRef] {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} HttpServer* = object of RootObj instance*: StreamServer @@ -133,7 +133,7 @@ type proc init(htype: typedesc[HttpProcessError], error: HttpServerError, exc: ref CatchableError, remote: TransportAddress, - code: HttpCode): HttpProcessError {.raises: [Defect].} = + code: HttpCode): HttpProcessError {.raises: [].} = HttpProcessError(error: error, exc: exc, remote: remote, code: code) proc createConnection(server: HttpServerRef, @@ -153,7 +153,7 @@ proc new*(htype: typedesc[HttpServerRef], httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, maxRequestBodySize: int = 1_048_576): HttpResult[HttpServerRef] {. - raises: [Defect].} = + raises: [].} = let serverUri = if len(serverUri.hostname) > 0: @@ -198,7 +198,7 @@ proc new*(htype: typedesc[HttpServerRef], ) ok(res) -proc getResponse*(req: HttpRequestRef): HttpResponseRef {.raises: [Defect].} = +proc getResponse*(req: HttpRequestRef): HttpResponseRef {.raises: [].} = if req.response.isNone(): var resp = HttpResponseRef( status: Http200, @@ -222,7 +222,7 @@ proc getHostname*(server: HttpServerRef): string = else: server.baseUri.hostname -proc dumbResponse*(): HttpResponseRef {.raises: [Defect].} = +proc dumbResponse*(): HttpResponseRef {.raises: [].} = ## Create an empty response to return when request processor got no request. 
HttpResponseRef(state: HttpResponseState.Dumb, version: HttpVersion11) @@ -233,14 +233,14 @@ proc getId(transp: StreamTransport): Result[string, string] {.inline.} = except TransportOsError as exc: err($exc.msg) -proc hasBody*(request: HttpRequestRef): bool {.raises: [Defect].} = +proc hasBody*(request: HttpRequestRef): bool {.raises: [].} = ## Returns ``true`` if request has body. request.requestFlags * {HttpRequestFlags.BoundBody, HttpRequestFlags.UnboundBody} != {} proc prepareRequest(conn: HttpConnectionRef, req: HttpRequestHeader): HttpResultCode[HttpRequestRef] {. - raises: [Defect].}= + raises: [].}= var request = HttpRequestRef(connection: conn, state: HttpState.Alive) if req.version notin {HttpVersion10, HttpVersion11}: @@ -678,7 +678,7 @@ proc `keepalive=`*(resp: HttpResponseRef, value: bool) = else: resp.flags.excl(HttpResponseFlags.KeepAlive) -proc keepalive*(resp: HttpResponseRef): bool {.raises: [Defect].} = +proc keepalive*(resp: HttpResponseRef): bool {.raises: [].} = HttpResponseFlags.KeepAlive in resp.flags proc processLoop(server: HttpServerRef, transp: StreamTransport, @@ -882,7 +882,7 @@ proc acceptClientLoop(server: HttpServerRef) {.async.} = if breakLoop: break -proc state*(server: HttpServerRef): HttpServerState {.raises: [Defect].} = +proc state*(server: HttpServerRef): HttpServerState {.raises: [].} = ## Returns current HTTP server's state. if server.lifetime.finished(): ServerClosed @@ -944,7 +944,7 @@ proc join*(server: HttpServerRef): Future[void] = retFuture proc getMultipartReader*(req: HttpRequestRef): HttpResult[MultiPartReaderRef] {. - raises: [Defect].} = + raises: [].} = ## Create new MultiPartReader interface for specific request. if req.meth in PostMethods: if MultipartForm in req.requestFlags: @@ -1040,30 +1040,30 @@ proc post*(req: HttpRequestRef): Future[HttpTable] {.async.} = raiseHttpCriticalError("Unsupported request body") proc setHeader*(resp: HttpResponseRef, key, value: string) {. - raises: [Defect].} = + raises: [].} = ## Sets value of header ``key`` to ``value``. doAssert(resp.state == HttpResponseState.Empty) resp.headersTable.set(key, value) proc setHeaderDefault*(resp: HttpResponseRef, key, value: string) {. - raises: [Defect].} = + raises: [].} = ## Sets value of header ``key`` to ``value``, only if header ``key`` is not ## present in the headers table. discard resp.headersTable.hasKeyOrPut(key, value) proc addHeader*(resp: HttpResponseRef, key, value: string) {. - raises: [Defect].} = + raises: [].} = ## Adds value ``value`` to header's ``key`` value. doAssert(resp.state == HttpResponseState.Empty) resp.headersTable.add(key, value) proc getHeader*(resp: HttpResponseRef, key: string, - default: string = ""): string {.raises: [Defect].} = + default: string = ""): string {.raises: [].} = ## Returns value of header with name ``name`` or ``default``, if header is ## not present in the table. resp.headersTable.getString(key, default) -proc hasHeader*(resp: HttpResponseRef, key: string): bool {.raises: [Defect].} = +proc hasHeader*(resp: HttpResponseRef, key: string): bool {.raises: [].} = ## Returns ``true`` if header with name ``key`` present in the headers table. key in resp.headersTable @@ -1083,7 +1083,7 @@ func createHeaders(resp: HttpResponseRef): string = answer proc prepareLengthHeaders(resp: HttpResponseRef, length: int): string {. 
- raises: [Defect].}= + raises: [].}= if not(resp.hasHeader(DateHeader)): resp.setHeader(DateHeader, httpDate()) if length > 0: @@ -1101,7 +1101,7 @@ proc prepareLengthHeaders(resp: HttpResponseRef, length: int): string {. resp.createHeaders() proc prepareChunkedHeaders(resp: HttpResponseRef): string {. - raises: [Defect].} = + raises: [].} = if not(resp.hasHeader(DateHeader)): resp.setHeader(DateHeader, httpDate()) if not(resp.hasHeader(ContentTypeHeader)): @@ -1118,7 +1118,7 @@ proc prepareChunkedHeaders(resp: HttpResponseRef): string {. resp.createHeaders() proc prepareServerSideEventHeaders(resp: HttpResponseRef): string {. - raises: [Defect].} = + raises: [].} = if not(resp.hasHeader(DateHeader)): resp.setHeader(DateHeader, httpDate()) if not(resp.hasHeader(ContentTypeHeader)): @@ -1131,7 +1131,7 @@ proc prepareServerSideEventHeaders(resp: HttpResponseRef): string {. resp.createHeaders() proc preparePlainHeaders(resp: HttpResponseRef): string {. - raises: [Defect].} = + raises: [].} = if not(resp.hasHeader(DateHeader)): resp.setHeader(DateHeader, httpDate()) if not(resp.hasHeader(ServerHeader)): @@ -1393,7 +1393,7 @@ proc remoteAddress*(request: HttpRequestRef): TransportAddress = request.connection.remoteAddress() proc requestInfo*(req: HttpRequestRef, contentType = "text/text"): string {. - raises: [Defect].} = + raises: [].} = ## Returns comprehensive information about request for specific content ## type. ## diff --git a/chronos/apps/http/httptable.nim b/chronos/apps/http/httptable.nim index 1cbf62469..86060de30 100644 --- a/chronos/apps/http/httptable.nim +++ b/chronos/apps/http/httptable.nim @@ -10,10 +10,7 @@ import std/[tables, strutils] import stew/base10 -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} type HttpTable* = object diff --git a/chronos/apps/http/multipart.nim b/chronos/apps/http/multipart.nim index 2ec869a83..45506a2a1 100644 --- a/chronos/apps/http/multipart.nim +++ b/chronos/apps/http/multipart.nim @@ -72,7 +72,7 @@ type BChar* = byte | char proc startsWith(s, prefix: openArray[byte]): bool {. - raises: [Defect].} = + raises: [].} = # This procedure is copy of strutils.startsWith() procedure, however, # it is intended to work with arrays of bytes, but not with strings. var i = 0 @@ -82,7 +82,7 @@ proc startsWith(s, prefix: openArray[byte]): bool {. inc(i) proc parseUntil(s, until: openArray[byte]): int {. - raises: [Defect].} = + raises: [].} = # This procedure is copy of parseutils.parseUntil() procedure, however, # it is intended to work with arrays of bytes, but not with strings. var i = 0 @@ -96,7 +96,7 @@ proc parseUntil(s, until: openArray[byte]): int {. -1 func setPartNames(part: var MultiPart): HttpResult[void] {. - raises: [Defect].} = + raises: [].} = if part.headers.count("content-disposition") != 1: return err("Content-Disposition header is incorrect") var header = part.headers.getString("content-disposition") @@ -121,7 +121,7 @@ func setPartNames(part: var MultiPart): HttpResult[void] {. proc init*[A: BChar, B: BChar](mpt: typedesc[MultiPartReader], buffer: openArray[A], boundary: openArray[B]): MultiPartReader {. - raises: [Defect].} = + raises: [].} = ## Create new MultiPartReader instance with `buffer` interface. ## ## ``buffer`` - is buffer which will be used to read data. @@ -146,7 +146,7 @@ proc new*[B: BChar](mpt: typedesc[MultiPartReaderRef], stream: HttpBodyReader, boundary: openArray[B], partHeadersMaxSize = 4096): MultiPartReaderRef {. 
- raises: [Defect].} = + raises: [].} = ## Create new MultiPartReader instance with `stream` interface. ## ## ``stream`` is stream used to read data. @@ -270,7 +270,7 @@ proc consumeBody*(mp: MultiPart) {.async.} = discard proc getBodyStream*(mp: MultiPart): HttpResult[AsyncStreamReader] {. - raises: [Defect].} = + raises: [].} = ## Get multipart's ``mp`` stream, which can be used to obtain value of the ## part. case mp.kind @@ -295,7 +295,7 @@ proc closeWait*(mpr: MultiPartReaderRef) {.async.} = else: discard -proc getBytes*(mp: MultiPart): seq[byte] {.raises: [Defect].} = +proc getBytes*(mp: MultiPart): seq[byte] {.raises: [].} = ## Returns value for MultiPart ``mp`` as sequence of bytes. case mp.kind of MultiPartSource.Buffer: @@ -304,7 +304,7 @@ proc getBytes*(mp: MultiPart): seq[byte] {.raises: [Defect].} = doAssert(not(mp.stream.atEof()), "Value is not obtained yet") mp.buffer -proc getString*(mp: MultiPart): string {.raises: [Defect].} = +proc getString*(mp: MultiPart): string {.raises: [].} = ## Returns value for MultiPart ``mp`` as string. case mp.kind of MultiPartSource.Buffer: @@ -313,7 +313,7 @@ proc getString*(mp: MultiPart): string {.raises: [Defect].} = doAssert(not(mp.stream.atEof()), "Value is not obtained yet") bytesToString(mp.buffer) -proc atEoM*(mpr: var MultiPartReader): bool {.raises: [Defect].} = +proc atEoM*(mpr: var MultiPartReader): bool {.raises: [].} = ## Procedure returns ``true`` if MultiPartReader has reached the end of ## multipart message. case mpr.kind @@ -322,7 +322,7 @@ proc atEoM*(mpr: var MultiPartReader): bool {.raises: [Defect].} = of MultiPartSource.Stream: mpr.stream.atEof() -proc atEoM*(mpr: MultiPartReaderRef): bool {.raises: [Defect].} = +proc atEoM*(mpr: MultiPartReaderRef): bool {.raises: [].} = ## Procedure returns ``true`` if MultiPartReader has reached the end of ## multipart message. case mpr.kind @@ -332,7 +332,7 @@ proc atEoM*(mpr: MultiPartReaderRef): bool {.raises: [Defect].} = mpr.stream.atEof() proc getPart*(mpr: var MultiPartReader): Result[MultiPart, string] {. - raises: [Defect].} = + raises: [].} = ## Get multipart part from MultiPartReader instance. ## ## This procedure will work only for MultiPartReader with buffer source. @@ -423,7 +423,7 @@ proc getPart*(mpr: var MultiPartReader): Result[MultiPart, string] {. err("Incorrect multipart form") func isEmpty*(mp: MultiPart): bool {. - raises: [Defect].} = + raises: [].} = ## Returns ``true`` is multipart ``mp`` is not initialized/filled yet. mp.counter == 0 @@ -440,7 +440,7 @@ func validateBoundary[B: BChar](boundary: openArray[B]): HttpResult[void] = ok() func getMultipartBoundary*(contentData: ContentTypeData): HttpResult[string] {. - raises: [Defect].} = + raises: [].} = ## Returns ``multipart/form-data`` boundary value from ``Content-Type`` ## header. ## @@ -481,7 +481,7 @@ proc quoteCheck(name: string): HttpResult[string] = proc init*[B: BChar](mpt: typedesc[MultiPartWriter], boundary: openArray[B]): MultiPartWriter {. - raises: [Defect].} = + raises: [].} = ## Create new MultiPartWriter instance with `buffer` interface. ## ## ``boundary`` - is multipart boundary, this value must not be empty. @@ -511,7 +511,7 @@ proc init*[B: BChar](mpt: typedesc[MultiPartWriter], proc new*[B: BChar](mpt: typedesc[MultiPartWriterRef], stream: HttpBodyWriter, boundary: openArray[B]): MultiPartWriterRef {. 
- raises: [Defect].} = + raises: [].} = doAssert(validateBoundary(boundary).isOk()) doAssert(not(isNil(stream))) diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim index 37cf8ca57..93f253b8e 100644 --- a/chronos/apps/http/shttpserver.nim +++ b/chronos/apps/http/shttpserver.nim @@ -70,7 +70,7 @@ proc new*(htype: typedesc[SecureHttpServerRef], httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, maxRequestBodySize: int = 1_048_576 - ): HttpResult[SecureHttpServerRef] {.raises: [Defect].} = + ): HttpResult[SecureHttpServerRef] {.raises: [].} = doAssert(not(isNil(tlsPrivateKey)), "TLS private key must not be nil!") doAssert(not(isNil(tlsCertificate)), "TLS certificate must not be nil!") diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index 805f40c20..5438e4fcc 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -31,10 +31,7 @@ template LocCompleteIndex*: untyped {.deprecated: "LocFinishIndex".} = LocFinishIndex when chronosStrictException: - when (NimMajor, NimMinor) < (1, 4): - {.pragma: closureIter, raises: [Defect, CatchableError], gcsafe.} - else: - {.pragma: closureIter, raises: [CatchableError], gcsafe.} + {.pragma: closureIter, raises: [CatchableError], gcsafe.} else: {.pragma: closureIter, raises: [Exception], gcsafe.} @@ -380,14 +377,14 @@ proc `cancelCallback=`*(future: FutureBase, cb: CallbackFunc) = future.cancelcb = cb {.push stackTrace: off.} -proc futureContinue*(fut: FutureBase) {.raises: [Defect], gcsafe.} +proc futureContinue*(fut: FutureBase) {.raises: [], gcsafe.} -proc internalContinue(fut: pointer) {.raises: [Defect], gcsafe.} = +proc internalContinue(fut: pointer) {.raises: [], gcsafe.} = let asFut = cast[FutureBase](fut) GC_unref(asFut) futureContinue(asFut) -proc futureContinue*(fut: FutureBase) {.raises: [Defect], gcsafe.} = +proc futureContinue*(fut: FutureBase) {.raises: [], gcsafe.} = # This function is responsible for calling the closure iterator generated by # the `{.async.}` transformation either until it has completed its iteration # or raised and error / been cancelled. @@ -528,8 +525,7 @@ when chronosStackTrace: # newMsg.add "\n" & $entry error.msg = newMsg -proc internalCheckComplete*(fut: FutureBase) {. - raises: [Defect, CatchableError].} = +proc internalCheckComplete*(fut: FutureBase) {.raises: [CatchableError].} = # For internal use only. Used in asyncmacro if not(isNil(fut.error)): when chronosStackTrace: @@ -541,8 +537,7 @@ proc internalRead*[T](fut: Future[T]): T {.inline.} = when T isnot void: return fut.value -proc read*[T](future: Future[T] ): T {. - raises: [Defect, CatchableError].} = +proc read*[T](future: Future[T] ): T {.raises: [CatchableError].} = ## Retrieves the value of ``future``. Future must be finished otherwise ## this function will fail with a ``ValueError`` exception. ## @@ -554,8 +549,7 @@ proc read*[T](future: Future[T] ): T {. # TODO: Make a custom exception type for this? raise newException(ValueError, "Future still in progress.") -proc readError*(future: FutureBase): ref CatchableError {. - raises: [Defect, ValueError].} = +proc readError*(future: FutureBase): ref CatchableError {.raises: [ValueError].} = ## Retrieves the exception stored in ``future``. ## ## An ``ValueError`` exception will be thrown if no exception exists @@ -682,8 +676,8 @@ proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] = ## ## If cancelled, ``fut1`` and ``fut2`` futures WILL NOT BE cancelled. 
var retFuture = newFuture[void]("chronos.or") - var cb: proc(udata: pointer) {.gcsafe, raises: [Defect].} - cb = proc(udata: pointer) {.gcsafe, raises: [Defect].} = + var cb: proc(udata: pointer) {.gcsafe, raises: [].} + cb = proc(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): var fut = cast[FutureBase](udata) if cast[pointer](fut1) == udata: @@ -996,8 +990,8 @@ proc one*[T](futs: varargs[Future[T]]): Future[Future[T]] = # Because we can't capture varargs[T] in closures we need to create copy. var nfuts = @futs - var cb: proc(udata: pointer) {.gcsafe, raises: [Defect].} - cb = proc(udata: pointer) {.gcsafe, raises: [Defect].} = + var cb: proc(udata: pointer) {.gcsafe, raises: [].} + cb = proc(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): var res: Future[T] var rfut = cast[FutureBase](udata) @@ -1044,8 +1038,8 @@ proc race*(futs: varargs[FutureBase]): Future[FutureBase] = # Because we can't capture varargs[T] in closures we need to create copy. var nfuts = @futs - var cb: proc(udata: pointer) {.gcsafe, raises: [Defect].} - cb = proc(udata: pointer) {.gcsafe, raises: [Defect].} = + var cb: proc(udata: pointer) {.gcsafe, raises: [].} + cb = proc(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): var res: FutureBase var rfut = cast[FutureBase](udata) diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index cb95d8f7e..a4359516e 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -8,12 +8,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} - {.pragma: callbackFunc, stdcall, gcsafe, raises: [Defect].} -else: - {.push raises: [].} - {.pragma: callbackFunc, stdcall, gcsafe, raises: [].} +{.push raises: [].} from nativesockets import Port import std/[tables, strutils, heapqueue, deques] @@ -160,7 +155,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or export oserrno type - CallbackFunc* = proc (arg: pointer) {.gcsafe, raises: [Defect].} + CallbackFunc* = proc (arg: pointer) {.gcsafe, raises: [].} AsyncCallback* = object function*: CallbackFunc @@ -177,8 +172,8 @@ type TrackerBase* = ref object of RootRef id*: string - dump*: proc(): string {.gcsafe, raises: [Defect].} - isLeaked*: proc(): bool {.gcsafe, raises: [Defect].} + dump*: proc(): string {.gcsafe, raises: [].} + isLeaked*: proc(): bool {.gcsafe, raises: [].} PDispatcherBase = ref object of RootRef timers*: HeapQueue[TimerCallback] @@ -291,6 +286,8 @@ func toException*(v: OSErrorCode): ref OSError = newOSError(v) # Result[T, OSErrorCode] values. when defined(windows): + {.pragma: stdcallbackFunc, stdcall, gcsafe, raises: [].} + export SIGINT, SIGQUIT, SIGTERM type CompletionKey = ULONG_PTR @@ -419,8 +416,8 @@ when defined(windows): var gDisp{.threadvar.}: PDispatcher ## Global dispatcher - proc setThreadDispatcher*(disp: PDispatcher) {.gcsafe, raises: [Defect].} - proc getThreadDispatcher*(): PDispatcher {.gcsafe, raises: [Defect].} + proc setThreadDispatcher*(disp: PDispatcher) {.gcsafe, raises: [].} + proc getThreadDispatcher*(): PDispatcher {.gcsafe, raises: [].} proc getIoHandler*(disp: PDispatcher): HANDLE = ## Returns the underlying IO Completion Port handle (Windows) or selector @@ -436,7 +433,7 @@ when defined(windows): loop.handles.incl(fd) ok() - proc register*(fd: AsyncFD) {.raises: [Defect, OSError].} = + proc register*(fd: AsyncFD) {.raises: [OSError].} = ## Register file descriptor ``fd`` in thread's dispatcher. 
register2(fd).tryGet() @@ -446,7 +443,7 @@ when defined(windows): {.push stackTrace: off.} proc waitableCallback(param: pointer, timerOrWaitFired: WINBOOL) {. - callbackFunc.} = + stdcallbackFunc.} = # This procedure will be executed in `wait thread`, so it must not use # GC related objects. # We going to ignore callbacks which was spawned when `isNil(param) == true` @@ -568,19 +565,19 @@ when defined(windows): proc addProcess*(pid: int, cb: CallbackFunc, udata: pointer = nil): ProcessHandle {. - raises: [Defect, OSError].} = + raises: [OSError].} = ## Registers callback ``cb`` to be called when process with process ## identifier ``pid`` exited. Returns process identifier, which can be ## used to clear process callback via ``removeProcess``. addProcess2(pid, cb, udata).tryGet() proc removeProcess*(procHandle: ProcessHandle) {. - raises: [Defect, OSError].} = + raises: [ OSError].} = ## Remove process' watching using process' descriptor ``procHandle``. removeProcess2(procHandle).tryGet() {.push stackTrace: off.} - proc consoleCtrlEventHandler(dwCtrlType: DWORD): uint32 {.callbackFunc.} = + proc consoleCtrlEventHandler(dwCtrlType: DWORD): uint32 {.stdcallbackFunc.} = ## This procedure will be executed in different thread, so it MUST not use ## any GC related features (strings, seqs, echo etc.). case dwCtrlType @@ -645,7 +642,7 @@ when defined(windows): proc addSignal*(signal: int, cb: CallbackFunc, udata: pointer = nil): SignalHandle {. - raises: [Defect, ValueError].} = + raises: [ValueError].} = ## Registers callback ``cb`` to be called when signal ``signal`` will be ## raised. Returns signal identifier, which can be used to clear signal ## callback via ``removeSignal``. @@ -653,7 +650,7 @@ when defined(windows): raise newException(ValueError, osErrorMsg(error)) proc removeSignal*(signalHandle: SignalHandle) {. - raises: [Defect, ValueError].} = + raises: [ValueError].} = ## Remove signal's watching using signal descriptor ``signalfd``. let res = removeSignal2(signalHandle) if res.isErr(): @@ -829,8 +826,8 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or var gDisp{.threadvar.}: PDispatcher ## Global dispatcher - proc setThreadDispatcher*(disp: PDispatcher) {.gcsafe, raises: [Defect].} - proc getThreadDispatcher*(): PDispatcher {.gcsafe, raises: [Defect].} + proc setThreadDispatcher*(disp: PDispatcher) {.gcsafe, raises: [].} + proc getThreadDispatcher*(): PDispatcher {.gcsafe, raises: [].} proc getIoHandler*(disp: PDispatcher): Selector[SelectorData] = ## Returns system specific OS queue. @@ -905,31 +902,31 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or return err(osdefs.EBADF) loop.selector.updateHandle2(cint(fd), newEvents) - proc register*(fd: AsyncFD) {.raises: [Defect, OSError].} = + proc register*(fd: AsyncFD) {.raises: [OSError].} = ## Register file descriptor ``fd`` in thread's dispatcher. register2(fd).tryGet() - proc unregister*(fd: AsyncFD) {.raises: [Defect, OSError].} = + proc unregister*(fd: AsyncFD) {.raises: [OSError].} = ## Unregister file descriptor ``fd`` from thread's dispatcher. unregister2(fd).tryGet() proc addReader*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil) {. - raises: [Defect, OSError].} = + raises: [OSError].} = ## Start watching the file descriptor ``fd`` for read availability and then ## call the callback ``cb`` with specified argument ``udata``. 
addReader2(fd, cb, udata).tryGet() - proc removeReader*(fd: AsyncFD) {.raises: [Defect, OSError].} = + proc removeReader*(fd: AsyncFD) {.raises: [OSError].} = ## Stop watching the file descriptor ``fd`` for read availability. removeReader2(fd).tryGet() proc addWriter*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil) {. - raises: [Defect, OSError].} = + raises: [OSError].} = ## Start watching the file descriptor ``fd`` for write availability and then ## call the callback ``cb`` with specified argument ``udata``. addWriter2(fd, cb, udata).tryGet() - proc removeWriter*(fd: AsyncFD) {.raises: [Defect, OSError].} = + proc removeWriter*(fd: AsyncFD) {.raises: [OSError].} = ## Stop watching the file descriptor ``fd`` for write availability. removeWriter2(fd).tryGet() @@ -1051,7 +1048,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or proc addSignal*(signal: int, cb: CallbackFunc, udata: pointer = nil): SignalHandle {. - raises: [Defect, OSError].} = + raises: [OSError].} = ## Start watching signal ``signal``, and when signal appears, call the ## callback ``cb`` with specified argument ``udata``. Returns signal ## identifier code, which can be used to remove signal callback @@ -1059,20 +1056,20 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or addSignal2(signal, cb, udata).tryGet() proc removeSignal*(signalHandle: SignalHandle) {. - raises: [Defect, OSError].} = + raises: [OSError].} = ## Remove watching signal ``signal``. removeSignal2(signalHandle).tryGet() proc addProcess*(pid: int, cb: CallbackFunc, udata: pointer = nil): ProcessHandle {. - raises: [Defect, OSError].} = + raises: [OSError].} = ## Registers callback ``cb`` to be called when process with process ## identifier ``pid`` exited. Returns process identifier, which can be ## used to clear process callback via ``removeProcess``. addProcess2(pid, cb, udata).tryGet() proc removeProcess*(procHandle: ProcessHandle) {. - raises: [Defect, OSError].} = + raises: [OSError].} = ## Remove process' watching using process' descriptor ``procHandle``. 
removeProcess2(procHandle).tryGet() @@ -1254,7 +1251,7 @@ when defined(macosx) or defined(macos) or defined(freebsd) or defined(netbsd) or defined(openbsd) or defined(dragonfly) or defined(linux) or defined(windows): - proc waitSignal*(signal: int): Future[void] {.raises: [Defect].} = + proc waitSignal*(signal: int): Future[void] {.raises: [].} = var retFuture = newFuture[void]("chronos.waitSignal()") var signalHandle: Opt[SignalHandle] @@ -1324,8 +1321,8 @@ proc stepsAsync*(number: int): Future[void] = var retFuture = newFuture[void]("chronos.stepsAsync(int)") var counter = 0 - var continuation: proc(data: pointer) {.gcsafe, raises: [Defect].} - continuation = proc(data: pointer) {.gcsafe, raises: [Defect].} = + var continuation: proc(data: pointer) {.gcsafe, raises: [].} + continuation = proc(data: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): inc(counter) if counter < number: @@ -1376,7 +1373,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] = # TODO: raises annotation shouldn't be needed, but likely similar issue as # https://github.com/nim-lang/Nim/issues/17369 - proc continuation(udata: pointer) {.gcsafe, raises: [Defect].} = + proc continuation(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): if not(cancelling): if not(fut.finished()): @@ -1394,7 +1391,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] = # TODO: raises annotation shouldn't be needed, but likely similar issue as # https://github.com/nim-lang/Nim/issues/17369 - proc cancellation(udata: pointer) {.gcsafe, raises: [Defect].} = + proc cancellation(udata: pointer) {.gcsafe, raises: [].} = if not isNil(timer): clearTimer(timer) if not(fut.finished()): @@ -1435,7 +1432,7 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] = var timer: TimerCallback var cancelling = false - proc continuation(udata: pointer) {.raises: [Defect].} = + proc continuation(udata: pointer) {.raises: [].} = if not(retFuture.finished()): if not(cancelling): if not(fut.finished()): @@ -1457,8 +1454,8 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] = else: retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) - var cancellation: proc(udata: pointer) {.gcsafe, raises: [Defect].} - cancellation = proc(udata: pointer) {.gcsafe, raises: [Defect].} = + var cancellation: proc(udata: pointer) {.gcsafe, raises: [].} + cancellation = proc(udata: pointer) {.gcsafe, raises: [].} = if not isNil(timer): clearTimer(timer) if not(fut.finished()): @@ -1504,7 +1501,7 @@ proc runForever*() = while true: poll() -proc waitFor*[T](fut: Future[T]): T {.raises: [Defect, CatchableError].} = +proc waitFor*[T](fut: Future[T]): T {.raises: [CatchableError].} = ## **Blocks** the current thread until the specified future completes. ## There's no way to tell if poll or read raised the exception while not(fut.finished()): @@ -1540,7 +1537,7 @@ when chronosFutureTracking: when defined(windows): proc waitForSingleObject*(handle: HANDLE, timeout: Duration): Future[WaitableResult] {. - raises: [Defect].} = + raises: [].} = ## Waits until the specified object is in the signaled state or the ## time-out interval elapses. WaitForSingleObject() for asynchronous world. 
let flags = WT_EXECUTEONLYONCE diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim index a7fc3ba72..bcad60686 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/asyncmacro2.nim @@ -211,8 +211,6 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = let raises = nnkBracket.newTree() when chronosStrictException: raises.add(newIdentNode("CatchableError")) - when (NimMajor, NimMinor) < (1, 4): - raises.add(newIdentNode("Defect")) else: raises.add(newIdentNode("Exception")) @@ -268,12 +266,9 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = # https://github.com/nim-lang/RFCs/issues/435 prc.addPragma(newIdentNode("gcsafe")) - let raises = nnkBracket.newTree() - when (NimMajor, NimMinor) < (1, 4): - raises.add(newIdentNode("Defect")) prc.addPragma(nnkExprColonExpr.newTree( newIdentNode("raises"), - raises + nnkBracket.newTree() )) if baseTypeIsVoid: diff --git a/chronos/asyncproc.nim b/chronos/asyncproc.nim index 6e9858f9c..8d15b72e3 100644 --- a/chronos/asyncproc.nim +++ b/chronos/asyncproc.nim @@ -7,12 +7,8 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} - {.pragma: apforward, gcsafe, raises: [Defect].} -else: - {.push raises: [].} - {.pragma: apforward, gcsafe, raises: [].} +{.push raises: [].} +{.pragma: apforward, gcsafe, raises: [].} import std/strtabs import "."/[config, asyncloop, handles, osdefs, osutils, oserrno], diff --git a/chronos/asyncsync.nim b/chronos/asyncsync.nim index a48002c4e..530984682 100644 --- a/chronos/asyncsync.nim +++ b/chronos/asyncsync.nim @@ -10,10 +10,7 @@ ## This module implements some core synchronization primitives. -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} import std/[sequtils, math, deques, tables, typetraits] import ./asyncloop @@ -67,17 +64,17 @@ type EventBusSubscription*[T] = proc(bus: AsyncEventBus, payload: EventPayload[T]): Future[void] {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} ## EventBus subscription callback type. EventBusAllSubscription* = proc(bus: AsyncEventBus, event: AwaitableEvent): Future[void] {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} ## EventBus subscription callback type. EventBusCallback = proc(bus: AsyncEventBus, event: string, key: EventBusKey, data: EventPayloadBase) {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} EventBusKey* = object ## Unique subscription key. @@ -186,7 +183,7 @@ proc locked*(lock: AsyncLock): bool = ## Return `true` if the lock ``lock`` is acquired, `false` otherwise. lock.locked -proc release*(lock: AsyncLock) {.raises: [Defect, AsyncLockError].} = +proc release*(lock: AsyncLock) {.raises: [AsyncLockError].} = ## Release a lock ``lock``. ## ## When the ``lock`` is locked, reset it to unlocked, and return. If any @@ -224,7 +221,7 @@ proc wait*(event: AsyncEvent): Future[void] = ## block until another task calls `fire()` to set the flag to `true`, ## then return. let retFuture = newFuture[void]("AsyncEvent.wait") - proc cancellation(udata: pointer) {.gcsafe, raises: [Defect].} = + proc cancellation(udata: pointer) {.gcsafe, raises: [].} = event.waiters.keepItIf(it != retFuture) if not(event.flag): retFuture.cancelCallback = cancellation @@ -298,7 +295,7 @@ proc empty*[T](aq: AsyncQueue[T]): bool {.inline.} = (len(aq.queue) == 0) proc addFirstNoWait*[T](aq: AsyncQueue[T], item: T) {. 
- raises: [Defect, AsyncQueueFullError].}= + raises: [AsyncQueueFullError].}= ## Put an item ``item`` to the beginning of the queue ``aq`` immediately. ## ## If queue ``aq`` is full, then ``AsyncQueueFullError`` exception raised. @@ -308,7 +305,7 @@ proc addFirstNoWait*[T](aq: AsyncQueue[T], item: T) {. aq.getters.wakeupNext() proc addLastNoWait*[T](aq: AsyncQueue[T], item: T) {. - raises: [Defect, AsyncQueueFullError].}= + raises: [AsyncQueueFullError].}= ## Put an item ``item`` at the end of the queue ``aq`` immediately. ## ## If queue ``aq`` is full, then ``AsyncQueueFullError`` exception raised. @@ -318,7 +315,7 @@ proc addLastNoWait*[T](aq: AsyncQueue[T], item: T) {. aq.getters.wakeupNext() proc popFirstNoWait*[T](aq: AsyncQueue[T]): T {. - raises: [Defect, AsyncQueueEmptyError].} = + raises: [AsyncQueueEmptyError].} = ## Get an item from the beginning of the queue ``aq`` immediately. ## ## If queue ``aq`` is empty, then ``AsyncQueueEmptyError`` exception raised. @@ -329,7 +326,7 @@ proc popFirstNoWait*[T](aq: AsyncQueue[T]): T {. res proc popLastNoWait*[T](aq: AsyncQueue[T]): T {. - raises: [Defect, AsyncQueueEmptyError].} = + raises: [AsyncQueueEmptyError].} = ## Get an item from the end of the queue ``aq`` immediately. ## ## If queue ``aq`` is empty, then ``AsyncQueueEmptyError`` exception raised. @@ -396,12 +393,12 @@ proc popLast*[T](aq: AsyncQueue[T]): Future[T] {.async.} = return aq.popLastNoWait() proc putNoWait*[T](aq: AsyncQueue[T], item: T) {. - raises: [Defect, AsyncQueueFullError].} = + raises: [AsyncQueueFullError].} = ## Alias of ``addLastNoWait()``. aq.addLastNoWait(item) proc getNoWait*[T](aq: AsyncQueue[T]): T {. - raises: [Defect, AsyncQueueEmptyError].} = + raises: [AsyncQueueEmptyError].} = ## Alias of ``popFirstNoWait()``. aq.popFirstNoWait() @@ -514,7 +511,7 @@ proc waitEvent*(bus: AsyncEventBus, T: typedesc, event: string): Future[T] {. var default: EventItem var retFuture = newFuture[T]("AsyncEventBus.waitEvent") let eventKey = generateKey(T.name, event) - proc cancellation(udata: pointer) {.gcsafe, raises: [Defect].} = + proc cancellation(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): bus.events.withValue(eventKey, item): item.waiters.keepItIf(it != cast[FutureBase](retFuture)) @@ -531,7 +528,7 @@ proc waitAllEvents*(bus: AsyncEventBus): Future[AwaitableEvent] {. ## Returns ``Future`` which holds helper object. Using this object you can ## retrieve event's name and payload. var retFuture = newFuture[AwaitableEvent]("AsyncEventBus.waitAllEvents") - proc cancellation(udata: pointer) {.gcsafe, raises: [Defect].} = + proc cancellation(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): bus.waiters.keepItIf(it != retFuture) retFuture.cancelCallback = cancellation @@ -547,7 +544,7 @@ proc subscribe*[T](bus: AsyncEventBus, event: string, ## ## Returns key that can be used to unsubscribe. proc trampoline(tbus: AsyncEventBus, event: string, key: EventBusKey, - data: EventPayloadBase) {.gcsafe, raises: [Defect].} = + data: EventPayloadBase) {.gcsafe, raises: [].} = let payload = cast[EventPayload[T]](data) asyncSpawn callback(bus, payload) @@ -571,7 +568,7 @@ proc subscribeAll*(bus: AsyncEventBus, ## ## Returns key that can be used to unsubscribe. 
proc trampoline(tbus: AsyncEventBus, event: string, key: EventBusKey, - data: EventPayloadBase) {.gcsafe, raises: [Defect].} = + data: EventPayloadBase) {.gcsafe, raises: [].} = let event = AwaitableEvent(eventName: event, payload: data) asyncSpawn callback(bus, event) @@ -660,7 +657,7 @@ template emitWait*[T](bus: AsyncEventBus, event: string, proc `==`(a, b: EventQueueKey): bool {.borrow.} -proc compact(ab: AsyncEventQueue) {.raises: [Defect].} = +proc compact(ab: AsyncEventQueue) {.raises: [].} = if len(ab.readers) > 0: let minOffset = block: @@ -684,14 +681,14 @@ proc compact(ab: AsyncEventQueue) {.raises: [Defect].} = ab.queue.clear() proc getReaderIndex(ab: AsyncEventQueue, key: EventQueueKey): int {. - raises: [Defect].} = + raises: [].} = for index, value in ab.readers.pairs(): if value.key == key: return index -1 proc newAsyncEventQueue*[T](limitSize = 0): AsyncEventQueue[T] {. - raises: [Defect].} = + raises: [].} = ## Creates new ``AsyncEventBus`` maximum size of ``limitSize`` (default is ## ``0`` which means that there no limits). ## @@ -709,10 +706,10 @@ proc newAsyncEventQueue*[T](limitSize = 0): AsyncEventQueue[T] {. initDeque[T](nextPowerOfTwo(limitSize + 1)) AsyncEventQueue[T](counter: 0'u64, queue: queue, limit: limitSize) -proc len*(ab: AsyncEventQueue): int {.raises: [Defect].} = +proc len*(ab: AsyncEventQueue): int {.raises: [].} = len(ab.queue) -proc register*(ab: AsyncEventQueue): EventQueueKey {.raises: [Defect].} = +proc register*(ab: AsyncEventQueue): EventQueueKey {.raises: [].} = inc(ab.counter) let reader = EventQueueReader(key: EventQueueKey(ab.counter), offset: ab.offset + len(ab.queue), @@ -721,7 +718,7 @@ proc register*(ab: AsyncEventQueue): EventQueueKey {.raises: [Defect].} = EventQueueKey(ab.counter) proc unregister*(ab: AsyncEventQueue, key: EventQueueKey) {. - raises: [Defect] .} = + raises: [] .} = let index = ab.getReaderIndex(key) if index >= 0: let reader = ab.readers[index] @@ -731,14 +728,14 @@ proc unregister*(ab: AsyncEventQueue, key: EventQueueKey) {. ab.readers.delete(index) ab.compact() -proc close*(ab: AsyncEventQueue) {.raises: [Defect].} = +proc close*(ab: AsyncEventQueue) {.raises: [].} = for reader in ab.readers.items(): if not(isNil(reader.waiter)) and not(reader.waiter.finished()): reader.waiter.complete() ab.readers.reset() ab.queue.clear() -proc closeWait*(ab: AsyncEventQueue): Future[void] {.raises: [Defect].} = +proc closeWait*(ab: AsyncEventQueue): Future[void] {.raises: [].} = var retFuture = newFuture[void]("AsyncEventQueue.closeWait()") proc continuation(udata: pointer) {.gcsafe.} = if not(retFuture.finished()): @@ -753,7 +750,7 @@ template readerOverflow*(ab: AsyncEventQueue, reader: EventQueueReader): bool = ab.limit + (reader.offset - ab.offset) <= len(ab.queue) -proc emit*[T](ab: AsyncEventQueue[T], data: T) {.raises: [Defect].} = +proc emit*[T](ab: AsyncEventQueue[T], data: T) {.raises: [].} = if len(ab.readers) > 0: # We enqueue `data` only if there active reader present. 
var changesPresent = false diff --git a/chronos/debugutils.nim b/chronos/debugutils.nim index de1aee4ea..0bf7e3ef8 100644 --- a/chronos/debugutils.nim +++ b/chronos/debugutils.nim @@ -7,10 +7,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} import "."/[asyncloop, config] export asyncloop diff --git a/chronos/handles.nim b/chronos/handles.nim index 1dcb29db0..2348b33cc 100644 --- a/chronos/handles.nim +++ b/chronos/handles.nim @@ -7,10 +7,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} import "."/[asyncloop, osdefs, osutils] import stew/results @@ -156,7 +153,7 @@ proc createAsyncSocket*(domain: Domain, sockType: SockType, return asyncInvalidSocket proc wrapAsyncSocket*(sock: cint|SocketHandle): AsyncFD {. - raises: [Defect, CatchableError].} = + raises: [CatchableError].} = ## Wraps socket to asynchronous socket handle. ## Return ``asyncInvalidSocket`` on error. wrapAsyncSocket2(sock).valueOr: @@ -191,7 +188,7 @@ proc setMaxOpenFiles2*(count: int): Result[void, OSErrorCode] = return err(osLastError()) ok() -proc getMaxOpenFiles*(): int {.raises: [Defect, OSError].} = +proc getMaxOpenFiles*(): int {.raises: [OSError].} = ## Returns maximum file descriptor number that can be opened by this process. ## ## Note: On Windows its impossible to obtain such number, so getMaxOpenFiles() @@ -202,7 +199,7 @@ proc getMaxOpenFiles*(): int {.raises: [Defect, OSError].} = raiseOSError(res.error()) res.get() -proc setMaxOpenFiles*(count: int) {.raises: [Defect, OSError].} = +proc setMaxOpenFiles*(count: int) {.raises: [OSError].} = ## Set maximum file descriptor number that can be opened by this process. ## ## Note: On Windows its impossible to set this value, so it just a nop call. diff --git a/chronos/ioselects/ioselectors_epoll.nim b/chronos/ioselects/ioselectors_epoll.nim index 3eed8707b..d438bac02 100644 --- a/chronos/ioselects/ioselectors_epoll.nim +++ b/chronos/ioselects/ioselectors_epoll.nim @@ -11,10 +11,7 @@ import std/[deques, tables] import stew/base10 -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} type SelectorImpl[T] = object @@ -677,70 +674,70 @@ proc select2*[T](s: Selector[T], timeout: int): SelectResult[seq[ReadyKey]] = ok(res) proc newSelector*[T](): Selector[T] {. - raises: [Defect, OSError].} = + raises: [OSError].} = let res = Selector.new(T) if res.isErr(): raiseOSError(res.error()) res.get() proc close*[T](s: Selector[T]) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = s.close2() if res.isErr(): raiseIOSelectorsError(res.error()) proc newSelectEvent*(): SelectEvent {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = SelectEvent.new() if res.isErr(): raiseIOSelectorsError(res.error()) res.get() proc trigger*(event: SelectEvent) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = event.trigger2() if res.isErr(): raiseIOSelectorsError(res.error()) proc close*(event: SelectEvent) {. 
- raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = event.close2() if res.isErr(): raiseIOSelectorsError(res.error()) proc registerHandle*[T](s: Selector[T], fd: cint | SocketHandle, events: set[Event], data: T) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = registerHandle2(s, fd, events, data) if res.isErr(): raiseIOSelectorsError(res.error()) proc updateHandle*[T](s: Selector[T], fd: cint | SocketHandle, events: set[Event]) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = updateHandle2(s, fd, events) if res.isErr(): raiseIOSelectorsError(res.error()) proc unregister*[T](s: Selector[T], fd: cint | SocketHandle) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = unregister2(s, fd) if res.isErr(): raiseIOSelectorsError(res.error()) proc unregister*[T](s: Selector[T], event: SelectEvent) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = unregister2(s, event) if res.isErr(): raiseIOSelectorsError(res.error()) proc registerTimer*[T](s: Selector[T], timeout: int, oneshot: bool, data: T): cint {. - discardable, raises: [Defect, IOSelectorsException].} = + discardable, raises: [IOSelectorsException].} = let res = registerTimer2(s, timeout, oneshot, data) if res.isErr(): raiseIOSelectorsError(res.error()) res.get() proc registerEvent*[T](s: Selector[T], event: SelectEvent, data: T) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = registerEvent2(s, event, data) if res.isErr(): raiseIOSelectorsError(res.error()) proc selectInto*[T](s: Selector[T], timeout: int, readyKeys: var openArray[ReadyKey]): int {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = selectInto2(s, timeout, readyKeys) if res.isErr(): raiseIOSelectorsError(res.error()) res.get() diff --git a/chronos/ioselects/ioselectors_kqueue.nim b/chronos/ioselects/ioselectors_kqueue.nim index eadb5cd39..9f0627aa9 100644 --- a/chronos/ioselects/ioselectors_kqueue.nim +++ b/chronos/ioselects/ioselectors_kqueue.nim @@ -8,7 +8,8 @@ # # This module implements BSD kqueue(). -{.push raises: [Defect].} +{.push raises: [].} + import std/[kqueue, deques, tables] import stew/base10 @@ -606,85 +607,85 @@ proc select2*[T](s: Selector[T], ok(res) proc newSelector*[T](): owned(Selector[T]) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = Selector.new(T) if res.isErr(): raiseIOSelectorsError(res.error()) res.get() proc newSelectEvent*(): SelectEvent {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = SelectEvent.new() if res.isErr(): raiseIOSelectorsError(res.error()) res.get() proc trigger*(ev: SelectEvent) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = ev.trigger2() if res.isErr(): raiseIOSelectorsError(res.error()) proc close*(ev: SelectEvent) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = ev.close2() if res.isErr(): raiseIOSelectorsError(res.error()) proc registerHandle*[T](s: Selector[T], fd: cint | SocketHandle, events: set[Event], data: T) {. 
- raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = registerHandle2(s, cint(fd), events, data) if res.isErr(): raiseIOSelectorsError(res.error()) proc updateHandle*[T](s: Selector[T], fd: cint | SocketHandle, events: set[Event]) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = updateHandle2(s, cint(fd), events) if res.isErr(): raiseIOSelectorsError(res.error()) proc registerEvent*[T](s: Selector[T], ev: SelectEvent, data: T) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = registerEvent2(s, ev, data) if res.isErr(): raiseIOSelectorsError(res.error()) proc registerVnode*[T](s: Selector[T], fd: cint, events: set[Event], data: T) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = registerVnode2(s, fd, events, data) if res.isErr(): raiseIOSelectorsError(res.error()) proc unregister*[T](s: Selector[T], event: SelectEvent) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = unregister2(s, event) if res.isErr(): raiseIOSelectorsError(res.error()) proc unregister*[T](s: Selector[T], fd: cint|SocketHandle) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = unregister2(s, fd) if res.isErr(): raiseIOSelectorsError(res.error()) proc selectInto*[T](s: Selector[T], timeout: int, results: var openArray[ReadyKey]): int {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = selectInto2(s, timeout, results) if res.isErr(): raiseIOSelectorsError(res.error()) res.get() proc select*[T](s: Selector[T], timeout: int): seq[ReadyKey] {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = select2(s, timeout) if res.isErr(): raiseIOSelectorsError(res.error()) res.get() -proc close*[T](s: Selector[T]) {.raises: [Defect, IOSelectorsException].} = +proc close*[T](s: Selector[T]) {.raises: [IOSelectorsException].} = let res = s.close2() if res.isErr(): raiseIOSelectorsError(res.error()) diff --git a/chronos/ioselects/ioselectors_poll.nim b/chronos/ioselects/ioselectors_poll.nim index 9ff8ad12e..d0d533cd0 100644 --- a/chronos/ioselects/ioselectors_poll.nim +++ b/chronos/ioselects/ioselectors_poll.nim @@ -11,10 +11,7 @@ import std/tables import stew/base10 -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} type SelectorImpl[T] = object @@ -250,63 +247,63 @@ proc select2*[T](s: Selector[T], timeout: int): SelectResult[seq[ReadyKey]] = ok(res) proc newSelector*[T](): Selector[T] {. - raises: [Defect, OSError].} = + raises: [OSError].} = let res = Selector.new(T) if res.isErr(): raiseOSError(res.error) res.get() proc close*[T](s: Selector[T]) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = s.close2() if res.isErr(): raiseIOSelectorsError(res.error()) proc newSelectEvent*(): SelectEvent {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = SelectEvent.new() if res.isErr(): raiseIOSelectorsError(res.error()) res.get() proc trigger*(event: SelectEvent) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = event.trigger2() if res.isErr(): raiseIOSelectorsError(res.error()) proc close*(event: SelectEvent) {. 
- raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = event.close2() if res.isErr(): raiseIOSelectorsError(res.error()) proc registerHandle*[T](s: Selector[T], fd: cint | SocketHandle, events: set[Event], data: T) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = registerHandle2(s, cint(fd), events, data) if res.isErr(): raiseIOSelectorsError(res.error()) proc updateHandle*[T](s: Selector[T], fd: cint | SocketHandle, events: set[Event]) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = updateHandle2(s, cint(fd), events) if res.isErr(): raiseIOSelectorsError(res.error()) proc unregister*[T](s: Selector[T], fd: cint | SocketHandle) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = unregister2(s, cint(fd)) if res.isErr(): raiseIOSelectorsError(res.error()) proc unregister*[T](s: Selector[T], event: SelectEvent) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = unregister2(s, event) if res.isErr(): raiseIOSelectorsError(res.error()) proc registerEvent*[T](s: Selector[T], event: SelectEvent, data: T) {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = registerEvent2(s, event, data) if res.isErr(): raiseIOSelectorsError(res.error()) proc selectInto*[T](s: Selector[T], timeout: int, readyKeys: var openArray[ReadyKey]): int {. - raises: [Defect, IOSelectorsException].} = + raises: [IOSelectorsException].} = let res = selectInto2(s, timeout, readyKeys) if res.isErr(): raiseIOSelectorsError(res.error()) res.get() diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 92773d072..8106fb680 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -308,7 +308,7 @@ when defined(windows): stdcall, gcsafe, raises: [].} PHANDLER_ROUTINE* = proc (dwCtrlType: DWORD): WINBOOL {. - stdcall, gcsafe, raises: [Defect].} + stdcall, gcsafe, raises: [].} OSVERSIONINFO* {.final, pure.} = object dwOSVersionInfoSize*: DWORD diff --git a/chronos/osutils.nim b/chronos/osutils.nim index 2fd3928e3..86505c2b8 100644 --- a/chronos/osutils.nim +++ b/chronos/osutils.nim @@ -11,10 +11,7 @@ import osdefs, oserrno export results -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} when defined(windows) or defined(nimdoc): import stew/base10 diff --git a/chronos/ratelimit.nim b/chronos/ratelimit.nim index caaf659fe..02d80f519 100644 --- a/chronos/ratelimit.nim +++ b/chronos/ratelimit.nim @@ -6,7 +6,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -{.push raises: [Defect].} +{.push raises: [].} import ../chronos import timer diff --git a/chronos/sendfile.nim b/chronos/sendfile.nim index e1e14b2f9..8cba9e83f 100644 --- a/chronos/sendfile.nim +++ b/chronos/sendfile.nim @@ -9,10 +9,7 @@ ## This module provides cross-platform wrapper for ``sendfile()`` syscall. 
-when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} when defined(nimdoc): proc sendfile*(outfd, infd: int, offset: int, count: var int): int = diff --git a/chronos/srcloc.nim b/chronos/srcloc.nim index 6edaefb5e..ac29640cd 100644 --- a/chronos/srcloc.nim +++ b/chronos/srcloc.nim @@ -6,10 +6,9 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} + +{.push raises: [].} + import stew/base10 type diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 931453d4f..9920fc7cb 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -7,10 +7,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} import ../asyncloop, ../asyncsync import ../transports/common, ../transports/stream @@ -67,10 +64,10 @@ type Closed ## Stream was closed StreamReaderLoop* = proc (stream: AsyncStreamReader): Future[void] {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} ## Main read loop for read streams. StreamWriterLoop* = proc (stream: AsyncStreamWriter): Future[void] {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} ## Main write loop for write streams. AsyncStreamReader* = ref object of RootRef @@ -219,15 +216,15 @@ proc newAsyncStreamUseClosedError*(): ref AsyncStreamUseClosedError {. newException(AsyncStreamUseClosedError, "Stream is already closed") proc raiseAsyncStreamUseClosedError*() {. - noinline, noreturn, raises: [Defect, AsyncStreamUseClosedError].} = + noinline, noreturn, raises: [AsyncStreamUseClosedError].} = raise newAsyncStreamUseClosedError() proc raiseAsyncStreamLimitError*() {. - noinline, noreturn, raises: [Defect, AsyncStreamLimitError].} = + noinline, noreturn, raises: [AsyncStreamLimitError].} = raise newAsyncStreamLimitError() proc raiseAsyncStreamIncompleteError*() {. - noinline, noreturn, raises: [Defect, AsyncStreamIncompleteError].} = + noinline, noreturn, raises: [AsyncStreamIncompleteError].} = raise newAsyncStreamIncompleteError() proc raiseEmptyMessageDefect*() {.noinline, noreturn.} = @@ -235,7 +232,7 @@ proc raiseEmptyMessageDefect*() {.noinline, noreturn.} = "Could not write empty message") proc raiseAsyncStreamWriteEOFError*() {. - noinline, noreturn, raises: [Defect, AsyncStreamWriteEOFError].} = + noinline, noreturn, raises: [AsyncStreamWriteEOFError].} = raise newException(AsyncStreamWriteEOFError, "Stream finished or remote side dropped connection") @@ -336,9 +333,9 @@ template checkStreamFinished*(t: untyped) = if t.atEof(): raiseAsyncStreamWriteEOFError() proc setupAsyncStreamReaderTracker(): AsyncStreamTracker {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} proc setupAsyncStreamWriterTracker(): AsyncStreamTracker {. 
- gcsafe, raises: [Defect].} + gcsafe, raises: [].} proc getAsyncStreamReaderTracker(): AsyncStreamTracker {.inline.} = var res = cast[AsyncStreamTracker](getTracker(AsyncStreamReaderTrackerName)) @@ -974,7 +971,7 @@ proc close*(rw: AsyncStreamRW) = if not(rw.closed()): rw.state = AsyncStreamState.Closing - proc continuation(udata: pointer) {.raises: [Defect].} = + proc continuation(udata: pointer) {.raises: [].} = if not isNil(rw.udata): GC_unref(cast[ref int](rw.udata)) if not(rw.future.finished()): diff --git a/chronos/timer.nim b/chronos/timer.nim index 8e7cb8fa4..29af20e79 100644 --- a/chronos/timer.nim +++ b/chronos/timer.nim @@ -27,10 +27,7 @@ import "."/osdefs const asyncTimer* {.strdefine.} = "mono" -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} when defined(windows): when asyncTimer == "system": diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index 8255d2621..cbec5d6f4 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -7,10 +7,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} import std/[strutils] import stew/[base10, byteutils] @@ -209,7 +206,7 @@ proc toHex*(address: TransportAddress): string = "None" proc initTAddress*(address: string): TransportAddress {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = ## Parses string representation of ``address``. ``address`` can be IPv4, IPv6 ## or Unix domain address. ## @@ -259,7 +256,7 @@ proc initTAddress*(address: string): TransportAddress {. TransportAddress(family: AddressFamily.Unix) proc initTAddress*(address: string, port: Port): TransportAddress {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = ## Initialize ``TransportAddress`` with IP (IPv4 or IPv6) address ``address`` ## and port number ``port``. let ipaddr = @@ -277,7 +274,7 @@ proc initTAddress*(address: string, port: Port): TransportAddress {. address_v6: ipaddr.address_v6, port: port) proc initTAddress*(address: string, port: int): TransportAddress {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = ## Initialize ``TransportAddress`` with IP (IPv4 or IPv6) address ``address`` ## and port number ``port``. if port < 0 or port > 65535: @@ -298,7 +295,7 @@ proc initTAddress*(address: IpAddress, port: Port): TransportAddress = proc getAddrInfo(address: string, port: Port, domain: Domain, sockType: SockType = SockType.SOCK_STREAM, protocol: Protocol = Protocol.IPPROTO_TCP): ptr AddrInfo {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = ## We have this one copy of ``getAddrInfo()`` because of AI_V4MAPPED in ## ``net.nim:getAddrInfo()``, which is not cross-platform. var hints: AddrInfo @@ -380,7 +377,7 @@ proc toSAddr*(address: TransportAddress, sa: var Sockaddr_storage, discard proc address*(ta: TransportAddress): IpAddress {. - raises: [Defect, ValueError].} = + raises: [ValueError].} = ## Converts ``TransportAddress`` to ``net.IpAddress`` object. ## ## Note its impossible to convert ``TransportAddress`` of ``Unix`` family, @@ -393,7 +390,7 @@ proc address*(ta: TransportAddress): IpAddress {. 
else: raise newException(ValueError, "IpAddress supports only IPv4/IPv6!") -proc host*(ta: TransportAddress): string {.raises: [Defect].} = +proc host*(ta: TransportAddress): string {.raises: [].} = ## Returns ``host`` of TransportAddress ``ta``. ## ## For IPv4 and IPv6 addresses it will return IP address as string, or empty @@ -410,7 +407,7 @@ proc host*(ta: TransportAddress): string {.raises: [Defect].} = proc resolveTAddress*(address: string, port: Port, domain: Domain): seq[TransportAddress] {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = var res: seq[TransportAddress] let aiList = getAddrInfo(address, port, domain) var it = aiList @@ -426,7 +423,7 @@ proc resolveTAddress*(address: string, port: Port, res proc resolveTAddress*(address: string, domain: Domain): seq[TransportAddress] {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = let parts = block: let res = address.rsplit(":", maxsplit = 1) @@ -448,7 +445,7 @@ proc resolveTAddress*(address: string, domain: Domain): seq[TransportAddress] {. resolveTAddress(hostname, Port(port), domain) proc resolveTAddress*(address: string): seq[TransportAddress] {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = ## Resolve string representation of ``address``. ## ## Supported formats are: @@ -461,7 +458,7 @@ proc resolveTAddress*(address: string): seq[TransportAddress] {. resolveTAddress(address, Domain.AF_UNSPEC) proc resolveTAddress*(address: string, port: Port): seq[TransportAddress] {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = ## Resolve string representation of ``address``. ## ## Supported formats are: @@ -475,7 +472,7 @@ proc resolveTAddress*(address: string, port: Port): seq[TransportAddress] {. proc resolveTAddress*(address: string, family: AddressFamily): seq[TransportAddress] {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = ## Resolve string representation of ``address``. ## ## Supported formats are: @@ -495,7 +492,7 @@ proc resolveTAddress*(address: string, proc resolveTAddress*(address: string, port: Port, family: AddressFamily): seq[TransportAddress] {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = ## Resolve string representation of ``address``. ## ## ``address`` could be dot IPv4/IPv6 address or hostname. @@ -512,7 +509,7 @@ proc resolveTAddress*(address: string, port: Port, proc resolveTAddress*(address: string, family: IpAddressFamily): seq[TransportAddress] {. - deprecated, raises: [Defect, TransportAddressError].} = + deprecated, raises: [TransportAddressError].} = case family of IpAddressFamily.IPv4: resolveTAddress(address, AddressFamily.IPv4) @@ -521,7 +518,7 @@ proc resolveTAddress*(address: string, proc resolveTAddress*(address: string, port: Port, family: IpAddressFamily): seq[TransportAddress] {. - deprecated, raises: [Defect, TransportAddressError].} = + deprecated, raises: [TransportAddressError].} = case family of IpAddressFamily.IPv4: resolveTAddress(address, port, AddressFamily.IPv4) @@ -586,7 +583,7 @@ template getTransportOsError*(err: cint): ref TransportOsError = getTransportOsError(OSErrorCode(err)) proc raiseTransportOsError*(err: OSErrorCode) {. - raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = ## Raises transport specific OS error. 
raise getTransportOsError(err) diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index 0fa1a0e65..91a7e7a05 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -7,10 +7,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} import std/deques when not(defined(windows)): import ".."/selectors2 @@ -30,7 +27,7 @@ type DatagramCallback* = proc(transp: DatagramTransport, remote: TransportAddress): Future[void] {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} DatagramTransport* = ref object of RootRef fd*: AsyncFD # File descriptor @@ -64,7 +61,7 @@ const DgramTransportTrackerName* = "datagram.transport" proc remoteAddress*(transp: DatagramTransport): TransportAddress {. - raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = ## Returns ``transp`` remote socket address. if transp.remote.family == AddressFamily.None: var saddr: Sockaddr_storage @@ -76,7 +73,7 @@ proc remoteAddress*(transp: DatagramTransport): TransportAddress {. transp.remote proc localAddress*(transp: DatagramTransport): TransportAddress {. - raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = ## Returns ``transp`` local socket address. if transp.local.family == AddressFamily.None: var saddr: Sockaddr_storage @@ -92,7 +89,7 @@ template setReadError(t, e: untyped) = (t).error = getTransportOsError(e) proc setupDgramTransportTracker(): DgramTransportTracker {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} proc getDgramTransportTracker(): DgramTransportTracker {.inline.} = var res = cast[DgramTransportTracker](getTracker(DgramTransportTrackerName)) @@ -288,7 +285,7 @@ when defined(windows): child: DatagramTransport, bufferSize: int, ttl: int): DatagramTransport {. - raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = var localSock: AsyncFD doAssert(remote.family == local.family) doAssert(not isNil(cbproc)) @@ -406,7 +403,7 @@ when defined(windows): else: # Linux/BSD/MacOS part - proc readDatagramLoop(udata: pointer) {.raises: Defect.}= + proc readDatagramLoop(udata: pointer) {.raises: [].}= var raddr: TransportAddress doAssert(not isNil(udata)) let transp = cast[DatagramTransport](udata) @@ -498,7 +495,7 @@ else: child: DatagramTransport, bufferSize: int, ttl: int): DatagramTransport {. - raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = var localSock: AsyncFD doAssert(remote.family == local.family) doAssert(not isNil(cbproc)) @@ -603,7 +600,7 @@ else: proc close*(transp: DatagramTransport) = ## Closes and frees resources of transport ``transp``. - proc continuation(udata: pointer) {.raises: Defect.} = + proc continuation(udata: pointer) {.raises: [].} = if not(transp.future.finished()): # Stop tracking transport untrackDgram(transp) @@ -636,7 +633,7 @@ proc newDatagramTransport*(cbproc: DatagramCallback, bufSize: int = DefaultDatagramBufferSize, ttl: int = 0 ): DatagramTransport {. - raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = ## Create new UDP datagram transport (IPv4). ## ## ``cbproc`` - callback which will be called, when new datagram received. @@ -662,7 +659,7 @@ proc newDatagramTransport*[T](cbproc: DatagramCallback, bufSize: int = DefaultDatagramBufferSize, ttl: int = 0 ): DatagramTransport {. 
- raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = var fflags = flags + {GCUserData} GC_ref(udata) newDatagramTransportCommon(cbproc, remote, local, sock, fflags, @@ -678,7 +675,7 @@ proc newDatagramTransport6*(cbproc: DatagramCallback, bufSize: int = DefaultDatagramBufferSize, ttl: int = 0 ): DatagramTransport {. - raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = ## Create new UDP datagram transport (IPv6). ## ## ``cbproc`` - callback which will be called, when new datagram received. @@ -704,7 +701,7 @@ proc newDatagramTransport6*[T](cbproc: DatagramCallback, bufSize: int = DefaultDatagramBufferSize, ttl: int = 0 ): DatagramTransport {. - raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = var fflags = flags + {GCUserData} GC_ref(udata) newDatagramTransportCommon(cbproc, remote, local, sock, fflags, @@ -865,7 +862,7 @@ proc sendTo*[T](transp: DatagramTransport, remote: TransportAddress, return retFuture proc peekMessage*(transp: DatagramTransport, msg: var seq[byte], - msglen: var int) {.raises: [Defect, CatchableError].} = + msglen: var int) {.raises: [CatchableError].} = ## Get access to internal message buffer and length of incoming datagram. if ReadError in transp.state: transp.state.excl(ReadError) @@ -877,7 +874,7 @@ proc peekMessage*(transp: DatagramTransport, msg: var seq[byte], msglen = transp.buflen proc getMessage*(transp: DatagramTransport): seq[byte] {. - raises: [Defect, CatchableError].} = + raises: [CatchableError].} = ## Copy data from internal message buffer and return result. var default: seq[byte] if ReadError in transp.state: diff --git a/chronos/transports/ipnet.nim b/chronos/transports/ipnet.nim index 130e8fa00..3e6a20130 100644 --- a/chronos/transports/ipnet.nim +++ b/chronos/transports/ipnet.nim @@ -9,10 +9,7 @@ ## This module implements various IP network utility procedures. -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} import std/strutils import stew/endians2 @@ -352,7 +349,7 @@ proc `$`*(mask: IpMask, include0x = false): string = else: "Unknown mask family: " & $mask.family -proc ip*(mask: IpMask): string {.raises: [Defect, ValueError].} = +proc ip*(mask: IpMask): string {.raises: [ValueError].} = ## Returns IP address text representation of IP mask ``mask``. case mask.family of AddressFamily.IPv4: @@ -387,7 +384,7 @@ proc init*(t: typedesc[IpNet], host: TransportAddress, IpNet(mask: mask, host: host) proc init*(t: typedesc[IpNet], network: string): IpNet {. - raises: [Defect, TransportAddressError].} = + raises: [TransportAddressError].} = ## Initialize IP Network from string representation in format ##
<address>/<prefix length> or <address>
/. var parts = network.rsplit("/", maxsplit = 1) diff --git a/chronos/transports/osnet.nim b/chronos/transports/osnet.nim index 973a2dd89..21adb656e 100644 --- a/chronos/transports/osnet.nim +++ b/chronos/transports/osnet.nim @@ -10,10 +10,7 @@ ## This module implements cross-platform network interfaces list. ## Currently supported OSes are Windows, Linux, MacOS, BSD(not tested). -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} import std/algorithm import ".."/osdefs @@ -753,7 +750,7 @@ when defined(linux): if endflag: break - proc getInterfaces*(): seq[NetworkInterface] {.raises: [Defect].} = + proc getInterfaces*(): seq[NetworkInterface] {.raises: [].} = ## Return list of available interfaces. var res: seq[NetworkInterface] var pid = osdefs.getpid() @@ -767,7 +764,7 @@ when defined(linux): discard osdefs.close(sock) res - proc getBestRoute*(address: TransportAddress): Route {.raises: [Defect].} = + proc getBestRoute*(address: TransportAddress): Route {.raises: [].} = ## Return best applicable OS route, which will be used for connecting to ## address ``address``. var pid = osdefs.getpid() @@ -795,7 +792,7 @@ elif defined(macosx) or defined(macos) or defined(bsd): else: StatusDown - proc getInterfaces*(): seq[NetworkInterface] {.raises: [Defect].} = + proc getInterfaces*(): seq[NetworkInterface] {.raises: [].} = ## Return list of available interfaces. var res: seq[NetworkInterface] var ifap: ptr IfAddrs @@ -867,7 +864,7 @@ elif defined(macosx) or defined(macos) or defined(bsd): else: 0 - proc getBestRoute*(address: TransportAddress): Route {.raises: [Defect].} = + proc getBestRoute*(address: TransportAddress): Route {.raises: [].} = ## Return best applicable OS route, which will be used for connecting to ## address ``address``. var sock: cint @@ -1034,7 +1031,7 @@ elif defined(windows): res.net = IpNet.init(res.host, prefixLength) res - proc getInterfaces*(): seq[NetworkInterface] {.raises: [Defect].} = + proc getInterfaces*(): seq[NetworkInterface] {.raises: [].} = ## Return list of network interfaces. var res = newSeq[NetworkInterface]() var size = WorkBufferSize @@ -1086,7 +1083,7 @@ elif defined(windows): sort(res, cmp) res - proc getBestRoute*(address: TransportAddress): Route {.raises: [Defect].} = + proc getBestRoute*(address: TransportAddress): Route {.raises: [].} = ## Return best applicable OS route, which will be used for connecting to ## address ``address``. var res = Route() diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 260d21eed..18d6a506d 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -7,10 +7,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -when (NimMajor, NimMinor) < (1, 4): - {.push raises: [Defect].} -else: - {.push raises: [].} +{.push raises: [].} import std/deques import ".."/[asyncloop, handles, osdefs, osutils, oserrno] @@ -68,7 +65,7 @@ type ReadMessagePredicate* = proc (data: openArray[byte]): tuple[consumed: int, done: bool] {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} const StreamTransportTrackerName* = "stream.transport" @@ -125,14 +122,14 @@ else: type StreamCallback* = proc(server: StreamServer, client: StreamTransport): Future[void] {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} ## New remote client connection callback ## ``server`` - StreamServer object. ## ``client`` - accepted client transport. 
TransportInitCallback* = proc(server: StreamServer, fd: AsyncFD): StreamTransport {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} ## Custom transport initialization procedure, which can allocate inherited ## StreamTransport object. @@ -144,7 +141,7 @@ type # transport for new client proc remoteAddress*(transp: StreamTransport): TransportAddress {. - raises: [Defect, TransportError].} = + raises: [TransportError].} = ## Returns ``transp`` remote socket address. if transp.kind != TransportKind.Socket: raise newException(TransportError, "Socket required!") @@ -158,7 +155,7 @@ proc remoteAddress*(transp: StreamTransport): TransportAddress {. transp.remote proc localAddress*(transp: StreamTransport): TransportAddress {. - raises: [Defect, TransportError].} = + raises: [TransportError].} = ## Returns ``transp`` local socket address. if transp.kind != TransportKind.Socket: raise newException(TransportError, "Socket required!") @@ -205,9 +202,9 @@ template shiftVectorFile(v: var StreamVector, o: untyped) = (v).offset += uint(o) proc setupStreamTransportTracker(): StreamTransportTracker {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} proc setupStreamServerTracker(): StreamServerTracker {. - gcsafe, raises: [Defect].} + gcsafe, raises: [].} proc getStreamTransportTracker(): StreamTransportTracker {.inline.} = var res = cast[StreamTransportTracker](getTracker(StreamTransportTrackerName)) @@ -824,9 +821,9 @@ when defined(windows): # For some reason Nim compiler does not detect `pipeHandle` usage in # pipeContinuation() procedure, so we marking it as {.used.} here. var pipeHandle {.used.} = INVALID_HANDLE_VALUE - var pipeContinuation: proc (udata: pointer) {.gcsafe, raises: [Defect].} + var pipeContinuation: proc (udata: pointer) {.gcsafe, raises: [].} - pipeContinuation = proc (udata: pointer) {.gcsafe, raises: [Defect].} = + pipeContinuation = proc (udata: pointer) {.gcsafe, raises: [].} = # Continue only if `retFuture` is not cancelled. if not(retFuture.finished()): let @@ -1830,12 +1827,12 @@ proc stop2*(server: StreamServer): Result[void, OSErrorCode] = server.status = ServerStatus.Stopped ok() -proc start*(server: StreamServer) {.raises: [Defect, TransportOsError].} = +proc start*(server: StreamServer) {.raises: [TransportOsError].} = ## Starts ``server``. let res = start2(server) if res.isErr(): raiseTransportOsError(res.error()) -proc stop*(server: StreamServer) {.raises: [Defect, TransportOsError].} = +proc stop*(server: StreamServer) {.raises: [TransportOsError].} = ## Stops ``server``. let res = stop2(server) if res.isErr(): raiseTransportOsError(res.error()) @@ -1907,7 +1904,7 @@ proc createStreamServer*(host: TransportAddress, child: StreamServer = nil, init: TransportInitCallback = nil, udata: pointer = nil): StreamServer {. - raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = ## Create new TCP stream server. ## ## ``host`` - address to which server will be bound. @@ -2115,7 +2112,7 @@ proc createStreamServer*(host: TransportAddress, child: StreamServer = nil, init: TransportInitCallback = nil, udata: pointer = nil): StreamServer {. - raises: [Defect, CatchableError].} = + raises: [CatchableError].} = createStreamServer(host, nil, flags, sock, backlog, bufferSize, child, init, cast[pointer](udata)) @@ -2128,7 +2125,7 @@ proc createStreamServer*[T](host: TransportAddress, bufferSize: int = DefaultStreamBufferSize, child: StreamServer = nil, init: TransportInitCallback = nil): StreamServer {. 
- raises: [Defect, CatchableError].} = + raises: [CatchableError].} = var fflags = flags + {GCUserData} GC_ref(udata) createStreamServer(host, cbproc, fflags, sock, backlog, bufferSize, @@ -2142,7 +2139,7 @@ proc createStreamServer*[T](host: TransportAddress, bufferSize: int = DefaultStreamBufferSize, child: StreamServer = nil, init: TransportInitCallback = nil): StreamServer {. - raises: [Defect, CatchableError].} = + raises: [CatchableError].} = var fflags = flags + {GCUserData} GC_ref(udata) createStreamServer(host, nil, fflags, sock, backlog, bufferSize, @@ -2681,7 +2678,7 @@ proc fromPipe2*(fd: AsyncFD, child: StreamTransport = nil, proc fromPipe*(fd: AsyncFD, child: StreamTransport = nil, bufferSize = DefaultStreamBufferSize): StreamTransport {. - raises: [Defect, TransportOsError].} = + raises: [TransportOsError].} = ## Create new transport object using pipe's file descriptor. ## ## ``bufferSize`` is size of internal buffer for transport. diff --git a/tests/testbugs.nim b/tests/testbugs.nim index ba1e6df24..cf18a13c9 100644 --- a/tests/testbugs.nim +++ b/tests/testbugs.nim @@ -119,7 +119,7 @@ suite "Asynchronous issues test suite": check bytesSent == messageSize var rfut {.used.} = inpTransp.readExactly(addr buffer[0], messageSize) - proc waiterProc(udata: pointer) {.raises: [Defect], gcsafe.} = + proc waiterProc(udata: pointer) {.raises: [], gcsafe.} = try: waitFor(sleepAsync(0.milliseconds)) except CatchableError: diff --git a/tests/testsoon.nim b/tests/testsoon.nim index 69bffd41a..88072c267 100644 --- a/tests/testsoon.nim +++ b/tests/testsoon.nim @@ -46,8 +46,8 @@ suite "callSoon() tests suite": await sleepAsync(100.milliseconds) timeoutsTest1 += 1 - var callbackproc: proc(udata: pointer) {.gcsafe, raises: [Defect].} - callbackproc = proc (udata: pointer) {.gcsafe, raises: [Defect].} = + var callbackproc: proc(udata: pointer) {.gcsafe, raises: [].} + callbackproc = proc (udata: pointer) {.gcsafe, raises: [].} = timeoutsTest2 += 1 {.gcsafe.}: callSoon(callbackproc) From 0035f4fa6692e85756aa192b4df84c21d3cacacb Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 7 Jun 2023 20:04:07 +0200 Subject: [PATCH 035/146] Introduce `chronos/futures` (#405) * move `Future[T]` into its own module along with some basic accessors * mark all fields internal, exposing only read-only versions under the old names * introduce `init`/`completed`/etc as a way of creating a future (vs newFuture) * introduce `LocationKind` for `SrcLoc` access * don't expose `FutureList` unless future tracking is enabled * introduce `chronosStrictFutureAccess` which controls a number of additional `Defect` being raised when accessing Future fields in the wrong state - this will become true in a future version In this version, `Future[T]` backwards compatibility code remains in `asyncfutures2` meaning that if only `chronos/futures` is imported, only "new" API is available. This branch is a refinement / less invasive / minimal version of https://github.com/status-im/nim-chronos/pull/373. 
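
For illustration, a minimal usage sketch of the new module from calling code,
assuming the constructor-style `completed` mentioned above takes the stored
value (exact signatures may differ in detail):

    import chronos/futures

    # Construct an already-finished future via the new constructor-style API
    # instead of newFuture (which stays in asyncfutures2 for compatibility).
    let fut = Future.completed(42)

    doAssert fut.finished()    # state is no longer Pending
    doAssert fut.completed()   # finished with a value, not failed/cancelled
    doAssert fut.value == 42   # read-only accessor over the internal field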
--- chronos/asyncfutures2.nim | 209 +++++++++++------------------------ chronos/asyncloop.nim | 10 +- chronos/asyncmacro2.nim | 22 ++-- chronos/config.nim | 4 + chronos/debugutils.nim | 2 +- chronos/futures.nim | 221 ++++++++++++++++++++++++++++++++++++++ tests/testall.nim | 2 +- tests/testfutures.nim | 26 +++++ 8 files changed, 331 insertions(+), 165 deletions(-) create mode 100644 chronos/futures.nim create mode 100644 tests/testfutures.nim diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index 5438e4fcc..d170f0825 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -10,8 +10,6 @@ import std/sequtils import stew/base10 -import "."/srcloc -export srcloc when chronosStackTrace: when defined(nimHasStacktracesModule): @@ -21,49 +19,20 @@ when chronosStackTrace: reraisedFromBegin = -10 reraisedFromEnd = -100 - type StackTrace = string +template LocCreateIndex*: auto {.deprecated: "LocationKind.Create".} = + LocationKind.Create +template LocFinishIndex*: auto {.deprecated: "LocationKind.Finish".} = + LocationKind.Finish +template LocCompleteIndex*: untyped {.deprecated: "LocationKind.Finish".} = + LocationKind.Finish -const - LocCreateIndex* = 0 - LocFinishIndex* = 1 - -template LocCompleteIndex*: untyped {.deprecated: "LocFinishIndex".} = - LocFinishIndex - -when chronosStrictException: - {.pragma: closureIter, raises: [CatchableError], gcsafe.} -else: - {.pragma: closureIter, raises: [Exception], gcsafe.} +func `[]`*(loc: array[LocationKind, ptr SrcLoc], v: int): ptr SrcLoc {.deprecated: "use LocationKind".} = + case v + of 0: loc[LocationKind.Create] + of 1: loc[LocationKind.Finish] + else: raiseAssert("Unknown source location " & $v) type - FutureState* {.pure.} = enum - Pending, Completed, Cancelled, Failed - - FutureBase* = ref object of RootObj ## Untyped future. - location*: array[2, ptr SrcLoc] - callbacks: seq[AsyncCallback] - cancelcb*: CallbackFunc - child*: FutureBase - state*: FutureState - error*: ref CatchableError ## Stored exception - mustCancel*: bool - closure*: iterator(f: FutureBase): FutureBase {.closureIter.} - - when chronosFutureId: - id*: uint - - when chronosStackTrace: - errorStackTrace*: StackTrace - stackTrace: StackTrace ## For debugging purposes only. - - when chronosFutureTracking: - next*: FutureBase - prev*: FutureBase - - Future*[T] = ref object of FutureBase ## Typed future. 
- when T isnot void: - value*: T ## Stored value - FutureStr*[T] = ref object of Future[T] ## Future to hold GC strings gcholder*: string @@ -72,59 +41,24 @@ type ## Future to hold GC seqs gcholder*: seq[B] - FutureDefect* = object of Defect - cause*: FutureBase - - FutureError* = object of CatchableError - - CancelledError* = object of FutureError - - FutureList* = object - head*: FutureBase - tail*: FutureBase - count*: uint - # Backwards compatibility for old FutureState name template Finished* {.deprecated: "Use Completed instead".} = Completed template Finished*(T: type FutureState): FutureState {.deprecated: "Use FutureState.Completed instead".} = FutureState.Completed -when chronosFutureId: - var currentID* {.threadvar.}: uint -else: - template id*(f: FutureBase): uint = - cast[uint](addr f[]) - -when chronosFutureTracking: - var futureList* {.threadvar.}: FutureList - -template setupFutureBase(loc: ptr SrcLoc) = - new(result) - result.state = FutureState.Pending - when chronosStackTrace: - result.stackTrace = getStackTrace() - when chronosFutureId: - currentID.inc() - result.id = currentID - result.location[LocCreateIndex] = loc - - when chronosFutureTracking: - result.next = nil - result.prev = futureList.tail - if not(isNil(futureList.tail)): - futureList.tail.next = result - futureList.tail = result - if isNil(futureList.head): - futureList.head = result - futureList.count.inc() - proc newFutureImpl[T](loc: ptr SrcLoc): Future[T] = - setupFutureBase(loc) + let fut = Future[T]() + internalInitFutureBase(fut, loc, FutureState.Pending) + fut proc newFutureSeqImpl[A, B](loc: ptr SrcLoc): FutureSeq[A, B] = - setupFutureBase(loc) + let fut = FutureSeq[A, B]() + internalInitFutureBase(fut, loc, FutureState.Pending) + fut proc newFutureStrImpl[T](loc: ptr SrcLoc): FutureStr[T] = - setupFutureBase(loc) + let fut = FutureStr[T]() + internalInitFutureBase(fut, loc, FutureState.Pending) + fut template newFuture*[T](fromProc: static[string] = ""): Future[T] = ## Creates a new future. @@ -149,24 +83,6 @@ template newFutureStr*[T](fromProc: static[string] = ""): FutureStr[T] = ## that this future belongs to, is a good habit as it helps with debugging. newFutureStrImpl[T](getSrcLocation(fromProc)) -proc finished*(future: FutureBase): bool {.inline.} = - ## Determines whether ``future`` has finished, i.e. ``future`` state changed - ## from state ``Pending`` to one of the states (``Finished``, ``Cancelled``, - ## ``Failed``). - (future.state != FutureState.Pending) - -proc cancelled*(future: FutureBase): bool {.inline.} = - ## Determines whether ``future`` has cancelled. - (future.state == FutureState.Cancelled) - -proc failed*(future: FutureBase): bool {.inline.} = - ## Determines whether ``future`` finished with an error. - (future.state == FutureState.Failed) - -proc completed*(future: FutureBase): bool {.inline.} = - ## Determines whether ``future`` finished with a value. - (future.state == FutureState.Completed) - proc done*(future: FutureBase): bool {.deprecated: "Use `completed` instead".} = ## This is an alias for ``completed(future)`` procedure. 
completed(future) @@ -178,8 +94,8 @@ when chronosFutureTracking: let future = cast[FutureBase](udata) if future == futureList.tail: futureList.tail = future.prev if future == futureList.head: futureList.head = future.next - if not(isNil(future.next)): future.next.prev = future.prev - if not(isNil(future.prev)): future.prev.next = future.next + if not(isNil(future.next)): future.next.internalPrev = future.prev + if not(isNil(future.prev)): future.prev.internalNext = future.next futureList.count.dec() proc scheduleDestructor(future: FutureBase) {.inline.} = @@ -194,9 +110,9 @@ proc checkFinished(future: FutureBase, loc: ptr SrcLoc) = msg.add("Details:") msg.add("\n Future ID: " & Base10.toString(future.id)) msg.add("\n Creation location:") - msg.add("\n " & $future.location[LocCreateIndex]) + msg.add("\n " & $future.location[LocationKind.Create]) msg.add("\n First completion location:") - msg.add("\n " & $future.location[LocFinishIndex]) + msg.add("\n " & $future.location[LocationKind.Finish]) msg.add("\n Second completion location:") msg.add("\n " & $loc) when chronosStackTrace: @@ -209,20 +125,21 @@ proc checkFinished(future: FutureBase, loc: ptr SrcLoc) = err.cause = future raise err else: - future.location[LocFinishIndex] = loc + future.internalLocation[LocationKind.Finish] = loc proc finish(fut: FutureBase, state: FutureState) = # We do not perform any checks here, because: # 1. `finish()` is a private procedure and `state` is under our control. # 2. `fut.state` is checked by `checkFinished()`. - fut.state = state - doAssert fut.cancelcb == nil or state != FutureState.Cancelled - fut.cancelcb = nil # release cancellation callback memory - for item in fut.callbacks.mitems(): + fut.internalState = state + when chronosStrictFutureAccess: + doAssert fut.internalCancelcb == nil or state != FutureState.Cancelled + fut.internalCancelcb = nil # release cancellation callback memory + for item in fut.internalCallbacks.mitems(): if not(isNil(item.function)): callSoon(item) item = default(AsyncCallback) # release memory as early as possible - fut.callbacks = default(seq[AsyncCallback]) # release seq as well + fut.internalCallbacks = default(seq[AsyncCallback]) # release seq as well when chronosFutureTracking: scheduleDestructor(fut) @@ -230,8 +147,8 @@ proc finish(fut: FutureBase, state: FutureState) = proc complete[T](future: Future[T], val: T, loc: ptr SrcLoc) = if not(future.cancelled()): checkFinished(future, loc) - doAssert(isNil(future.error)) - future.value = val + doAssert(isNil(future.internalError)) + future.internalValue = val future.finish(FutureState.Completed) template complete*[T](future: Future[T], val: T) = @@ -241,7 +158,7 @@ template complete*[T](future: Future[T], val: T) = proc complete(future: Future[void], loc: ptr SrcLoc) = if not(future.cancelled()): checkFinished(future, loc) - doAssert(isNil(future.error)) + doAssert(isNil(future.internalError)) future.finish(FutureState.Completed) template complete*(future: Future[void]) = @@ -251,9 +168,9 @@ template complete*(future: Future[void]) = proc fail(future: FutureBase, error: ref CatchableError, loc: ptr SrcLoc) = if not(future.cancelled()): checkFinished(future, loc) - future.error = error + future.internalError = error when chronosStackTrace: - future.errorStackTrace = if getStackTrace(error) == "": + future.internalErrorStackTrace = if getStackTrace(error) == "": getStackTrace() else: getStackTrace(error) @@ -269,9 +186,9 @@ template newCancelledError(): ref CancelledError = proc cancelAndSchedule(future: FutureBase, loc: 
ptr SrcLoc) = if not(future.finished()): checkFinished(future, loc) - future.error = newCancelledError() + future.internalError = newCancelledError() when chronosStackTrace: - future.errorStackTrace = getStackTrace() + future.internalErrorStackTrace = getStackTrace() future.finish(FutureState.Cancelled) template cancelAndSchedule*(future: FutureBase) = @@ -295,22 +212,23 @@ proc cancel(future: FutureBase, loc: ptr SrcLoc): bool = if future.finished(): return false - if not(isNil(future.child)): + if not(isNil(future.internalChild)): # If you hit this assertion, you should have used the `CancelledError` # mechanism and/or use a regular `addCallback` - doAssert future.cancelcb.isNil, - "futures returned from `{.async.}` functions must not use `cancelCallback`" + when chronosStrictFutureAccess: + doAssert future.internalCancelcb.isNil, + "futures returned from `{.async.}` functions must not use `cancelCallback`" - if cancel(future.child, getSrcLocation()): + if cancel(future.internalChild, getSrcLocation()): return true else: - if not(isNil(future.cancelcb)): - future.cancelcb(cast[pointer](future)) - future.cancelcb = nil + if not(isNil(future.internalCancelcb)): + future.internalCancelcb(cast[pointer](future)) + future.internalCancelcb = nil cancelAndSchedule(future, getSrcLocation()) - future.mustCancel = true + future.internalMustCancel = true return true template cancel*(future: FutureBase) = @@ -318,7 +236,7 @@ template cancel*(future: FutureBase) = discard cancel(future, getSrcLocation()) proc clearCallbacks(future: FutureBase) = - future.callbacks = default(seq[AsyncCallback]) + future.internalCallbacks = default(seq[AsyncCallback]) proc addCallback*(future: FutureBase, cb: CallbackFunc, udata: pointer) = ## Adds the callbacks proc to be called when the future completes. @@ -328,7 +246,7 @@ proc addCallback*(future: FutureBase, cb: CallbackFunc, udata: pointer) = if future.finished(): callSoon(cb, udata) else: - future.callbacks.add AsyncCallback(function: cb, udata: udata) + future.internalCallbacks.add AsyncCallback(function: cb, udata: udata) proc addCallback*(future: FutureBase, cb: CallbackFunc) = ## Adds the callbacks proc to be called when the future completes. @@ -343,7 +261,7 @@ proc removeCallback*(future: FutureBase, cb: CallbackFunc, doAssert(not isNil(cb)) # Make sure to release memory associated with callback, or reference chains # may be created! - future.callbacks.keepItIf: + future.internalCallbacks.keepItIf: it.function != cb or it.udata != udata proc removeCallback*(future: FutureBase, cb: CallbackFunc) = @@ -372,9 +290,10 @@ proc `cancelCallback=`*(future: FutureBase, cb: CallbackFunc) = ## This callback will be called immediately as ``future.cancel()`` invoked and ## must be set before future is finished. 
- doAssert not future.finished(), - "cancellation callback must be set before finishing the future" - future.cancelcb = cb + when chronosStrictFutureAccess: + doAssert not future.finished(), + "cancellation callback must be set before finishing the future" + future.internalCancelcb = cb {.push stackTrace: off.} proc futureContinue*(fut: FutureBase) {.raises: [], gcsafe.} @@ -396,12 +315,12 @@ proc futureContinue*(fut: FutureBase) {.raises: [], gcsafe.} = while true: # Call closure to make progress on `fut` until it reaches `yield` (inside # `await` typically) or completes / fails / is cancelled - next = fut.closure(fut) - if fut.closure.finished(): # Reached the end of the transformed proc + next = fut.internalClosure(fut) + if fut.internalClosure.finished(): # Reached the end of the transformed proc break if next == nil: - raiseAssert "Async procedure (" & ($fut.location[LocCreateIndex]) & + raiseAssert "Async procedure (" & ($fut.location[LocationKind.Create]) & ") yielded `nil`, are you await'ing a `nil` Future?" if not next.finished(): @@ -441,8 +360,8 @@ proc futureContinue*(fut: FutureBase) {.raises: [], gcsafe.} = # `futureContinue` will not be called any more for this future so we can # clean it up - fut.closure = nil - fut.child = nil + fut.internalClosure = nil + fut.internalChild = nil {.pop.} @@ -527,15 +446,15 @@ when chronosStackTrace: proc internalCheckComplete*(fut: FutureBase) {.raises: [CatchableError].} = # For internal use only. Used in asyncmacro - if not(isNil(fut.error)): + if not(isNil(fut.internalError)): when chronosStackTrace: - injectStacktrace(fut.error) - raise fut.error + injectStacktrace(fut.internalError) + raise fut.internalError proc internalRead*[T](fut: Future[T]): T {.inline.} = # For internal use only. Used in asyncmacro when T isnot void: - return fut.value + return fut.internalValue proc read*[T](future: Future[T] ): T {.raises: [CatchableError].} = ## Retrieves the value of ``future``. 
Future must be finished otherwise @@ -561,7 +480,7 @@ proc readError*(future: FutureBase): ref CatchableError {.raises: [ValueError].} raise newException(ValueError, "No error in future.") template taskFutureLocation(future: FutureBase): string = - let loc = future.location[0] + let loc = future.location[LocationKind.Create] "[" & ( if len(loc.procedure) == 0: "[unspecified]" else: $loc.procedure & "()" ) & " at " & $loc.file & ":" & $(loc.line) & "]" diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index a4359516e..774391621 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -13,10 +13,10 @@ from nativesockets import Port import std/[tables, strutils, heapqueue, deques] import stew/results -import "."/[config, osdefs, oserrno, osutils, timer] +import "."/[config, futures, osdefs, oserrno, osutils, timer] export Port -export timer, results +export futures, timer, results #{.injectStmt: newGcInvariant().} @@ -155,11 +155,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or export oserrno type - CallbackFunc* = proc (arg: pointer) {.gcsafe, raises: [].} - - AsyncCallback* = object - function*: CallbackFunc - udata*: pointer + AsyncCallback = InternalAsyncCallback AsyncError* = object of CatchableError ## Generic async exception diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim index bcad60686..429e287ca 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/asyncmacro2.nim @@ -242,10 +242,10 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = newLit(prcName)) ) ) - # -> resultFuture.closure = iterator + # -> resultFuture.internalClosure = iterator outerProcBody.add( newAssignment( - newDotExpr(retFutureSym, newIdentNode("closure")), + newDotExpr(retFutureSym, newIdentNode("internalClosure")), iteratorNameSym) ) @@ -280,30 +280,30 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = template await*[T](f: Future[T]): untyped = when declared(chronosInternalRetFuture): - chronosInternalRetFuture.child = f + chronosInternalRetFuture.internalChild = f # `futureContinue` calls the iterator generated by the `async` # transformation - `yield` gives control back to `futureContinue` which is # responsible for resuming execution once the yielded future is finished - yield chronosInternalRetFuture.child + yield chronosInternalRetFuture.internalChild # `child` is guaranteed to have been `finished` after the yield - if chronosInternalRetFuture.mustCancel: + if chronosInternalRetFuture.internalMustCancel: raise newCancelledError() # `child` released by `futureContinue` - chronosInternalRetFuture.child.internalCheckComplete() + chronosInternalRetFuture.internalChild.internalCheckComplete() when T isnot void: - cast[type(f)](chronosInternalRetFuture.child).internalRead() + cast[type(f)](chronosInternalRetFuture.internalChild).internalRead() else: unsupported "await is only available within {.async.}" template awaitne*[T](f: Future[T]): Future[T] = when declared(chronosInternalRetFuture): - chronosInternalRetFuture.child = f - yield chronosInternalRetFuture.child - if chronosInternalRetFuture.mustCancel: + chronosInternalRetFuture.internalChild = f + yield chronosInternalRetFuture.internalChild + if chronosInternalRetFuture.internalMustCancel: raise newCancelledError() - cast[type(f)](chronosInternalRetFuture.child) + cast[type(f)](chronosInternalRetFuture.internalChild) else: unsupported "awaitne is only available within {.async.}" diff --git a/chronos/config.nim b/chronos/config.nim index cef8a63dc..0a439a12a 100644 --- 
a/chronos/config.nim +++ b/chronos/config.nim @@ -19,6 +19,8 @@ when (NimMajor, NimMinor) >= (1, 4): ## used from within `async` code may need to be be explicitly annotated ## with `raises: [CatchableError]` when this mode is enabled. + chronosStrictFutureAccess* {.booldefine.}: bool = defined(chronosPreviewV4) + chronosStackTrace* {.booldefine.}: bool = defined(chronosDebug) ## Include stack traces in futures for creation and completion points @@ -52,6 +54,8 @@ else: const chronosStrictException*: bool = defined(chronosPreviewV4) or defined(chronosStrictException) + chronosStrictFutureAccess*: bool = + defined(chronosPreviewV4) or defined(chronosStrictFutureAccess) chronosStackTrace*: bool = defined(chronosDebug) or defined(chronosStackTrace) chronosFutureId*: bool = defined(chronosDebug) or defined(chronosFutureId) chronosFutureTracking*: bool = diff --git a/chronos/debugutils.nim b/chronos/debugutils.nim index 0bf7e3ef8..e26eeeaf7 100644 --- a/chronos/debugutils.nim +++ b/chronos/debugutils.nim @@ -37,7 +37,7 @@ proc dumpPendingFutures*(filter = AllFutureStates): string = for item in pendingFutures(): if item.state in filter: inc(count) - let loc = item.location[LocCreateIndex][] + let loc = item.location[LocationKind.Create][] let procedure = $loc.procedure let filename = $loc.file let procname = if len(procedure) == 0: diff --git a/chronos/futures.nim b/chronos/futures.nim new file mode 100644 index 000000000..edfae328b --- /dev/null +++ b/chronos/futures.nim @@ -0,0 +1,221 @@ +# +# Chronos +# +# (c) Copyright 2015 Dominik Picheta +# (c) Copyright 2018-2023 Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +{.push raises: [].} + +import "."/[config, srcloc] + +export srcloc + +when chronosStackTrace: + type StackTrace = string + +when chronosStrictException: + {.pragma: closureIter, raises: [CatchableError], gcsafe.} +else: + {.pragma: closureIter, raises: [Exception], gcsafe.} + +type + LocationKind* {.pure.} = enum + Create + Finish + + CallbackFunc* = proc (arg: pointer) {.gcsafe, raises: [].} + + # Internal type, not part of API + InternalAsyncCallback* = object + function*: CallbackFunc + udata*: pointer + + FutureState* {.pure.} = enum + Pending, Completed, Cancelled, Failed + + InternalFutureBase* = object of RootObj + # Internal untyped future representation - the fields are not part of the + # public API and neither is `InternalFutureBase`, ie the inheritance + # structure may change in the future (haha) + + internalLocation*: array[LocationKind, ptr SrcLoc] + internalCallbacks*: seq[InternalAsyncCallback] + internalCancelcb*: CallbackFunc + internalChild*: FutureBase + internalState*: FutureState + internalError*: ref CatchableError ## Stored exception + internalMustCancel*: bool + internalClosure*: iterator(f: FutureBase): FutureBase {.closureIter.} + + when chronosFutureId: + internalId*: uint + + when chronosStackTrace: + internalErrorStackTrace*: StackTrace + internalStackTrace*: StackTrace ## For debugging purposes only. + + when chronosFutureTracking: + internalNext*: FutureBase + internalPrev*: FutureBase + + FutureBase* = ref object of InternalFutureBase + ## Untyped Future + + Future*[T] = ref object of FutureBase ## Typed future. 
+ when T isnot void: + internalValue*: T ## Stored value + + FutureDefect* = object of Defect + cause*: FutureBase + + FutureError* = object of CatchableError + + CancelledError* = object of FutureError + ## Exception raised when accessing the value of a cancelled future + +when chronosFutureId: + var currentID* {.threadvar.}: uint + template id*(fut: FutureBase): uint = fut.internalId +else: + template id*(fut: FutureBase): uint = + cast[uint](addr fut[]) + +when chronosFutureTracking: + type + FutureList* = object + head*: FutureBase + tail*: FutureBase + count*: uint + + var futureList* {.threadvar.}: FutureList + +# Internal utilities - these are not part of the stable API +proc internalInitFutureBase*( + fut: FutureBase, + loc: ptr SrcLoc, + state: FutureState) = + fut.internalState = state + fut.internalLocation[LocationKind.Create] = loc + if state != FutureState.Pending: + fut.internalLocation[LocationKind.Finish] = loc + + when chronosFutureId: + currentID.inc() + fut.internalId = currentID + + when chronosStackTrace: + fut.internalStackTrace = getStackTrace() + + when chronosFutureTracking: + if state == FutureState.Pending: + fut.internalNext = nil + fut.internalPrev = futureList.tail + if not(isNil(futureList.tail)): + futureList.tail.internalNext = fut + futureList.tail = fut + if isNil(futureList.head): + futureList.head = fut + futureList.count.inc() + +# Public API +template init*[T](F: type Future[T], fromProc: static[string] = ""): Future[T] = + ## Creates a new pending future. + ## + ## Specifying ``fromProc``, which is a string specifying the name of the proc + ## that this future belongs to, is a good habit as it helps with debugging. + let res = Future[T]() + internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending) + res + +template completed*( + F: type Future, fromProc: static[string] = ""): Future[void] = + ## Create a new completed future + let res = Future[T]() + internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Completed) + res + +template completed*[T: not void]( + F: type Future, valueParam: T, fromProc: static[string] = ""): Future[T] = + ## Create a new completed future + let res = Future[T](internalValue: valueParam) + internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Completed) + res + +template failed*[T]( + F: type Future[T], errorParam: ref CatchableError, + fromProc: static[string] = ""): Future[T] = + ## Create a new failed future + let res = Future[T](internalError: errorParam) + internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Failed) + when chronosStackTrace: + res.internalErrorStackTrace = + if getStackTrace(res.error) == "": + getStackTrace() + else: + getStackTrace(res.error) + + res + +func state*(future: FutureBase): FutureState = + future.internalState + +func finished*(future: FutureBase): bool {.inline.} = + ## Determines whether ``future`` has finished, i.e. ``future`` state changed + ## from state ``Pending`` to one of the states (``Finished``, ``Cancelled``, + ## ``Failed``). + future.state != FutureState.Pending + +func cancelled*(future: FutureBase): bool {.inline.} = + ## Determines whether ``future`` has cancelled. + future.state == FutureState.Cancelled + +func failed*(future: FutureBase): bool {.inline.} = + ## Determines whether ``future`` finished with an error. + future.state == FutureState.Failed + +func completed*(future: FutureBase): bool {.inline.} = + ## Determines whether ``future`` finished with a value. 
+ future.state == FutureState.Completed + +func location*(future: FutureBase): array[LocationKind, ptr SrcLoc] = + future.internalLocation + +func value*[T](future: Future[T]): T = + ## Return the value in a completed future - raises Defect when + ## `fut.completed()` is `false`. + ## + ## See `read` for a version that raises an catchable error when future + ## has not completed. + when chronosStrictFutureAccess: + if not future.completed(): + raise (ref FutureDefect)( + msg: "Future not completed while accessing value", + cause: future) + + when T isnot void: + future.internalValue + +func error*(future: FutureBase): ref CatchableError = + ## Return the error of `future`, or `nil` if future did not fail. + ## + ## See `readError` for a version that raises a catchable error when the + ## future has not failed. + when chronosStrictFutureAccess: + if not future.failed() and not future.cancelled(): + raise (ref FutureDefect)( + msg: "Future not failed/cancelled while accessing error", + cause: future) + + future.internalError + +when chronosFutureTracking: + func next*(fut: FutureBase): FutureBase = fut.internalNext + func prev*(fut: FutureBase): FutureBase = fut.internalPrev + +when chronosStackTrace: + func errorStackTrace*(fut: FutureBase): StackTrace = fut.internalErrorStackTrace + func stackTrace*(fut: FutureBase): StackTrace = fut.internalStackTrace diff --git a/tests/testall.nim b/tests/testall.nim index eabe0a586..bf0e98a9b 100644 --- a/tests/testall.nim +++ b/tests/testall.nim @@ -8,7 +8,7 @@ import testmacro, testsync, testsoon, testtime, testfut, testsignal, testaddress, testdatagram, teststream, testserver, testbugs, testnet, testasyncstream, testhttpserver, testshttpserver, testhttpclient, - testproc, testratelimit + testproc, testratelimit, testfutures # Must be imported last to check for Pending futures import testutils diff --git a/tests/testfutures.nim b/tests/testfutures.nim new file mode 100644 index 000000000..bc4e026b4 --- /dev/null +++ b/tests/testfutures.nim @@ -0,0 +1,26 @@ +# Chronos Test Suite +# (c) Copyright 2018-Present +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +{.used.} + +import unittest2 +import ../chronos/futures + +suite "Futures": + test "Future constructors": + let + completed = Future.completed(42) + failed = Future[int].failed((ref ValueError)(msg: "msg")) + + check: + completed.value == 42 + completed.state == FutureState.Completed + + check: + failed.error of ValueError + failed.state == FutureState.Failed From 47016961f50642f4654931f26b668a31d84013d6 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Fri, 23 Jun 2023 10:11:14 +0200 Subject: [PATCH 036/146] Less flaky rate limit tests (#408) --- chronos/ratelimit.nim | 18 ++++++------ tests/testratelimit.nim | 62 ++++++++++++++++++++--------------------- 2 files changed, 41 insertions(+), 39 deletions(-) diff --git a/chronos/ratelimit.nim b/chronos/ratelimit.nim index 02d80f519..4147db788 100644 --- a/chronos/ratelimit.nim +++ b/chronos/ratelimit.nim @@ -28,13 +28,15 @@ type pendingRequests: seq[BucketWaiter] manuallyReplenished: AsyncEvent -proc update(bucket: TokenBucket) = +proc update(bucket: TokenBucket, currentTime: Moment) = if bucket.fillDuration == default(Duration): bucket.budget = min(bucket.budgetCap, bucket.budget) return + if currentTime < bucket.lastUpdate: + return + let - currentTime = Moment.now() timeDelta = currentTime - bucket.lastUpdate fillPercent = timeDelta.milliseconds.float / 
bucket.fillDuration.milliseconds.float replenished = @@ -46,7 +48,7 @@ proc update(bucket: TokenBucket) = bucket.lastUpdate += milliseconds(deltaFromReplenished) bucket.budget = min(bucket.budgetCap, bucket.budget + replenished) -proc tryConsume*(bucket: TokenBucket, tokens: int): bool = +proc tryConsume*(bucket: TokenBucket, tokens: int, now = Moment.now()): bool = ## If `tokens` are available, consume them, ## Otherwhise, return false. @@ -54,7 +56,7 @@ proc tryConsume*(bucket: TokenBucket, tokens: int): bool = bucket.budget -= tokens return true - bucket.update() + bucket.update(now) if bucket.budget >= tokens: bucket.budget -= tokens @@ -93,12 +95,12 @@ proc worker(bucket: TokenBucket) {.async.} = bucket.workFuture = nil -proc consume*(bucket: TokenBucket, tokens: int): Future[void] = +proc consume*(bucket: TokenBucket, tokens: int, now = Moment.now()): Future[void] = ## Wait for `tokens` to be available, and consume them. let retFuture = newFuture[void]("TokenBucket.consume") if isNil(bucket.workFuture) or bucket.workFuture.finished(): - if bucket.tryConsume(tokens): + if bucket.tryConsume(tokens, now): retFuture.complete() return retFuture @@ -119,10 +121,10 @@ proc consume*(bucket: TokenBucket, tokens: int): Future[void] = return retFuture -proc replenish*(bucket: TokenBucket, tokens: int) = +proc replenish*(bucket: TokenBucket, tokens: int, now = Moment.now()) = ## Add `tokens` to the budget (capped to the bucket capacity) bucket.budget += tokens - bucket.update() + bucket.update(now) bucket.manuallyReplenished.fire() proc new*( diff --git a/tests/testratelimit.nim b/tests/testratelimit.nim index 4c78664d9..bf281eec7 100644 --- a/tests/testratelimit.nim +++ b/tests/testratelimit.nim @@ -15,22 +15,23 @@ import ../chronos/ratelimit suite "Token Bucket": test "Sync test": var bucket = TokenBucket.new(1000, 1.milliseconds) + let + start = Moment.now() + fullTime = start + 1.milliseconds check: - bucket.tryConsume(800) == true - bucket.tryConsume(200) == true + bucket.tryConsume(800, start) == true + bucket.tryConsume(200, start) == true # Out of budget - bucket.tryConsume(100) == false - waitFor(sleepAsync(10.milliseconds)) - check: - bucket.tryConsume(800) == true - bucket.tryConsume(200) == true + bucket.tryConsume(100, start) == false + bucket.tryConsume(800, fullTime) == true + bucket.tryConsume(200, fullTime) == true # Out of budget - bucket.tryConsume(100) == false + bucket.tryConsume(100, fullTime) == false test "Async test": - var bucket = TokenBucket.new(1000, 500.milliseconds) + var bucket = TokenBucket.new(1000, 1000.milliseconds) check: bucket.tryConsume(1000) == true var toWait = newSeq[Future[void]]() @@ -41,28 +42,26 @@ suite "Token Bucket": waitFor(allFutures(toWait)) let duration = Moment.now() - start - check: duration in 700.milliseconds .. 1100.milliseconds + check: duration in 1400.milliseconds .. 2200.milliseconds test "Over budget async": - var bucket = TokenBucket.new(100, 10.milliseconds) + var bucket = TokenBucket.new(100, 100.milliseconds) # Consume 10* the budget cap let beforeStart = Moment.now() - waitFor(bucket.consume(1000).wait(1.seconds)) - when not defined(macosx): - # CI's macos scheduler is so jittery that this tests sometimes takes >500ms - # the test will still fail if it's >1 seconds - check Moment.now() - beforeStart in 90.milliseconds .. 150.milliseconds + waitFor(bucket.consume(1000).wait(5.seconds)) + check Moment.now() - beforeStart in 900.milliseconds .. 
1500.milliseconds test "Sync manual replenish": var bucket = TokenBucket.new(1000, 0.seconds) + let start = Moment.now() check: - bucket.tryConsume(1000) == true - bucket.tryConsume(1000) == false + bucket.tryConsume(1000, start) == true + bucket.tryConsume(1000, start) == false bucket.replenish(2000) check: - bucket.tryConsume(1000) == true + bucket.tryConsume(1000, start) == true # replenish is capped to the bucket max - bucket.tryConsume(1000) == false + bucket.tryConsume(1000, start) == false test "Async manual replenish": var bucket = TokenBucket.new(10 * 150, 0.seconds) @@ -102,24 +101,25 @@ suite "Token Bucket": test "Very long replenish": var bucket = TokenBucket.new(7000, 1.hours) - check bucket.tryConsume(7000) - check bucket.tryConsume(1) == false + let start = Moment.now() + check bucket.tryConsume(7000, start) + check bucket.tryConsume(1, start) == false # With this setting, it takes 514 milliseconds # to tick one. Check that we can eventually # consume, even if we update multiple time # before that - let start = Moment.now() - while Moment.now() - start >= 514.milliseconds: - check bucket.tryConsume(1) == false - waitFor(sleepAsync(10.milliseconds)) + var fakeNow = start + while fakeNow - start < 514.milliseconds: + check bucket.tryConsume(1, fakeNow) == false + fakeNow += 30.milliseconds - check bucket.tryConsume(1) == false + check bucket.tryConsume(1, fakeNow) == true test "Short replenish": var bucket = TokenBucket.new(15000, 1.milliseconds) - check bucket.tryConsume(15000) - check bucket.tryConsume(1) == false + let start = Moment.now() + check bucket.tryConsume(15000, start) + check bucket.tryConsume(1, start) == false - waitFor(sleepAsync(1.milliseconds)) - check bucket.tryConsume(15000) == true + check bucket.tryConsume(15000, start + 1.milliseconds) == true From 73fd1206ab0efc4e4c4c6fc6d5fe93d36c1778b2 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 26 Jun 2023 16:28:33 +0300 Subject: [PATCH 037/146] Disable HTTP/1.1 pipeline support for HTTP/S server by default. (#410) * Disable HTTP/1.1 pipeline support for HTTP/S server by default. Add tests. * Fix tests according to new behavior. Fix tests to use random assigned OS ports instead of predefined. * Fix flaky tests in testasyncstream. --- chronos/apps/http/httpserver.nim | 23 ++- tests/testasyncstream.nim | 273 ++++++++++++++++--------------- tests/testhttpclient.nim | 223 ++++++++++++------------- tests/testhttpserver.nim | 211 ++++++++++++++++++------ 4 files changed, 433 insertions(+), 297 deletions(-) diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index 1da4b44cf..03aaaf96c 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -25,6 +25,8 @@ type QueryCommaSeparatedArray ## Enable usage of comma as an array item delimiter in url-encoded ## entities (e.g. query string or POST body). + Http11Pipeline + ## Enable HTTP/1.1 pipelining. 
HttpServerError* {.pure.} = enum TimeoutError, CatchableError, RecoverableError, CriticalError, @@ -198,6 +200,20 @@ proc new*(htype: typedesc[HttpServerRef], ) ok(res) +proc getResponseFlags*(req: HttpRequestRef): set[HttpResponseFlags] = + var defaultFlags: set[HttpResponseFlags] = {} + case req.version + of HttpVersion11: + if HttpServerFlags.Http11Pipeline notin req.connection.server.flags: + return defaultFlags + let header = req.headers.getString(ConnectionHeader, "keep-alive") + if header == "keep-alive": + {HttpResponseFlags.KeepAlive} + else: + defaultFlags + else: + defaultFlags + proc getResponse*(req: HttpRequestRef): HttpResponseRef {.raises: [].} = if req.response.isNone(): var resp = HttpResponseRef( @@ -206,10 +222,7 @@ proc getResponse*(req: HttpRequestRef): HttpResponseRef {.raises: [].} = version: req.version, headersTable: HttpTable.init(), connection: req.connection, - flags: if req.version == HttpVersion11: - {HttpResponseFlags.KeepAlive} - else: - {} + flags: req.getResponseFlags() ) req.response = Opt.some(resp) resp @@ -792,7 +805,7 @@ proc processLoop(server: HttpServerRef, transp: StreamTransport, break else: let request = arg.get() - var keepConn = if request.version == HttpVersion11: true else: false + var keepConn = HttpResponseFlags.KeepAlive in request.getResponseFlags() if lastErrorCode.isNone(): if isNil(resp): # Response was `nil`. diff --git a/tests/testasyncstream.nim b/tests/testasyncstream.nim index 47a6c9424..09a0b7e3f 100644 --- a/tests/testasyncstream.nim +++ b/tests/testasyncstream.nim @@ -145,7 +145,7 @@ proc createBigMessage(message: string, size: int): seq[byte] = suite "AsyncStream test suite": test "AsyncStream(StreamTransport) readExactly() test": - proc testReadExactly(address: TransportAddress): Future[bool] {.async.} = + proc testReadExactly(): Future[bool] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) @@ -157,9 +157,10 @@ suite "AsyncStream test suite": server.close() var buffer = newSeq[byte](10) - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) await rstream.readExactly(addr buffer[0], 10) check cast[string](buffer) == "0000000000" @@ -171,9 +172,10 @@ suite "AsyncStream test suite": await transp.closeWait() await server.join() result = true - check waitFor(testReadExactly(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testReadExactly()) == true + test "AsyncStream(StreamTransport) readUntil() test": - proc testReadUntil(address: TransportAddress): Future[bool] {.async.} = + proc testReadUntil(): Future[bool] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) @@ -186,9 +188,10 @@ suite "AsyncStream test suite": var buffer = newSeq[byte](13) var sep = @[byte('N'), byte('N'), byte('z')] - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) var r1 = await rstream.readUntil(addr buffer[0], len(buffer), sep) check: @@ -207,9 +210,10 @@ suite "AsyncStream test 
suite": await transp.closeWait() await server.join() result = true - check waitFor(testReadUntil(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testReadUntil()) == true + test "AsyncStream(StreamTransport) readLine() test": - proc testReadLine(address: TransportAddress): Future[bool] {.async.} = + proc testReadLine(): Future[bool] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) @@ -220,9 +224,10 @@ suite "AsyncStream test suite": server.stop() server.close() - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) var r1 = await rstream.readLine() check r1 == "0000000000" @@ -234,9 +239,10 @@ suite "AsyncStream test suite": await transp.closeWait() await server.join() result = true - check waitFor(testReadLine(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testReadLine()) == true + test "AsyncStream(StreamTransport) read() test": - proc testRead(address: TransportAddress): Future[bool] {.async.} = + proc testRead(): Future[bool] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) @@ -247,9 +253,10 @@ suite "AsyncStream test suite": server.stop() server.close() - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) var buf1 = await rstream.read(10) check cast[string](buf1) == "0000000000" @@ -259,9 +266,10 @@ suite "AsyncStream test suite": await transp.closeWait() await server.join() result = true - check waitFor(testRead(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testRead()) == true + test "AsyncStream(StreamTransport) consume() test": - proc testConsume(address: TransportAddress): Future[bool] {.async.} = + proc testConsume(): Future[bool] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) @@ -272,9 +280,10 @@ suite "AsyncStream test suite": server.stop() server.close() - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) var res1 = await rstream.consume(10) check: @@ -290,7 +299,8 @@ suite "AsyncStream test suite": await transp.closeWait() await server.join() result = true - check waitFor(testConsume(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testConsume()) == true + test "AsyncStream(StreamTransport) leaks test": check: getTracker("async.stream.reader").isLeaked() == false @@ -299,7 +309,7 @@ suite "AsyncStream test suite": getTracker("stream.transport").isLeaked() == false test "AsyncStream(AsyncStream) readExactly() test": - proc testReadExactly2(address: TransportAddress): Future[bool] {.async.} = + proc testReadExactly2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = 
newAsyncStreamWriter(transp) @@ -323,9 +333,10 @@ suite "AsyncStream test suite": server.close() var buffer = newSeq[byte](10) - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) var rstream2 = newChunkedStreamReader(rstream) await rstream2.readExactly(addr buffer[0], 10) @@ -347,9 +358,10 @@ suite "AsyncStream test suite": await transp.closeWait() await server.join() result = true - check waitFor(testReadExactly2(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testReadExactly2()) == true + test "AsyncStream(AsyncStream) readUntil() test": - proc testReadUntil2(address: TransportAddress): Future[bool] {.async.} = + proc testReadUntil2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) @@ -373,9 +385,10 @@ suite "AsyncStream test suite": var buffer = newSeq[byte](13) var sep = @[byte('N'), byte('N'), byte('z')] - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) var rstream2 = newChunkedStreamReader(rstream) @@ -404,9 +417,10 @@ suite "AsyncStream test suite": await transp.closeWait() await server.join() result = true - check waitFor(testReadUntil2(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testReadUntil2()) == true + test "AsyncStream(AsyncStream) readLine() test": - proc testReadLine2(address: TransportAddress): Future[bool] {.async.} = + proc testReadLine2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) @@ -425,9 +439,10 @@ suite "AsyncStream test suite": server.stop() server.close() - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) var rstream2 = newChunkedStreamReader(rstream) var r1 = await rstream2.readLine() @@ -449,9 +464,10 @@ suite "AsyncStream test suite": await transp.closeWait() await server.join() result = true - check waitFor(testReadLine2(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testReadLine2()) == true + test "AsyncStream(AsyncStream) read() test": - proc testRead2(address: TransportAddress): Future[bool] {.async.} = + proc testRead2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) @@ -469,9 +485,10 @@ suite "AsyncStream test suite": server.stop() server.close() - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) var rstream2 = newChunkedStreamReader(rstream) var buf1 = await rstream2.read(10) @@ -488,9 +505,10 @@ suite "AsyncStream test 
suite": await transp.closeWait() await server.join() result = true - check waitFor(testRead2(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testRead2()) == true + test "AsyncStream(AsyncStream) consume() test": - proc testConsume2(address: TransportAddress): Future[bool] {.async.} = + proc testConsume2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = const @@ -518,9 +536,10 @@ suite "AsyncStream test suite": server.stop() server.close() - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) var rstream2 = newChunkedStreamReader(rstream) @@ -547,9 +566,10 @@ suite "AsyncStream test suite": await transp.closeWait() await server.join() result = true - check waitFor(testConsume2(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testConsume2()) == true + test "AsyncStream(AsyncStream) write(eof) test": - proc testWriteEof(address: TransportAddress): Future[bool] {.async.} = + proc testWriteEof(): Future[bool] {.async.} = let size = 10240 message = createBigMessage("ABCDEFGHIJKLMNOP", size) @@ -578,7 +598,8 @@ suite "AsyncStream test suite": await transp.closeWait() let flags = {ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay} - var server = createStreamServer(address, processClient, flags = flags) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + processClient, flags = flags) server.start() var conn = await connect(server.localAddress()) try: @@ -589,7 +610,8 @@ suite "AsyncStream test suite": await server.closeWait() return true - check waitFor(testWriteEof(initTAddress("127.0.0.1:46001"))) == true + check waitFor(testWriteEof()) == true + test "AsyncStream(AsyncStream) leaks test": check: getTracker("async.stream.reader").isLeaked() == false @@ -624,8 +646,7 @@ suite "ChunkedStream test suite": " in\r\n\r\nchunks.\r\n0;position=4\r\n\r\n", "Wikipedia in\r\n\r\nchunks."], ] - proc checkVector(address: TransportAddress, - inputstr: string): Future[string] {.async.} = + proc checkVector(inputstr: string): Future[string] {.async.} = proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) @@ -637,9 +658,10 @@ suite "ChunkedStream test suite": server.stop() server.close() - var server = createStreamServer(address, serveClient, {ReuseAddr}) + var server = createStreamServer(initTAddress("127.0.0.1:0"), + serveClient, {ReuseAddr}) server.start() - var transp = await connect(address) + var transp = await connect(server.localAddress()) var rstream = newAsyncStreamReader(transp) var rstream2 = newChunkedStreamReader(rstream) var res = await rstream2.read() @@ -650,15 +672,16 @@ suite "ChunkedStream test suite": await server.join() result = ress - proc testVectors(address: TransportAddress): Future[bool] {.async.} = + proc testVectors(): Future[bool] {.async.} = var res = true for i in 0..= 5: let (code, data) = await session.fetch(ha.getUri()) await session.closeWait() @@ -691,26 +703,22 @@ suite "HTTP client testing suite": await server.closeWait() return "redirect-" & $res - proc testBasicAuthorization(): Future[bool] {.async.} = - let session = HttpSessionRef.new({HttpClientFlag.NoVerifyHost}, - maxRedirections = 10) - let url = parseUri("https://guest:guest@jigsaw.w3.org/HTTP/Basic/") - let resp = await 
session.fetch(url) - await session.closeWait() - if (resp.status == 200) and - ("Your browser made it!" in bytesToString(resp.data)): - return true - else: - echo "RESPONSE STATUS = [", resp.status, "]" - echo "RESPONSE = [", bytesToString(resp.data), "]" - return false - - proc testConnectionManagement(address: TransportAddress): Future[bool] {. + # proc testBasicAuthorization(): Future[bool] {.async.} = + # let session = HttpSessionRef.new({HttpClientFlag.NoVerifyHost}, + # maxRedirections = 10) + # let url = parseUri("https://guest:guest@jigsaw.w3.org/HTTP/Basic/") + # let resp = await session.fetch(url) + # await session.closeWait() + # if (resp.status == 200) and + # ("Your browser made it!" in bytesToString(resp.data)): + # return true + # else: + # echo "RESPONSE STATUS = [", resp.status, "]" + # echo "RESPONSE = [", bytesToString(resp.data), "]" + # return false + + proc testConnectionManagement(): Future[bool] {. async.} = - let - keepHa = getAddress(address, HttpClientScheme.NonSecure, "/keep") - dropHa = getAddress(address, HttpClientScheme.NonSecure, "/drop") - proc test1( a1: HttpAddress, version: HttpVersion, @@ -772,8 +780,13 @@ suite "HTTP client testing suite": else: return dumbResponse() - var server = createServer(address, process, false) + var server = createServer(initTAddress("127.0.0.1:0"), process, false) server.start() + let address = server.instance.localAddress() + + let + keepHa = getAddress(address, HttpClientScheme.NonSecure, "/keep") + dropHa = getAddress(address, HttpClientScheme.NonSecure, "/drop") try: let @@ -872,11 +885,7 @@ suite "HTTP client testing suite": return true - proc testIdleConnection(address: TransportAddress): Future[bool] {. - async.} = - let - ha = getAddress(address, HttpClientScheme.NonSecure, "/test") - + proc testIdleConnection(): Future[bool] {.async.} = proc test( session: HttpSessionRef, a: HttpAddress @@ -902,11 +911,14 @@ suite "HTTP client testing suite": else: return dumbResponse() - var server = createServer(address, process, false) + var server = createServer(initTAddress("127.0.0.1:0"), process, false) server.start() - let session = HttpSessionRef.new({HttpClientFlag.Http11Pipeline}, - idleTimeout = 1.seconds, - idlePeriod = 200.milliseconds) + let + address = server.instance.localAddress() + ha = getAddress(address, HttpClientScheme.NonSecure, "/test") + session = HttpSessionRef.new({HttpClientFlag.Http11Pipeline}, + idleTimeout = 1.seconds, + idlePeriod = 200.milliseconds) try: var f1 = test(session, ha) var f2 = test(session, ha) @@ -932,12 +944,7 @@ suite "HTTP client testing suite": return true - proc testNoPipeline(address: TransportAddress): Future[bool] {. 
- async.} = - let - ha = getAddress(address, HttpClientScheme.NonSecure, "/test") - hb = getAddress(address, HttpClientScheme.NonSecure, "/keep-test") - + proc testNoPipeline(): Future[bool] {.async.} = proc test( session: HttpSessionRef, a: HttpAddress @@ -966,10 +973,14 @@ suite "HTTP client testing suite": else: return dumbResponse() - var server = createServer(address, process, false) + var server = createServer(initTAddress("127.0.0.1:0"), process, false) server.start() - let session = HttpSessionRef.new(idleTimeout = 100.seconds, - idlePeriod = 10.milliseconds) + let + address = server.instance.localAddress() + ha = getAddress(address, HttpClientScheme.NonSecure, "/test") + hb = getAddress(address, HttpClientScheme.NonSecure, "/keep-test") + session = HttpSessionRef.new(idleTimeout = 100.seconds, + idlePeriod = 10.milliseconds) try: var f1 = test(session, ha) var f2 = test(session, ha) @@ -1001,8 +1012,7 @@ suite "HTTP client testing suite": return true - proc testServerSentEvents(address: TransportAddress, - secure: bool): Future[bool] {.async.} = + proc testServerSentEvents(secure: bool): Future[bool] {.async.} = const SingleGoodTests = [ ("/test/single/1", "a:b\r\nc: d\re:f\n:comment\r\ng:\n h: j \n\n", @@ -1117,8 +1127,9 @@ suite "HTTP client testing suite": else: return dumbResponse() - var server = createServer(address, process, secure) + var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() + let address = server.instance.localAddress() var session = createSession(secure) @@ -1184,87 +1195,71 @@ suite "HTTP client testing suite": return true test "HTTP all request methods test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testMethods(address, false)) == 18 + check waitFor(testMethods(false)) == 18 test "HTTP(S) all request methods test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testMethods(address, true)) == 18 + check waitFor(testMethods(true)) == 18 test "HTTP client response streaming test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testResponseStreamReadingTest(address, false)) == 8 + check waitFor(testResponseStreamReadingTest(false)) == 8 test "HTTP(S) client response streaming test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testResponseStreamReadingTest(address, true)) == 8 + check waitFor(testResponseStreamReadingTest(true)) == 8 test "HTTP client (size) request streaming test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestSizeStreamWritingTest(address, false)) == 2 + check waitFor(testRequestSizeStreamWritingTest(false)) == 2 test "HTTP(S) client (size) request streaming test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestSizeStreamWritingTest(address, true)) == 2 + check waitFor(testRequestSizeStreamWritingTest(true)) == 2 test "HTTP client (chunked) request streaming test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestChunkedStreamWritingTest(address, false)) == 2 + check waitFor(testRequestChunkedStreamWritingTest(false)) == 2 test "HTTP(S) client (chunked) request streaming test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestChunkedStreamWritingTest(address, true)) == 2 + check waitFor(testRequestChunkedStreamWritingTest(true)) == 2 test "HTTP client (size + chunked) url-encoded POST test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestPostUrlEncodedTest(address, false)) == 2 + check 
waitFor(testRequestPostUrlEncodedTest(false)) == 2 test "HTTP(S) client (size + chunked) url-encoded POST test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestPostUrlEncodedTest(address, true)) == 2 + check waitFor(testRequestPostUrlEncodedTest(true)) == 2 test "HTTP client (size + chunked) multipart POST test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestPostMultipartTest(address, false)) == 2 + check waitFor(testRequestPostMultipartTest(false)) == 2 test "HTTP(S) client (size + chunked) multipart POST test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestPostMultipartTest(address, true)) == 2 + check waitFor(testRequestPostMultipartTest(true)) == 2 test "HTTP client redirection test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestRedirectTest(address, false, 5)) == "ok-5-200" + check waitFor(testRequestRedirectTest(false, 5)) == "ok-5-200" test "HTTP(S) client redirection test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestRedirectTest(address, true, 5)) == "ok-5-200" + check waitFor(testRequestRedirectTest(true, 5)) == "ok-5-200" test "HTTP client maximum redirections test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestRedirectTest(address, false, 4)) == "redirect-true" + check waitFor(testRequestRedirectTest(false, 4)) == "redirect-true" test "HTTP(S) client maximum redirections test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testRequestRedirectTest(address, true, 4)) == "redirect-true" + check waitFor(testRequestRedirectTest(true, 4)) == "redirect-true" test "HTTPS basic authorization test": - check waitFor(testBasicAuthorization()) == true + skip() + # This test disabled because remote service is pretty flaky and fails pretty + # often. 
As soon as more stable service will be found this test should be + # recovered + # check waitFor(testBasicAuthorization()) == true test "HTTP client connection management test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testConnectionManagement(address)) == true + check waitFor(testConnectionManagement()) == true test "HTTP client idle connection test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testIdleConnection(address)) == true + check waitFor(testIdleConnection()) == true test "HTTP client no-pipeline test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testNoPipeline(address)) == true + check waitFor(testNoPipeline()) == true test "HTTP client server-sent events test": - let address = initTAddress("127.0.0.1:30080") - check waitFor(testServerSentEvents(address, false)) == true + check waitFor(testServerSentEvents(false)) == true test "Leaks test": proc getTrackerLeaks(tracker: string): bool = diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index acf8b20b8..63c92b229 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -8,7 +8,8 @@ import std/[strutils, algorithm] import unittest2 import ../chronos, ../chronos/apps/http/httpserver, - ../chronos/apps/http/httpcommon + ../chronos/apps/http/httpcommon, + ../chronos/unittest2/asynctests import stew/base10 {.used.} @@ -17,6 +18,9 @@ suite "HTTP server testing suite": type TooBigTest = enum GetBodyTest, ConsumeBodyTest, PostUrlTest, PostMultipartTest + TestHttpResponse = object + headers: HttpTable + data: string proc httpClient(address: TransportAddress, data: string): Future[string] {.async.} = @@ -33,8 +37,32 @@ suite "HTTP server testing suite": if not(isNil(transp)): await closeWait(transp) - proc testTooBigBodyChunked(address: TransportAddress, - operation: TooBigTest): Future[bool] {.async.} = + proc httpClient2(transp: StreamTransport, + request: string, + length: int): Future[TestHttpResponse] {.async.} = + var buffer = newSeq[byte](4096) + var sep = @[0x0D'u8, 0x0A'u8, 0x0D'u8, 0x0A'u8] + let wres = await transp.write(request) + if wres != len(request): + raise newException(ValueError, "Unable to write full request") + let hres = await transp.readUntil(addr buffer[0], len(buffer), sep) + var hdata = @buffer + hdata.setLen(hres) + zeroMem(addr buffer[0], len(buffer)) + await transp.readExactly(addr buffer[0], length) + let data = bytesToString(buffer.toOpenArray(0, length - 1)) + let headers = + block: + let resp = parseResponse(hdata, false) + if resp.failed(): + raise newException(ValueError, "Unable to decode response headers") + var res = HttpTable.init() + for key, value in resp.headers(hdata): + res.add(key, value) + res + return TestHttpResponse(headers: headers, data: data) + + proc testTooBigBodyChunked(operation: TooBigTest): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
async.} = @@ -59,7 +87,7 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, maxRequestBodySize = 10, socketFlags = socketFlags) if res.isErr(): @@ -67,6 +95,7 @@ suite "HTTP server testing suite": let server = res.get() server.start() + let address = server.instance.localAddress() let request = case operation @@ -97,7 +126,7 @@ suite "HTTP server testing suite": return serverRes and (data.startsWith("HTTP/1.1 413")) test "Request headers timeout test": - proc testTimeout(address: TransportAddress): Future[bool] {.async.} = + proc testTimeout(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. async.} = @@ -110,23 +139,25 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, socketFlags = socketFlags, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), + process, socketFlags = socketFlags, httpHeadersTimeout = 100.milliseconds) if res.isErr(): return false let server = res.get() server.start() + let address = server.instance.localAddress() let data = await httpClient(address, "") await server.stop() await server.closeWait() return serverRes and (data.startsWith("HTTP/1.1 408")) - check waitFor(testTimeout(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testTimeout()) == true test "Empty headers test": - proc testEmpty(address: TransportAddress): Future[bool] {.async.} = + proc testEmpty(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. async.} = @@ -139,22 +170,24 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, socketFlags = socketFlags) + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), + process, socketFlags = socketFlags) if res.isErr(): return false let server = res.get() server.start() + let address = server.instance.localAddress() let data = await httpClient(address, "\r\n\r\n") await server.stop() await server.closeWait() return serverRes and (data.startsWith("HTTP/1.1 400")) - check waitFor(testEmpty(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testEmpty()) == true test "Too big headers test": - proc testTooBig(address: TransportAddress): Future[bool] {.async.} = + proc testTooBig(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
async.} = @@ -167,7 +200,7 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, maxHeadersSize = 10, socketFlags = socketFlags) if res.isErr(): @@ -175,16 +208,17 @@ suite "HTTP server testing suite": let server = res.get() server.start() + let address = server.instance.localAddress() let data = await httpClient(address, "GET / HTTP/1.1\r\n\r\n") await server.stop() await server.closeWait() return serverRes and (data.startsWith("HTTP/1.1 431")) - check waitFor(testTooBig(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testTooBig()) == true test "Too big request body test (content-length)": - proc testTooBigBody(address: TransportAddress): Future[bool] {.async.} = + proc testTooBigBody(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. async.} = @@ -196,7 +230,7 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, maxRequestBodySize = 10, socketFlags = socketFlags) if res.isErr(): @@ -204,6 +238,7 @@ suite "HTTP server testing suite": let server = res.get() server.start() + let address = server.instance.localAddress() let request = "GET / HTTP/1.1\r\nContent-Length: 20\r\n\r\n" let data = await httpClient(address, request) @@ -211,30 +246,26 @@ suite "HTTP server testing suite": await server.closeWait() return serverRes and (data.startsWith("HTTP/1.1 413")) - check waitFor(testTooBigBody(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testTooBigBody()) == true test "Too big request body test (getBody()/chunked encoding)": check: - waitFor(testTooBigBodyChunked(initTAddress("127.0.0.1:30080"), - GetBodyTest)) == true + waitFor(testTooBigBodyChunked(GetBodyTest)) == true test "Too big request body test (consumeBody()/chunked encoding)": check: - waitFor(testTooBigBodyChunked(initTAddress("127.0.0.1:30080"), - ConsumeBodyTest)) == true + waitFor(testTooBigBodyChunked(ConsumeBodyTest)) == true test "Too big request body test (post()/urlencoded/chunked encoding)": check: - waitFor(testTooBigBodyChunked(initTAddress("127.0.0.1:30080"), - PostUrlTest)) == true + waitFor(testTooBigBodyChunked(PostUrlTest)) == true test "Too big request body test (post()/multipart/chunked encoding)": check: - waitFor(testTooBigBodyChunked(initTAddress("127.0.0.1:30080"), - PostMultipartTest)) == true + waitFor(testTooBigBodyChunked(PostMultipartTest)) == true test "Query arguments test": - proc testQuery(address: TransportAddress): Future[bool] {.async.} = + proc testQuery(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
async.} = @@ -252,13 +283,14 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, socketFlags = socketFlags) if res.isErr(): return false let server = res.get() server.start() + let address = server.instance.localAddress() let data1 = await httpClient(address, "GET /?a=1&a=2&b=3&c=4 HTTP/1.0\r\n\r\n") @@ -271,10 +303,10 @@ suite "HTTP server testing suite": (data2.find("TEST_OK:a:П:b:Ц:c:Ю:Ф:Б") >= 0) return r - check waitFor(testQuery(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testQuery()) == true test "Headers test": - proc testHeaders(address: TransportAddress): Future[bool] {.async.} = + proc testHeaders(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. async.} = @@ -292,13 +324,14 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, socketFlags = socketFlags) if res.isErr(): return false let server = res.get() server.start() + let address = server.instance.localAddress() let message = "GET / HTTP/1.0\r\n" & @@ -314,10 +347,10 @@ suite "HTTP server testing suite": await server.closeWait() return serverRes and (data.find(expect) >= 0) - check waitFor(testHeaders(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testHeaders()) == true test "POST arguments (urlencoded/content-length) test": - proc testPostUrl(address: TransportAddress): Future[bool] {.async.} = + proc testPostUrl(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. async.} = @@ -337,13 +370,14 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, socketFlags = socketFlags) if res.isErr(): return false let server = res.get() server.start() + let address = server.instance.localAddress() let message = "POST / HTTP/1.0\r\n" & @@ -357,10 +391,10 @@ suite "HTTP server testing suite": await server.closeWait() return serverRes and (data.find(expect) >= 0) - check waitFor(testPostUrl(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testPostUrl()) == true test "POST arguments (urlencoded/chunked encoding) test": - proc testPostUrl2(address: TransportAddress): Future[bool] {.async.} = + proc testPostUrl2(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
async.} = @@ -380,13 +414,14 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, socketFlags = socketFlags) if res.isErr(): return false let server = res.get() server.start() + let address = server.instance.localAddress() let message = "POST / HTTP/1.0\r\n" & @@ -401,10 +436,10 @@ suite "HTTP server testing suite": await server.closeWait() return serverRes and (data.find(expect) >= 0) - check waitFor(testPostUrl2(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testPostUrl2()) == true test "POST arguments (multipart/content-length) test": - proc testPostMultipart(address: TransportAddress): Future[bool] {.async.} = + proc testPostMultipart(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. async.} = @@ -424,13 +459,14 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, socketFlags = socketFlags) if res.isErr(): return false let server = res.get() server.start() + let address = server.instance.localAddress() let message = "POST / HTTP/1.0\r\n" & @@ -456,10 +492,10 @@ suite "HTTP server testing suite": await server.closeWait() return serverRes and (data.find(expect) >= 0) - check waitFor(testPostMultipart(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testPostMultipart()) == true test "POST arguments (multipart/chunked encoding) test": - proc testPostMultipart2(address: TransportAddress): Future[bool] {.async.} = + proc testPostMultipart2(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
async.} = @@ -479,13 +515,14 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, socketFlags = socketFlags) if res.isErr(): return false let server = res.get() server.start() + let address = server.instance.localAddress() let message = "POST / HTTP/1.0\r\n" & @@ -520,12 +557,12 @@ suite "HTTP server testing suite": await server.closeWait() return serverRes and (data.find(expect) >= 0) - check waitFor(testPostMultipart2(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testPostMultipart2()) == true test "drop() connections test": const ClientsCount = 10 - proc testHTTPdrop(address: TransportAddress): Future[bool] {.async.} = + proc testHTTPdrop(): Future[bool] {.async.} = var eventWait = newAsyncEvent() var eventContinue = newAsyncEvent() var count = 0 @@ -542,7 +579,7 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, socketFlags = socketFlags, maxConnections = 100) if res.isErr(): @@ -550,6 +587,7 @@ suite "HTTP server testing suite": let server = res.get() server.start() + let address = server.instance.localAddress() var clients: seq[Future[string]] let message = "GET / HTTP/1.0\r\nHost: https://127.0.0.1:80\r\n\r\n" @@ -572,7 +610,7 @@ suite "HTTP server testing suite": return false return true - check waitFor(testHTTPdrop(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testHTTPdrop()) == true test "Content-Type multipart boundary test": const AllowedCharacters = { @@ -1190,7 +1228,7 @@ suite "HTTP server testing suite": r6.get() == MediaType.init(req[1][6]) test "SSE server-side events stream test": - proc testPostMultipart2(address: TransportAddress): Future[bool] {.async.} = + proc testPostMultipart2(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
async.} = @@ -1212,13 +1250,14 @@ suite "HTTP server testing suite": return dumbResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} - let res = HttpServerRef.new(address, process, + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, socketFlags = socketFlags) if res.isErr(): return false let server = res.get() server.start() + let address = server.instance.localAddress() let message = "GET / HTTP/1.1\r\n" & @@ -1237,8 +1276,84 @@ suite "HTTP server testing suite": await server.closeWait() return serverRes and (data.find(expect) >= 0) - check waitFor(testPostMultipart2(initTAddress("127.0.0.1:30080"))) == true + check waitFor(testPostMultipart2()) == true + + asyncTest "HTTP/1.1 pipeline test": + const TestMessages = [ + ("GET / HTTP/1.0\r\n\r\n", + {HttpServerFlags.Http11Pipeline}, false, "close"), + ("GET / HTTP/1.0\r\nConnection: close\r\n\r\n", + {HttpServerFlags.Http11Pipeline}, false, "close"), + ("GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", + {HttpServerFlags.Http11Pipeline}, false, "close"), + ("GET / HTTP/1.0\r\n\r\n", + {}, false, "close"), + ("GET / HTTP/1.0\r\nConnection: close\r\n\r\n", + {}, false, "close"), + ("GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", + {}, false, "close"), + ("GET / HTTP/1.1\r\n\r\n", + {HttpServerFlags.Http11Pipeline}, true, "keep-alive"), + ("GET / HTTP/1.1\r\nConnection: close\r\n\r\n", + {HttpServerFlags.Http11Pipeline}, false, "close"), + ("GET / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n", + {HttpServerFlags.Http11Pipeline}, true, "keep-alive"), + ("GET / HTTP/1.1\r\n\r\n", + {}, false, "close"), + ("GET / HTTP/1.1\r\nConnection: close\r\n\r\n", + {}, false, "close"), + ("GET / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n", + {}, false, "close") + ] + proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + if r.isOk(): + let request = r.get() + return await request.respond(Http200, "TEST_OK", HttpTable.init()) + else: + return dumbResponse() + + for test in TestMessages: + let + socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} + serverFlags = test[1] + res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, + socketFlags = socketFlags, + serverFlags = serverFlags) + check res.isOk() + + let + server = res.get() + address = server.instance.localAddress() + + server.start() + var transp: StreamTransport + try: + transp = await connect(address) + block: + let response = await transp.httpClient2(test[0], 7) + check: + response.data == "TEST_OK" + response.headers.getString("connection") == test[3] + # We do this sleeping here just because we running both server and + # client in single process, so when we received response from server + # it does not mean that connection has been immediately closed - it + # takes some more calls, so we trying to get this calls happens. + await sleepAsync(50.milliseconds) + let connectionStillAvailable = + try: + let response {.used.} = await transp.httpClient2(test[0], 7) + true + except CatchableError: + false + + check connectionStillAvailable == test[2] + + finally: + if not(isNil(transp)): + await transp.closeWait() + await server.stop() + await server.closeWait() test "Leaks test": check: From 94ca0c38478e47ca806dc912b2a725fc41b532c2 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 26 Jun 2023 16:31:09 +0300 Subject: [PATCH 038/146] Fix remoteAddress() should raise TransportAbortedError too. (#409) Add ENOTCONN as TransportAbortedError. Refactor remoteAddress()/localAddress(). 
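With this change remoteAddress() and localAddress() report a vanished peer as
TransportAbortedError (ENOTCONN is now mapped to it) instead of a bare
TransportOsError, so callers can treat it as a routine disconnect. A minimal
caller-side sketch, not part of this patch; the serve proc and the echo
logging are illustrative only:

import chronos

proc serve(server: StreamServer) {.async.} =
  while true:
    let transp = await server.accept()
    try:
      # remoteAddress() may now raise TransportAbortedError if the peer
      # disappeared between accept() and getpeername().
      echo "accepted connection from ", transp.remoteAddress()
    except TransportAbortedError:
      # Routine disconnect: drop the transport and keep accepting.
      discard
    except TransportError as exc:
      echo "transport error: ", exc.msg
    await transp.closeWait()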
--- chronos/transports/common.nim | 71 +++++++++++++++++++++++++++-------- chronos/transports/stream.nim | 14 +++---- 2 files changed, 62 insertions(+), 23 deletions(-) diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index cbec5d6f4..5a9072cbd 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -574,10 +574,8 @@ template getTransportUseClosedError*(): ref TransportUseClosedError = newException(TransportUseClosedError, "Transport is already closed!") template getTransportOsError*(err: OSErrorCode): ref TransportOsError = - var msg = "(" & $int(err) & ") " & osErrorMsg(err) - var tre = newException(TransportOsError, msg) - tre.code = err - tre + (ref TransportOsError)( + code: err, msg: "(" & $int(err) & ") " & osErrorMsg(err)) template getTransportOsError*(err: cint): ref TransportOsError = getTransportOsError(OSErrorCode(err)) @@ -608,15 +606,16 @@ template getTransportTooManyError*( ): ref TransportTooManyError = let msg = when defined(posix): - if code == OSErrorCode(0): + case code + of OSErrorCode(0): "Too many open transports" - elif code == oserrno.EMFILE: + of EMFILE: "[EMFILE] Too many open files in the process" - elif code == oserrno.ENFILE: + of ENFILE: "[ENFILE] Too many open files in system" - elif code == oserrno.ENOBUFS: + of ENOBUFS: "[ENOBUFS] No buffer space available" - elif code == oserrno.ENOMEM: + of ENOMEM: "[ENOMEM] Not enough memory availble" else: "[" & $int(code) & "] Too many open transports" @@ -649,23 +648,26 @@ template getConnectionAbortedError*( ): ref TransportAbortedError = let msg = when defined(posix): - if code == OSErrorCode(0): + case code + of OSErrorCode(0), ECONNABORTED: "[ECONNABORTED] Connection has been aborted before being accepted" - elif code == oserrno.EPERM: + of EPERM: "[EPERM] Firewall rules forbid connection" - elif code == oserrno.ETIMEDOUT: + of ETIMEDOUT: "[ETIMEDOUT] Operation has been timed out" + of ENOTCONN: + "[ENOTCONN] Transport endpoint is not connected" else: "[" & $int(code) & "] Connection has been aborted" elif defined(windows): case code - of OSErrorCode(0), oserrno.WSAECONNABORTED: + of OSErrorCode(0), WSAECONNABORTED: "[ECONNABORTED] Connection has been aborted before being accepted" of WSAENETDOWN: "[ENETDOWN] Network is down" - of oserrno.WSAENETRESET: + of WSAENETRESET: "[ENETRESET] Network dropped connection on reset" - of oserrno.WSAECONNRESET: + of WSAECONNRESET: "[ECONNRESET] Connection reset by peer" of WSAETIMEDOUT: "[ETIMEDOUT] Connection timed out" @@ -675,3 +677,42 @@ template getConnectionAbortedError*( "[" & $int(code) & "] Connection has been aborted" newException(TransportAbortedError, msg) + +template getTransportError*(ecode: OSErrorCode): untyped = + when defined(posix): + case ecode + of ECONNABORTED, EPERM, ETIMEDOUT, ENOTCONN: + getConnectionAbortedError(ecode) + of EMFILE, ENFILE, ENOBUFS, ENOMEM: + getTransportTooManyError(ecode) + else: + getTransportOsError(ecode) + else: + case ecode + of WSAECONNABORTED, WSAENETDOWN, WSAENETRESET, WSAECONNRESET, WSAETIMEDOUT: + getConnectionAbortedError(ecode) + of ERROR_TOO_MANY_OPEN_FILES, WSAENOBUFS, WSAEMFILE: + getTransportTooManyError(ecode) + else: + getTransportOsError(ecode) + +proc raiseTransportError*(ecode: OSErrorCode) {. + raises: [TransportAbortedError, TransportTooManyError, TransportOsError], + noreturn.} = + ## Raises transport specific OS error. 
+  when defined(posix):
+    case ecode
+    of ECONNABORTED, EPERM, ETIMEDOUT, ENOTCONN:
+      raise getConnectionAbortedError(ecode)
+    of EMFILE, ENFILE, ENOBUFS, ENOMEM:
+      raise getTransportTooManyError(ecode)
+    else:
+      raise getTransportOsError(ecode)
+  else:
+    case ecode
+    of WSAECONNABORTED, WSAENETDOWN, WSAENETRESET, WSAECONNRESET, WSAETIMEDOUT:
+      raise getConnectionAbortedError(ecode)
+    of ERROR_TOO_MANY_OPEN_FILES, WSAENOBUFS, WSAEMFILE:
+      raise getTransportTooManyError(ecode)
+    else:
+      raise getTransportOsError(ecode)
diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim
index 18d6a506d..3abd942c8 100644
--- a/chronos/transports/stream.nim
+++ b/chronos/transports/stream.nim
@@ -141,30 +141,28 @@ type
   # transport for new client
 
 proc remoteAddress*(transp: StreamTransport): TransportAddress {.
-    raises: [TransportError].} =
+    raises: [TransportAbortedError, TransportTooManyError, TransportOsError].} =
   ## Returns ``transp`` remote socket address.
-  if transp.kind != TransportKind.Socket:
-    raise newException(TransportError, "Socket required!")
+  doAssert(transp.kind == TransportKind.Socket, "Socket transport required!")
   if transp.remote.family == AddressFamily.None:
     var saddr: Sockaddr_storage
     var slen = SockLen(sizeof(saddr))
     if getpeername(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
                    addr slen) != 0:
-      raiseTransportOsError(osLastError())
+      raiseTransportError(osLastError())
     fromSAddr(addr saddr, slen, transp.remote)
   transp.remote
 
 proc localAddress*(transp: StreamTransport): TransportAddress {.
-    raises: [TransportError].} =
+    raises: [TransportAbortedError, TransportTooManyError, TransportOsError].} =
   ## Returns ``transp`` local socket address.
-  if transp.kind != TransportKind.Socket:
-    raise newException(TransportError, "Socket required!")
+  doAssert(transp.kind == TransportKind.Socket, "Socket transport required!")
   if transp.local.family == AddressFamily.None:
     var saddr: Sockaddr_storage
     var slen = SockLen(sizeof(saddr))
     if getsockname(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr),
                    addr slen) != 0:
-      raiseTransportOsError(osLastError())
+      raiseTransportError(osLastError())
     fromSAddr(addr saddr, slen, transp.local)
   transp.local

From 0a6f5854a78c33ac7e003045651257357e47e740 Mon Sep 17 00:00:00 2001
From: Jacek Sieka
Date: Tue, 4 Jul 2023 00:11:08 +0200
Subject: [PATCH 039/146] fix unused result warning on implicit return (fixes
 #414) (#417)

* fix unused result warning on implicit return
* describe `{.used.}` tradeoffs
* oops
---
 chronos/asyncmacro2.nim | 22 +++++++++++++++++++---
 tests/testmacro.nim     |  6 ++++++
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim
index 429e287ca..45146a300 100644
--- a/chronos/asyncmacro2.nim
+++ b/chronos/asyncmacro2.nim
@@ -175,9 +175,25 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
       nnkElseExpr.newTree(
         newStmtList(
           quote do: {.push warning[resultshadowed]: off.},
-          # var result: `baseType`
-          nnkVarSection.newTree(
-            nnkIdentDefs.newTree(ident "result", baseType, newEmptyNode())),
+          # var result {.used.}: `baseType`
+          # In the proc body, result may or may not end up being used
+          # depending on how the body is written - with implicit returns /
+          # expressions in particular, it is likely but not guaranteed that
+          # it is not used. Ideally, we would avoid emitting it in this
+          # case to avoid the default initialization. {.used.} typically
+          # works better than {.push.} which has a tendency to leak out of
+          # scope.
+          # TODO figure out if there's a way to detect `result` usage in
+          # the proc body _after_ template expansion, and therefore
+          # avoid creating this variable - one option is to create an
+          # additional when branch with a fake `result` and check
+          # `compiles(procBody)` - this is not without cost though
+          nnkVarSection.newTree(nnkIdentDefs.newTree(
+            nnkPragmaExpr.newTree(
+              ident "result",
+              nnkPragma.newTree(ident "used")),
+            baseType, newEmptyNode())
+          ),
           quote do: {.pop.},
         )
       )
diff --git a/tests/testmacro.nim b/tests/testmacro.nim
index 2526c5dea..ad4c22f37 100644
--- a/tests/testmacro.nim
+++ b/tests/testmacro.nim
@@ -177,6 +177,10 @@ suite "Macro transformations test suite":
         of false: await implicit7(v)
         of true: 42
 
+    proc implicit9(): Future[int] {.async.} =
+      result = 42
+      result
+
     let fin = new int
     check:
       waitFor(implicit()) == 42
@@ -193,6 +197,8 @@ suite "Macro transformations test suite":
       waitFor(implicit8(true)) == 42
       waitFor(implicit8(false)) == 33
 
+      waitFor(implicit9()) == 42
+
 suite "Closure iterator's exception transformation issues":
   test "Nested defer/finally not called on return":
     # issue #288

From 3d80ea9fc7eba6902db9178babfef130f92e03a4 Mon Sep 17 00:00:00 2001
From: Jacek Sieka
Date: Thu, 6 Jul 2023 18:51:59 +0200
Subject: [PATCH 040/146] fix EpollEvent memory layout (#411)

* fix EpollEvent memory layout

on x86_64, `EpollEvent` is packed - the upstream version is correct here

* copy-paste upstream instead
* Adjust lines to be 80 characters per line.

---------

Co-authored-by: cheatfate
---
 chronos/osdefs.nim | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim
index 8106fb680..789da8cc2 100644
--- a/chronos/osdefs.nim
+++ b/chronos/osdefs.nim
@@ -1001,13 +1001,22 @@ elif defined(linux):
     EPOLL_CTL_DEL* = 2
     EPOLL_CTL_MOD* = 3
 
+  # https://github.com/torvalds/linux/blob/ff6992735ade75aae3e35d16b17da1008d753d28/include/uapi/linux/eventpoll.h#L77
+  when defined(linux) and defined(amd64):
+    {.pragma: epollPacked, packed.}
+  else:
+    {.pragma: epollPacked.}
+
   type
-    EpollData* {.importc: "union epoll_data",
-                 header: "<sys/epoll.h>", pure, final.} = object
+    EpollData* {.importc: "epoll_data_t",
+                 header: "<sys/epoll.h>", pure, final, union.} = object
+      `ptr`* {.importc: "ptr".}: pointer
+      fd* {.importc: "fd".}: cint
+      u32* {.importc: "u32".}: uint32
       u64* {.importc: "u64".}: uint64
 
-    EpollEvent* {.importc: "struct epoll_event", header: "<sys/epoll.h>",
-                  pure, final.} = object
+    EpollEvent* {.importc: "struct epoll_event",
+                  header: "<sys/epoll.h>", pure, final, epollPacked.} = object
       events*: uint32 # Epoll events
       data*: EpollData # User data variable

From 155d89450ea4a53799bf418067dc4de7b14e04e3 Mon Sep 17 00:00:00 2001
From: Eugene Kabanov
Date: Fri, 14 Jul 2023 13:35:08 +0300
Subject: [PATCH 041/146] Trackers refactoring. (#416)

* Refactor chronos trackers to be simpler.
* Refactor trackers. Add HTTP server trackers. Refactor HTTP main processing
  loop.
* Compatibility fixes. Add checkLeaks().
* Fix posix test issue.
* Add httpdebug module which introduces HTTP connection dumping helpers.
  Add tests for it.
* Recover and deprecate old version of Trackers.
* Make public iterators available to iterate over all tracker counters.
  Fix asynctests to use public iterators instead of private ones.
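The refactored trackers reduce a tracker to a named counter: a resource calls
trackCounter() when it is created and untrackCounter() when it is released,
and a leak check can then assert that both sides balance. A hedged sketch of
that pattern, not taken from the patch; the resource type and counter name
below are invented for illustration:

import chronos

const ExampleTrackerName = "example.resource"

type
  ExampleResourceRef = ref object
    transp: StreamTransport

proc new(t: typedesc[ExampleResourceRef],
         transp: StreamTransport): ExampleResourceRef =
  let res = ExampleResourceRef(transp: transp)
  # One more live instance of this resource kind.
  trackCounter(ExampleTrackerName)
  res

proc closeWait(res: ExampleResourceRef) {.async.} =
  await res.transp.closeWait()
  # Balance the trackCounter() call made at creation time.
  untrackCounter(ExampleTrackerName)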
--- chronos/apps/http/httpbodyrw.nim | 73 +--- chronos/apps/http/httpclient.nim | 114 +------ chronos/apps/http/httpcommon.nim | 9 + chronos/apps/http/httpdebug.nim | 120 +++++++ chronos/apps/http/httpserver.nim | 547 ++++++++++++++++++------------ chronos/apps/http/shttpserver.nim | 28 +- chronos/asyncloop.nim | 61 +++- chronos/asyncproc.nim | 48 +-- chronos/streams/asyncstream.nim | 105 +----- chronos/transports/datagram.nim | 47 +-- chronos/transports/stream.nim | 102 +----- chronos/unittest2/asynctests.nim | 12 + tests/testasyncstream.nim | 34 +- tests/testdatagram.nim | 6 +- tests/testhttpclient.nim | 42 +-- tests/testhttpserver.nim | 128 +++++-- tests/testproc.nim | 12 +- tests/testshttpserver.nim | 11 +- tests/teststream.nim | 11 +- 19 files changed, 743 insertions(+), 767 deletions(-) create mode 100644 chronos/apps/http/httpdebug.nim diff --git a/chronos/apps/http/httpbodyrw.nim b/chronos/apps/http/httpbodyrw.nim index ba2b1d4fa..b948fbd3e 100644 --- a/chronos/apps/http/httpbodyrw.nim +++ b/chronos/apps/http/httpbodyrw.nim @@ -25,71 +25,6 @@ type bstate*: HttpState streams*: seq[AsyncStreamWriter] - HttpBodyTracker* = ref object of TrackerBase - opened*: int64 - closed*: int64 - -proc setupHttpBodyWriterTracker(): HttpBodyTracker {.gcsafe, raises: [].} -proc setupHttpBodyReaderTracker(): HttpBodyTracker {.gcsafe, raises: [].} - -proc getHttpBodyWriterTracker(): HttpBodyTracker {.inline.} = - var res = cast[HttpBodyTracker](getTracker(HttpBodyWriterTrackerName)) - if isNil(res): - res = setupHttpBodyWriterTracker() - res - -proc getHttpBodyReaderTracker(): HttpBodyTracker {.inline.} = - var res = cast[HttpBodyTracker](getTracker(HttpBodyReaderTrackerName)) - if isNil(res): - res = setupHttpBodyReaderTracker() - res - -proc dumpHttpBodyWriterTracking(): string {.gcsafe.} = - let tracker = getHttpBodyWriterTracker() - "Opened HTTP body writers: " & $tracker.opened & "\n" & - "Closed HTTP body writers: " & $tracker.closed - -proc dumpHttpBodyReaderTracking(): string {.gcsafe.} = - let tracker = getHttpBodyReaderTracker() - "Opened HTTP body readers: " & $tracker.opened & "\n" & - "Closed HTTP body readers: " & $tracker.closed - -proc leakHttpBodyWriter(): bool {.gcsafe.} = - var tracker = getHttpBodyWriterTracker() - tracker.opened != tracker.closed - -proc leakHttpBodyReader(): bool {.gcsafe.} = - var tracker = getHttpBodyReaderTracker() - tracker.opened != tracker.closed - -proc trackHttpBodyWriter(t: HttpBodyWriter) {.inline.} = - inc(getHttpBodyWriterTracker().opened) - -proc untrackHttpBodyWriter*(t: HttpBodyWriter) {.inline.} = - inc(getHttpBodyWriterTracker().closed) - -proc trackHttpBodyReader(t: HttpBodyReader) {.inline.} = - inc(getHttpBodyReaderTracker().opened) - -proc untrackHttpBodyReader*(t: HttpBodyReader) {.inline.} = - inc(getHttpBodyReaderTracker().closed) - -proc setupHttpBodyWriterTracker(): HttpBodyTracker {.gcsafe.} = - var res = HttpBodyTracker(opened: 0, closed: 0, - dump: dumpHttpBodyWriterTracking, - isLeaked: leakHttpBodyWriter - ) - addTracker(HttpBodyWriterTrackerName, res) - res - -proc setupHttpBodyReaderTracker(): HttpBodyTracker {.gcsafe.} = - var res = HttpBodyTracker(opened: 0, closed: 0, - dump: dumpHttpBodyReaderTracking, - isLeaked: leakHttpBodyReader - ) - addTracker(HttpBodyReaderTrackerName, res) - res - proc newHttpBodyReader*(streams: varargs[AsyncStreamReader]): HttpBodyReader = ## HttpBodyReader is AsyncStreamReader which holds references to all the ## ``streams``. Also on close it will close all the ``streams``. 
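As the doc comment above says, a body reader is an AsyncStreamReader that owns
the whole chain of readers beneath it, and with this patch its lifetime is
counted with trackCounter/untrackCounter instead of a hand-rolled tracker
object. A hedged sketch of composing one over a raw transport; the proc name
is invented, and a real server would normally place a bounded or chunked
reader in between:

import chronos
import chronos/apps/http/httpbodyrw

proc bodyOverTransport(transp: StreamTransport): HttpBodyReader =
  # Innermost layer: a plain stream reader over the transport.
  let reader = newAsyncStreamReader(transp)
  # The body reader keeps a reference to `reader` and closes it when the
  # body itself is closed via closeWait().
  newHttpBodyReader(reader)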
@@ -98,7 +33,7 @@ proc newHttpBodyReader*(streams: varargs[AsyncStreamReader]): HttpBodyReader = doAssert(len(streams) > 0, "At least one stream must be added") var res = HttpBodyReader(bstate: HttpState.Alive, streams: @streams) res.init(streams[0]) - trackHttpBodyReader(res) + trackCounter(HttpBodyReaderTrackerName) res proc closeWait*(bstream: HttpBodyReader) {.async.} = @@ -113,7 +48,7 @@ proc closeWait*(bstream: HttpBodyReader) {.async.} = await allFutures(res) await procCall(closeWait(AsyncStreamReader(bstream))) bstream.bstate = HttpState.Closed - untrackHttpBodyReader(bstream) + untrackCounter(HttpBodyReaderTrackerName) proc newHttpBodyWriter*(streams: varargs[AsyncStreamWriter]): HttpBodyWriter = ## HttpBodyWriter is AsyncStreamWriter which holds references to all the @@ -123,7 +58,7 @@ proc newHttpBodyWriter*(streams: varargs[AsyncStreamWriter]): HttpBodyWriter = doAssert(len(streams) > 0, "At least one stream must be added") var res = HttpBodyWriter(bstate: HttpState.Alive, streams: @streams) res.init(streams[0]) - trackHttpBodyWriter(res) + trackCounter(HttpBodyWriterTrackerName) res proc closeWait*(bstream: HttpBodyWriter) {.async.} = @@ -136,7 +71,7 @@ proc closeWait*(bstream: HttpBodyWriter) {.async.} = await allFutures(res) await procCall(closeWait(AsyncStreamWriter(bstream))) bstream.bstate = HttpState.Closed - untrackHttpBodyWriter(bstream) + untrackCounter(HttpBodyWriterTrackerName) proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [].} = if len(bstream.streams) == 1: diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 311ff1bb0..6e9ea0cae 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -190,10 +190,6 @@ type HttpClientFlags* = set[HttpClientFlag] - HttpClientTracker* = ref object of TrackerBase - opened*: int64 - closed*: int64 - ServerSentEvent* = object name*: string data*: string @@ -204,100 +200,6 @@ type # HttpClientResponseRef valid states are # Open -> (Finished, Error) -> (Closing, Closed) -proc setupHttpClientConnectionTracker(): HttpClientTracker {. - gcsafe, raises: [].} -proc setupHttpClientRequestTracker(): HttpClientTracker {. - gcsafe, raises: [].} -proc setupHttpClientResponseTracker(): HttpClientTracker {. 
- gcsafe, raises: [].} - -proc getHttpClientConnectionTracker(): HttpClientTracker {.inline.} = - var res = cast[HttpClientTracker](getTracker(HttpClientConnectionTrackerName)) - if isNil(res): - res = setupHttpClientConnectionTracker() - res - -proc getHttpClientRequestTracker(): HttpClientTracker {.inline.} = - var res = cast[HttpClientTracker](getTracker(HttpClientRequestTrackerName)) - if isNil(res): - res = setupHttpClientRequestTracker() - res - -proc getHttpClientResponseTracker(): HttpClientTracker {.inline.} = - var res = cast[HttpClientTracker](getTracker(HttpClientResponseTrackerName)) - if isNil(res): - res = setupHttpClientResponseTracker() - res - -proc dumpHttpClientConnectionTracking(): string {.gcsafe.} = - let tracker = getHttpClientConnectionTracker() - "Opened HTTP client connections: " & $tracker.opened & "\n" & - "Closed HTTP client connections: " & $tracker.closed - -proc dumpHttpClientRequestTracking(): string {.gcsafe.} = - let tracker = getHttpClientRequestTracker() - "Opened HTTP client requests: " & $tracker.opened & "\n" & - "Closed HTTP client requests: " & $tracker.closed - -proc dumpHttpClientResponseTracking(): string {.gcsafe.} = - let tracker = getHttpClientResponseTracker() - "Opened HTTP client responses: " & $tracker.opened & "\n" & - "Closed HTTP client responses: " & $tracker.closed - -proc leakHttpClientConnection(): bool {.gcsafe.} = - var tracker = getHttpClientConnectionTracker() - tracker.opened != tracker.closed - -proc leakHttpClientRequest(): bool {.gcsafe.} = - var tracker = getHttpClientRequestTracker() - tracker.opened != tracker.closed - -proc leakHttpClientResponse(): bool {.gcsafe.} = - var tracker = getHttpClientResponseTracker() - tracker.opened != tracker.closed - -proc trackHttpClientConnection(t: HttpClientConnectionRef) {.inline.} = - inc(getHttpClientConnectionTracker().opened) - -proc untrackHttpClientConnection*(t: HttpClientConnectionRef) {.inline.} = - inc(getHttpClientConnectionTracker().closed) - -proc trackHttpClientRequest(t: HttpClientRequestRef) {.inline.} = - inc(getHttpClientRequestTracker().opened) - -proc untrackHttpClientRequest*(t: HttpClientRequestRef) {.inline.} = - inc(getHttpClientRequestTracker().closed) - -proc trackHttpClientResponse(t: HttpClientResponseRef) {.inline.} = - inc(getHttpClientResponseTracker().opened) - -proc untrackHttpClientResponse*(t: HttpClientResponseRef) {.inline.} = - inc(getHttpClientResponseTracker().closed) - -proc setupHttpClientConnectionTracker(): HttpClientTracker {.gcsafe.} = - var res = HttpClientTracker(opened: 0, closed: 0, - dump: dumpHttpClientConnectionTracking, - isLeaked: leakHttpClientConnection - ) - addTracker(HttpClientConnectionTrackerName, res) - res - -proc setupHttpClientRequestTracker(): HttpClientTracker {.gcsafe.} = - var res = HttpClientTracker(opened: 0, closed: 0, - dump: dumpHttpClientRequestTracking, - isLeaked: leakHttpClientRequest - ) - addTracker(HttpClientRequestTrackerName, res) - res - -proc setupHttpClientResponseTracker(): HttpClientTracker {.gcsafe.} = - var res = HttpClientTracker(opened: 0, closed: 0, - dump: dumpHttpClientResponseTracking, - isLeaked: leakHttpClientResponse - ) - addTracker(HttpClientResponseTrackerName, res) - res - template checkClosed(reqresp: untyped): untyped = if reqresp.connection.state in {HttpClientConnectionState.Closing, HttpClientConnectionState.Closed}: @@ -556,7 +458,7 @@ proc new(t: typedesc[HttpClientConnectionRef], session: HttpSessionRef, state: HttpClientConnectionState.Connecting, remoteHostname: ha.id ) 
- trackHttpClientConnection(res) + trackCounter(HttpClientConnectionTrackerName) res of HttpClientScheme.Secure: let treader = newAsyncStreamReader(transp) @@ -575,7 +477,7 @@ proc new(t: typedesc[HttpClientConnectionRef], session: HttpSessionRef, state: HttpClientConnectionState.Connecting, remoteHostname: ha.id ) - trackHttpClientConnection(res) + trackCounter(HttpClientConnectionTrackerName) res proc setError(request: HttpClientRequestRef, error: ref HttpError) {. @@ -615,7 +517,7 @@ proc closeWait(conn: HttpClientConnectionRef) {.async.} = discard await conn.transp.closeWait() conn.state = HttpClientConnectionState.Closed - untrackHttpClientConnection(conn) + untrackCounter(HttpClientConnectionTrackerName) proc connect(session: HttpSessionRef, ha: HttpAddress): Future[HttpClientConnectionRef] {.async.} = @@ -835,7 +737,7 @@ proc closeWait*(request: HttpClientRequestRef) {.async.} = request.session = nil request.error = nil request.state = HttpReqRespState.Closed - untrackHttpClientRequest(request) + untrackCounter(HttpClientRequestTrackerName) proc closeWait*(response: HttpClientResponseRef) {.async.} = if response.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}: @@ -848,7 +750,7 @@ proc closeWait*(response: HttpClientResponseRef) {.async.} = response.session = nil response.error = nil response.state = HttpReqRespState.Closed - untrackHttpClientResponse(response) + untrackCounter(HttpClientResponseTrackerName) proc prepareResponse(request: HttpClientRequestRef, data: openArray[byte] ): HttpResult[HttpClientResponseRef] {.raises: [] .} = @@ -958,7 +860,7 @@ proc prepareResponse(request: HttpClientRequestRef, data: openArray[byte] httpPipeline: res.connection.flags.incl(HttpClientConnectionFlag.KeepAlive) res.connection.flags.incl(HttpClientConnectionFlag.Response) - trackHttpClientResponse(res) + trackCounter(HttpClientResponseTrackerName) ok(res) proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {. 
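On the client side the same counters now wrap the lifetime of connections,
requests and responses: trackCounter() at creation, untrackCounter() in the
matching closeWait(). A hedged usage sketch, not part of the patch, of a
client whose counters end up balanced once everything is closed; the URL is a
placeholder:

import std/uri
import chronos
import chronos/apps/http/httpclient

proc demo() {.async.} =
  let session = HttpSessionRef.new()
  try:
    # fetch() creates the request/response pair internally and closes both.
    let (status, data) = await session.fetch(parseUri("http://127.0.0.1:8080/"))
    echo "status=", status, " bytes=", data.len
  finally:
    # Closing the session also drops pooled connections.
    await session.closeWait()

when isMainModule:
  waitFor demo()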
@@ -996,7 +898,7 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, version: version, flags: flags, headers: HttpTable.init(headers), address: ha, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body ) - trackHttpClientRequest(res) + trackCounter(HttpClientRequestTrackerName) res proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, @@ -1012,7 +914,7 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, version: version, flags: flags, headers: HttpTable.init(headers), address: address, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body ) - trackHttpClientRequest(res) + trackCounter(HttpClientRequestTrackerName) ok(res) proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, diff --git a/chronos/apps/http/httpcommon.nim b/chronos/apps/http/httpcommon.nim index cc2478d47..5a4a628c0 100644 --- a/chronos/apps/http/httpcommon.nim +++ b/chronos/apps/http/httpcommon.nim @@ -13,6 +13,15 @@ import ../../streams/[asyncstream, boundstream] export asyncloop, asyncsync, results, httputils, strutils const + HttpServerUnsecureConnectionTrackerName* = + "httpserver.unsecure.connection" + HttpServerSecureConnectionTrackerName* = + "httpserver.secure.connection" + HttpServerRequestTrackerName* = + "httpserver.request" + HttpServerResponseTrackerName* = + "httpserver.response" + HeadersMark* = @[0x0d'u8, 0x0a'u8, 0x0d'u8, 0x0a'u8] PostMethods* = {MethodPost, MethodPatch, MethodPut, MethodDelete} diff --git a/chronos/apps/http/httpdebug.nim b/chronos/apps/http/httpdebug.nim new file mode 100644 index 000000000..2f40674e6 --- /dev/null +++ b/chronos/apps/http/httpdebug.nim @@ -0,0 +1,120 @@ +# +# Chronos HTTP/S server implementation +# (c) Copyright 2021-Present +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +import std/tables +import stew/results +import ../../timer +import httpserver, shttpserver +from httpclient import HttpClientScheme +from httpcommon import HttpState +from ../../osdefs import SocketHandle +from ../../transports/common import TransportAddress, ServerFlags +export HttpClientScheme, SocketHandle, TransportAddress, ServerFlags, HttpState + +{.push raises: [].} + +type + ConnectionType* {.pure.} = enum + NonSecure, Secure + + ConnectionState* {.pure.} = enum + Accepted, Alive, Closing, Closed + + ServerConnectionInfo* = object + handle*: SocketHandle + connectionType*: ConnectionType + connectionState*: ConnectionState + remoteAddress*: Opt[TransportAddress] + localAddress*: Opt[TransportAddress] + acceptMoment*: Moment + createMoment*: Opt[Moment] + + ServerInfo* = object + connectionType*: ConnectionType + address*: TransportAddress + state*: HttpServerState + maxConnections*: int + backlogSize*: int + baseUri*: Uri + serverIdent*: string + flags*: set[HttpServerFlags] + socketFlags*: set[ServerFlags] + headersTimeout*: Duration + bufferSize*: int + maxHeadersSize*: int + maxRequestBodySize*: int + +proc getConnectionType*( + server: HttpServerRef | SecureHttpServerRef): ConnectionType = + when server is SecureHttpServerRef: + ConnectionType.Secure + else: + if HttpServerFlags.Secure in server.flags: + ConnectionType.Secure + else: + ConnectionType.NonSecure + +proc getServerInfo*(server: HttpServerRef|SecureHttpServerRef): ServerInfo = + ServerInfo( + connectionType: server.getConnectionType(), + address: server.address, + state: server.state(), + maxConnections: server.maxConnections, + backlogSize: server.backlogSize, + 
baseUri: server.baseUri, + serverIdent: server.serverIdent, + flags: server.flags, + socketFlags: server.socketFlags, + headersTimeout: server.headersTimeout, + bufferSize: server.bufferSize, + maxHeadersSize: server.maxHeadersSize, + maxRequestBodySize: server.maxRequestBodySize + ) + +proc getConnectionState*(holder: HttpConnectionHolderRef): ConnectionState = + if not(isNil(holder.connection)): + case holder.connection.state + of HttpState.Alive: ConnectionState.Alive + of HttpState.Closing: ConnectionState.Closing + of HttpState.Closed: ConnectionState.Closed + else: + ConnectionState.Accepted + +proc init*(t: typedesc[ServerConnectionInfo], + holder: HttpConnectionHolderRef): ServerConnectionInfo = + let + localAddress = + try: + Opt.some(holder.transp.localAddress()) + except CatchableError: + Opt.none(TransportAddress) + remoteAddress = + try: + Opt.some(holder.transp.remoteAddress()) + except CatchableError: + Opt.none(TransportAddress) + + ServerConnectionInfo( + handle: SocketHandle(holder.transp.fd), + connectionType: holder.server.getConnectionType(), + connectionState: holder.getConnectionState(), + remoteAddress: remoteAddress, + localAddress: localAddress, + acceptMoment: holder.acceptMoment, + createMoment: + if not(isNil(holder.connection)): + Opt.some(holder.connection.createMoment) + else: + Opt.none(Moment) + ) + +proc getConnections*(server: HttpServerRef): seq[ServerConnectionInfo] = + var res: seq[ServerConnectionInfo] + for holder in server.connections.values(): + res.add(ServerConnectionInfo.init(holder)) + res diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index 03aaaf96c..b5b8bfcc6 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -29,18 +29,20 @@ type ## Enable HTTP/1.1 pipelining. HttpServerError* {.pure.} = enum - TimeoutError, CatchableError, RecoverableError, CriticalError, - DisconnectError + InterruptError, TimeoutError, CatchableError, RecoverableError, + CriticalError, DisconnectError HttpServerState* {.pure.} = enum ServerRunning, ServerStopped, ServerClosed HttpProcessError* = object - error*: HttpServerError + kind*: HttpServerError code*: HttpCode exc*: ref CatchableError - remote*: TransportAddress + remote*: Opt[TransportAddress] + ConnectionFence* = Result[HttpConnectionRef, HttpProcessError] + ResponseFence* = Result[HttpResponseRef, HttpProcessError] RequestFence* = Result[HttpRequestRef, HttpProcessError] HttpRequestFlags* {.pure.} = enum @@ -53,7 +55,7 @@ type Plain, SSE, Chunked HttpResponseState* {.pure.} = enum - Empty, Prepared, Sending, Finished, Failed, Cancelled, Dumb + Empty, Prepared, Sending, Finished, Failed, Cancelled, Default HttpProcessCallback* = proc(req: RequestFence): Future[HttpResponseRef] {. @@ -64,6 +66,20 @@ type transp: StreamTransport): Future[HttpConnectionRef] {. gcsafe, raises: [].} + HttpCloseConnectionCallback* = + proc(connection: HttpConnectionRef): Future[void] {. 
+ gcsafe, raises: [].} + + HttpConnectionHolder* = object of RootObj + connection*: HttpConnectionRef + server*: HttpServerRef + future*: Future[void] + transp*: StreamTransport + acceptMoment*: Moment + connectionId*: string + + HttpConnectionHolderRef* = ref HttpConnectionHolder + HttpServer* = object of RootObj instance*: StreamServer address*: TransportAddress @@ -74,7 +90,7 @@ type serverIdent*: string flags*: set[HttpServerFlags] socketFlags*: set[ServerFlags] - connections*: Table[string, Future[void]] + connections*: OrderedTable[string, HttpConnectionHolderRef] acceptLoop*: Future[void] lifetime*: Future[void] headersTimeout*: Duration @@ -122,11 +138,13 @@ type HttpConnection* = object of RootObj state*: HttpState server*: HttpServerRef - transp: StreamTransport + transp*: StreamTransport mainReader*: AsyncStreamReader mainWriter*: AsyncStreamWriter reader*: AsyncStreamReader writer*: AsyncStreamWriter + closeCb*: HttpCloseConnectionCallback + createMoment*: Moment buffer: seq[byte] HttpConnectionRef* = ref HttpConnection @@ -134,9 +152,24 @@ type ByteChar* = string | seq[byte] proc init(htype: typedesc[HttpProcessError], error: HttpServerError, - exc: ref CatchableError, remote: TransportAddress, - code: HttpCode): HttpProcessError {.raises: [].} = - HttpProcessError(error: error, exc: exc, remote: remote, code: code) + exc: ref CatchableError, remote: Opt[TransportAddress], + code: HttpCode): HttpProcessError {. + raises: [].} = + HttpProcessError(kind: error, exc: exc, remote: remote, code: code) + +proc init(htype: typedesc[HttpProcessError], + error: HttpServerError): HttpProcessError {. + raises: [].} = + HttpProcessError(kind: error) + +proc new(htype: typedesc[HttpConnectionHolderRef], server: HttpServerRef, + transp: StreamTransport, + connectionId: string): HttpConnectionHolderRef = + HttpConnectionHolderRef( + server: server, transp: transp, acceptMoment: Moment.now(), + connectionId: connectionId) + +proc error*(e: HttpProcessError): HttpServerError = e.kind proc createConnection(server: HttpServerRef, transp: StreamTransport): Future[HttpConnectionRef] {. 
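Together with the connection holders above, the new httpdebug module makes it
possible to inspect a running server. A short hedged sketch that uses only the
getServerInfo()/getConnections() helpers and fields defined in this patch; the
dump proc itself is illustrative:

import chronos
import chronos/apps/http/[httpserver, httpdebug]

proc dumpServerState(server: HttpServerRef) =
  let info = server.getServerInfo()
  echo "listening on ", info.address, " state=", info.state
  for conn in server.getConnections():
    echo "  fd=", int(conn.handle),
         " state=", conn.connectionState,
         " peer=", conn.remoteAddress,
         " age=", Moment.now() - conn.acceptMoment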
@@ -176,7 +209,7 @@ proc new*(htype: typedesc[HttpServerRef], return err(exc.msg) var res = HttpServerRef( - address: address, + address: serverInstance.localAddress(), instance: serverInstance, processCallback: processCallback, createConnCallback: createConnection, @@ -196,15 +229,22 @@ proc new*(htype: typedesc[HttpServerRef], # else: # nil lifetime: newFuture[void]("http.server.lifetime"), - connections: initTable[string, Future[void]]() + connections: initOrderedTable[string, HttpConnectionHolderRef]() ) ok(res) -proc getResponseFlags*(req: HttpRequestRef): set[HttpResponseFlags] = +proc getServerFlags(req: HttpRequestRef): set[HttpServerFlags] = + var defaultFlags: set[HttpServerFlags] = {} + if isNil(req): return defaultFlags + if isNil(req.connection): return defaultFlags + if isNil(req.connection.server): return defaultFlags + req.connection.server.flags + +proc getResponseFlags(req: HttpRequestRef): set[HttpResponseFlags] = var defaultFlags: set[HttpResponseFlags] = {} case req.version of HttpVersion11: - if HttpServerFlags.Http11Pipeline notin req.connection.server.flags: + if HttpServerFlags.Http11Pipeline notin req.getServerFlags(): return defaultFlags let header = req.headers.getString(ConnectionHeader, "keep-alive") if header == "keep-alive": @@ -214,6 +254,12 @@ proc getResponseFlags*(req: HttpRequestRef): set[HttpResponseFlags] = else: defaultFlags +proc getResponseVersion(reqFence: RequestFence): HttpVersion {.raises: [].} = + if reqFence.isErr(): + HttpVersion11 + else: + reqFence.get().version + proc getResponse*(req: HttpRequestRef): HttpResponseRef {.raises: [].} = if req.response.isNone(): var resp = HttpResponseRef( @@ -235,9 +281,14 @@ proc getHostname*(server: HttpServerRef): string = else: server.baseUri.hostname -proc dumbResponse*(): HttpResponseRef {.raises: [].} = +proc defaultResponse*(): HttpResponseRef {.raises: [].} = ## Create an empty response to return when request processor got no request. - HttpResponseRef(state: HttpResponseState.Dumb, version: HttpVersion11) + HttpResponseRef(state: HttpResponseState.Default, version: HttpVersion11) + +proc dumbResponse*(): HttpResponseRef {.raises: [], + deprecated: "Please use defaultResponse() instead".} = + ## Create an empty response to return when request processor got no request. + defaultResponse() proc getId(transp: StreamTransport): Result[string, string] {.inline.} = ## Returns string unique transport's identifier as string. @@ -371,6 +422,7 @@ proc prepareRequest(conn: HttpConnectionRef, if strip(expectHeader).toLowerAscii() == "100-continue": request.requestFlags.incl(HttpRequestFlags.ClientExpect) + trackCounter(HttpServerRequestTrackerName) ok(request) proc getBodyReader*(request: HttpRequestRef): HttpResult[HttpBodyReader] = @@ -579,7 +631,7 @@ proc preferredContentType*(request: HttpRequestRef, proc sendErrorResponse(conn: HttpConnectionRef, version: HttpVersion, code: HttpCode, keepAlive = true, datatype = "text/text", - databody = ""): Future[bool] {.async.} = + databody = "") {.async.} = var answer = $version & " " & $code & "\r\n" answer.add(DateHeader) answer.add(": ") @@ -605,13 +657,90 @@ proc sendErrorResponse(conn: HttpConnectionRef, version: HttpVersion, answer.add(databody) try: await conn.writer.write(answer) - return true + except CancelledError as exc: + raise exc + except CatchableError: + # We ignore errors here, because we indicating error already. 
+ discard + +proc sendErrorResponse(conn: HttpConnectionRef, reqFence: RequestFence, + respError: HttpProcessError): Future[bool] {.async.} = + let version = getResponseVersion(reqFence) + try: + if reqFence.isOk(): + case respError.kind + of HttpServerError.CriticalError: + await conn.sendErrorResponse(version, respError.code, false) + false + of HttpServerError.RecoverableError: + await conn.sendErrorResponse(version, respError.code, true) + true + of HttpServerError.CatchableError: + await conn.sendErrorResponse(version, respError.code, false) + false + of HttpServerError.DisconnectError, + HttpServerError.InterruptError, + HttpServerError.TimeoutError: + raiseAssert("Unexpected response error: " & $respError.kind) + else: + false except CancelledError: - return false - except AsyncStreamWriteError: - return false - except AsyncStreamIncompleteError: - return false + false + +proc sendDefaultResponse(conn: HttpConnectionRef, reqFence: RequestFence, + response: HttpResponseRef): Future[bool] {.async.} = + let + version = getResponseVersion(reqFence) + keepConnection = + if isNil(response): + false + else: + HttpResponseFlags.KeepAlive in response.flags + try: + if reqFence.isOk(): + if isNil(response): + await conn.sendErrorResponse(version, Http404, keepConnection) + keepConnection + else: + case response.state + of HttpResponseState.Empty: + # Response was ignored, so we respond with not found. + await conn.sendErrorResponse(version, Http404, keepConnection) + keepConnection + of HttpResponseState.Prepared: + # Response was prepared but not sent, so we can respond with some + # error code + await conn.sendErrorResponse(HttpVersion11, Http409, keepConnection) + keepConnection + of HttpResponseState.Sending, HttpResponseState.Failed, + HttpResponseState.Cancelled: + # Just drop connection, because we dont know at what stage we are + false + of HttpResponseState.Default: + # Response was ignored, so we respond with not found. 
+ await conn.sendErrorResponse(version, Http404, keepConnection) + keepConnection + of HttpResponseState.Finished: + keepConnection + else: + case reqFence.error.kind + of HttpServerError.TimeoutError: + await conn.sendErrorResponse(version, reqFence.error.code, false) + false + of HttpServerError.CriticalError: + await conn.sendErrorResponse(version, reqFence.error.code, false) + false + of HttpServerError.RecoverableError: + await conn.sendErrorResponse(version, reqFence.error.code, true) + false + of HttpServerError.CatchableError: + await conn.sendErrorResponse(version, reqFence.error.code, false) + false + of HttpServerError.DisconnectError, + HttpServerError.InterruptError: + raiseAssert("Unexpected request error: " & $reqFence.error.kind) + except CancelledError: + false proc getRequest(conn: HttpConnectionRef): Future[HttpRequestRef] {.async.} = try: @@ -644,31 +773,33 @@ proc init*(value: var HttpConnection, server: HttpServerRef, mainWriter: newAsyncStreamWriter(transp) ) +proc closeUnsecureConnection(conn: HttpConnectionRef) {.async.} = + if conn.state == HttpState.Alive: + conn.state = HttpState.Closing + var pending: seq[Future[void]] + pending.add(conn.mainReader.closeWait()) + pending.add(conn.mainWriter.closeWait()) + pending.add(conn.transp.closeWait()) + try: + await allFutures(pending) + except CancelledError: + await allFutures(pending) + untrackCounter(HttpServerUnsecureConnectionTrackerName) + conn.state = HttpState.Closed + proc new(ht: typedesc[HttpConnectionRef], server: HttpServerRef, transp: StreamTransport): HttpConnectionRef = var res = HttpConnectionRef() res[].init(server, transp) res.reader = res.mainReader res.writer = res.mainWriter + res.closeCb = closeUnsecureConnection + res.createMoment = Moment.now() + trackCounter(HttpServerUnsecureConnectionTrackerName) res -proc closeWait*(conn: HttpConnectionRef) {.async.} = - if conn.state == HttpState.Alive: - conn.state = HttpState.Closing - var pending: seq[Future[void]] - if conn.reader != conn.mainReader: - pending.add(conn.reader.closeWait()) - if conn.writer != conn.mainWriter: - pending.add(conn.writer.closeWait()) - if len(pending) > 0: - await allFutures(pending) - # After we going to close everything else. - pending.setLen(3) - pending[0] = conn.mainReader.closeWait() - pending[1] = conn.mainWriter.closeWait() - pending[2] = conn.transp.closeWait() - await allFutures(pending) - conn.state = HttpState.Closed +proc closeWait*(conn: HttpConnectionRef): Future[void] = + conn.closeCb(conn) proc closeWait*(req: HttpRequestRef) {.async.} = if req.state == HttpState.Alive: @@ -676,7 +807,12 @@ proc closeWait*(req: HttpRequestRef) {.async.} = req.state = HttpState.Closing let resp = req.response.get() if (HttpResponseFlags.Stream in resp.flags) and not(isNil(resp.writer)): - await resp.writer.closeWait() + var writer = resp.writer.closeWait() + try: + await writer + except CancelledError: + await writer + untrackCounter(HttpServerRequestTrackerName) req.state = HttpState.Closed proc createConnection(server: HttpServerRef, @@ -694,174 +830,168 @@ proc `keepalive=`*(resp: HttpResponseRef, value: bool) = proc keepalive*(resp: HttpResponseRef): bool {.raises: [].} = HttpResponseFlags.KeepAlive in resp.flags -proc processLoop(server: HttpServerRef, transp: StreamTransport, - connId: string) {.async.} = - var - conn: HttpConnectionRef - connArg: RequestFence - runLoop = false +proc getRemoteAddress(transp: StreamTransport): Opt[TransportAddress] {. 
+ raises: [].} = + if isNil(transp): return Opt.none(TransportAddress) + try: + Opt.some(transp.remoteAddress()) + except CatchableError: + Opt.none(TransportAddress) + +proc getRemoteAddress(connection: HttpConnectionRef): Opt[TransportAddress] {. + raises: [].} = + if isNil(connection): return Opt.none(TransportAddress) + getRemoteAddress(connection.transp) +proc getResponseFence*(connection: HttpConnectionRef, + reqFence: RequestFence): Future[ResponseFence] {. + async.} = try: - conn = await server.createConnCallback(server, transp) - runLoop = true + let res = await connection.server.processCallback(reqFence) + ResponseFence.ok(res) except CancelledError: - server.connections.del(connId) - await transp.closeWait() - return + ResponseFence.err(HttpProcessError.init( + HttpServerError.InterruptError)) except HttpCriticalError as exc: - let error = HttpProcessError.init(HttpServerError.CriticalError, exc, - transp.remoteAddress(), exc.code) - connArg = RequestFence.err(error) - runLoop = false - - if not(runLoop): - try: - # We still want to notify process callback about failure, but we ignore - # result. - discard await server.processCallback(connArg) - except CancelledError: - runLoop = false - except CatchableError as exc: - # There should be no exceptions, so we will raise `Defect`. - raiseHttpDefect("Unexpected exception catched [" & $exc.name & "]") - - var breakLoop = false - while runLoop: - var - arg: RequestFence - resp: HttpResponseRef - - try: - let request = - if server.headersTimeout.isInfinite(): - await conn.getRequest() - else: - await conn.getRequest().wait(server.headersTimeout) - arg = RequestFence.ok(request) - except CancelledError: - breakLoop = true - except AsyncTimeoutError as exc: - let error = HttpProcessError.init(HttpServerError.TimeoutError, exc, - transp.remoteAddress(), Http408) - arg = RequestFence.err(error) - except HttpRecoverableError as exc: - let error = HttpProcessError.init(HttpServerError.RecoverableError, exc, - transp.remoteAddress(), exc.code) - arg = RequestFence.err(error) - except HttpCriticalError as exc: - let error = HttpProcessError.init(HttpServerError.CriticalError, exc, - transp.remoteAddress(), exc.code) - arg = RequestFence.err(error) - except HttpDisconnectError as exc: - if HttpServerFlags.NotifyDisconnect in server.flags: - let error = HttpProcessError.init(HttpServerError.DisconnectError, exc, - transp.remoteAddress(), Http400) - arg = RequestFence.err(error) + let address = connection.getRemoteAddress() + ResponseFence.err(HttpProcessError.init( + HttpServerError.CriticalError, exc, address, exc.code)) + except HttpRecoverableError as exc: + let address = connection.getRemoteAddress() + ResponseFence.err(HttpProcessError.init( + HttpServerError.RecoverableError, exc, address, exc.code)) + except CatchableError as exc: + let address = connection.getRemoteAddress() + ResponseFence.err(HttpProcessError.init( + HttpServerError.CatchableError, exc, address, Http503)) + +proc getResponseFence*(server: HttpServerRef, + connFence: ConnectionFence): Future[ResponseFence] {. 
+ async.} = + doAssert(connFence.isErr()) + try: + let + reqFence = RequestFence.err(connFence.error) + res = await server.processCallback(reqFence) + ResponseFence.ok(res) + except CancelledError: + ResponseFence.err(HttpProcessError.init( + HttpServerError.InterruptError)) + except HttpCriticalError as exc: + let address = Opt.none(TransportAddress) + ResponseFence.err(HttpProcessError.init( + HttpServerError.CriticalError, exc, address, exc.code)) + except HttpRecoverableError as exc: + let address = Opt.none(TransportAddress) + ResponseFence.err(HttpProcessError.init( + HttpServerError.RecoverableError, exc, address, exc.code)) + except CatchableError as exc: + let address = Opt.none(TransportAddress) + ResponseFence.err(HttpProcessError.init( + HttpServerError.CatchableError, exc, address, Http503)) + +proc getRequestFence*(server: HttpServerRef, + connection: HttpConnectionRef): Future[RequestFence] {. + async.} = + try: + let res = + if server.headersTimeout.isInfinite(): + await connection.getRequest() else: - breakLoop = true - except CatchableError as exc: - let error = HttpProcessError.init(HttpServerError.CatchableError, exc, - transp.remoteAddress(), Http500) - arg = RequestFence.err(error) - - if breakLoop: - break - - breakLoop = false - var lastErrorCode: Opt[HttpCode] - - try: - resp = await conn.server.processCallback(arg) - except CancelledError: - breakLoop = true - except HttpCriticalError as exc: - lastErrorCode = Opt.some(exc.code) - except HttpRecoverableError as exc: - lastErrorCode = Opt.some(exc.code) - except CatchableError: - lastErrorCode = Opt.some(Http503) + await connection.getRequest().wait(server.headersTimeout) + RequestFence.ok(res) + except CancelledError: + RequestFence.err(HttpProcessError.init(HttpServerError.InterruptError)) + except AsyncTimeoutError as exc: + let address = connection.getRemoteAddress() + RequestFence.err(HttpProcessError.init( + HttpServerError.TimeoutError, exc, address, Http408)) + except HttpRecoverableError as exc: + let address = connection.getRemoteAddress() + RequestFence.err(HttpProcessError.init( + HttpServerError.RecoverableError, exc, address, exc.code)) + except HttpCriticalError as exc: + let address = connection.getRemoteAddress() + RequestFence.err(HttpProcessError.init( + HttpServerError.CriticalError, exc, address, exc.code)) + except HttpDisconnectError as exc: + let address = connection.getRemoteAddress() + RequestFence.err(HttpProcessError.init( + HttpServerError.DisconnectError, exc, address, Http400)) + except CatchableError as exc: + let address = connection.getRemoteAddress() + RequestFence.err(HttpProcessError.init( + HttpServerError.CatchableError, exc, address, Http500)) + +proc getConnectionFence*(server: HttpServerRef, + transp: StreamTransport): Future[ConnectionFence] {. 
+ async.} = + try: + let res = await server.createConnCallback(server, transp) + ConnectionFence.ok(res) + except CancelledError: + await transp.closeWait() + ConnectionFence.err(HttpProcessError.init(HttpServerError.InterruptError)) + except HttpCriticalError as exc: + await transp.closeWait() + let address = transp.getRemoteAddress() + ConnectionFence.err(HttpProcessError.init( + HttpServerError.CriticalError, exc, address, exc.code)) + +proc processRequest(server: HttpServerRef, + connection: HttpConnectionRef, + connId: string): Future[bool] {.async.} = + let requestFence = await getRequestFence(server, connection) + if requestFence.isErr(): + case requestFence.error.kind + of HttpServerError.InterruptError: + return false + of HttpServerError.DisconnectError: + if HttpServerFlags.NotifyDisconnect notin server.flags: + return false + else: + discard - if breakLoop: - break + defer: + if requestFence.isOk(): + await requestFence.get().closeWait() - if arg.isErr(): - let code = arg.error().code - try: - case arg.error().error - of HttpServerError.TimeoutError: - discard await conn.sendErrorResponse(HttpVersion11, code, false) - of HttpServerError.RecoverableError: - discard await conn.sendErrorResponse(HttpVersion11, code, false) - of HttpServerError.CriticalError: - discard await conn.sendErrorResponse(HttpVersion11, code, false) - of HttpServerError.CatchableError: - discard await conn.sendErrorResponse(HttpVersion11, code, false) - of HttpServerError.DisconnectError: - discard - except CancelledError: - # We swallowing `CancelledError` in a loop, but we going to exit - # loop ASAP. - discard - break - else: - let request = arg.get() - var keepConn = HttpResponseFlags.KeepAlive in request.getResponseFlags() - if lastErrorCode.isNone(): - if isNil(resp): - # Response was `nil`. - try: - discard await conn.sendErrorResponse(HttpVersion11, Http404, false) - except CancelledError: - keepConn = false - else: - try: - case resp.state - of HttpResponseState.Empty: - # Response was ignored - discard await conn.sendErrorResponse(HttpVersion11, Http404, - keepConn) - of HttpResponseState.Prepared: - # Response was prepared but not sent. - discard await conn.sendErrorResponse(HttpVersion11, Http409, - keepConn) - else: - # some data was already sent to the client. - keepConn = resp.keepalive() - except CancelledError: - keepConn = false - else: - try: - discard await conn.sendErrorResponse(HttpVersion11, - lastErrorCode.get(), false) - except CancelledError: - keepConn = false + let responseFence = await getResponseFence(connection, requestFence) + if responseFence.isErr() and + (responseFence.error.kind == HttpServerError.InterruptError): + return false - # Closing and releasing all the request resources. - try: - await request.closeWait() - except CancelledError: - # We swallowing `CancelledError` in a loop, but we still need to close - # `request` before exiting. 
- await request.closeWait() + if responseFence.isErr(): + await connection.sendErrorResponse(requestFence, responseFence.error) + else: + await connection.sendDefaultResponse(requestFence, responseFence.get()) + +proc processLoop(holder: HttpConnectionHolderRef) {.async.} = + let + server = holder.server + transp = holder.transp + connectionId = holder.connectionId + connection = + block: + let res = await server.getConnectionFence(transp) + if res.isErr(): + if res.error.kind != HttpServerError.InterruptError: + discard await server.getResponseFence(res) + server.connections.del(connectionId) + return + res.get() - if not(keepConn): - break + holder.connection = connection - # Connection could be `nil` only when secure handshake is failed. - if not(isNil(conn)): - try: - await conn.closeWait() - except CancelledError: - # Cancellation could be happened while we closing `conn`. But we still - # need to close it. - await conn.closeWait() + defer: + server.connections.del(connectionId) + await connection.closeWait() - server.connections.del(connId) - # if server.maxConnections > 0: - # server.semaphore.release() + var runLoop = true + while runLoop: + runLoop = await server.processRequest(connection, connectionId) proc acceptClientLoop(server: HttpServerRef) {.async.} = - var breakLoop = false while true: try: # if server.maxConnections > 0: @@ -872,27 +1002,26 @@ proc acceptClientLoop(server: HttpServerRef) {.async.} = # We are unable to identify remote peer, it means that remote peer # disconnected before identification. await transp.closeWait() - breakLoop = false + break else: let connId = resId.get() - server.connections[connId] = processLoop(server, transp, connId) + let holder = HttpConnectionHolderRef.new(server, transp, resId.get()) + server.connections[connId] = holder + holder.future = processLoop(holder) except CancelledError: # Server was stopped - breakLoop = true + break except TransportOsError: # This is some critical unrecoverable error. - breakLoop = true + break except TransportTooManyError: # Non critical error - breakLoop = false + discard except TransportAbortedError: # Non critical error - breakLoop = false + discard except CatchableError: # Unexpected error - breakLoop = true - - if breakLoop: break proc state*(server: HttpServerRef): HttpServerState {.raises: [].} = @@ -922,11 +1051,11 @@ proc drop*(server: HttpServerRef) {.async.} = ## Drop all pending HTTP connections. var pending: seq[Future[void]] if server.state in {ServerStopped, ServerRunning}: - for fut in server.connections.values(): - if not(fut.finished()): - fut.cancel() - pending.add(fut) + for holder in server.connections.values(): + if not(isNil(holder.future)) and not(holder.future.finished()): + pending.add(holder.future.cancelAndWait()) await allFutures(pending) + server.connections.clear() proc closeWait*(server: HttpServerRef) {.async.} = ## Stop HTTP server and drop all the pending connections. 
diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim index 93f253b8e..927ca6290 100644 --- a/chronos/apps/http/shttpserver.nim +++ b/chronos/apps/http/shttpserver.nim @@ -24,6 +24,28 @@ type SecureHttpConnectionRef* = ref SecureHttpConnection +proc closeSecConnection(conn: HttpConnectionRef) {.async.} = + if conn.state == HttpState.Alive: + conn.state = HttpState.Closing + var pending: seq[Future[void]] + pending.add(conn.writer.closeWait()) + pending.add(conn.reader.closeWait()) + try: + await allFutures(pending) + except CancelledError: + await allFutures(pending) + # After we going to close everything else. + pending.setLen(3) + pending[0] = conn.mainReader.closeWait() + pending[1] = conn.mainWriter.closeWait() + pending[2] = conn.transp.closeWait() + try: + await allFutures(pending) + except CancelledError: + await allFutures(pending) + untrackCounter(HttpServerSecureConnectionTrackerName) + conn.state = HttpState.Closed + proc new*(ht: typedesc[SecureHttpConnectionRef], server: SecureHttpServerRef, transp: StreamTransport): SecureHttpConnectionRef = var res = SecureHttpConnectionRef() @@ -37,6 +59,8 @@ proc new*(ht: typedesc[SecureHttpConnectionRef], server: SecureHttpServerRef, res.tlsStream = tlsStream res.reader = AsyncStreamReader(tlsStream.reader) res.writer = AsyncStreamWriter(tlsStream.writer) + res.closeCb = closeSecConnection + trackCounter(HttpServerSecureConnectionTrackerName) res proc createSecConnection(server: HttpServerRef, @@ -100,7 +124,7 @@ proc new*(htype: typedesc[SecureHttpServerRef], createConnCallback: createSecConnection, baseUri: serverUri, serverIdent: serverIdent, - flags: serverFlags, + flags: serverFlags + {HttpServerFlags.Secure}, socketFlags: socketFlags, maxConnections: maxConnections, bufferSize: bufferSize, @@ -114,7 +138,7 @@ proc new*(htype: typedesc[SecureHttpServerRef], # else: # nil lifetime: newFuture[void]("http.server.lifetime"), - connections: initTable[string, Future[void]](), + connections: initOrderedTable[string, HttpConnectionHolderRef](), tlsCertificate: tlsCertificate, tlsPrivateKey: tlsPrivateKey, secureFlags: secureFlags diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index 774391621..a603ee4fe 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -171,11 +171,16 @@ type dump*: proc(): string {.gcsafe, raises: [].} isLeaked*: proc(): bool {.gcsafe, raises: [].} + TrackerCounter* = object + opened*: uint64 + closed*: uint64 + PDispatcherBase = ref object of RootRef timers*: HeapQueue[TimerCallback] callbacks*: Deque[AsyncCallback] idlers*: Deque[AsyncCallback] trackers*: Table[string, TrackerBase] + counters*: Table[string, TrackerCounter] proc sentinelCallbackImpl(arg: pointer) {.gcsafe.} = raiseAssert "Sentinel callback MUST not be scheduled" @@ -404,7 +409,8 @@ when defined(windows): timers: initHeapQueue[TimerCallback](), callbacks: initDeque[AsyncCallback](64), idlers: initDeque[AsyncCallback](), - trackers: initTable[string, TrackerBase]() + trackers: initTable[string, TrackerBase](), + counters: initTable[string, TrackerCounter]() ) res.callbacks.addLast(SentinelCallback) initAPI(res) @@ -814,7 +820,8 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or callbacks: initDeque[AsyncCallback](asyncEventsCount), idlers: initDeque[AsyncCallback](), keys: newSeq[ReadyKey](asyncEventsCount), - trackers: initTable[string, TrackerBase]() + trackers: initTable[string, TrackerBase](), + counters: initTable[string, TrackerCounter]() ) res.callbacks.addLast(SentinelCallback) 
initAPI(res) @@ -1505,16 +1512,54 @@ proc waitFor*[T](fut: Future[T]): T {.raises: [CatchableError].} = fut.read() -proc addTracker*[T](id: string, tracker: T) = +proc addTracker*[T](id: string, tracker: T) {. + deprecated: "Please use trackCounter facility instead".} = ## Add new ``tracker`` object to current thread dispatcher with identifier ## ``id``. - let loop = getThreadDispatcher() - loop.trackers[id] = tracker + getThreadDispatcher().trackers[id] = tracker -proc getTracker*(id: string): TrackerBase = +proc getTracker*(id: string): TrackerBase {. + deprecated: "Please use getTrackerCounter() instead".} = ## Get ``tracker`` from current thread dispatcher using identifier ``id``. - let loop = getThreadDispatcher() - result = loop.trackers.getOrDefault(id, nil) + getThreadDispatcher().trackers.getOrDefault(id, nil) + +proc trackCounter*(name: string) {.noinit.} = + ## Increase tracker counter with name ``name`` by 1. + let tracker = TrackerCounter(opened: 0'u64, closed: 0'u64) + inc(getThreadDispatcher().counters.mgetOrPut(name, tracker).opened) + +proc untrackCounter*(name: string) {.noinit.} = + ## Decrease tracker counter with name ``name`` by 1. + let tracker = TrackerCounter(opened: 0'u64, closed: 0'u64) + inc(getThreadDispatcher().counters.mgetOrPut(name, tracker).closed) + +proc getTrackerCounter*(name: string): TrackerCounter {.noinit.} = + ## Return value of counter with name ``name``. + let tracker = TrackerCounter(opened: 0'u64, closed: 0'u64) + getThreadDispatcher().counters.getOrDefault(name, tracker) + +proc isCounterLeaked*(name: string): bool {.noinit.} = + ## Returns ``true`` if leak is detected, number of `opened` not equal to + ## number of `closed` requests. + let tracker = TrackerCounter(opened: 0'u64, closed: 0'u64) + let res = getThreadDispatcher().counters.getOrDefault(name, tracker) + res.opened == res.closed + +iterator trackerCounters*( + loop: PDispatcher + ): tuple[name: string, value: TrackerCounter] = + ## Iterates over `loop` thread dispatcher tracker counter table, returns all + ## the tracker counter's names and values. + doAssert(not(isNil(loop))) + for key, value in loop.counters.pairs(): + yield (key, value) + +iterator trackerCounterKeys*(loop: PDispatcher): string = + doAssert(not(isNil(loop))) + ## Iterates over `loop` thread dispatcher tracker counter table, returns all + ## tracker names. 
+ for key in loop.counters.keys(): + yield key when chronosFutureTracking: iterator pendingFutures*(): FutureBase = diff --git a/chronos/asyncproc.nim b/chronos/asyncproc.nim index 8d15b72e3..8d0cdb790 100644 --- a/chronos/asyncproc.nim +++ b/chronos/asyncproc.nim @@ -23,8 +23,6 @@ const AsyncProcessTrackerName* = "async.process" ## AsyncProcess leaks tracker name - - type AsyncProcessError* = object of CatchableError @@ -109,49 +107,9 @@ type stdError*: string status*: int - AsyncProcessTracker* = ref object of TrackerBase - opened*: int64 - closed*: int64 - template Pipe*(t: typedesc[AsyncProcess]): ProcessStreamHandle = ProcessStreamHandle(kind: ProcessStreamHandleKind.Auto) -proc setupAsyncProcessTracker(): AsyncProcessTracker {.gcsafe.} - -proc getAsyncProcessTracker(): AsyncProcessTracker {.inline.} = - var res = cast[AsyncProcessTracker](getTracker(AsyncProcessTrackerName)) - if isNil(res): - res = setupAsyncProcessTracker() - res - -proc dumpAsyncProcessTracking(): string {.gcsafe.} = - var tracker = getAsyncProcessTracker() - let res = "Started async processes: " & $tracker.opened & "\n" & - "Closed async processes: " & $tracker.closed - res - -proc leakAsyncProccessTracker(): bool {.gcsafe.} = - var tracker = getAsyncProcessTracker() - tracker.opened != tracker.closed - -proc trackAsyncProccess(t: AsyncProcessRef) {.inline.} = - var tracker = getAsyncProcessTracker() - inc(tracker.opened) - -proc untrackAsyncProcess(t: AsyncProcessRef) {.inline.} = - var tracker = getAsyncProcessTracker() - inc(tracker.closed) - -proc setupAsyncProcessTracker(): AsyncProcessTracker {.gcsafe.} = - var res = AsyncProcessTracker( - opened: 0, - closed: 0, - dump: dumpAsyncProcessTracking, - isLeaked: leakAsyncProccessTracker - ) - addTracker(AsyncProcessTrackerName, res) - res - proc init*(t: typedesc[AsyncFD], handle: ProcessStreamHandle): AsyncFD = case handle.kind of ProcessStreamHandleKind.ProcHandle: @@ -502,7 +460,7 @@ when defined(windows): flags: pipes.flags ) - trackAsyncProccess(process) + trackCounter(AsyncProcessTrackerName) return process proc peekProcessExitCode(p: AsyncProcessRef): AsyncProcessResult[int] = @@ -919,7 +877,7 @@ else: flags: pipes.flags ) - trackAsyncProccess(process) + trackCounter(AsyncProcessTrackerName) return process proc peekProcessExitCode(p: AsyncProcessRef, @@ -1237,7 +1195,7 @@ proc closeWait*(p: AsyncProcessRef) {.async.} = discard closeProcessHandles(p.pipes, p.options, OSErrorCode(0)) await p.pipes.closeProcessStreams(p.options) discard p.closeThreadAndProcessHandle() - untrackAsyncProcess(p) + untrackCounter(AsyncProcessTrackerName) proc stdinStream*(p: AsyncProcessRef): AsyncStreamWriter = doAssert(p.pipes.stdinHolder.kind == StreamKind.Writer, diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 9920fc7cb..7e6e5d2d1 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -96,10 +96,6 @@ type reader*: AsyncStreamReader writer*: AsyncStreamWriter - AsyncStreamTracker* = ref object of TrackerBase - opened*: int64 - closed*: int64 - AsyncStreamRW* = AsyncStreamReader | AsyncStreamWriter proc init*(t: typedesc[AsyncBuffer], size: int): AsyncBuffer = @@ -332,79 +328,6 @@ template checkStreamClosed*(t: untyped) = template checkStreamFinished*(t: untyped) = if t.atEof(): raiseAsyncStreamWriteEOFError() -proc setupAsyncStreamReaderTracker(): AsyncStreamTracker {. - gcsafe, raises: [].} -proc setupAsyncStreamWriterTracker(): AsyncStreamTracker {. 
- gcsafe, raises: [].} - -proc getAsyncStreamReaderTracker(): AsyncStreamTracker {.inline.} = - var res = cast[AsyncStreamTracker](getTracker(AsyncStreamReaderTrackerName)) - if isNil(res): - res = setupAsyncStreamReaderTracker() - res - -proc getAsyncStreamWriterTracker(): AsyncStreamTracker {.inline.} = - var res = cast[AsyncStreamTracker](getTracker(AsyncStreamWriterTrackerName)) - if isNil(res): - res = setupAsyncStreamWriterTracker() - res - -proc dumpAsyncStreamReaderTracking(): string {.gcsafe.} = - var tracker = getAsyncStreamReaderTracker() - let res = "Opened async stream readers: " & $tracker.opened & "\n" & - "Closed async stream readers: " & $tracker.closed - res - -proc dumpAsyncStreamWriterTracking(): string {.gcsafe.} = - var tracker = getAsyncStreamWriterTracker() - let res = "Opened async stream writers: " & $tracker.opened & "\n" & - "Closed async stream writers: " & $tracker.closed - res - -proc leakAsyncStreamReader(): bool {.gcsafe.} = - var tracker = getAsyncStreamReaderTracker() - tracker.opened != tracker.closed - -proc leakAsyncStreamWriter(): bool {.gcsafe.} = - var tracker = getAsyncStreamWriterTracker() - tracker.opened != tracker.closed - -proc trackAsyncStreamReader(t: AsyncStreamReader) {.inline.} = - var tracker = getAsyncStreamReaderTracker() - inc(tracker.opened) - -proc untrackAsyncStreamReader*(t: AsyncStreamReader) {.inline.} = - var tracker = getAsyncStreamReaderTracker() - inc(tracker.closed) - -proc trackAsyncStreamWriter(t: AsyncStreamWriter) {.inline.} = - var tracker = getAsyncStreamWriterTracker() - inc(tracker.opened) - -proc untrackAsyncStreamWriter*(t: AsyncStreamWriter) {.inline.} = - var tracker = getAsyncStreamWriterTracker() - inc(tracker.closed) - -proc setupAsyncStreamReaderTracker(): AsyncStreamTracker {.gcsafe.} = - var res = AsyncStreamTracker( - opened: 0, - closed: 0, - dump: dumpAsyncStreamReaderTracking, - isLeaked: leakAsyncStreamReader - ) - addTracker(AsyncStreamReaderTrackerName, res) - res - -proc setupAsyncStreamWriterTracker(): AsyncStreamTracker {.gcsafe.} = - var res = AsyncStreamTracker( - opened: 0, - closed: 0, - dump: dumpAsyncStreamWriterTracking, - isLeaked: leakAsyncStreamWriter - ) - addTracker(AsyncStreamWriterTrackerName, res) - res - template readLoop(body: untyped): untyped = while true: if rstream.buffer.dataLen() == 0: @@ -977,9 +900,9 @@ proc close*(rw: AsyncStreamRW) = if not(rw.future.finished()): rw.future.complete() when rw is AsyncStreamReader: - untrackAsyncStreamReader(rw) + untrackCounter(AsyncStreamReaderTrackerName) elif rw is AsyncStreamWriter: - untrackAsyncStreamWriter(rw) + untrackCounter(AsyncStreamWriterTrackerName) rw.state = AsyncStreamState.Closed when rw is AsyncStreamReader: @@ -1028,7 +951,7 @@ proc init*(child, wsource: AsyncStreamWriter, loop: StreamWriterLoop, child.wsource = wsource child.tsource = wsource.tsource child.queue = newAsyncQueue[WriteItem](queueSize) - trackAsyncStreamWriter(child) + trackCounter(AsyncStreamWriterTrackerName) child.startWriter() proc init*[T](child, wsource: AsyncStreamWriter, loop: StreamWriterLoop, @@ -1042,7 +965,7 @@ proc init*[T](child, wsource: AsyncStreamWriter, loop: StreamWriterLoop, if not isNil(udata): GC_ref(udata) child.udata = cast[pointer](udata) - trackAsyncStreamWriter(child) + trackCounter(AsyncStreamWriterTrackerName) child.startWriter() proc init*(child, rsource: AsyncStreamReader, loop: StreamReaderLoop, @@ -1053,7 +976,7 @@ proc init*(child, rsource: AsyncStreamReader, loop: StreamReaderLoop, child.rsource = rsource child.tsource 
= rsource.tsource child.buffer = AsyncBuffer.init(bufferSize) - trackAsyncStreamReader(child) + trackCounter(AsyncStreamReaderTrackerName) child.startReader() proc init*[T](child, rsource: AsyncStreamReader, loop: StreamReaderLoop, @@ -1068,7 +991,7 @@ proc init*[T](child, rsource: AsyncStreamReader, loop: StreamReaderLoop, if not isNil(udata): GC_ref(udata) child.udata = cast[pointer](udata) - trackAsyncStreamReader(child) + trackCounter(AsyncStreamReaderTrackerName) child.startReader() proc init*(child: AsyncStreamWriter, tsource: StreamTransport) = @@ -1077,7 +1000,7 @@ proc init*(child: AsyncStreamWriter, tsource: StreamTransport) = child.writerLoop = nil child.wsource = nil child.tsource = tsource - trackAsyncStreamWriter(child) + trackCounter(AsyncStreamWriterTrackerName) child.startWriter() proc init*[T](child: AsyncStreamWriter, tsource: StreamTransport, @@ -1087,7 +1010,7 @@ proc init*[T](child: AsyncStreamWriter, tsource: StreamTransport, child.writerLoop = nil child.wsource = nil child.tsource = tsource - trackAsyncStreamWriter(child) + trackCounter(AsyncStreamWriterTrackerName) child.startWriter() proc init*(child, wsource: AsyncStreamWriter) = @@ -1096,7 +1019,7 @@ proc init*(child, wsource: AsyncStreamWriter) = child.writerLoop = nil child.wsource = wsource child.tsource = wsource.tsource - trackAsyncStreamWriter(child) + trackCounter(AsyncStreamWriterTrackerName) child.startWriter() proc init*[T](child, wsource: AsyncStreamWriter, udata: ref T) = @@ -1108,7 +1031,7 @@ proc init*[T](child, wsource: AsyncStreamWriter, udata: ref T) = if not isNil(udata): GC_ref(udata) child.udata = cast[pointer](udata) - trackAsyncStreamWriter(child) + trackCounter(AsyncStreamWriterTrackerName) child.startWriter() proc init*(child: AsyncStreamReader, tsource: StreamTransport) = @@ -1117,7 +1040,7 @@ proc init*(child: AsyncStreamReader, tsource: StreamTransport) = child.readerLoop = nil child.rsource = nil child.tsource = tsource - trackAsyncStreamReader(child) + trackCounter(AsyncStreamReaderTrackerName) child.startReader() proc init*[T](child: AsyncStreamReader, tsource: StreamTransport, @@ -1130,7 +1053,7 @@ proc init*[T](child: AsyncStreamReader, tsource: StreamTransport, if not isNil(udata): GC_ref(udata) child.udata = cast[pointer](udata) - trackAsyncStreamReader(child) + trackCounter(AsyncStreamReaderTrackerName) child.startReader() proc init*(child, rsource: AsyncStreamReader) = @@ -1139,7 +1062,7 @@ proc init*(child, rsource: AsyncStreamReader) = child.readerLoop = nil child.rsource = rsource child.tsource = rsource.tsource - trackAsyncStreamReader(child) + trackCounter(AsyncStreamReaderTrackerName) child.startReader() proc init*[T](child, rsource: AsyncStreamReader, udata: ref T) = @@ -1151,7 +1074,7 @@ proc init*[T](child, rsource: AsyncStreamReader, udata: ref T) = if not isNil(udata): GC_ref(udata) child.udata = cast[pointer](udata) - trackAsyncStreamReader(child) + trackCounter(AsyncStreamReaderTrackerName) child.startReader() proc newAsyncStreamReader*[T](rsource: AsyncStreamReader, diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index 91a7e7a05..3e10f76e6 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -53,10 +53,6 @@ type rwsabuf: WSABUF # Reader WSABUF structure wwsabuf: WSABUF # Writer WSABUF structure - DgramTransportTracker* = ref object of TrackerBase - opened*: int64 - closed*: int64 - const DgramTransportTrackerName* = "datagram.transport" @@ -88,39 +84,6 @@ template setReadError(t, e: untyped) = 
(t).state.incl(ReadError) (t).error = getTransportOsError(e) -proc setupDgramTransportTracker(): DgramTransportTracker {. - gcsafe, raises: [].} - -proc getDgramTransportTracker(): DgramTransportTracker {.inline.} = - var res = cast[DgramTransportTracker](getTracker(DgramTransportTrackerName)) - if isNil(res): - res = setupDgramTransportTracker() - doAssert(not(isNil(res))) - res - -proc dumpTransportTracking(): string {.gcsafe.} = - var tracker = getDgramTransportTracker() - "Opened transports: " & $tracker.opened & "\n" & - "Closed transports: " & $tracker.closed - -proc leakTransport(): bool {.gcsafe.} = - let tracker = getDgramTransportTracker() - tracker.opened != tracker.closed - -proc trackDgram(t: DatagramTransport) {.inline.} = - var tracker = getDgramTransportTracker() - inc(tracker.opened) - -proc untrackDgram(t: DatagramTransport) {.inline.} = - var tracker = getDgramTransportTracker() - inc(tracker.closed) - -proc setupDgramTransportTracker(): DgramTransportTracker {.gcsafe.} = - let res = DgramTransportTracker( - opened: 0, closed: 0, dump: dumpTransportTracking, isLeaked: leakTransport) - addTracker(DgramTransportTrackerName, res) - res - when defined(windows): template setWriterWSABuffer(t, v: untyped) = (t).wwsabuf.buf = cast[cstring](v.buf) @@ -213,7 +176,7 @@ when defined(windows): transp.state.incl(ReadPaused) if ReadClosed in transp.state and not(transp.future.finished()): # Stop tracking transport - untrackDgram(transp) + untrackCounter(DgramTransportTrackerName) # If `ReadClosed` present, then close(transport) was called. transp.future.complete() GC_unref(transp) @@ -259,7 +222,7 @@ when defined(windows): # WSARecvFrom session. if ReadClosed in transp.state and not(transp.future.finished()): # Stop tracking transport - untrackDgram(transp) + untrackCounter(DgramTransportTrackerName) transp.future.complete() GC_unref(transp) break @@ -394,7 +357,7 @@ when defined(windows): len: ULONG(len(res.buffer))) GC_ref(res) # Start tracking transport - trackDgram(res) + trackCounter(DgramTransportTrackerName) if NoAutoRead notin flags: let rres = res.resumeRead() if rres.isErr(): raiseTransportOsError(rres.error()) @@ -592,7 +555,7 @@ else: res.future = newFuture[void]("datagram.transport") GC_ref(res) # Start tracking transport - trackDgram(res) + trackCounter(DgramTransportTrackerName) if NoAutoRead notin flags: let rres = res.resumeRead() if rres.isErr(): raiseTransportOsError(rres.error()) @@ -603,7 +566,7 @@ proc close*(transp: DatagramTransport) = proc continuation(udata: pointer) {.raises: [].} = if not(transp.future.finished()): # Stop tracking transport - untrackDgram(transp) + untrackCounter(DgramTransportTrackerName) transp.future.complete() GC_unref(transp) diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 3abd942c8..a4190da52 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -54,15 +54,6 @@ type ReuseAddr, ReusePort - - StreamTransportTracker* = ref object of TrackerBase - opened*: int64 - closed*: int64 - - StreamServerTracker* = ref object of TrackerBase - opened*: int64 - closed*: int64 - ReadMessagePredicate* = proc (data: openArray[byte]): tuple[consumed: int, done: bool] {. gcsafe, raises: [].} @@ -199,71 +190,6 @@ template shiftVectorFile(v: var StreamVector, o: untyped) = (v).buf = cast[pointer](cast[uint]((v).buf) - uint(o)) (v).offset += uint(o) -proc setupStreamTransportTracker(): StreamTransportTracker {. - gcsafe, raises: [].} -proc setupStreamServerTracker(): StreamServerTracker {. 
- gcsafe, raises: [].} - -proc getStreamTransportTracker(): StreamTransportTracker {.inline.} = - var res = cast[StreamTransportTracker](getTracker(StreamTransportTrackerName)) - if isNil(res): - res = setupStreamTransportTracker() - doAssert(not(isNil(res))) - res - -proc getStreamServerTracker(): StreamServerTracker {.inline.} = - var res = cast[StreamServerTracker](getTracker(StreamServerTrackerName)) - if isNil(res): - res = setupStreamServerTracker() - doAssert(not(isNil(res))) - res - -proc dumpTransportTracking(): string {.gcsafe.} = - var tracker = getStreamTransportTracker() - "Opened transports: " & $tracker.opened & "\n" & - "Closed transports: " & $tracker.closed - -proc dumpServerTracking(): string {.gcsafe.} = - var tracker = getStreamServerTracker() - "Opened servers: " & $tracker.opened & "\n" & - "Closed servers: " & $tracker.closed - -proc leakTransport(): bool {.gcsafe.} = - var tracker = getStreamTransportTracker() - tracker.opened != tracker.closed - -proc leakServer(): bool {.gcsafe.} = - var tracker = getStreamServerTracker() - tracker.opened != tracker.closed - -proc trackStream(t: StreamTransport) {.inline.} = - var tracker = getStreamTransportTracker() - inc(tracker.opened) - -proc untrackStream(t: StreamTransport) {.inline.} = - var tracker = getStreamTransportTracker() - inc(tracker.closed) - -proc trackServer(s: StreamServer) {.inline.} = - var tracker = getStreamServerTracker() - inc(tracker.opened) - -proc untrackServer(s: StreamServer) {.inline.} = - var tracker = getStreamServerTracker() - inc(tracker.closed) - -proc setupStreamTransportTracker(): StreamTransportTracker {.gcsafe.} = - let res = StreamTransportTracker( - opened: 0, closed: 0, dump: dumpTransportTracking, isLeaked: leakTransport) - addTracker(StreamTransportTrackerName, res) - res - -proc setupStreamServerTracker(): StreamServerTracker {.gcsafe.} = - let res = StreamServerTracker( - opened: 0, closed: 0, dump: dumpServerTracking, isLeaked: leakServer) - addTracker(StreamServerTrackerName, res) - res - proc completePendingWriteQueue(queue: var Deque[StreamVector], v: int) {.inline.} = while len(queue) > 0: @@ -280,7 +206,7 @@ proc failPendingWriteQueue(queue: var Deque[StreamVector], proc clean(server: StreamServer) {.inline.} = if not(server.loopFuture.finished()): - untrackServer(server) + untrackCounter(StreamServerTrackerName) server.loopFuture.complete() if not(isNil(server.udata)) and (GCUserData in server.flags): GC_unref(cast[ref int](server.udata)) @@ -288,7 +214,7 @@ proc clean(server: StreamServer) {.inline.} = proc clean(transp: StreamTransport) {.inline.} = if not(transp.future.finished()): - untrackStream(transp) + untrackCounter(StreamTransportTrackerName) transp.future.complete() GC_unref(transp) @@ -784,7 +710,7 @@ when defined(windows): else: let transp = newStreamSocketTransport(sock, bufferSize, child) # Start tracking transport - trackStream(transp) + trackCounter(StreamTransportTrackerName) retFuture.complete(transp) else: sock.closeSocket() @@ -853,7 +779,7 @@ when defined(windows): let transp = newStreamPipeTransport(AsyncFD(pipeHandle), bufferSize, child) # Start tracking transport - trackStream(transp) + trackCounter(StreamTransportTrackerName) retFuture.complete(transp) pipeContinuation(nil) @@ -909,7 +835,7 @@ when defined(windows): ntransp = newStreamPipeTransport(server.sock, server.bufferSize, nil, flags) # Start tracking transport - trackStream(ntransp) + trackCounter(StreamTransportTrackerName) asyncSpawn server.function(server, ntransp) of 
ERROR_OPERATION_ABORTED: # CancelIO() interrupt or close call. @@ -1013,7 +939,7 @@ when defined(windows): ntransp = newStreamSocketTransport(server.asock, server.bufferSize, nil) # Start tracking transport - trackStream(ntransp) + trackCounter(StreamTransportTrackerName) asyncSpawn server.function(server, ntransp) of ERROR_OPERATION_ABORTED: @@ -1156,7 +1082,7 @@ when defined(windows): ntransp = newStreamSocketTransport(server.asock, server.bufferSize, nil) # Start tracking transport - trackStream(ntransp) + trackCounter(StreamTransportTrackerName) retFuture.complete(ntransp) of ERROR_OPERATION_ABORTED: # CancelIO() interrupt or close. @@ -1216,7 +1142,7 @@ when defined(windows): retFuture.fail(getTransportOsError(error)) return - trackStream(ntransp) + trackCounter(StreamTransportTrackerName) retFuture.complete(ntransp) of ERROR_OPERATION_ABORTED, ERROR_PIPE_NOT_CONNECTED: @@ -1626,7 +1552,7 @@ else: let transp = newStreamSocketTransport(sock, bufferSize, child) # Start tracking transport - trackStream(transp) + trackCounter(StreamTransportTrackerName) retFuture.complete(transp) proc cancel(udata: pointer) = @@ -1639,7 +1565,7 @@ else: if res == 0: let transp = newStreamSocketTransport(sock, bufferSize, child) # Start tracking transport - trackStream(transp) + trackCounter(StreamTransportTrackerName) retFuture.complete(transp) break else: @@ -1694,7 +1620,7 @@ else: newStreamSocketTransport(sock, server.bufferSize, transp) else: newStreamSocketTransport(sock, server.bufferSize, nil) - trackStream(ntransp) + trackCounter(StreamTransportTrackerName) asyncSpawn server.function(server, ntransp) else: # Client was accepted, so we not going to raise assertion, but @@ -1782,7 +1708,7 @@ else: else: newStreamSocketTransport(sock, server.bufferSize, nil) # Start tracking transport - trackStream(ntransp) + trackCounter(StreamTransportTrackerName) retFuture.complete(ntransp) else: discard closeFd(cint(sock)) @@ -2098,7 +2024,7 @@ proc createStreamServer*(host: TransportAddress, sres.apending = false # Start tracking server - trackServer(sres) + trackCounter(StreamServerTrackerName) GC_ref(sres) sres @@ -2671,7 +2597,7 @@ proc fromPipe2*(fd: AsyncFD, child: StreamTransport = nil, ? 
register2(fd) var res = newStreamPipeTransport(fd, bufferSize, child) # Start tracking transport - trackStream(res) + trackCounter(StreamTransportTrackerName) ok(res) proc fromPipe*(fd: AsyncFD, child: StreamTransport = nil, diff --git a/chronos/unittest2/asynctests.nim b/chronos/unittest2/asynctests.nim index fda035377..bc703b7e9 100644 --- a/chronos/unittest2/asynctests.nim +++ b/chronos/unittest2/asynctests.nim @@ -6,6 +6,7 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) +import std/tables import unittest2 import ../../chronos @@ -17,3 +18,14 @@ template asyncTest*(name: string, body: untyped): untyped = proc() {.async, gcsafe.} = body )()) + +template checkLeaks*(name: string): untyped = + let counter = getTrackerCounter(name) + if counter.opened != counter.closed: + echo "[" & name & "] opened = ", counter.opened, + ", closed = ", counter.closed + check counter.opened == counter.closed + +template checkLeaks*(): untyped = + for key in getThreadDispatcher().trackerCounterKeys(): + checkLeaks(key) diff --git a/tests/testasyncstream.nim b/tests/testasyncstream.nim index 09a0b7e3f..d90b6887a 100644 --- a/tests/testasyncstream.nim +++ b/tests/testasyncstream.nim @@ -7,8 +7,8 @@ # MIT license (LICENSE-MIT) import unittest2 import bearssl/[x509] -import ../chronos -import ../chronos/streams/[tlsstream, chunkstream, boundstream] +import ".."/chronos/unittest2/asynctests +import ".."/chronos/streams/[tlsstream, chunkstream, boundstream] {.used.} @@ -302,11 +302,7 @@ suite "AsyncStream test suite": check waitFor(testConsume()) == true test "AsyncStream(StreamTransport) leaks test": - check: - getTracker("async.stream.reader").isLeaked() == false - getTracker("async.stream.writer").isLeaked() == false - getTracker("stream.server").isLeaked() == false - getTracker("stream.transport").isLeaked() == false + checkLeaks() test "AsyncStream(AsyncStream) readExactly() test": proc testReadExactly2(): Future[bool] {.async.} = @@ -613,11 +609,7 @@ suite "AsyncStream test suite": check waitFor(testWriteEof()) == true test "AsyncStream(AsyncStream) leaks test": - check: - getTracker("async.stream.reader").isLeaked() == false - getTracker("async.stream.writer").isLeaked() == false - getTracker("stream.server").isLeaked() == false - getTracker("stream.transport").isLeaked() == false + checkLeaks() suite "ChunkedStream test suite": test "ChunkedStream test vectors": @@ -911,11 +903,7 @@ suite "ChunkedStream test suite": check waitFor(testSmallChunk(767309, 4457, 173)) == true test "ChunkedStream leaks test": - check: - getTracker("async.stream.reader").isLeaked() == false - getTracker("async.stream.writer").isLeaked() == false - getTracker("stream.server").isLeaked() == false - getTracker("stream.transport").isLeaked() == false + checkLeaks() suite "TLSStream test suite": const HttpHeadersMark = @[byte(0x0D), byte(0x0A), byte(0x0D), byte(0x0A)] @@ -1039,11 +1027,7 @@ suite "TLSStream test suite": check res == "Some message\r\n" test "TLSStream leaks test": - check: - getTracker("async.stream.reader").isLeaked() == false - getTracker("async.stream.writer").isLeaked() == false - getTracker("stream.server").isLeaked() == false - getTracker("stream.transport").isLeaked() == false + checkLeaks() suite "BoundedStream test suite": @@ -1411,8 +1395,4 @@ suite "BoundedStream test suite": check waitFor(checkEmptyStreams()) == true test "BoundedStream leaks test": - check: - getTracker("async.stream.reader").isLeaked() == false - 
getTracker("async.stream.writer").isLeaked() == false - getTracker("stream.server").isLeaked() == false - getTracker("stream.transport").isLeaked() == false + checkLeaks() diff --git a/tests/testdatagram.nim b/tests/testdatagram.nim index 17385a3ff..7db04f93e 100644 --- a/tests/testdatagram.nim +++ b/tests/testdatagram.nim @@ -6,8 +6,8 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import std/[strutils, net] -import unittest2 -import ../chronos +import ".."/chronos/unittest2/asynctests +import ".."/chronos {.used.} @@ -558,4 +558,4 @@ suite "Datagram Transport test suite": test "0.0.0.0/::0 (INADDR_ANY) test": check waitFor(testAnyAddress()) == 6 test "Transports leak test": - check getTracker("datagram.transport").isLeaked() == false + checkLeaks() diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index 2807ebcb2..1eacc2155 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -6,8 +6,9 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import std/[strutils, sha1] -import unittest2 -import ../chronos, ../chronos/apps/http/[httpserver, shttpserver, httpclient] +import ".."/chronos/unittest2/asynctests +import ".."/chronos, + ".."/chronos/apps/http/[httpserver, shttpserver, httpclient] import stew/base10 {.used.} @@ -138,7 +139,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -241,7 +242,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -324,7 +325,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -394,7 +395,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -470,7 +471,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -569,7 +570,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -667,7 +668,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -778,7 +779,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, false) server.start() @@ -909,7 +910,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return 
dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, false) server.start() @@ -971,7 +972,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, false) server.start() @@ -1125,7 +1126,7 @@ suite "HTTP client testing suite": else: return await request.respond(Http404, "Page not found") else: - return dumbResponse() + return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -1262,17 +1263,4 @@ suite "HTTP client testing suite": check waitFor(testServerSentEvents(false)) == true test "Leaks test": - proc getTrackerLeaks(tracker: string): bool = - let tracker = getTracker(tracker) - if isNil(tracker): false else: tracker.isLeaked() - - check: - getTrackerLeaks("http.body.reader") == false - getTrackerLeaks("http.body.writer") == false - getTrackerLeaks("httpclient.connection") == false - getTrackerLeaks("httpclient.request") == false - getTrackerLeaks("httpclient.response") == false - getTrackerLeaks("async.stream.reader") == false - getTrackerLeaks("async.stream.writer") == false - getTrackerLeaks("stream.server") == false - getTrackerLeaks("stream.transport") == false + checkLeaks() diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index 63c92b229..83372ea5a 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -6,10 +6,10 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import std/[strutils, algorithm] -import unittest2 -import ../chronos, ../chronos/apps/http/httpserver, - ../chronos/apps/http/httpcommon, - ../chronos/unittest2/asynctests +import ".."/chronos/unittest2/asynctests, + ".."/chronos, ".."/chronos/apps/http/httpserver, + ".."/chronos/apps/http/httpcommon, + ".."/chronos/apps/http/httpdebug import stew/base10 {.used.} @@ -84,7 +84,7 @@ suite "HTTP server testing suite": # Reraising exception, because processor should properly handle it. 
raise exc else: - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -100,14 +100,14 @@ suite "HTTP server testing suite": let request = case operation of GetBodyTest, ConsumeBodyTest, PostUrlTest: - "POST / HTTP/1.0\r\n" & + "POST / HTTP/1.1\r\n" & "Content-Type: application/x-www-form-urlencoded\r\n" & "Transfer-Encoding: chunked\r\n" & "Cookie: 2\r\n\r\n" & "5\r\na=a&b\r\n5\r\n=b&c=\r\n4\r\nc&d=\r\n4\r\n%D0%\r\n" & "2\r\n9F\r\n0\r\n\r\n" of PostMultipartTest: - "POST / HTTP/1.0\r\n" & + "POST / HTTP/1.1\r\n" & "Host: 127.0.0.1:30080\r\n" & "Transfer-Encoding: chunked\r\n" & "Content-Type: multipart/form-data; boundary=f98f0\r\n\r\n" & @@ -134,9 +134,9 @@ suite "HTTP server testing suite": let request = r.get() return await request.respond(Http200, "TEST_OK", HttpTable.init()) else: - if r.error().error == HttpServerError.TimeoutError: + if r.error.kind == HttpServerError.TimeoutError: serverRes = true - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), @@ -148,7 +148,6 @@ suite "HTTP server testing suite": let server = res.get() server.start() let address = server.instance.localAddress() - let data = await httpClient(address, "") await server.stop() await server.closeWait() @@ -165,9 +164,9 @@ suite "HTTP server testing suite": let request = r.get() return await request.respond(Http200, "TEST_OK", HttpTable.init()) else: - if r.error().error == HttpServerError.CriticalError: + if r.error.kind == HttpServerError.CriticalError: serverRes = true - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), @@ -195,9 +194,9 @@ suite "HTTP server testing suite": let request = r.get() return await request.respond(Http200, "TEST_OK", HttpTable.init()) else: - if r.error().error == HttpServerError.CriticalError: + if r.error.error == HttpServerError.CriticalError: serverRes = true - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -225,9 +224,9 @@ suite "HTTP server testing suite": if r.isOk(): discard else: - if r.error().error == HttpServerError.CriticalError: + if r.error.error == HttpServerError.CriticalError: serverRes = true - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -280,7 +279,7 @@ suite "HTTP server testing suite": HttpTable.init()) else: serverRes = false - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -321,7 +320,7 @@ suite "HTTP server testing suite": HttpTable.init()) else: serverRes = false - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -367,7 +366,7 @@ suite "HTTP server testing suite": HttpTable.init()) else: serverRes = false - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = 
HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -411,7 +410,7 @@ suite "HTTP server testing suite": HttpTable.init()) else: serverRes = false - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -456,7 +455,7 @@ suite "HTTP server testing suite": HttpTable.init()) else: serverRes = false - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -512,7 +511,7 @@ suite "HTTP server testing suite": HttpTable.init()) else: serverRes = false - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -576,7 +575,7 @@ suite "HTTP server testing suite": await eventContinue.wait() return await request.respond(Http404, "", HttpTable.init()) else: - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -1247,7 +1246,7 @@ suite "HTTP server testing suite": return response else: serverRes = false - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -1311,7 +1310,7 @@ suite "HTTP server testing suite": let request = r.get() return await request.respond(Http200, "TEST_OK", HttpTable.init()) else: - return dumbResponse() + return defaultResponse() for test in TestMessages: let @@ -1355,9 +1354,78 @@ suite "HTTP server testing suite": await server.stop() await server.closeWait() - test "Leaks test": + asyncTest "HTTP debug tests": + const + TestsCount = 10 + TestRequest = "GET / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n" + + proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + if r.isOk(): + let request = r.get() + return await request.respond(Http200, "TEST_OK", HttpTable.init()) + else: + return defaultResponse() + + proc client(address: TransportAddress, + data: string): Future[StreamTransport] {.async.} = + var transp: StreamTransport + var buffer = newSeq[byte](4096) + var sep = @[0x0D'u8, 0x0A'u8, 0x0D'u8, 0x0A'u8] + try: + transp = await connect(address) + let wres {.used.} = + await transp.write(data) + let hres {.used.} = + await transp.readUntil(addr buffer[0], len(buffer), sep) + transp + except CatchableError: + if not(isNil(transp)): await transp.closeWait() + nil + + let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} + let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, + serverFlags = {HttpServerFlags.Http11Pipeline}, + socketFlags = socketFlags) + check res.isOk() + + let server = res.get() + server.start() + let address = server.instance.localAddress() + + let info = server.getServerInfo() + check: - getTracker("async.stream.reader").isLeaked() == false - getTracker("async.stream.writer").isLeaked() == false - getTracker("stream.server").isLeaked() == false - getTracker("stream.transport").isLeaked() == false + info.connectionType == ConnectionType.NonSecure + info.address == address + info.state == HttpServerState.ServerRunning + info.flags == {HttpServerFlags.Http11Pipeline} + info.socketFlags == socketFlags + + try: + var clientFutures: seq[Future[StreamTransport]] + for i in 0 ..< 
TestsCount: + clientFutures.add(client(address, TestRequest)) + await allFutures(clientFutures) + + let connections = server.getConnections() + check len(connections) == TestsCount + let currentTime = Moment.now() + for index, connection in connections.pairs(): + let transp = clientFutures[index].read() + check: + connection.remoteAddress.get() == transp.localAddress() + connection.localAddress.get() == transp.remoteAddress() + connection.connectionType == ConnectionType.NonSecure + connection.connectionState == ConnectionState.Alive + (currentTime - connection.createMoment.get()) != ZeroDuration + (currentTime - connection.acceptMoment) != ZeroDuration + var pending: seq[Future[void]] + for transpFut in clientFutures: + pending.add(closeWait(transpFut.read())) + await allFutures(pending) + finally: + await server.stop() + await server.closeWait() + + test "Leaks test": + checkLeaks() diff --git a/tests/testproc.nim b/tests/testproc.nim index 05f793db8..b038325a1 100644 --- a/tests/testproc.nim +++ b/tests/testproc.nim @@ -6,7 +6,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import std/os -import unittest2, stew/[base10, byteutils] +import stew/[base10, byteutils] import ".."/chronos/unittest2/asynctests when defined(posix): @@ -414,12 +414,4 @@ suite "Asynchronous process management test suite": check getCurrentFD() == markFD test "Leaks test": - proc getTrackerLeaks(tracker: string): bool = - let tracker = getTracker(tracker) - if isNil(tracker): false else: tracker.isLeaked() - - check: - getTrackerLeaks("async.process") == false - getTrackerLeaks("async.stream.reader") == false - getTrackerLeaks("async.stream.writer") == false - getTrackerLeaks("stream.transport") == false + checkLeaks() diff --git a/tests/testshttpserver.nim b/tests/testshttpserver.nim index a258cc953..a83d0b29f 100644 --- a/tests/testshttpserver.nim +++ b/tests/testshttpserver.nim @@ -6,8 +6,8 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import std/strutils -import unittest2 -import ../chronos, ../chronos/apps/http/shttpserver +import ".."/chronos/unittest2/asynctests +import ".."/chronos, ".."/chronos/apps/http/shttpserver import stew/base10 {.used.} @@ -115,7 +115,7 @@ suite "Secure HTTP server testing suite": HttpTable.init()) else: serverRes = false - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let serverFlags = {Secure} @@ -154,7 +154,7 @@ suite "Secure HTTP server testing suite": else: serverRes = true testFut.complete() - return dumbResponse() + return defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let serverFlags = {Secure} @@ -178,3 +178,6 @@ suite "Secure HTTP server testing suite": return serverRes and data == "EXCEPTION" check waitFor(testHTTPS2(initTAddress("127.0.0.1:30080"))) == true + + test "Leaks test": + checkLeaks() diff --git a/tests/teststream.nim b/tests/teststream.nim index 7601a3975..f6bc99b66 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -6,7 +6,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import std/[strutils, os] -import unittest2 +import ".."/chronos/unittest2/asynctests import ".."/chronos, ".."/chronos/[osdefs, oserrno] {.used.} @@ -1370,10 +1370,11 @@ suite "Stream Transport test suite": test prefixes[i] & "close() while in accept() waiting test": check waitFor(testAcceptClose(addresses[i])) == true test prefixes[i] & "Intermediate transports leak test 
#1": + checkLeaks() when defined(windows): skip() else: - check getTracker("stream.transport").isLeaked() == false + checkLeaks(StreamTransportTrackerName) test prefixes[i] & "accept() too many file descriptors test": when defined(windows): skip() @@ -1389,10 +1390,8 @@ suite "Stream Transport test suite": check waitFor(testPipe()) == true test "[IP] bind connect to local address": waitFor(testConnectBindLocalAddress()) - test "Servers leak test": - check getTracker("stream.server").isLeaked() == false - test "Transports leak test": - check getTracker("stream.transport").isLeaked() == false + test "Leaks test": + checkLeaks() test "File descriptors leak test": when defined(windows): # Windows handle numbers depends on many conditions, so we can't use From c8f650e65113ac44368a8c96af594056d270d24b Mon Sep 17 00:00:00 2001 From: cheatfate Date: Fri, 14 Jul 2023 14:57:24 +0300 Subject: [PATCH 042/146] Bump version to 3.2.1. --- chronos.nimble | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos.nimble b/chronos.nimble index 6b4ac58a8..2487ad42f 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -1,7 +1,7 @@ mode = ScriptMode.Verbose packageName = "chronos" -version = "3.2.0" +version = "3.2.1" author = "Status Research & Development GmbH" description = "Networking framework with async/await support" license = "MIT or Apache License 2.0" From 726fcb1915856b49a57bd561cf6bdb4b970fb94b Mon Sep 17 00:00:00 2001 From: cheatfate Date: Fri, 14 Jul 2023 16:40:07 +0300 Subject: [PATCH 043/146] Revert "Bump version to 3.2.1." This reverts commit c8f650e65113ac44368a8c96af594056d270d24b. --- chronos.nimble | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos.nimble b/chronos.nimble index 2487ad42f..6b4ac58a8 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -1,7 +1,7 @@ mode = ScriptMode.Verbose packageName = "chronos" -version = "3.2.1" +version = "3.2.0" author = "Status Research & Development GmbH" description = "Networking framework with async/await support" license = "MIT or Apache License 2.0" From d652c52142d67216848068874d0bf72821501706 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 17 Jul 2023 16:39:41 +0300 Subject: [PATCH 044/146] Fix regression introduced by #416. (#419) --- chronos/apps/http/httpserver.nim | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index b5b8bfcc6..6cddb22c9 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -736,8 +736,10 @@ proc sendDefaultResponse(conn: HttpConnectionRef, reqFence: RequestFence, of HttpServerError.CatchableError: await conn.sendErrorResponse(version, reqFence.error.code, false) false - of HttpServerError.DisconnectError, - HttpServerError.InterruptError: + of HttpServerError.DisconnectError: + # When `HttpServerFlags.NotifyDisconnect` is set. + false + of HttpServerError.InterruptError: raiseAssert("Unexpected request error: " & $reqFence.error.kind) except CancelledError: false From e04c042e8acfe0025c780de8a025aa4c4e042130 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 19 Jul 2023 20:33:28 +0300 Subject: [PATCH 045/146] Add cross-platform shutdown() call and use it for HTTP server. (#420) * Add cross-platform shutdown() call and use it for httpserver connection processing. * Fix Posix compilation issues and warnings. 
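
Illustrative usage (a minimal sketch, not part of the diff below): the proc name
`closeGracefully` is hypothetical; the sketch assumes only the `shutdownWait()`
transport call introduced by this patch plus the existing `closeWait()`.

  import chronos

  proc closeGracefully(transp: StreamTransport) {.async.} =
    # Half-close the transport first so the peer observes EOF for any data
    # still in flight; the peer may already be gone, so a failed shutdown is
    # simply ignored here (assumption for this sketch).
    try:
      await transp.shutdownWait()
    except CatchableError:
      discard
    # Then release the transport and its descriptor as before.
    await transp.closeWait()
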
--- chronos/apps/http/httpserver.nim | 102 +++++++++++++++++++++---------- chronos/asyncloop.nim | 8 +++ chronos/osdefs.nim | 19 ++++-- chronos/transports/stream.nim | 51 ++++++++++++++++ 4 files changed, 144 insertions(+), 36 deletions(-) diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index 6cddb22c9..b86c0b3d1 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -54,6 +54,9 @@ type HttpResponseStreamType* {.pure.} = enum Plain, SSE, Chunked + HttpProcessExitType* {.pure.} = enum + KeepAlive, Graceful, Immediate + HttpResponseState* {.pure.} = enum Empty, Prepared, Sending, Finished, Failed, Cancelled, Default @@ -663,62 +666,83 @@ proc sendErrorResponse(conn: HttpConnectionRef, version: HttpVersion, # We ignore errors here, because we indicating error already. discard -proc sendErrorResponse(conn: HttpConnectionRef, reqFence: RequestFence, - respError: HttpProcessError): Future[bool] {.async.} = +proc sendErrorResponse( + conn: HttpConnectionRef, + reqFence: RequestFence, + respError: HttpProcessError + ): Future[HttpProcessExitType] {.async.} = let version = getResponseVersion(reqFence) try: if reqFence.isOk(): case respError.kind of HttpServerError.CriticalError: await conn.sendErrorResponse(version, respError.code, false) - false + HttpProcessExitType.Graceful of HttpServerError.RecoverableError: await conn.sendErrorResponse(version, respError.code, true) - true + HttpProcessExitType.Graceful of HttpServerError.CatchableError: await conn.sendErrorResponse(version, respError.code, false) - false + HttpProcessExitType.Graceful of HttpServerError.DisconnectError, HttpServerError.InterruptError, HttpServerError.TimeoutError: raiseAssert("Unexpected response error: " & $respError.kind) else: - false + HttpProcessExitType.Graceful except CancelledError: - false + HttpProcessExitType.Immediate + except CatchableError: + HttpProcessExitType.Immediate -proc sendDefaultResponse(conn: HttpConnectionRef, reqFence: RequestFence, - response: HttpResponseRef): Future[bool] {.async.} = +proc sendDefaultResponse( + conn: HttpConnectionRef, + reqFence: RequestFence, + response: HttpResponseRef + ): Future[HttpProcessExitType] {.async.} = let version = getResponseVersion(reqFence) keepConnection = - if isNil(response): - false + if isNil(response) or (HttpResponseFlags.KeepAlive notin response.flags): + HttpProcessExitType.Graceful else: - HttpResponseFlags.KeepAlive in response.flags + HttpProcessExitType.KeepAlive + + template toBool(hpet: HttpProcessExitType): bool = + case hpet + of HttpProcessExitType.KeepAlive: + true + of HttpProcessExitType.Immediate: + false + of HttpProcessExitType.Graceful: + false + try: if reqFence.isOk(): if isNil(response): - await conn.sendErrorResponse(version, Http404, keepConnection) + await conn.sendErrorResponse(version, Http404, keepConnection.toBool()) keepConnection else: case response.state of HttpResponseState.Empty: # Response was ignored, so we respond with not found. 
- await conn.sendErrorResponse(version, Http404, keepConnection) + await conn.sendErrorResponse(version, Http404, + keepConnection.toBool()) keepConnection of HttpResponseState.Prepared: # Response was prepared but not sent, so we can respond with some # error code - await conn.sendErrorResponse(HttpVersion11, Http409, keepConnection) + await conn.sendErrorResponse(HttpVersion11, Http409, + keepConnection.toBool()) keepConnection of HttpResponseState.Sending, HttpResponseState.Failed, HttpResponseState.Cancelled: # Just drop connection, because we dont know at what stage we are - false + HttpProcessExitType.Immediate of HttpResponseState.Default: # Response was ignored, so we respond with not found. - await conn.sendErrorResponse(version, Http404, keepConnection) + await conn.sendErrorResponse(version, Http404, + keepConnection.toBool()) keepConnection of HttpResponseState.Finished: keepConnection @@ -726,23 +750,25 @@ proc sendDefaultResponse(conn: HttpConnectionRef, reqFence: RequestFence, case reqFence.error.kind of HttpServerError.TimeoutError: await conn.sendErrorResponse(version, reqFence.error.code, false) - false + HttpProcessExitType.Graceful of HttpServerError.CriticalError: await conn.sendErrorResponse(version, reqFence.error.code, false) - false + HttpProcessExitType.Graceful of HttpServerError.RecoverableError: - await conn.sendErrorResponse(version, reqFence.error.code, true) - false + await conn.sendErrorResponse(version, reqFence.error.code, false) + HttpProcessExitType.Graceful of HttpServerError.CatchableError: await conn.sendErrorResponse(version, reqFence.error.code, false) - false + HttpProcessExitType.Graceful of HttpServerError.DisconnectError: # When `HttpServerFlags.NotifyDisconnect` is set. - false + HttpProcessExitType.Immediate of HttpServerError.InterruptError: raiseAssert("Unexpected request error: " & $reqFence.error.kind) except CancelledError: - false + HttpProcessExitType.Immediate + except CatchableError: + HttpProcessExitType.Immediate proc getRequest(conn: HttpConnectionRef): Future[HttpRequestRef] {.async.} = try: @@ -800,6 +826,10 @@ proc new(ht: typedesc[HttpConnectionRef], server: HttpServerRef, trackCounter(HttpServerUnsecureConnectionTrackerName) res +proc gracefulCloseWait*(conn: HttpConnectionRef) {.async.} = + await conn.transp.shutdownWait() + await conn.closeCb(conn) + proc closeWait*(conn: HttpConnectionRef): Future[void] = conn.closeCb(conn) @@ -942,15 +972,15 @@ proc getConnectionFence*(server: HttpServerRef, proc processRequest(server: HttpServerRef, connection: HttpConnectionRef, - connId: string): Future[bool] {.async.} = + connId: string): Future[HttpProcessExitType] {.async.} = let requestFence = await getRequestFence(server, connection) if requestFence.isErr(): case requestFence.error.kind of HttpServerError.InterruptError: - return false + return HttpProcessExitType.Immediate of HttpServerError.DisconnectError: if HttpServerFlags.NotifyDisconnect notin server.flags: - return false + return HttpProcessExitType.Immediate else: discard @@ -961,7 +991,7 @@ proc processRequest(server: HttpServerRef, let responseFence = await getResponseFence(connection, requestFence) if responseFence.isErr() and (responseFence.error.kind == HttpServerError.InterruptError): - return false + return HttpProcessExitType.Immediate if responseFence.isErr(): await connection.sendErrorResponse(requestFence, responseFence.error) @@ -985,12 +1015,20 @@ proc processLoop(holder: HttpConnectionHolderRef) {.async.} = holder.connection = connection + var runLoop = 
HttpProcessExitType.KeepAlive + defer: server.connections.del(connectionId) - await connection.closeWait() - - var runLoop = true - while runLoop: + case runLoop + of HttpProcessExitType.KeepAlive: + # This could happened only on CancelledError. + await connection.closeWait() + of HttpProcessExitType.Immediate: + await connection.closeWait() + of HttpProcessExitType.Graceful: + await connection.gracefulCloseWait() + + while runLoop == HttpProcessExitType.KeepAlive: runLoop = await server.processRequest(connection, connectionId) proc acceptClientLoop(server: HttpServerRef) {.async.} = diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index a603ee4fe..9d5ac23ee 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -313,6 +313,7 @@ when defined(windows): getAcceptExSockAddrs*: WSAPROC_GETACCEPTEXSOCKADDRS transmitFile*: WSAPROC_TRANSMITFILE getQueuedCompletionStatusEx*: LPFN_GETQUEUEDCOMPLETIONSTATUSEX + disconnectEx*: WSAPROC_DISCONNECTEX flags: set[DispatcherFlag] PtrCustomOverlapped* = ptr CustomOverlapped @@ -393,6 +394,13 @@ when defined(windows): "dispatcher's TransmitFile()") loop.transmitFile = cast[WSAPROC_TRANSMITFILE](funcPointer) + block: + let res = getFunc(sock, funcPointer, WSAID_DISCONNECTEX) + if not(res): + raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & + "dispatcher's DisconnectEx()") + loop.disconnectEx = cast[WSAPROC_DISCONNECTEX](funcPointer) + if closeFd(sock) != 0: raiseOsDefect(osLastError(), "initAPI(): Unable to close control socket") diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 789da8cc2..a638056db 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -237,6 +237,10 @@ when defined(windows): GUID(D1: 0xb5367df0'u32, D2: 0xcbac'u16, D3: 0x11cf'u16, D4: [0x95'u8, 0xca'u8, 0x00'u8, 0x80'u8, 0x5f'u8, 0x48'u8, 0xa1'u8, 0x92'u8]) + WSAID_DISCONNECTEX* = + GUID(D1: 0x7fda2e11'u32, D2: 0x8630'u16, D3: 0x436f'u16, + D4: [0xa0'u8, 0x31'u8, 0xf5'u8, 0x36'u8, + 0xa6'u8, 0xee'u8, 0xc1'u8, 0x57'u8]) GAA_FLAG_INCLUDE_PREFIX* = 0x0010'u32 @@ -497,6 +501,11 @@ when defined(windows): lpTransmitBuffers: pointer, dwReserved: DWORD): WINBOOL {. stdcall, gcsafe, raises: [].} + WSAPROC_DISCONNECTEX* = proc ( + hSocket: SocketHandle, lpOverlapped: POVERLAPPED, dwFlags: DWORD, + dwReserved: DWORD): WINBOOL {. 
+ stdcall, gcsafe, raises: [].} + LPFN_GETQUEUEDCOMPLETIONSTATUSEX* = proc ( completionPort: HANDLE, lpPortEntries: ptr OVERLAPPED_ENTRY, ulCount: ULONG, ulEntriesRemoved: var ULONG, @@ -879,7 +888,7 @@ elif defined(macos) or defined(macosx): AF_INET, AF_INET6, SO_ERROR, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, - SIG_BLOCK, SIG_UNBLOCK, + SIG_BLOCK, SIG_UNBLOCK, SHUT_RD, SHUT_WR, SHUT_RDWR, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, @@ -900,7 +909,7 @@ elif defined(macos) or defined(macosx): AF_INET, AF_INET6, SO_ERROR, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, - SIG_BLOCK, SIG_UNBLOCK, + SIG_BLOCK, SIG_UNBLOCK, SHUT_RD, SHUT_WR, SHUT_RDWR, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, @@ -939,7 +948,7 @@ elif defined(linux): SOL_SOCKET, SO_ERROR, RLIMIT_NOFILE, MSG_NOSIGNAL, AF_INET, AF_INET6, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, - SOCK_DGRAM, + SOCK_DGRAM, SHUT_RD, SHUT_WR, SHUT_RDWR, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, @@ -962,7 +971,7 @@ elif defined(linux): SOL_SOCKET, SO_ERROR, RLIMIT_NOFILE, MSG_NOSIGNAL, AF_INET, AF_INET6, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, - SOCK_DGRAM, + SOCK_DGRAM, SHUT_RD, SHUT_WR, SHUT_RDWR, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, @@ -1081,6 +1090,7 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, + SHUT_RD, SHUT_WR, SHUT_RDWR, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, @@ -1101,6 +1111,7 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, + SHUT_RD, SHUT_WR, SHUT_RDWR, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index a4190da52..257c47534 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -2571,6 +2571,57 @@ proc closeWait*(transp: StreamTransport): Future[void] = transp.close() transp.join() +proc shutdownWait*(transp: StreamTransport): Future[void] = + ## Perform graceful shutdown of TCP connection backed by transport ``transp``. 
+ doAssert(transp.kind == TransportKind.Socket) + let retFuture = newFuture[void]("stream.transport.shutdown") + transp.checkClosed(retFuture) + transp.checkWriteEof(retFuture) + + when defined(windows): + let loop = getThreadDispatcher() + proc continuation(udata: pointer) {.gcsafe.} = + let ovl = cast[RefCustomOverlapped](udata) + if not(retFuture.finished()): + if ovl.data.errCode == OSErrorCode(-1): + retFuture.complete() + else: + transp.state.excl({WriteEof}) + retFuture.fail(getTransportOsError(ovl.data.errCode)) + GC_unref(ovl) + + let povl = RefCustomOverlapped(data: CompletionData(cb: continuation)) + GC_ref(povl) + + let res = loop.disconnectEx(SocketHandle(transp.fd), + cast[POVERLAPPED](povl), 0'u32, 0'u32) + if res == FALSE: + let err = osLastError() + case err + of ERROR_IO_PENDING: + transp.state.incl({WriteEof}) + else: + GC_unref(povl) + retFuture.fail(getTransportOsError(err)) + else: + transp.state.incl({WriteEof}) + retFuture.complete() + + retFuture + else: + proc continuation(udata: pointer) {.gcsafe.} = + if not(retFuture.finished()): + retFuture.complete() + + let res = osdefs.shutdown(SocketHandle(transp.fd), SHUT_WR) + if res < 0: + let err = osLastError() + retFuture.fail(getTransportOsError(err)) + else: + transp.state.incl({WriteEof}) + callSoon(continuation, nil) + retFuture + proc closed*(transp: StreamTransport): bool {.inline.} = ## Returns ``true`` if transport in closed state. ({ReadClosed, WriteClosed} * transp.state != {}) From 0277b65be2c7a365ac13df002fba6e172be55537 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 21 Jul 2023 15:51:36 +0300 Subject: [PATCH 046/146] Asynchronous thread notification mechanism. (#406) * Initial commit. * Some fixes. * More fixes. * Add first test. * Further fixes for MacOS/BSD. * Fixes for Linux. * Add proper tests. * Lower number of tests. * Add threadsync tests to test suite. * There is no need to run tests when threads are off. * Address review comments. Fix the issue with multiple signals. Add tests. * Switch to use socketpair() instead of pipes. Fix semaphoring issue on MacOS/BSD. Add tests. * Add single threaded fire/wait tests. 
--- chronos/osdefs.nim | 71 ++++--- chronos/threadsync.nim | 416 +++++++++++++++++++++++++++++++++++++++ tests/testall.nim | 2 +- tests/testthreadsync.nim | 369 ++++++++++++++++++++++++++++++++++ 4 files changed, 834 insertions(+), 24 deletions(-) create mode 100644 chronos/threadsync.nim create mode 100644 tests/testthreadsync.nim diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index a638056db..ecf770b81 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -879,13 +879,17 @@ elif defined(macos) or defined(macosx): setrlimit, getpid, pthread_sigmask, sigprocmask, sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, - getcwd, chdir, waitpid, kill, + getcwd, chdir, waitpid, kill, select, pselect, + socketpair, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, - Sockaddr_un, SocketHandle, AddrInfo, RLimit, + Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, + Suseconds, + FD_CLR, FD_ISSET, FD_SET, FD_ZERO, F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, - O_NONBLOCK, SOL_SOCKET, SOCK_RAW, MSG_NOSIGNAL, - AF_INET, AF_INET6, SO_ERROR, SO_REUSEADDR, + O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM, + SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK, + AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, SHUT_RD, SHUT_WR, SHUT_RDWR, @@ -900,13 +904,17 @@ elif defined(macos) or defined(macosx): setrlimit, getpid, pthread_sigmask, sigprocmask, sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, - getcwd, chdir, waitpid, kill, + getcwd, chdir, waitpid, kill, select, pselect, + socketpair, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, - Sockaddr_un, SocketHandle, AddrInfo, RLimit, + Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, + Suseconds, + FD_CLR, FD_ISSET, FD_SET, FD_ZERO, F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, - O_NONBLOCK, SOL_SOCKET, SOCK_RAW, MSG_NOSIGNAL, - AF_INET, AF_INET6, SO_ERROR, SO_REUSEADDR, + O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM, + SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK, + AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, SHUT_RD, SHUT_WR, SHUT_RDWR, @@ -938,17 +946,21 @@ elif defined(linux): recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, sendmsg, recvmsg, getpid, fcntl, pthread_sigmask, sigprocmask, clock_gettime, signal, - getcwd, chdir, waitpid, kill, + getcwd, chdir, waitpid, kill, select, pselect, + socketpair, ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, - SigInfo, Id, Tmsghdr, IOVec, RLimit, + SigInfo, Id, Tmsghdr, IOVec, RLimit, Timeval, TFdSet, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, AddrInfo, SocketHandle, + Suseconds, + FD_CLR, FD_ISSET, FD_SET, FD_ZERO, CLOCK_MONOTONIC, F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, O_NONBLOCK, SIG_BLOCK, SIG_UNBLOCK, SOL_SOCKET, SO_ERROR, RLIMIT_NOFILE, MSG_NOSIGNAL, - AF_INET, AF_INET6, SO_REUSEADDR, SO_REUSEPORT, + MSG_PEEK, + AF_INET, AF_INET6, AF_UNIX, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, - SOCK_DGRAM, SHUT_RD, SHUT_WR, SHUT_RDWR, + SOCK_DGRAM, SOCK_STREAM, SHUT_RD, SHUT_WR, SHUT_RDWR, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, 
SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, @@ -961,17 +973,21 @@ elif defined(linux): recvfrom, sendto, send, bindSocket, recv, connect, unlink, listen, sendmsg, recvmsg, getpid, fcntl, pthread_sigmask, sigprocmask, clock_gettime, signal, - getcwd, chdir, waitpid, kill, + getcwd, chdir, waitpid, kill, select, pselect, + socketpair, ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, - SigInfo, Id, Tmsghdr, IOVec, RLimit, + SigInfo, Id, Tmsghdr, IOVec, RLimit, TFdSet, Timeval, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, AddrInfo, SocketHandle, + Suseconds, + FD_CLR, FD_ISSET, FD_SET, FD_ZERO, CLOCK_MONOTONIC, F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, O_NONBLOCK, SIG_BLOCK, SIG_UNBLOCK, SOL_SOCKET, SO_ERROR, RLIMIT_NOFILE, MSG_NOSIGNAL, - AF_INET, AF_INET6, SO_REUSEADDR, SO_REUSEPORT, + MSG_PEEK, + AF_INET, AF_INET6, AF_UNIX, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, - SOCK_DGRAM, SHUT_RD, SHUT_WR, SHUT_RDWR, + SOCK_DGRAM, SOCK_STREAM, SHUT_RD, SHUT_WR, SHUT_RDWR, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, @@ -1080,13 +1096,17 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or setrlimit, getpid, pthread_sigmask, sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, clock_gettime, - getcwd, chdir, waitpid, kill, + getcwd, chdir, waitpid, kill, select, pselect, + socketpair, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, - Sockaddr_un, SocketHandle, AddrInfo, RLimit, + Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, + Suseconds, + FD_CLR, FD_ISSET, FD_SET, FD_ZERO, F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, - O_NONBLOCK, SOL_SOCKET, SOCK_RAW, MSG_NOSIGNAL, - AF_INET, AF_INET6, SO_ERROR, SO_REUSEADDR, + O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM, + SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK, + AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, @@ -1102,12 +1122,17 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or setrlimit, getpid, pthread_sigmask, sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, clock_gettime, + getcwd, chdir, waitpid, kill, select, pselect, + socketpair, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, - Sockaddr_un, SocketHandle, AddrInfo, RLimit, + Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, + Suseconds, + FD_CLR, FD_ISSET, FD_SET, FD_ZERO, F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, - O_NONBLOCK, SOL_SOCKET, SOCK_RAW, MSG_NOSIGNAL, - AF_INET, AF_INET6, SO_ERROR, SO_REUSEADDR, + O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM, + SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK, + AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, diff --git a/chronos/threadsync.nim b/chronos/threadsync.nim new file mode 100644 index 000000000..d41418121 --- /dev/null +++ b/chronos/threadsync.nim @@ -0,0 +1,416 @@ +# +# Chronos multithreaded synchronization primitives +# +# (c) Copyright 2023-Present Status Research & Development GmbH +# +# Licensed under either of +# 
Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +## This module implements some core async thread synchronization primitives. +import stew/results +import "."/[timer, asyncloop] + +export results + +{.push raises: [].} + +const hasThreadSupport* = compileOption("threads") +when not(hasThreadSupport): + {.fatal: "Compile this program with threads enabled!".} + +import "."/[osdefs, osutils, oserrno] + +type + ThreadSignal* = object + when defined(windows): + event: HANDLE + elif defined(linux): + efd: AsyncFD + else: + rfd, wfd: AsyncFD + + ThreadSignalPtr* = ptr ThreadSignal + +proc new*(t: typedesc[ThreadSignalPtr]): Result[ThreadSignalPtr, string] = + ## Create new ThreadSignal object. + let res = cast[ptr ThreadSignal](allocShared0(sizeof(ThreadSignal))) + when defined(windows): + var sa = getSecurityAttributes() + let event = osdefs.createEvent(addr sa, DWORD(0), DWORD(0), nil) + if event == HANDLE(0): + deallocShared(res) + return err(osErrorMsg(osLastError())) + res[] = ThreadSignal(event: event) + elif defined(linux): + let efd = eventfd(0, EFD_CLOEXEC or EFD_NONBLOCK) + if efd == -1: + deallocShared(res) + return err(osErrorMsg(osLastError())) + res[] = ThreadSignal(efd: AsyncFD(efd)) + else: + var sockets: array[2, cint] + block: + let sres = socketpair(AF_UNIX, SOCK_DGRAM, 0, sockets) + if sres < 0: + deallocShared(res) + return err(osErrorMsg(osLastError())) + # MacOS do not have SOCK_NONBLOCK and SOCK_CLOEXEC, so we forced to use + # setDescriptorFlags() for every socket. + block: + let sres = setDescriptorFlags(sockets[0], true, true) + if sres.isErr(): + discard closeFd(sockets[0]) + discard closeFd(sockets[1]) + deallocShared(res) + return err(osErrorMsg(sres.error)) + block: + let sres = setDescriptorFlags(sockets[1], true, true) + if sres.isErr(): + discard closeFd(sockets[0]) + discard closeFd(sockets[1]) + deallocShared(res) + return err(osErrorMsg(sres.error)) + res[] = ThreadSignal(rfd: AsyncFD(sockets[0]), wfd: AsyncFD(sockets[1])) + ok(ThreadSignalPtr(res)) + +when not(defined(windows)): + type + WaitKind {.pure.} = enum + Read, Write + + when defined(linux): + proc checkBusy(fd: cint): bool = false + else: + proc checkBusy(fd: cint): bool = + var data = 0'u64 + let res = handleEintr(recv(SocketHandle(fd), + addr data, sizeof(uint64), MSG_PEEK)) + if res == sizeof(uint64): + true + else: + false + + func toTimeval(a: Duration): Timeval = + ## Convert Duration ``a`` to ``Timeval`` object. + let nanos = a.nanoseconds + let m = nanos mod Second.nanoseconds() + Timeval( + tv_sec: Time(nanos div Second.nanoseconds()), + tv_usec: Suseconds(m div Microsecond.nanoseconds()) + ) + + proc waitReady(fd: cint, kind: WaitKind, + timeout: Duration): Result[bool, OSErrorCode] = + var + tv: Timeval + fdset = + block: + var res: TFdSet + FD_ZERO(res) + FD_SET(SocketHandle(fd), res) + res + + let + ptv = + if not(timeout.isInfinite()): + tv = timeout.toTimeval() + addr tv + else: + nil + nfd = cint(fd) + 1 + res = + case kind + of WaitKind.Read: + handleEintr(select(nfd, addr fdset, nil, nil, ptv)) + of WaitKind.Write: + handleEintr(select(nfd, nil, addr fdset, nil, ptv)) + + if res > 0: + ok(true) + elif res == 0: + ok(false) + else: + err(osLastError()) + + proc safeUnregisterAndCloseFd(fd: AsyncFD): Result[void, OSErrorCode] = + let loop = getThreadDispatcher() + if loop.contains(fd): + ? 
unregister2(fd) + if closeFd(cint(fd)) != 0: + err(osLastError()) + else: + ok() + +proc close*(signal: ThreadSignalPtr): Result[void, string] = + ## Close ThreadSignal object and free all the resources. + defer: deallocShared(signal) + when defined(windows): + # We do not need to perform unregistering on Windows, we can only close it. + if closeHandle(signal[].event) == 0'u32: + return err(osErrorMsg(osLastError())) + elif defined(linux): + let res = safeUnregisterAndCloseFd(signal[].efd) + if res.isErr(): + return err(osErrorMsg(res.error)) + else: + let res1 = safeUnregisterAndCloseFd(signal[].rfd) + let res2 = safeUnregisterAndCloseFd(signal[].wfd) + if res1.isErr(): return err(osErrorMsg(res1.error)) + if res2.isErr(): return err(osErrorMsg(res2.error)) + ok() + +proc fireSync*(signal: ThreadSignalPtr, + timeout = InfiniteDuration): Result[bool, string] = + ## Set state of ``signal`` to signaled state in blocking way. + ## + ## Returns ``false`` if signal was not signalled in time, and ``true`` + ## if operation was successful. + when defined(windows): + if setEvent(signal[].event) == 0'u32: + return err(osErrorMsg(osLastError())) + ok(true) + else: + let + eventFd = + when defined(linux): + cint(signal[].efd) + else: + cint(signal[].wfd) + checkFd = + when defined(linux): + cint(signal[].efd) + else: + cint(signal[].rfd) + + if checkBusy(checkFd): + # Signal is already in signalled state + return ok(true) + + var data = 1'u64 + while true: + let res = + when defined(linux): + handleEintr(write(eventFd, addr data, sizeof(uint64))) + else: + handleEintr(send(SocketHandle(eventFd), addr data, sizeof(uint64), + MSG_NOSIGNAL)) + if res < 0: + let errorCode = osLastError() + case errorCode + of EAGAIN: + let wres = waitReady(eventFd, WaitKind.Write, timeout) + if wres.isErr(): + return err(osErrorMsg(wres.error)) + if not(wres.get()): + return ok(false) + else: + return err(osErrorMsg(errorCode)) + elif res != sizeof(data): + return err(osErrorMsg(EINVAL)) + else: + return ok(true) + +proc waitSync*(signal: ThreadSignalPtr, + timeout = InfiniteDuration): Result[bool, string] = + ## Wait until the signal become signaled. This procedure is ``NOT`` async, + ## so it blocks execution flow, but this procedure do not need asynchronous + ## event loop to be present. + when defined(windows): + let + timeoutWin = + if timeout.isInfinite(): + INFINITE + else: + DWORD(timeout.milliseconds()) + handle = signal[].event + res = waitForSingleObject(handle, timeoutWin) + if res == WAIT_OBJECT_0: + ok(true) + elif res == WAIT_TIMEOUT: + ok(false) + elif res == WAIT_ABANDONED: + err("The wait operation has been abandoned") + else: + err("The wait operation has been failed") + else: + let eventFd = + when defined(linux): + cint(signal[].efd) + else: + cint(signal[].rfd) + var + data = 0'u64 + timer = timeout + while true: + let wres = + block: + let + start = Moment.now() + res = waitReady(eventFd, WaitKind.Read, timer) + timer = timer - (Moment.now() - start) + res + if wres.isErr(): + return err(osErrorMsg(wres.error)) + if not(wres.get()): + return ok(false) + let res = + when defined(linux): + handleEintr(read(eventFd, addr data, sizeof(uint64))) + else: + handleEintr(recv(SocketHandle(eventFd), addr data, sizeof(uint64), + cint(0))) + if res < 0: + let errorCode = osLastError() + # If errorCode == EAGAIN it means that reading operation is already + # pending and so some other consumer reading eventfd or pipe end, in + # this case we going to ignore error and wait for another event. 
+ if errorCode != EAGAIN: + return err(osErrorMsg(errorCode)) + elif res != sizeof(data): + return err(osErrorMsg(EINVAL)) + else: + return ok(true) + +proc fire*(signal: ThreadSignalPtr): Future[void] = + ## Set state of ``signal`` to signaled in asynchronous way. + var retFuture = newFuture[void]("asyncthreadsignal.fire") + when defined(windows): + if setEvent(signal[].event) == 0'u32: + retFuture.fail(newException(AsyncError, osErrorMsg(osLastError()))) + else: + retFuture.complete() + else: + var data = 1'u64 + let + eventFd = + when defined(linux): + cint(signal[].efd) + else: + cint(signal[].wfd) + checkFd = + when defined(linux): + cint(signal[].efd) + else: + cint(signal[].rfd) + + proc continuation(udata: pointer) {.gcsafe, raises: [].} = + if not(retFuture.finished()): + let res = + when defined(linux): + handleEintr(write(eventFd, addr data, sizeof(uint64))) + else: + handleEintr(send(SocketHandle(eventFd), addr data, sizeof(uint64), + MSG_NOSIGNAL)) + if res < 0: + let errorCode = osLastError() + discard removeWriter2(AsyncFD(eventFd)) + retFuture.fail(newException(AsyncError, osErrorMsg(errorCode))) + elif res != sizeof(data): + discard removeWriter2(AsyncFD(eventFd)) + retFuture.fail(newException(AsyncError, osErrorMsg(EINVAL))) + else: + let eres = removeWriter2(AsyncFD(eventFd)) + if eres.isErr(): + retFuture.fail(newException(AsyncError, osErrorMsg(eres.error))) + else: + retFuture.complete() + + proc cancellation(udata: pointer) {.gcsafe, raises: [].} = + if not(retFuture.finished()): + discard removeWriter2(AsyncFD(eventFd)) + + if checkBusy(checkFd): + # Signal is already in signalled state + retFuture.complete() + return retFuture + + let res = + when defined(linux): + handleEintr(write(eventFd, addr data, sizeof(uint64))) + else: + handleEintr(send(SocketHandle(eventFd), addr data, sizeof(uint64), + MSG_NOSIGNAL)) + if res < 0: + let errorCode = osLastError() + case errorCode + of EAGAIN: + let loop = getThreadDispatcher() + if not(loop.contains(AsyncFD(eventFd))): + let rres = register2(AsyncFD(eventFd)) + if rres.isErr(): + retFuture.fail(newException(AsyncError, osErrorMsg(rres.error))) + return retFuture + let wres = addWriter2(AsyncFD(eventFd), continuation) + if wres.isErr(): + retFuture.fail(newException(AsyncError, osErrorMsg(wres.error))) + else: + retFuture.cancelCallback = cancellation + else: + retFuture.fail(newException(AsyncError, osErrorMsg(errorCode))) + elif res != sizeof(data): + retFuture.fail(newException(AsyncError, osErrorMsg(EINVAL))) + else: + retFuture.complete() + + retFuture + +when defined(windows): + proc wait*(signal: ThreadSignalPtr) {.async.} = + let handle = signal[].event + let res = await waitForSingleObject(handle, InfiniteDuration) + # There should be no other response, because we use `InfiniteDuration`. 
+ doAssert(res == WaitableResult.Ok) +else: + proc wait*(signal: ThreadSignalPtr): Future[void] = + var retFuture = newFuture[void]("asyncthreadsignal.wait") + var data = 1'u64 + let eventFd = + when defined(linux): + cint(signal[].efd) + else: + cint(signal[].rfd) + + proc continuation(udata: pointer) {.gcsafe, raises: [].} = + if not(retFuture.finished()): + let res = + when defined(linux): + handleEintr(read(eventFd, addr data, sizeof(uint64))) + else: + handleEintr(recv(SocketHandle(eventFd), addr data, sizeof(uint64), + cint(0))) + if res < 0: + let errorCode = osLastError() + # If errorCode == EAGAIN it means that reading operation is already + # pending and so some other consumer reading eventfd or pipe end, in + # this case we going to ignore error and wait for another event. + if errorCode != EAGAIN: + discard removeReader2(AsyncFD(eventFd)) + retFuture.fail(newException(AsyncError, osErrorMsg(errorCode))) + elif res != sizeof(data): + discard removeReader2(AsyncFD(eventFd)) + retFuture.fail(newException(AsyncError, osErrorMsg(EINVAL))) + else: + let eres = removeReader2(AsyncFD(eventFd)) + if eres.isErr(): + retFuture.fail(newException(AsyncError, osErrorMsg(eres.error))) + else: + retFuture.complete() + + proc cancellation(udata: pointer) {.gcsafe, raises: [].} = + if not(retFuture.finished()): + # Future is already cancelled so we ignore errors. + discard removeReader2(AsyncFD(eventFd)) + + let loop = getThreadDispatcher() + if not(loop.contains(AsyncFD(eventFd))): + let res = register2(AsyncFD(eventFd)) + if res.isErr(): + retFuture.fail(newException(AsyncError, osErrorMsg(res.error))) + return retFuture + let res = addReader2(AsyncFD(eventFd), continuation) + if res.isErr(): + retFuture.fail(newException(AsyncError, osErrorMsg(res.error))) + return retFuture + retFuture.cancelCallback = cancellation + retFuture diff --git a/tests/testall.nim b/tests/testall.nim index bf0e98a9b..4861a85eb 100644 --- a/tests/testall.nim +++ b/tests/testall.nim @@ -8,7 +8,7 @@ import testmacro, testsync, testsoon, testtime, testfut, testsignal, testaddress, testdatagram, teststream, testserver, testbugs, testnet, testasyncstream, testhttpserver, testshttpserver, testhttpclient, - testproc, testratelimit, testfutures + testproc, testratelimit, testfutures, testthreadsync # Must be imported last to check for Pending futures import testutils diff --git a/tests/testthreadsync.nim b/tests/testthreadsync.nim new file mode 100644 index 000000000..fc85dc8c4 --- /dev/null +++ b/tests/testthreadsync.nim @@ -0,0 +1,369 @@ +# Chronos Test Suite +# (c) Copyright 2023-Present +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +import std/[cpuinfo, locks, strutils] +import ../chronos/unittest2/asynctests +import ../chronos/threadsync + +{.used.} + +type + ThreadResult = object + value: int + + ThreadResultPtr = ptr ThreadResult + + LockPtr = ptr Lock + + ThreadArg = object + signal: ThreadSignalPtr + retval: ThreadResultPtr + index: int + + ThreadArg2 = object + signal1: ThreadSignalPtr + signal2: ThreadSignalPtr + retval: ThreadResultPtr + + ThreadArg3 = object + lock: LockPtr + signal: ThreadSignalPtr + retval: ThreadResultPtr + index: int + + WaitSendKind {.pure.} = enum + Sync, Async + +const + TestsCount = 1000 + +suite "Asynchronous multi-threading sync primitives test suite": + proc setResult(thr: ThreadResultPtr, value: int) = + thr[].value = value + + proc new(t: typedesc[ThreadResultPtr], value: int = 
0): ThreadResultPtr = + var res = cast[ThreadResultPtr](allocShared0(sizeof(ThreadResult))) + res[].value = value + res + + proc free(thr: ThreadResultPtr) = + doAssert(not(isNil(thr))) + deallocShared(thr) + + let numProcs = countProcessors() * 2 + + template threadSignalTest(sendFlag, waitFlag: WaitSendKind) = + proc testSyncThread(arg: ThreadArg) {.thread.} = + let res = waitSync(arg.signal, 1500.milliseconds) + if res.isErr(): + arg.retval.setResult(1) + else: + if res.get(): + arg.retval.setResult(2) + else: + arg.retval.setResult(3) + + proc testAsyncThread(arg: ThreadArg) {.thread.} = + proc testAsyncCode(arg: ThreadArg) {.async.} = + try: + await wait(arg.signal).wait(1500.milliseconds) + arg.retval.setResult(2) + except AsyncTimeoutError: + arg.retval.setResult(3) + except CatchableError: + arg.retval.setResult(1) + + waitFor testAsyncCode(arg) + + let signal = ThreadSignalPtr.new().tryGet() + var args: seq[ThreadArg] + var threads = newSeq[Thread[ThreadArg]](numProcs) + for i in 0 ..< numProcs: + let + res = ThreadResultPtr.new() + arg = ThreadArg(signal: signal, retval: res, index: i) + args.add(arg) + case waitFlag + of WaitSendKind.Sync: + createThread(threads[i], testSyncThread, arg) + of WaitSendKind.Async: + createThread(threads[i], testAsyncThread, arg) + + await sleepAsync(500.milliseconds) + case sendFlag + of WaitSendKind.Sync: + check signal.fireSync().isOk() + of WaitSendKind.Async: + await signal.fire() + + joinThreads(threads) + + var ncheck: array[3, int] + for item in args: + if item.retval[].value == 1: + inc(ncheck[0]) + elif item.retval[].value == 2: + inc(ncheck[1]) + elif item.retval[].value == 3: + inc(ncheck[2]) + free(item.retval) + check: + signal.close().isOk() + ncheck[0] == 0 + ncheck[1] == 1 + ncheck[2] == numProcs - 1 + + template threadSignalTest2(testsCount: int, + sendFlag, waitFlag: WaitSendKind) = + proc testSyncThread(arg: ThreadArg2) {.thread.} = + for i in 0 ..< testsCount: + block: + let res = waitSync(arg.signal1, 1500.milliseconds) + if res.isErr(): + arg.retval.setResult(-1) + return + if not(res.get()): + arg.retval.setResult(-2) + return + + block: + let res = arg.signal2.fireSync() + if res.isErr(): + arg.retval.setResult(-3) + return + + arg.retval.setResult(i + 1) + + proc testAsyncThread(arg: ThreadArg2) {.thread.} = + proc testAsyncCode(arg: ThreadArg2) {.async.} = + for i in 0 ..< testsCount: + try: + await wait(arg.signal1).wait(1500.milliseconds) + except AsyncTimeoutError: + arg.retval.setResult(-2) + return + except AsyncError: + arg.retval.setResult(-1) + return + except CatchableError: + arg.retval.setResult(-3) + return + + try: + await arg.signal2.fire() + except AsyncError: + arg.retval.setResult(-4) + return + except CatchableError: + arg.retval.setResult(-5) + return + + arg.retval.setResult(i + 1) + + waitFor testAsyncCode(arg) + + let + signal1 = ThreadSignalPtr.new().tryGet() + signal2 = ThreadSignalPtr.new().tryGet() + retval = ThreadResultPtr.new() + arg = ThreadArg2(signal1: signal1, signal2: signal2, retval: retval) + var thread: Thread[ThreadArg2] + + case waitFlag + of WaitSendKind.Sync: + createThread(thread, testSyncThread, arg) + of WaitSendKind.Async: + createThread(thread, testAsyncThread, arg) + + let start = Moment.now() + for i in 0 ..< testsCount: + case sendFlag + of WaitSendKind.Sync: + block: + let res = signal1.fireSync() + check res.isOk() + block: + let res = waitSync(arg.signal2, 1500.milliseconds) + check: + res.isOk() + res.get() == true + of WaitSendKind.Async: + await arg.signal1.fire() + 
await wait(arg.signal2).wait(1500.milliseconds) + joinThreads(thread) + let finish = Moment.now() + let perf = (float64(nanoseconds(1.seconds)) / + float64(nanoseconds(finish - start))) * float64(testsCount) + echo "Switches tested: ", testsCount, ", elapsed time: ", (finish - start), + ", performance = ", formatFloat(perf, ffDecimal, 4), + " switches/second" + + check: + arg.retval[].value == testsCount + + template threadSignalTest3(testsCount: int, + sendFlag, waitFlag: WaitSendKind) = + proc testSyncThread(arg: ThreadArg3) {.thread.} = + withLock(arg.lock[]): + let res = waitSync(arg.signal, 10.milliseconds) + if res.isErr(): + arg.retval.setResult(1) + else: + if res.get(): + arg.retval.setResult(2) + else: + arg.retval.setResult(3) + + proc testAsyncThread(arg: ThreadArg3) {.thread.} = + proc testAsyncCode(arg: ThreadArg3) {.async.} = + withLock(arg.lock[]): + try: + await wait(arg.signal).wait(10.milliseconds) + arg.retval.setResult(2) + except AsyncTimeoutError: + arg.retval.setResult(3) + except CatchableError: + arg.retval.setResult(1) + + waitFor testAsyncCode(arg) + + let signal = ThreadSignalPtr.new().tryGet() + var args: seq[ThreadArg3] + var threads = newSeq[Thread[ThreadArg3]](numProcs) + var lockPtr = cast[LockPtr](allocShared0(sizeof(Lock))) + initLock(lockPtr[]) + acquire(lockPtr[]) + + for i in 0 ..< numProcs: + let + res = ThreadResultPtr.new() + arg = ThreadArg3(signal: signal, retval: res, index: i, lock: lockPtr) + args.add(arg) + case waitFlag + of WaitSendKind.Sync: + createThread(threads[i], testSyncThread, arg) + of WaitSendKind.Async: + createThread(threads[i], testAsyncThread, arg) + + await sleepAsync(500.milliseconds) + case sendFlag + of WaitSendKind.Sync: + for i in 0 ..< testsCount: + check signal.fireSync().isOk() + of WaitSendKind.Async: + for i in 0 ..< testsCount: + await signal.fire() + + release(lockPtr[]) + joinThreads(threads) + deinitLock(lockPtr[]) + deallocShared(lockPtr) + + var ncheck: array[3, int] + for item in args: + if item.retval[].value == 1: + inc(ncheck[0]) + elif item.retval[].value == 2: + inc(ncheck[1]) + elif item.retval[].value == 3: + inc(ncheck[2]) + free(item.retval) + check: + signal.close().isOk() + ncheck[0] == 0 + ncheck[1] == 1 + ncheck[2] == numProcs - 1 + + template threadSignalTest4(testsCount: int, + sendFlag, waitFlag: WaitSendKind) = + let signal = ThreadSignalPtr.new().tryGet() + let start = Moment.now() + for i in 0 ..< testsCount: + case sendFlag + of WaitSendKind.Sync: + check signal.fireSync().isOk() + of WaitSendKind.Async: + await signal.fire() + + case waitFlag + of WaitSendKind.Sync: + check waitSync(signal).isOk() + of WaitSendKind.Async: + await wait(signal) + let finish = Moment.now() + let perf = (float64(nanoseconds(1.seconds)) / + float64(nanoseconds(finish - start))) * float64(testsCount) + echo "Switches tested: ", testsCount, ", elapsed time: ", (finish - start), + ", performance = ", formatFloat(perf, ffDecimal, 4), + " switches/second" + + check: + signal.close.isOk() + + asyncTest "ThreadSignal: Multiple [" & $numProcs & + "] threads waiting test [sync -> sync]": + threadSignalTest(WaitSendKind.Sync, WaitSendKind.Sync) + + asyncTest "ThreadSignal: Multiple [" & $numProcs & + "] threads waiting test [async -> async]": + threadSignalTest(WaitSendKind.Async, WaitSendKind.Async) + + asyncTest "ThreadSignal: Multiple [" & $numProcs & + "] threads waiting test [async -> sync]": + threadSignalTest(WaitSendKind.Async, WaitSendKind.Sync) + + asyncTest "ThreadSignal: Multiple [" & $numProcs & + "] 
threads waiting test [sync -> async]": + threadSignalTest(WaitSendKind.Sync, WaitSendKind.Async) + + asyncTest "ThreadSignal: Multiple thread switches [" & $TestsCount & + "] test [sync -> sync]": + threadSignalTest2(TestsCount, WaitSendKind.Sync, WaitSendKind.Sync) + + asyncTest "ThreadSignal: Multiple thread switches [" & $TestsCount & + "] test [async -> async]": + threadSignalTest2(TestsCount, WaitSendKind.Async, WaitSendKind.Async) + + asyncTest "ThreadSignal: Multiple thread switches [" & $TestsCount & + "] test [sync -> async]": + threadSignalTest2(TestsCount, WaitSendKind.Sync, WaitSendKind.Async) + + asyncTest "ThreadSignal: Multiple thread switches [" & $TestsCount & + "] test [async -> sync]": + threadSignalTest2(TestsCount, WaitSendKind.Async, WaitSendKind.Sync) + + asyncTest "ThreadSignal: Multiple signals [" & $TestsCount & + "] to multiple threads [" & $numProcs & "] test [sync -> sync]": + threadSignalTest3(TestsCount, WaitSendKind.Sync, WaitSendKind.Sync) + + asyncTest "ThreadSignal: Multiple signals [" & $TestsCount & + "] to multiple threads [" & $numProcs & "] test [async -> async]": + threadSignalTest3(TestsCount, WaitSendKind.Async, WaitSendKind.Async) + + asyncTest "ThreadSignal: Multiple signals [" & $TestsCount & + "] to multiple threads [" & $numProcs & "] test [sync -> async]": + threadSignalTest3(TestsCount, WaitSendKind.Sync, WaitSendKind.Async) + + asyncTest "ThreadSignal: Multiple signals [" & $TestsCount & + "] to multiple threads [" & $numProcs & "] test [async -> sync]": + threadSignalTest3(TestsCount, WaitSendKind.Async, WaitSendKind.Sync) + + asyncTest "ThreadSignal: Single threaded switches [" & $TestsCount & + "] test [sync -> sync]": + threadSignalTest4(TestsCount, WaitSendKind.Sync, WaitSendKind.Sync) + + asyncTest "ThreadSignal: Single threaded switches [" & $TestsCount & + "] test [sync -> sync]": + threadSignalTest4(TestsCount, WaitSendKind.Async, WaitSendKind.Async) + + asyncTest "ThreadSignal: Single threaded switches [" & $TestsCount & + "] test [sync -> async]": + threadSignalTest4(TestsCount, WaitSendKind.Sync, WaitSendKind.Async) + + asyncTest "ThreadSignal: Single threaded switches [" & $TestsCount & + "] test [async -> sync]": + threadSignalTest4(TestsCount, WaitSendKind.Async, WaitSendKind.Sync) From f91ac169dc43c015b8631f33f1eb51623581b251 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sun, 23 Jul 2023 19:40:57 +0300 Subject: [PATCH 047/146] Fix `NoVerifyServerName` do not actually disables SNI extension. (#423) Fix HTTP client SSL/TLS error information is now part of connection error exception. --- chronos/apps/http/httpclient.nim | 15 ++++++++++++--- chronos/streams/tlsstream.nim | 33 ++++++++++++++++++-------------- 2 files changed, 31 insertions(+), 17 deletions(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 6e9ea0cae..9c7988948 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -523,7 +523,7 @@ proc connect(session: HttpSessionRef, ha: HttpAddress): Future[HttpClientConnectionRef] {.async.} = ## Establish new connection with remote server using ``url`` and ``flags``. ## On success returns ``HttpClientConnectionRef`` object. - + var lastError = "" # Here we trying to connect to every possible remote host address we got after # DNS resolution. 
for address in ha.addresses: @@ -547,9 +547,14 @@ proc connect(session: HttpSessionRef, except CancelledError as exc: await res.closeWait() raise exc - except AsyncStreamError: + except TLSStreamProtocolError as exc: + await res.closeWait() + res.state = HttpClientConnectionState.Error + lastError = $exc.msg + except AsyncStreamError as exc: await res.closeWait() res.state = HttpClientConnectionState.Error + lastError = $exc.msg of HttpClientScheme.Nonsecure: res.state = HttpClientConnectionState.Ready res @@ -557,7 +562,11 @@ proc connect(session: HttpSessionRef, return conn # If all attempts to connect to the remote host have failed. - raiseHttpConnectionError("Could not connect to remote host") + if len(lastError) > 0: + raiseHttpConnectionError("Could not connect to remote host, reason: " & + lastError) + else: + raiseHttpConnectionError("Could not connect to remote host") proc removeConnection(session: HttpSessionRef, conn: HttpClientConnectionRef) {.async.} = diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index ceacaff76..2999f7af6 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -95,6 +95,7 @@ type trustAnchors: TrustAnchorStore SomeTLSStreamType* = TLSStreamReader|TLSStreamWriter|TLSAsyncStream + SomeTrustAnchorType* = TrustAnchorStore | openArray[X509TrustAnchor] TLSStreamError* = object of AsyncStreamError TLSStreamHandshakeError* = object of TLSStreamError @@ -139,12 +140,14 @@ proc newTLSStreamProtocolError[T](message: T): ref TLSStreamProtocolError = proc raiseTLSStreamProtocolError[T](message: T) {.noreturn, noinline.} = raise newTLSStreamProtocolImpl(message) -proc new*(T: typedesc[TrustAnchorStore], anchors: openArray[X509TrustAnchor]): TrustAnchorStore = +proc new*(T: typedesc[TrustAnchorStore], + anchors: openArray[X509TrustAnchor]): TrustAnchorStore = var res: seq[X509TrustAnchor] for anchor in anchors: res.add(anchor) - doAssert(unsafeAddr(anchor) != unsafeAddr(res[^1]), "Anchors should be copied") - return TrustAnchorStore(anchors: res) + doAssert(unsafeAddr(anchor) != unsafeAddr(res[^1]), + "Anchors should be copied") + TrustAnchorStore(anchors: res) proc tlsWriteRec(engine: ptr SslEngineContext, writer: TLSStreamWriter): Future[TLSResult] {.async.} = @@ -453,15 +456,16 @@ proc getSignerAlgo(xc: X509Certificate): int = else: int(x509DecoderGetSignerKeyType(dc)) -proc newTLSClientAsyncStream*(rsource: AsyncStreamReader, - wsource: AsyncStreamWriter, - serverName: string, - bufferSize = SSL_BUFSIZE_BIDI, - minVersion = TLSVersion.TLS12, - maxVersion = TLSVersion.TLS12, - flags: set[TLSFlags] = {}, - trustAnchors: TrustAnchorStore | openArray[X509TrustAnchor] = MozillaTrustAnchors - ): TLSAsyncStream = +proc newTLSClientAsyncStream*( + rsource: AsyncStreamReader, + wsource: AsyncStreamWriter, + serverName: string, + bufferSize = SSL_BUFSIZE_BIDI, + minVersion = TLSVersion.TLS12, + maxVersion = TLSVersion.TLS12, + flags: set[TLSFlags] = {}, + trustAnchors: SomeTrustAnchorType = MozillaTrustAnchors + ): TLSAsyncStream = ## Create new TLS asynchronous stream for outbound (client) connections ## using reading stream ``rsource`` and writing stream ``wsource``. ## @@ -484,7 +488,8 @@ proc newTLSClientAsyncStream*(rsource: AsyncStreamReader, ## a ``TrustAnchorStore`` you should reuse the same instance for ## every call to avoid making a copy of the trust anchors per call. 
when trustAnchors is TrustAnchorStore: - doAssert(len(trustAnchors.anchors) > 0, "Empty trust anchor list is invalid") + doAssert(len(trustAnchors.anchors) > 0, + "Empty trust anchor list is invalid") else: doAssert(len(trustAnchors) > 0, "Empty trust anchor list is invalid") var res = TLSAsyncStream() @@ -524,7 +529,7 @@ proc newTLSClientAsyncStream*(rsource: AsyncStreamReader, uint16(maxVersion)) if TLSFlags.NoVerifyServerName in flags: - let err = sslClientReset(res.ccontext, "", 0) + let err = sslClientReset(res.ccontext, nil, 0) if err == 0: raise newException(TLSStreamInitError, "Could not initialize TLS layer") else: From 53e9f75735464ea196cfc2b92b8bb98fe59ca693 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 28 Jul 2023 11:54:53 +0300 Subject: [PATCH 048/146] Add some helpers for asyncproc. (#424) * Initial commit. * Adjust posix tests. * Fix compilation issue. * Attempt to fix flaky addProcess() test. --- chronos/asyncproc.nim | 102 +++++++++++++++++++++++++++++++++++++----- tests/testproc.bat | 6 +++ tests/testproc.nim | 52 ++++++++++++++++++++- tests/testproc.sh | 8 ++++ 4 files changed, 157 insertions(+), 11 deletions(-) diff --git a/chronos/asyncproc.nim b/chronos/asyncproc.nim index 8d0cdb790..8df8e33e5 100644 --- a/chronos/asyncproc.nim +++ b/chronos/asyncproc.nim @@ -24,7 +24,8 @@ const ## AsyncProcess leaks tracker name type - AsyncProcessError* = object of CatchableError + AsyncProcessError* = object of AsyncError + AsyncProcessTimeoutError* = object of AsyncProcessError AsyncProcessResult*[T] = Result[T, OSErrorCode] @@ -107,6 +108,9 @@ type stdError*: string status*: int + WaitOperation {.pure.} = enum + Kill, Terminate + template Pipe*(t: typedesc[AsyncProcess]): ProcessStreamHandle = ProcessStreamHandle(kind: ProcessStreamHandleKind.Auto) @@ -294,6 +298,11 @@ proc raiseAsyncProcessError(msg: string, exc: ref CatchableError = nil) {. msg & " ([" & $exc.name & "]: " & $exc.msg & ")" raise newException(AsyncProcessError, message) +proc raiseAsyncProcessTimeoutError() {. + noreturn, noinit, noinline, raises: [AsyncProcessTimeoutError].} = + let message = "Operation timed out" + raise newException(AsyncProcessTimeoutError, message) + proc raiseAsyncProcessError(msg: string, error: OSErrorCode|cint) {. noreturn, noinit, noinline, raises: [AsyncProcessError].} = when error is OSErrorCode: @@ -1189,6 +1198,45 @@ proc closeProcessStreams(pipes: AsyncProcessPipes, res allFutures(pending) +proc opAndWaitForExit(p: AsyncProcessRef, op: WaitOperation, + timeout = InfiniteDuration): Future[int] {.async.} = + let timerFut = + if timeout == InfiniteDuration: + newFuture[void]("chronos.killAndwaitForExit") + else: + sleepAsync(timeout) + + while true: + if p.running().get(true): + # We ignore operation errors because we going to repeat calling + # operation until process will not exit. 
+ case op + of WaitOperation.Kill: + discard p.kill() + of WaitOperation.Terminate: + discard p.terminate() + else: + let exitCode = p.peekExitCode().valueOr: + raiseAsyncProcessError("Unable to peek process exit code", error) + if not(timerFut.finished()): + await cancelAndWait(timerFut) + return exitCode + + let waitFut = p.waitForExit().wait(100.milliseconds) + discard await race(FutureBase(waitFut), FutureBase(timerFut)) + + if waitFut.finished() and not(waitFut.failed()): + let res = p.peekExitCode() + if res.isOk(): + if not(timerFut.finished()): + await cancelAndWait(timerFut) + return res.get() + + if timerFut.finished(): + if not(waitFut.finished()): + await waitFut.cancelAndWait() + raiseAsyncProcessTimeoutError() + proc closeWait*(p: AsyncProcessRef) {.async.} = # Here we ignore all possible errrors, because we do not want to raise # exceptions. @@ -1216,14 +1264,15 @@ proc execCommand*(command: string, options = {AsyncProcessOption.EvalCommand}, timeout = InfiniteDuration ): Future[int] {.async.} = - let poptions = options + {AsyncProcessOption.EvalCommand} - let process = await startProcess(command, options = poptions) - let res = - try: - await process.waitForExit(timeout) - finally: - await process.closeWait() - return res + let + poptions = options + {AsyncProcessOption.EvalCommand} + process = await startProcess(command, options = poptions) + res = + try: + await process.waitForExit(timeout) + finally: + await process.closeWait() + res proc execCommandEx*(command: string, options = {AsyncProcessOption.EvalCommand}, @@ -1256,10 +1305,43 @@ proc execCommandEx*(command: string, finally: await process.closeWait() - return res + res proc pid*(p: AsyncProcessRef): int = ## Returns process ``p`` identifier. int(p.processId) template processId*(p: AsyncProcessRef): int = pid(p) + +proc killAndWaitForExit*(p: AsyncProcessRef, + timeout = InfiniteDuration): Future[int] = + ## Perform continuous attempts to kill the ``p`` process for specified period + ## of time ``timeout``. + ## + ## On Posix systems, killing means sending ``SIGKILL`` to the process ``p``, + ## On Windows, it uses ``TerminateProcess`` to kill the process ``p``. + ## + ## If the process ``p`` fails to be killed within the ``timeout`` time, it + ## will raise ``AsyncProcessTimeoutError``. + ## + ## In case of error this it will raise ``AsyncProcessError``. + ## + ## Returns process ``p`` exit code. + opAndWaitForExit(p, WaitOperation.Kill, timeout) + +proc terminateAndWaitForExit*(p: AsyncProcessRef, + timeout = InfiniteDuration): Future[int] = + ## Perform continuous attempts to terminate the ``p`` process for specified + ## period of time ``timeout``. + ## + ## On Posix systems, terminating means sending ``SIGTERM`` to the process + ## ``p``, on Windows, it uses ``TerminateProcess`` to terminate the process + ## ``p``. + ## + ## If the process ``p`` fails to be terminated within the ``timeout`` time, it + ## will raise ``AsyncProcessTimeoutError``. + ## + ## In case of error this it will raise ``AsyncProcessError``. + ## + ## Returns process ``p`` exit code. 
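+  ##
+  ## A minimal usage sketch, mirroring the pattern used in the test suite
+  ## (command and timeouts are illustrative):
+  ##
+  ## .. code-block:: nim
+  ##   let p = await startProcess("tests/testproc.sh",
+  ##                              arguments = @["noterm"])
+  ##   try:
+  ##     discard await p.terminateAndWaitForExit(1.seconds)
+  ##   except AsyncProcessTimeoutError:
+  ##     # The process ignored SIGTERM, escalate to SIGKILL.
+  ##     discard await p.killAndWaitForExit(10.seconds)
+  ##   await p.closeWait()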
+ opAndWaitForExit(p, WaitOperation.Terminate, timeout) diff --git a/tests/testproc.bat b/tests/testproc.bat index 314bea731..11b4047ee 100644 --- a/tests/testproc.bat +++ b/tests/testproc.bat @@ -2,6 +2,8 @@ IF /I "%1" == "STDIN" ( GOTO :STDINTEST +) ELSE IF /I "%1" == "TIMEOUT1" ( + GOTO :TIMEOUTTEST1 ) ELSE IF /I "%1" == "TIMEOUT2" ( GOTO :TIMEOUTTEST2 ) ELSE IF /I "%1" == "TIMEOUT10" ( @@ -19,6 +21,10 @@ SET /P "INPUTDATA=" ECHO STDIN DATA: %INPUTDATA% EXIT 0 +:TIMEOUTTEST1 +ping -n 1 127.0.0.1 > NUL +EXIT 1 + :TIMEOUTTEST2 ping -n 2 127.0.0.1 > NUL EXIT 2 diff --git a/tests/testproc.nim b/tests/testproc.nim index b038325a1..cfcafe6b7 100644 --- a/tests/testproc.nim +++ b/tests/testproc.nim @@ -96,7 +96,11 @@ suite "Asynchronous process management test suite": let options = {AsyncProcessOption.EvalCommand} - command = "exit 1" + command = + when defined(windows): + "tests\\testproc.bat timeout1" + else: + "tests/testproc.sh timeout1" process = await startProcess(command, options = options) @@ -407,6 +411,52 @@ suite "Asynchronous process management test suite": finally: await process.closeWait() + asyncTest "killAndWaitForExit() test": + let command = + when defined(windows): + ("tests\\testproc.bat", "timeout10", 0) + else: + ("tests/testproc.sh", "timeout10", 128 + int(SIGKILL)) + let process = await startProcess(command[0], arguments = @[command[1]]) + try: + let exitCode = await process.killAndWaitForExit(10.seconds) + check exitCode == command[2] + finally: + await process.closeWait() + + asyncTest "terminateAndWaitForExit() test": + let command = + when defined(windows): + ("tests\\testproc.bat", "timeout10", 0) + else: + ("tests/testproc.sh", "timeout10", 128 + int(SIGTERM)) + let process = await startProcess(command[0], arguments = @[command[1]]) + try: + let exitCode = await process.terminateAndWaitForExit(10.seconds) + check exitCode == command[2] + finally: + await process.closeWait() + + asyncTest "terminateAndWaitForExit() timeout test": + when defined(windows): + skip() + else: + let + command = ("tests/testproc.sh", "noterm", 128 + int(SIGKILL)) + process = await startProcess(command[0], arguments = @[command[1]]) + # We should wait here to allow `bash` execute `trap` command, otherwise + # our test script will be killed with SIGTERM. Increase this timeout + # if test become flaky. + await sleepAsync(1.seconds) + try: + expect AsyncProcessTimeoutError: + let exitCode {.used.} = + await process.terminateAndWaitForExit(1.seconds) + let exitCode = await process.killAndWaitForExit(10.seconds) + check exitCode == command[2] + finally: + await process.closeWait() + test "File descriptors leaks test": when defined(windows): skip() diff --git a/tests/testproc.sh b/tests/testproc.sh index 1725d49d3..c5e7e0ac2 100755 --- a/tests/testproc.sh +++ b/tests/testproc.sh @@ -3,6 +3,9 @@ if [ "$1" == "stdin" ]; then read -r inputdata echo "STDIN DATA: $inputdata" +elif [ "$1" == "timeout1" ]; then + sleep 1 + exit 1 elif [ "$1" == "timeout2" ]; then sleep 2 exit 2 @@ -15,6 +18,11 @@ elif [ "$1" == "bigdata" ]; then done elif [ "$1" == "envtest" ]; then echo "$CHRONOSASYNC" +elif [ "$1" == "noterm" ]; then + trap -- '' SIGTERM + while true; do + sleep 1 + done else echo "arguments missing" fi From 926956bcbee5f4f49124f1f2215530a2d93bac96 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sun, 30 Jul 2023 12:43:25 +0300 Subject: [PATCH 049/146] Add time used to establish HTTP client connection. 
(#427) --- chronos/apps/http/httpclient.nim | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 9c7988948..63ffc37b2 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -108,6 +108,7 @@ type remoteHostname*: string flags*: set[HttpClientConnectionFlag] timestamp*: Moment + duration*: Duration HttpClientConnectionRef* = ref HttpClientConnection @@ -233,6 +234,12 @@ template setDuration( reqresp.duration = timestamp - reqresp.timestamp reqresp.connection.setTimestamp(timestamp) +template setDuration(conn: HttpClientConnectionRef): untyped = + if not(isNil(conn)): + let timestamp = Moment.now() + conn.duration = timestamp - conn.timestamp + conn.setTimestamp(timestamp) + template isReady(conn: HttpClientConnectionRef): bool = (conn.state == HttpClientConnectionState.Ready) and (HttpClientConnectionFlag.KeepAlive in conn.flags) and @@ -596,9 +603,9 @@ proc acquireConnection( ): Future[HttpClientConnectionRef] {.async.} = ## Obtain connection from ``session`` or establish a new one. var default: seq[HttpClientConnectionRef] + let timestamp = Moment.now() if session.connectionPoolEnabled(flags): # Trying to reuse existing connection from our connection's pool. - let timestamp = Moment.now() # We looking for non-idle connection at `Ready` state, all idle connections # will be freed by sessionWatcher(). for connection in session.connections.getOrDefault(ha.id): @@ -615,6 +622,8 @@ proc acquireConnection( connection.state = HttpClientConnectionState.Acquired session.connections.mgetOrPut(ha.id, default).add(connection) inc(session.connectionsCount) + connection.setTimestamp(timestamp) + connection.setDuration() return connection proc releaseConnection(session: HttpSessionRef, From d214bcfb4f4995bcb9a66b17893a6d32a5e138ca Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 31 Jul 2023 22:40:00 +0300 Subject: [PATCH 050/146] Increase backlog defaults to maximum possible values. (#428) --- chronos/apps/http/httpserver.nim | 2 +- chronos/apps/http/shttpserver.nim | 2 +- chronos/transports/stream.nim | 34 +++++++++++++++++++++++++------ 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index b86c0b3d1..c1e45c03e 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -187,7 +187,7 @@ proc new*(htype: typedesc[HttpServerRef], serverIdent = "", maxConnections: int = -1, bufferSize: int = 4096, - backlogSize: int = 100, + backlogSize: int = DefaultBacklogSize, httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, maxRequestBodySize: int = 1_048_576): HttpResult[HttpServerRef] {. 
diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim index 927ca6290..b993cb5fb 100644 --- a/chronos/apps/http/shttpserver.nim +++ b/chronos/apps/http/shttpserver.nim @@ -90,7 +90,7 @@ proc new*(htype: typedesc[SecureHttpServerRef], secureFlags: set[TLSFlags] = {}, maxConnections: int = -1, bufferSize: int = 4096, - backlogSize: int = 100, + backlogSize: int = DefaultBacklogSize, httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, maxRequestBodySize: int = 1_048_576 diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 257c47534..45e4054e0 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -61,6 +61,7 @@ type const StreamTransportTrackerName* = "stream.transport" StreamServerTrackerName* = "stream.server" + DefaultBacklogSize* = high(int32) when defined(windows): type @@ -1819,11 +1820,32 @@ proc closeWait*(server: StreamServer): Future[void] = server.close() server.join() +proc getBacklogSize(backlog: int): cint = + doAssert(backlog >= 0 and backlog <= high(int32)) + when defined(windows): + # The maximum length of the queue of pending connections. If set to + # SOMAXCONN, the underlying service provider responsible for + # socket s will set the backlog to a maximum reasonable value. If set to + # SOMAXCONN_HINT(N) (where N is a number), the backlog value will be N, + # adjusted to be within the range (200, 65535). Note that SOMAXCONN_HINT + # can be used to set the backlog to a larger value than possible with + # SOMAXCONN. + # + # Microsoft SDK values are + # #define SOMAXCONN 0x7fffffff + # #define SOMAXCONN_HINT(b) (-(b)) + if backlog != high(int32): + cint(-backlog) + else: + cint(backlog) + else: + cint(backlog) + proc createStreamServer*(host: TransportAddress, cbproc: StreamCallback, flags: set[ServerFlags] = {}, sock: AsyncFD = asyncInvalidSocket, - backlog: int = 100, + backlog: int = DefaultBacklogSize, bufferSize: int = DefaultStreamBufferSize, child: StreamServer = nil, init: TransportInitCallback = nil, @@ -1906,7 +1928,7 @@ proc createStreamServer*(host: TransportAddress, raiseTransportOsError(err) fromSAddr(addr saddr, slen, localAddress) - if listen(SocketHandle(serverSocket), cint(backlog)) != 0: + if listen(SocketHandle(serverSocket), getBacklogSize(backlog)) != 0: let err = osLastError() if sock == asyncInvalidSocket: discard closeFd(SocketHandle(serverSocket)) @@ -1980,7 +2002,7 @@ proc createStreamServer*(host: TransportAddress, raiseTransportOsError(err) fromSAddr(addr saddr, slen, localAddress) - if listen(SocketHandle(serverSocket), cint(backlog)) != 0: + if listen(SocketHandle(serverSocket), getBacklogSize(backlog)) != 0: let err = osLastError() if sock == asyncInvalidSocket: discard unregisterAndCloseFd(serverSocket) @@ -2031,7 +2053,7 @@ proc createStreamServer*(host: TransportAddress, proc createStreamServer*(host: TransportAddress, flags: set[ServerFlags] = {}, sock: AsyncFD = asyncInvalidSocket, - backlog: int = 100, + backlog: int = DefaultBacklogSize, bufferSize: int = DefaultStreamBufferSize, child: StreamServer = nil, init: TransportInitCallback = nil, @@ -2045,7 +2067,7 @@ proc createStreamServer*[T](host: TransportAddress, flags: set[ServerFlags] = {}, udata: ref T, sock: AsyncFD = asyncInvalidSocket, - backlog: int = 100, + backlog: int = DefaultBacklogSize, bufferSize: int = DefaultStreamBufferSize, child: StreamServer = nil, init: TransportInitCallback = nil): StreamServer {. 
@@ -2059,7 +2081,7 @@ proc createStreamServer*[T](host: TransportAddress, flags: set[ServerFlags] = {}, udata: ref T, sock: AsyncFD = asyncInvalidSocket, - backlog: int = 100, + backlog: int = DefaultBacklogSize, bufferSize: int = DefaultStreamBufferSize, child: StreamServer = nil, init: TransportInitCallback = nil): StreamServer {. From 5c39bf47bea3c7f98c11c707475ebfdb52438cef Mon Sep 17 00:00:00 2001 From: rockcavera Date: Mon, 31 Jul 2023 19:28:34 -0300 Subject: [PATCH 051/146] fixing unfreed memory leak with `freeAddrInfo()` (#425) * fixing unfreed memory leak with `freeAddrInfo()` * `freeaddrinfo` to `freeAddrInfo()` --- chronos/osdefs.nim | 14 +++++++------- chronos/transports/common.nim | 4 ++++ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index ecf770b81..bf5c06058 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -708,7 +708,7 @@ when defined(windows): res: var ptr AddrInfo): cint {. stdcall, dynlib: "ws2_32", importc: "getaddrinfo", sideEffect.} - proc freeaddrinfo*(ai: ptr AddrInfo) {. + proc freeAddrInfo*(ai: ptr AddrInfo) {. stdcall, dynlib: "ws2_32", importc: "freeaddrinfo", sideEffect.} proc createIoCompletionPort*(fileHandle: HANDLE, @@ -880,7 +880,7 @@ elif defined(macos) or defined(macosx): sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, + socketpair, freeAddrInfo, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, @@ -905,7 +905,7 @@ elif defined(macos) or defined(macosx): sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, + socketpair, freeAddrInfo, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, @@ -947,7 +947,7 @@ elif defined(linux): unlink, listen, sendmsg, recvmsg, getpid, fcntl, pthread_sigmask, sigprocmask, clock_gettime, signal, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, + socketpair, freeAddrInfo, ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, SigInfo, Id, Tmsghdr, IOVec, RLimit, Timeval, TFdSet, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, @@ -974,7 +974,7 @@ elif defined(linux): unlink, listen, sendmsg, recvmsg, getpid, fcntl, pthread_sigmask, sigprocmask, clock_gettime, signal, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, + socketpair, freeAddrInfo, ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, SigInfo, Id, Tmsghdr, IOVec, RLimit, TFdSet, Timeval, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, @@ -1097,7 +1097,7 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, clock_gettime, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, + socketpair, freeAddrInfo, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, @@ -1123,7 +1123,7 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, clock_gettime, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, + socketpair, 
freeAddrInfo, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index 5a9072cbd..4b4be7de3 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -298,6 +298,9 @@ proc getAddrInfo(address: string, port: Port, domain: Domain, raises: [TransportAddressError].} = ## We have this one copy of ``getAddrInfo()`` because of AI_V4MAPPED in ## ``net.nim:getAddrInfo()``, which is not cross-platform. + ## + ## Warning: `ptr AddrInfo` returned by `getAddrInfo()` needs to be freed by + ## calling `freeAddrInfo()`. var hints: AddrInfo var res: ptr AddrInfo = nil hints.ai_family = toInt(domain) @@ -420,6 +423,7 @@ proc resolveTAddress*(address: string, port: Port, if ta notin res: res.add(ta) it = it.ai_next + freeAddrInfo(aiList) res proc resolveTAddress*(address: string, domain: Domain): seq[TransportAddress] {. From 6b4f5a1d23b1583b2b0ccee409e2e7c6dc6fff93 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 1 Aug 2023 12:56:08 +0300 Subject: [PATCH 052/146] Recover `poll` engine and add tests. (#421) * Initial commit. * Fix one more place with deprecated constant. * Fix testall and nimble file. * Fix poll issue. * Workaround Nim's faulty declaration of `poll()` and types on MacOS. * Fix syntax errors. * Fix MacOS post-rebase issue. * Add more conditionals. * Address review comments. * Fix Nim 1.2 configuration defaults. --- chronos.nim | 5 ++- chronos.nimble | 23 +++++++++---- chronos/asyncloop.nim | 13 +++----- chronos/config.nim | 39 ++++++++++++++++++++++ chronos/ioselects/ioselectors_epoll.nim | 8 ++--- chronos/ioselects/ioselectors_kqueue.nim | 8 ++--- chronos/ioselects/ioselectors_poll.nim | 14 ++++---- chronos/osdefs.nim | 42 ++++++++++++++++++------ chronos/selectors2.nim | 34 +++++-------------- chronos/sendfile.nim | 6 +++- tests/testall.nim | 24 ++++++++++---- tests/testproc.nim | 1 + tests/teststream.nim | 5 ++- 13 files changed, 146 insertions(+), 76 deletions(-) diff --git a/chronos.nim b/chronos.nim index 6801b2894..8295924dd 100644 --- a/chronos.nim +++ b/chronos.nim @@ -5,6 +5,5 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import chronos/[asyncloop, asyncsync, handles, transport, timer, - asyncproc, debugutils] -export asyncloop, asyncsync, handles, transport, timer, asyncproc, debugutils +import chronos/[asyncloop, asyncsync, handles, transport, timer, debugutils] +export asyncloop, asyncsync, handles, transport, timer, debugutils diff --git a/chronos.nimble b/chronos.nimble index 6b4ac58a8..e9c1b11db 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -17,6 +17,22 @@ let nimc = getEnv("NIMC", "nim") # Which nim compiler to use let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js) let flags = getEnv("NIMFLAGS", "") # Extra flags for the compiler let verbose = getEnv("V", "") notin ["", "0"] +let testArguments = + when defined(windows): + [ + "-d:debug -d:chronosDebug -d:useSysAssert -d:useGcAssert", + "-d:debug -d:chronosPreviewV4", + "-d:release", + "-d:release -d:chronosPreviewV4" + ] + else: + [ + "-d:debug -d:chronosDebug -d:useSysAssert -d:useGcAssert", + "-d:debug -d:chronosPreviewV4", + "-d:debug -d:chronosDebug -d:chronosEventEngine=poll -d:useSysAssert -d:useGcAssert", + "-d:release", + "-d:release -d:chronosPreviewV4" + ] let styleCheckStyle = if (NimMajor, NimMinor) < (1, 6): "hint" else: 
"error" let cfg = @@ -31,12 +47,7 @@ proc run(args, path: string) = build args & " -r", path task test, "Run all tests": - for args in [ - "-d:debug -d:chronosDebug", - "-d:debug -d:chronosPreviewV4", - "-d:debug -d:chronosDebug -d:useSysAssert -d:useGcAssert", - "-d:release", - "-d:release -d:chronosPreviewV4"]: + for args in testArguments: run args, "tests/testall" if (NimMajor, NimMinor) > (1, 6): run args & " --mm:refc", "tests/testall" diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index 9d5ac23ee..a644b778c 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -825,9 +825,9 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or var res = PDispatcher( selector: selector, timers: initHeapQueue[TimerCallback](), - callbacks: initDeque[AsyncCallback](asyncEventsCount), + callbacks: initDeque[AsyncCallback](chronosEventsCount), idlers: initDeque[AsyncCallback](), - keys: newSeq[ReadyKey](asyncEventsCount), + keys: newSeq[ReadyKey](chronosEventsCount), trackers: initTable[string, TrackerBase](), counters: initTable[string, TrackerCounter]() ) @@ -1009,7 +1009,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or ## You can execute ``aftercb`` before actual socket close operation. closeSocket(fd, aftercb) - when asyncEventEngine in ["epoll", "kqueue"]: + when chronosEventEngine in ["epoll", "kqueue"]: type ProcessHandle* = distinct int SignalHandle* = distinct int @@ -1123,7 +1123,7 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or if not isNil(adata.reader.function): loop.callbacks.addLast(adata.reader) - when asyncEventEngine in ["epoll", "kqueue"]: + when chronosEventEngine in ["epoll", "kqueue"]: let customSet = {Event.Timer, Event.Signal, Event.Process, Event.Vnode} if customSet * events != {}: @@ -1257,10 +1257,7 @@ proc callIdle*(cbproc: CallbackFunc) = include asyncfutures2 - -when defined(macosx) or defined(macos) or defined(freebsd) or - defined(netbsd) or defined(openbsd) or defined(dragonfly) or - defined(linux) or defined(windows): +when (chronosEventEngine in ["epoll", "kqueue"]) or defined(windows): proc waitSignal*(signal: int): Future[void] {.raises: [].} = var retFuture = newFuture[void]("chronos.waitSignal()") diff --git a/chronos/config.nim b/chronos/config.nim index 0a439a12a..bd6c2b9d1 100644 --- a/chronos/config.nim +++ b/chronos/config.nim @@ -49,6 +49,27 @@ when (NimMajor, NimMinor) >= (1, 4): ## using `AsyncProcessOption.EvalCommand` and API calls such as ## ``execCommand(command)`` and ``execCommandEx(command)``. + chronosEventsCount* {.intdefine.} = 64 + ## Number of OS poll events retrieved by syscall (epoll, kqueue, poll). + + chronosInitialSize* {.intdefine.} = 64 + ## Initial size of Selector[T]'s array of file descriptors. + + chronosEventEngine* {.strdefine.}: string = + when defined(linux) and not(defined(android) or defined(emscripten)): + "epoll" + elif defined(macosx) or defined(macos) or defined(ios) or + defined(freebsd) or defined(netbsd) or defined(openbsd) or + defined(dragonfly): + "kqueue" + elif defined(android) or defined(emscripten): + "poll" + elif defined(posix): + "poll" + else: + "" + ## OS polling engine type which is going to be used by chronos. 
+ else: # 1.2 doesn't support `booldefine` in `when` properly const @@ -69,6 +90,21 @@ else: "/system/bin/sh" else: "/bin/sh" + chronosEventsCount*: int = 64 + chronosInitialSize*: int = 64 + chronosEventEngine* {.strdefine.}: string = + when defined(linux) and not(defined(android) or defined(emscripten)): + "epoll" + elif defined(macosx) or defined(macos) or defined(ios) or + defined(freebsd) or defined(netbsd) or defined(openbsd) or + defined(dragonfly): + "kqueue" + elif defined(android) or defined(emscripten): + "poll" + elif defined(posix): + "poll" + else: + "" when defined(debug) or defined(chronosConfig): import std/macros @@ -83,3 +119,6 @@ when defined(debug) or defined(chronosConfig): printOption("chronosFutureTracking", chronosFutureTracking) printOption("chronosDumpAsync", chronosDumpAsync) printOption("chronosProcShell", chronosProcShell) + printOption("chronosEventEngine", chronosEventEngine) + printOption("chronosEventsCount", chronosEventsCount) + printOption("chronosInitialSize", chronosInitialSize) diff --git a/chronos/ioselects/ioselectors_epoll.nim b/chronos/ioselects/ioselectors_epoll.nim index d438bac02..161a5dfbe 100644 --- a/chronos/ioselects/ioselectors_epoll.nim +++ b/chronos/ioselects/ioselectors_epoll.nim @@ -97,12 +97,12 @@ proc new*(t: typedesc[Selector], T: typedesc): SelectResult[Selector[T]] = var nmask: Sigset if sigemptyset(nmask) < 0: return err(osLastError()) - let epollFd = epoll_create(asyncEventsCount) + let epollFd = epoll_create(chronosEventsCount) if epollFd < 0: return err(osLastError()) let selector = Selector[T]( epollFd: epollFd, - fds: initTable[int32, SelectorKey[T]](asyncInitialSize), + fds: initTable[int32, SelectorKey[T]](chronosInitialSize), signalMask: nmask, virtualId: -1'i32, # Should start with -1, because `InvalidIdent` == -1 childrenExited: false, @@ -627,7 +627,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int, readyKeys: var openArray[ReadyKey] ): SelectResult[int] = var - queueEvents: array[asyncEventsCount, EpollEvent] + queueEvents: array[chronosEventsCount, EpollEvent] k: int = 0 verifySelectParams(timeout, -1, int(high(cint))) @@ -668,7 +668,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int, ok(k) proc select2*[T](s: Selector[T], timeout: int): SelectResult[seq[ReadyKey]] = - var res = newSeq[ReadyKey](asyncEventsCount) + var res = newSeq[ReadyKey](chronosEventsCount) let count = ? 
selectInto2(s, timeout, res) res.setLen(count) ok(res) diff --git a/chronos/ioselects/ioselectors_kqueue.nim b/chronos/ioselects/ioselectors_kqueue.nim index 9f0627aa9..e39f96892 100644 --- a/chronos/ioselects/ioselectors_kqueue.nim +++ b/chronos/ioselects/ioselectors_kqueue.nim @@ -110,7 +110,7 @@ proc new*(t: typedesc[Selector], T: typedesc): SelectResult[Selector[T]] = let selector = Selector[T]( kqFd: kqFd, - fds: initTable[int32, SelectorKey[T]](asyncInitialSize), + fds: initTable[int32, SelectorKey[T]](chronosInitialSize), virtualId: -1'i32, # Should start with -1, because `InvalidIdent` == -1 virtualHoles: initDeque[int32]() ) @@ -559,7 +559,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int, ): SelectResult[int] = var tv: Timespec - queueEvents: array[asyncEventsCount, KEvent] + queueEvents: array[chronosEventsCount, KEvent] verifySelectParams(timeout, -1, high(int)) @@ -575,7 +575,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int, addr tv else: nil - maxEventsCount = cint(min(asyncEventsCount, len(readyKeys))) + maxEventsCount = cint(min(chronosEventsCount, len(readyKeys))) eventsCount = block: var res = 0 @@ -601,7 +601,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int, proc select2*[T](s: Selector[T], timeout: int): Result[seq[ReadyKey], OSErrorCode] = - var res = newSeq[ReadyKey](asyncEventsCount) + var res = newSeq[ReadyKey](chronosEventsCount) let count = ? selectInto2(s, timeout, res) res.setLen(count) ok(res) diff --git a/chronos/ioselects/ioselectors_poll.nim b/chronos/ioselects/ioselectors_poll.nim index d0d533cd0..25cc03518 100644 --- a/chronos/ioselects/ioselectors_poll.nim +++ b/chronos/ioselects/ioselectors_poll.nim @@ -16,7 +16,7 @@ import stew/base10 type SelectorImpl[T] = object fds: Table[int32, SelectorKey[T]] - pollfds: seq[TPollFd] + pollfds: seq[TPollfd] Selector*[T] = ref SelectorImpl[T] type @@ -50,7 +50,7 @@ proc freeKey[T](s: Selector[T], key: int32) = proc new*(t: typedesc[Selector], T: typedesc): SelectResult[Selector[T]] = let selector = Selector[T]( - fds: initTable[int32, SelectorKey[T]](asyncInitialSize) + fds: initTable[int32, SelectorKey[T]](chronosInitialSize) ) ok(selector) @@ -72,7 +72,7 @@ proc trigger2*(event: SelectEvent): SelectResult[void] = if res == -1: err(osLastError()) elif res != sizeof(uint64): - err(OSErrorCode(osdefs.EINVAL)) + err(osdefs.EINVAL) else: ok() @@ -98,13 +98,14 @@ template toPollEvents(events: set[Event]): cshort = res template pollAdd[T](s: Selector[T], sock: cint, events: set[Event]) = - s.pollfds.add(TPollFd(fd: sock, events: toPollEvents(events), revents: 0)) + s.pollfds.add(TPollfd(fd: sock, events: toPollEvents(events), revents: 0)) template pollUpdate[T](s: Selector[T], sock: cint, events: set[Event]) = var updated = false for mitem in s.pollfds.mitems(): if mitem.fd == sock: mitem.events = toPollEvents(events) + updated = true break if not(updated): raiseAssert "Descriptor [" & $sock & "] is not registered in the queue!" 
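
The `pollUpdate` hunk above fixes a real bug rather than a rename: previously `updated` was never set to `true`, so the assert below the loop fired even when the descriptor had just been updated. A standalone sketch of the corrected search-and-update pattern, using a hypothetical `Entry` type instead of the selector internals:

type Entry = object
  fd: cint
  events: cshort

proc updateEvents(entries: var seq[Entry], fd: cint, events: cshort) =
  var updated = false
  for entry in entries.mitems():
    if entry.fd == fd:
      entry.events = events
      updated = true   # mark success before leaving the loop
      break
  if not(updated):
    raiseAssert "Descriptor [" & $fd & "] is not registered in the queue!"
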
@@ -177,7 +178,6 @@ proc unregister2*[T](s: Selector[T], event: SelectEvent): SelectResult[void] = proc prepareKey[T](s: Selector[T], event: var TPollfd): Opt[ReadyKey] = let - defaultKey = SelectorKey[T](ident: InvalidIdent) fdi32 = int32(event.fd) revents = event.revents @@ -224,7 +224,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int, eventsCount = if maxEventsCount > 0: let res = handleEintr(poll(addr(s.pollfds[0]), Tnfds(maxEventsCount), - timeout)) + cint(timeout))) if res < 0: return err(osLastError()) res @@ -241,7 +241,7 @@ proc selectInto2*[T](s: Selector[T], timeout: int, ok(k) proc select2*[T](s: Selector[T], timeout: int): SelectResult[seq[ReadyKey]] = - var res = newSeq[ReadyKey](asyncEventsCount) + var res = newSeq[ReadyKey](chronosEventsCount) let count = ? selectInto2(s, timeout, res) res.setLen(count) ok(res) diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index bf5c06058..75ceb6769 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -880,7 +880,7 @@ elif defined(macos) or defined(macosx): sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, freeAddrInfo, + socketpair, poll, freeAddrInfo, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, @@ -905,7 +905,7 @@ elif defined(macos) or defined(macosx): sigemptyset, sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, freeAddrInfo, + socketpair, poll, freeAddrInfo, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, @@ -929,6 +929,21 @@ elif defined(macos) or defined(macosx): numer*: uint32 denom*: uint32 + TPollfd* {.importc: "struct pollfd", pure, final, + header: "".} = object + fd*: cint + events*: cshort + revents*: cshort + + Tnfds* {.importc: "nfds_t", header: "".} = cuint + + const + POLLIN* = 0x0001 + POLLOUT* = 0x0004 + POLLERR* = 0x0008 + POLLHUP* = 0x0010 + POLLNVAL* = 0x0020 + proc posix_gettimeofday*(tp: var Timeval, unused: pointer = nil) {. importc: "gettimeofday", header: "".} @@ -938,6 +953,9 @@ elif defined(macos) or defined(macosx): proc mach_absolute_time*(): uint64 {. importc, header: "".} + proc poll*(a1: ptr TPollfd, a2: Tnfds, a3: cint): cint {. 
+ importc, header: "", sideEffect.} + elif defined(linux): from std/posix import close, shutdown, sigemptyset, sigaddset, sigismember, sigdelset, write, read, waitid, getaddrinfo, @@ -947,12 +965,12 @@ elif defined(linux): unlink, listen, sendmsg, recvmsg, getpid, fcntl, pthread_sigmask, sigprocmask, clock_gettime, signal, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, freeAddrInfo, + socketpair, poll, freeAddrInfo, ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, SigInfo, Id, Tmsghdr, IOVec, RLimit, Timeval, TFdSet, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, AddrInfo, SocketHandle, - Suseconds, + Suseconds, TPollfd, Tnfds, FD_CLR, FD_ISSET, FD_SET, FD_ZERO, CLOCK_MONOTONIC, F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, O_NONBLOCK, SIG_BLOCK, SIG_UNBLOCK, @@ -961,6 +979,7 @@ elif defined(linux): AF_INET, AF_INET6, AF_UNIX, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, SOCK_DGRAM, SOCK_STREAM, SHUT_RD, SHUT_WR, SHUT_RDWR, + POLLIN, POLLOUT, POLLERR, POLLHUP, POLLNVAL, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, @@ -974,12 +993,12 @@ elif defined(linux): unlink, listen, sendmsg, recvmsg, getpid, fcntl, pthread_sigmask, sigprocmask, clock_gettime, signal, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, freeAddrInfo, + socketpair, poll, freeAddrInfo, ClockId, Itimerspec, Timespec, Sigset, Time, Pid, Mode, SigInfo, Id, Tmsghdr, IOVec, RLimit, TFdSet, Timeval, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, AddrInfo, SocketHandle, - Suseconds, + Suseconds, TPollfd, Tnfds, FD_CLR, FD_ISSET, FD_SET, FD_ZERO, CLOCK_MONOTONIC, F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, O_NONBLOCK, SIG_BLOCK, SIG_UNBLOCK, @@ -988,6 +1007,7 @@ elif defined(linux): AF_INET, AF_INET6, AF_UNIX, SO_REUSEADDR, SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, SOCK_DGRAM, SOCK_STREAM, SHUT_RD, SHUT_WR, SHUT_RDWR, + POLLIN, POLLOUT, POLLERR, POLLHUP, POLLNVAL, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, @@ -1097,11 +1117,11 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, clock_gettime, getcwd, chdir, waitpid, kill, select, pselect, - socketpair, freeAddrInfo, + socketpair, poll, freeAddrInfo, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, - Suseconds, + Suseconds, TPollfd, Tnfds, FD_CLR, FD_ISSET, FD_SET, FD_ZERO, F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM, @@ -1111,6 +1131,7 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, SHUT_RD, SHUT_WR, SHUT_RDWR, + POLLIN, POLLOUT, POLLERR, POLLHUP, POLLNVAL, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, @@ -1123,11 +1144,11 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or sigaddset, sigismember, fcntl, accept, pipe, write, signal, read, setsockopt, getsockopt, clock_gettime, getcwd, chdir, waitpid, kill, select, pselect, - 
socketpair, freeAddrInfo, + socketpair, poll, freeAddrInfo, Timeval, Timespec, Pid, Mode, Time, Sigset, SockAddr, SockLen, Sockaddr_storage, Sockaddr_in, Sockaddr_in6, Sockaddr_un, SocketHandle, AddrInfo, RLimit, TFdSet, - Suseconds, + Suseconds, TPollfd, Tnfds, FD_CLR, FD_ISSET, FD_SET, FD_ZERO, F_GETFL, F_SETFL, F_GETFD, F_SETFD, FD_CLOEXEC, O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM, @@ -1137,6 +1158,7 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, SHUT_RD, SHUT_WR, SHUT_RDWR, + POLLIN, POLLOUT, POLLERR, POLLHUP, POLLNVAL, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGPIPE, SIGCHLD, SIGSTOP, diff --git a/chronos/selectors2.nim b/chronos/selectors2.nim index 45c453304..c5918fdf5 100644 --- a/chronos/selectors2.nim +++ b/chronos/selectors2.nim @@ -32,29 +32,9 @@ # backwards-compatible. import stew/results -import osdefs, osutils, oserrno +import config, osdefs, osutils, oserrno export results, oserrno -const - asyncEventsCount* {.intdefine.} = 64 - ## Number of epoll events retrieved by syscall. - asyncInitialSize* {.intdefine.} = 64 - ## Initial size of Selector[T]'s array of file descriptors. - asyncEventEngine* {.strdefine.} = - when defined(linux): - "epoll" - elif defined(macosx) or defined(macos) or defined(ios) or - defined(freebsd) or defined(netbsd) or defined(openbsd) or - defined(dragonfly): - "kqueue" - elif defined(posix): - "poll" - else: - "" - ## Engine type which is going to be used by module. - - hasThreadSupport = compileOption("threads") - when defined(nimdoc): type @@ -281,7 +261,9 @@ else: var err = newException(IOSelectorsException, msg) raise err - when asyncEventEngine in ["epoll", "kqueue"]: + when chronosEventEngine in ["epoll", "kqueue"]: + const hasThreadSupport = compileOption("threads") + proc blockSignals(newmask: Sigset, oldmask: var Sigset): Result[void, OSErrorCode] = var nmask = newmask @@ -324,11 +306,11 @@ else: doAssert((timeout >= min) and (timeout <= max), "Cannot select with incorrect timeout value, got " & $timeout) -when asyncEventEngine == "epoll": +when chronosEventEngine == "epoll": include ./ioselects/ioselectors_epoll -elif asyncEventEngine == "kqueue": +elif chronosEventEngine == "kqueue": include ./ioselects/ioselectors_kqueue -elif asyncEventEngine == "poll": +elif chronosEventEngine == "poll": include ./ioselects/ioselectors_poll else: - {.fatal: "Event engine `" & asyncEventEngine & "` is not supported!".} + {.fatal: "Event engine `" & chronosEventEngine & "` is not supported!".} diff --git a/chronos/sendfile.nim b/chronos/sendfile.nim index 8cba9e83f..7afcb738d 100644 --- a/chronos/sendfile.nim +++ b/chronos/sendfile.nim @@ -38,8 +38,12 @@ when defined(nimdoc): ## be prepared to retry the call if there were unsent bytes. ## ## On error, ``-1`` is returned. 
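
The doc comment above asks callers to be prepared to retry when not all bytes were sent in a single call. A minimal retry sketch, assuming descriptors `outfd` (a connected socket) and `infd` (an open file) are supplied by the caller and that `count` comes back holding the number of bytes actually sent:

import chronos/sendfile

proc sendWhole(outfd, infd, size: int): bool =
  var
    offset = 0
    remaining = size
  while remaining > 0:
    var count = remaining
    if sendfile(outfd, infd, offset, count) == -1 or count == 0:
      return false           # error or no forward progress, give up
    offset += count           # advance by the number of bytes actually sent
    remaining -= count
  true
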
+elif defined(emscripten): -elif defined(linux) or defined(android): + proc sendfile*(outfd, infd: int, offset: int, count: var int): int = + raiseAssert "sendfile() is not implemented yet" + +elif (defined(linux) or defined(android)) and not(defined(emscripten)): proc osSendFile*(outfd, infd: cint, offset: ptr int, count: int): int {.importc: "sendfile", header: "".} diff --git a/tests/testall.nim b/tests/testall.nim index 4861a85eb..6419f9836 100644 --- a/tests/testall.nim +++ b/tests/testall.nim @@ -5,10 +5,22 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import testmacro, testsync, testsoon, testtime, testfut, testsignal, - testaddress, testdatagram, teststream, testserver, testbugs, testnet, - testasyncstream, testhttpserver, testshttpserver, testhttpclient, - testproc, testratelimit, testfutures, testthreadsync +import ".."/chronos/config -# Must be imported last to check for Pending futures -import testutils +when (chronosEventEngine in ["epoll", "kqueue"]) or defined(windows): + import testmacro, testsync, testsoon, testtime, testfut, testsignal, + testaddress, testdatagram, teststream, testserver, testbugs, testnet, + testasyncstream, testhttpserver, testshttpserver, testhttpclient, + testproc, testratelimit, testfutures, testthreadsync + + # Must be imported last to check for Pending futures + import testutils +elif chronosEventEngine == "poll": + # `poll` engine do not support signals and processes + import testmacro, testsync, testsoon, testtime, testfut, testaddress, + testdatagram, teststream, testserver, testbugs, testnet, + testasyncstream, testhttpserver, testshttpserver, testhttpclient, + testratelimit, testfutures, testthreadsync + + # Must be imported last to check for Pending futures + import testutils diff --git a/tests/testproc.nim b/tests/testproc.nim index cfcafe6b7..288ec181d 100644 --- a/tests/testproc.nim +++ b/tests/testproc.nim @@ -8,6 +8,7 @@ import std/os import stew/[base10, byteutils] import ".."/chronos/unittest2/asynctests +import ".."/chronos/asyncproc when defined(posix): from ".."/chronos/osdefs import SIGKILL diff --git a/tests/teststream.nim b/tests/teststream.nim index f6bc99b66..8c3c77e1c 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -1339,7 +1339,10 @@ suite "Stream Transport test suite": else: skip() else: - check waitFor(testSendFile(addresses[i])) == FilesCount + if defined(emscripten): + skip() + else: + check waitFor(testSendFile(addresses[i])) == FilesCount test prefixes[i] & "Connection refused test": var address: TransportAddress if addresses[i].family == AddressFamily.Unix: From c546a4329cad426fc1deb32f3a7bca6ead4c2b2c Mon Sep 17 00:00:00 2001 From: diegomrsantos Date: Wed, 2 Aug 2023 21:04:30 +0200 Subject: [PATCH 053/146] Use random ports (#429) --- tests/teststream.nim | 44 +++++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/tests/teststream.nim b/tests/teststream.nim index 8c3c77e1c..4b5cb153c 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -1259,46 +1259,44 @@ suite "Stream Transport test suite": return buffer == message proc testConnectBindLocalAddress() {.async.} = - let dst1 = initTAddress("127.0.0.1:33335") - let dst2 = initTAddress("127.0.0.1:33336") - let dst3 = initTAddress("127.0.0.1:33337") proc client(server: StreamServer, transp: StreamTransport) {.async.} = await transp.closeWait() - # We use ReuseAddr here only to be able to reuse the same IP/Port when there's a 
TIME_WAIT socket. It's useful when - # running the test multiple times or if a test ran previously used the same port. - let servers = - [createStreamServer(dst1, client, {ReuseAddr}), - createStreamServer(dst2, client, {ReuseAddr}), - createStreamServer(dst3, client, {ReusePort})] + let server1 = createStreamServer(initTAddress("127.0.0.1:0"), client) + let server2 = createStreamServer(initTAddress("127.0.0.1:0"), client) + let server3 = createStreamServer(initTAddress("127.0.0.1:0"), client, {ReusePort}) - for server in servers: - server.start() - - let ta = initTAddress("0.0.0.0:35000") + server1.start() + server2.start() + server3.start() # It works cause there's no active listening socket bound to ta and we are using ReuseAddr - var transp1 = await connect(dst1, localAddress = ta, flags={SocketFlags.ReuseAddr}) - var transp2 = await connect(dst2, localAddress = ta, flags={SocketFlags.ReuseAddr}) + var transp1 = await connect(server1.local, flags={SocketFlags.ReuseAddr}) + let ta = transp1.localAddress + var transp2 = await connect(server2.local, localAddress = ta, flags={SocketFlags.ReuseAddr}) - # It works cause even thought there's an active listening socket bound to dst3, we are using ReusePort - var transp3 = await connect(dst2, localAddress = dst3, flags={SocketFlags.ReusePort}) + # It works cause even though there's an active listening socket bound to dst3, we are using ReusePort + var transp3 = await connect(server2.local, localAddress = server3.local, flags={SocketFlags.ReusePort}) expect(TransportOsError): - var transp2 {.used.} = await connect(dst3, localAddress = ta) + var transp2 {.used.} = await connect(server3.local, localAddress = ta) expect(TransportOsError): - var transp3 {.used.} = - await connect(dst3, localAddress = initTAddress(":::35000")) + var transp3 {.used.} = await connect(server3.local, localAddress = initTAddress("::", transp1.localAddress.port)) await transp1.closeWait() await transp2.closeWait() await transp3.closeWait() - for server in servers: - server.stop() - await server.closeWait() + server1.stop() + await server1.closeWait() + + server2.stop() + await server2.closeWait() + + server3.stop() + await server3.closeWait() markFD = getCurrentFD() From a1eb30360b10b850b3105dac1daae64c73de5a03 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 4 Aug 2023 08:08:34 +0200 Subject: [PATCH 054/146] fix invalid protocol casts (#430) --- chronos/transports/datagram.nim | 10 +++++----- chronos/transports/stream.nim | 20 +++++++++----------- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index 3e10f76e6..665bc0ed5 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -466,11 +466,11 @@ else: var res = if isNil(child): DatagramTransport() else: child if sock == asyncInvalidSocket: - var proto = Protocol.IPPROTO_UDP - if local.family == AddressFamily.Unix: - # `Protocol` enum is missing `0` value, so we making here cast, until - # `Protocol` enum will not support IPPROTO_IP == 0. 
- proto = cast[Protocol](0) + let proto = + if local.family == AddressFamily.Unix: + Protocol.IPPROTO_IP + else: + Protocol.IPPROTO_UDP localSock = createAsyncSocket(local.getDomain(), SockType.SOCK_DGRAM, proto) if localSock == asyncInvalidSocket: diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 45e4054e0..44a39b292 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -1475,14 +1475,13 @@ else: var saddr: Sockaddr_storage slen: SockLen - proto: Protocol var retFuture = newFuture[StreamTransport]("stream.transport.connect") address.toSAddr(saddr, slen) - proto = Protocol.IPPROTO_TCP - if address.family == AddressFamily.Unix: - # `Protocol` enum is missing `0` value, so we making here cast, until - # `Protocol` enum will not support IPPROTO_IP == 0. - proto = cast[Protocol](0) + let proto = + if address.family == AddressFamily.Unix: + Protocol.IPPROTO_IP + else: + Protocol.IPPROTO_TCP let sock = createAsyncSocket(address.getDomain(), SockType.SOCK_STREAM, proto) @@ -1938,11 +1937,10 @@ proc createStreamServer*(host: TransportAddress, else: # Posix if sock == asyncInvalidSocket: - var proto = Protocol.IPPROTO_TCP - if host.family == AddressFamily.Unix: - # `Protocol` enum is missing `0` value, so we making here cast, until - # `Protocol` enum will not support IPPROTO_IP == 0. - proto = cast[Protocol](0) + let proto = if host.family == AddressFamily.Unix: + Protocol.IPPROTO_IP + else: + Protocol.IPPROTO_TCP serverSocket = createAsyncSocket(host.getDomain(), SockType.SOCK_STREAM, proto) From 38c31e21d392c8a0924867bdebc764172065770e Mon Sep 17 00:00:00 2001 From: andri lim Date: Fri, 4 Aug 2023 14:27:01 +0700 Subject: [PATCH 055/146] fix type mismatch error in asyncstream join (#433) --- chronos/streams/asyncstream.nim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 7e6e5d2d1..191b36aea 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -873,10 +873,10 @@ proc join*(rw: AsyncStreamRW): Future[void] = else: var retFuture = newFuture[void]("async.stream.writer.join") - proc continuation(udata: pointer) {.gcsafe.} = + proc continuation(udata: pointer) {.gcsafe, raises:[].} = retFuture.complete() - proc cancellation(udata: pointer) {.gcsafe.} = + proc cancellation(udata: pointer) {.gcsafe, raises:[].} = rw.future.removeCallback(continuation, cast[pointer](retFuture)) if not(rw.future.finished()): From c4b066a2c4faeedd564e8467a9416920bfb4b9a9 Mon Sep 17 00:00:00 2001 From: andri lim Date: Fri, 4 Aug 2023 14:32:12 +0700 Subject: [PATCH 056/146] ci: upgrade github actions/cache to v3 (#434) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b78f2a121..b7aa0fa14 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -96,7 +96,7 @@ jobs: - name: Restore Nim DLLs dependencies (Windows) from cache if: runner.os == 'Windows' id: windows-dlls-cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: external/dlls-${{ matrix.target.cpu }} key: 'dlls-${{ matrix.target.cpu }}' From 194226a0e06e60bee9de503bc4a2d219c7dd93d2 Mon Sep 17 00:00:00 2001 From: diegomrsantos Date: Tue, 8 Aug 2023 02:10:28 +0200 Subject: [PATCH 057/146] Remove hard-coded ports when non-windows (#437) --- tests/teststream.nim | 66 ++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 33 deletions(-) 
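
The change below replaces hard-coded test ports with the bind-to-port-0 pattern: the OS chooses a free port and the tests read it back through `server.local`. A minimal standalone sketch of that pattern (the no-op handler is illustrative only):

import chronos

proc handler(server: StreamServer, transp: StreamTransport) {.async.} =
  await transp.closeWait()

proc demo() {.async.} =
  # Binding to port 0 lets the OS pick any free port; `server.local`
  # then reports the address that was actually bound.
  let server = createStreamServer(initTAddress("127.0.0.1:0"), handler)
  server.start()
  let transp = await connect(server.local)
  await transp.closeWait()
  server.stop()
  await server.closeWait()

waitFor demo()
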
diff --git a/tests/teststream.nim b/tests/teststream.nim index 4b5cb153c..73d34c65b 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -34,7 +34,7 @@ suite "Stream Transport test suite": ] else: let addresses = [ - initTAddress("127.0.0.1:33335"), + initTAddress("127.0.0.1:0"), initTAddress(r"/tmp/testpipe") ] @@ -43,7 +43,7 @@ suite "Stream Transport test suite": var markFD: int proc getCurrentFD(): int = - let local = initTAddress("127.0.0.1:33334") + let local = initTAddress("127.0.0.1:0") let sock = createAsyncSocket(local.getDomain(), SockType.SOCK_DGRAM, Protocol.IPPROTO_UDP) closeSocket(sock) @@ -348,7 +348,7 @@ suite "Stream Transport test suite": proc test1(address: TransportAddress): Future[int] {.async.} = var server = createStreamServer(address, serveClient1, {ReuseAddr}) server.start() - result = await swarmManager1(address) + result = await swarmManager1(server.local) server.stop() server.close() await server.join() @@ -356,7 +356,7 @@ suite "Stream Transport test suite": proc test2(address: TransportAddress): Future[int] {.async.} = var server = createStreamServer(address, serveClient2, {ReuseAddr}) server.start() - result = await swarmManager2(address) + result = await swarmManager2(server.local) server.stop() server.close() await server.join() @@ -364,7 +364,7 @@ suite "Stream Transport test suite": proc test3(address: TransportAddress): Future[int] {.async.} = var server = createStreamServer(address, serveClient3, {ReuseAddr}) server.start() - result = await swarmManager3(address) + result = await swarmManager3(server.local) server.stop() server.close() await server.join() @@ -372,7 +372,7 @@ suite "Stream Transport test suite": proc testSendFile(address: TransportAddress): Future[int] {.async.} = var server = createStreamServer(address, serveClient4, {ReuseAddr}) server.start() - result = await swarmManager4(address) + result = await swarmManager4(server.local) server.stop() server.close() await server.join() @@ -414,7 +414,7 @@ suite "Stream Transport test suite": var server = createStreamServer(address, serveClient, {ReuseAddr}) server.start() - result = await swarmManager(address) + result = await swarmManager(server.local) await server.join() proc testWCR(address: TransportAddress): Future[int] {.async.} = @@ -456,13 +456,13 @@ suite "Stream Transport test suite": var server = createStreamServer(address, serveClient, {ReuseAddr}) server.start() - result = await swarmManager(address) + result = await swarmManager(server.local) await server.join() proc test7(address: TransportAddress): Future[int] {.async.} = var server = createStreamServer(address, serveClient7, {ReuseAddr}) server.start() - result = await swarmWorker7(address) + result = await swarmWorker7(server.local) server.stop() server.close() await server.join() @@ -470,7 +470,7 @@ suite "Stream Transport test suite": proc test8(address: TransportAddress): Future[int] {.async.} = var server = createStreamServer(address, serveClient8, {ReuseAddr}) server.start() - result = await swarmWorker8(address) + result = await swarmWorker8(server.local) await server.join() # proc serveClient9(server: StreamServer, transp: StreamTransport) {.async.} = @@ -553,7 +553,7 @@ suite "Stream Transport test suite": proc test11(address: TransportAddress): Future[int] {.async.} = var server = createStreamServer(address, serveClient11, {ReuseAddr}) server.start() - result = await swarmWorker11(address) + result = await swarmWorker11(server.local) server.stop() server.close() await server.join() @@ -579,7 +579,7 @@ suite 
"Stream Transport test suite": proc test12(address: TransportAddress): Future[int] {.async.} = var server = createStreamServer(address, serveClient12, {ReuseAddr}) server.start() - result = await swarmWorker12(address) + result = await swarmWorker12(server.local) server.stop() server.close() await server.join() @@ -601,7 +601,7 @@ suite "Stream Transport test suite": proc test13(address: TransportAddress): Future[int] {.async.} = var server = createStreamServer(address, serveClient13, {ReuseAddr}) server.start() - result = await swarmWorker13(address) + result = await swarmWorker13(server.local) server.stop() server.close() await server.join() @@ -621,7 +621,7 @@ suite "Stream Transport test suite": subres = 0 server.start() - var transp = await connect(address) + var transp = await connect(server.local) var fut = swarmWorker(transp) # We perfrom shutdown(SHUT_RD/SD_RECEIVE) for the socket, in such way its # possible to emulate socket's EOF. @@ -674,7 +674,7 @@ suite "Stream Transport test suite": proc test16(address: TransportAddress): Future[int] {.async.} = var server = createStreamServer(address, serveClient16, {ReuseAddr}) server.start() - result = await swarmWorker16(address) + result = await swarmWorker16(server.local) server.stop() server.close() await server.join() @@ -701,7 +701,7 @@ suite "Stream Transport test suite": var server = createStreamServer(address, client, {ReuseAddr}) server.start() var msg = "HELLO" - var ntransp = await connect(address) + var ntransp = await connect(server.local) await syncFut while true: var res = await ntransp.write(msg) @@ -763,7 +763,7 @@ suite "Stream Transport test suite": var transp: StreamTransport try: - transp = await connect(address) + transp = await connect(server.local) flag = true except CatchableError: server.stop() @@ -796,31 +796,31 @@ suite "Stream Transport test suite": server.start() try: var r1, r2, r3, r4, r5: string - var t1 = await connect(address) + var t1 = await connect(server.local) try: r1 = await t1.readLine(4) finally: await t1.closeWait() - var t2 = await connect(address) + var t2 = await connect(server.local) try: r2 = await t2.readLine(6) finally: await t2.closeWait() - var t3 = await connect(address) + var t3 = await connect(server.local) try: r3 = await t3.readLine(8) finally: await t3.closeWait() - var t4 = await connect(address) + var t4 = await connect(server.local) try: r4 = await t4.readLine(8) finally: await t4.closeWait() - var t5 = await connect(address) + var t5 = await connect(server.local) try: r5 = await t5.readLine() finally: @@ -945,7 +945,7 @@ suite "Stream Transport test suite": var server = createStreamServer(address, serveClient, {ReuseAddr}) server.start() - var t1 = await connect(address) + var t1 = await connect(server.local) try: discard await t1.readLV(2000) except TransportIncompleteError: @@ -959,7 +959,7 @@ suite "Stream Transport test suite": await server.join() return false - var t2 = await connect(address) + var t2 = await connect(server.local) try: var r2 = await t2.readLV(2000) c2 = (r2 == @[]) @@ -972,7 +972,7 @@ suite "Stream Transport test suite": await server.join() return false - var t3 = await connect(address) + var t3 = await connect(server.local) try: discard await t3.readLV(2000) except TransportIncompleteError: @@ -986,7 +986,7 @@ suite "Stream Transport test suite": await server.join() return false - var t4 = await connect(address) + var t4 = await connect(server.local) try: discard await t4.readLV(2000) except TransportIncompleteError: @@ -1000,7 +1000,7 @@ suite "Stream 
Transport test suite": await server.join() return false - var t5 = await connect(address) + var t5 = await connect(server.local) try: discard await t5.readLV(1000) except ValueError: @@ -1014,7 +1014,7 @@ suite "Stream Transport test suite": await server.join() return false - var t6 = await connect(address) + var t6 = await connect(server.local) try: var expectMsg = createMessage(1024) var r6 = await t6.readLV(2000) @@ -1029,7 +1029,7 @@ suite "Stream Transport test suite": await server.join() return false - var t7 = await connect(address) + var t7 = await connect(server.local) try: var expectMsg = createMessage(1024) var expectDone = "DONE" @@ -1062,7 +1062,7 @@ suite "Stream Transport test suite": try: for i in 0 ..< TestsCount: - transp = await connect(address) + transp = await connect(server.local) await sleepAsync(10.milliseconds) await transp.closeWait() inc(connected) @@ -1117,7 +1117,7 @@ suite "Stream Transport test suite": try: for i in 0 ..< 3: try: - let transp = await connect(address) + let transp = await connect(server.local) await sleepAsync(10.milliseconds) await transp.closeWait() except TransportTooManyError: @@ -1166,7 +1166,7 @@ suite "Stream Transport test suite": await server.closeWait() var acceptFut = acceptTask(server) - var transp = await connect(address) + var transp = await connect(server.local) await server.join() await transp.closeWait() await acceptFut @@ -1187,7 +1187,7 @@ suite "Stream Transport test suite": await server.closeWait() var acceptFut = acceptTask(server) - var transp = await connect(address) + var transp = await connect(server.local) await server.join() await transp.closeWait() await acceptFut From 466241aa958af4ee4d07c5e0019d40bdaa9f6a36 Mon Sep 17 00:00:00 2001 From: diegomrsantos Date: Tue, 8 Aug 2023 02:11:35 +0200 Subject: [PATCH 058/146] Remove reuseaddr (#438) * Remove hard-coded ports when non-windows * Remove ReuseAddr from test --- tests/teststream.nim | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/tests/teststream.nim b/tests/teststream.nim index 73d34c65b..9e1ce557c 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -1271,23 +1271,18 @@ suite "Stream Transport test suite": server2.start() server3.start() - # It works cause there's no active listening socket bound to ta and we are using ReuseAddr - var transp1 = await connect(server1.local, flags={SocketFlags.ReuseAddr}) - let ta = transp1.localAddress - var transp2 = await connect(server2.local, localAddress = ta, flags={SocketFlags.ReuseAddr}) - # It works cause even though there's an active listening socket bound to dst3, we are using ReusePort - var transp3 = await connect(server2.local, localAddress = server3.local, flags={SocketFlags.ReusePort}) + var transp1 = await connect(server1.local, localAddress = server3.local, flags={SocketFlags.ReusePort}) + var transp2 = await connect(server2.local, localAddress = server3.local, flags={SocketFlags.ReusePort}) expect(TransportOsError): - var transp2 {.used.} = await connect(server3.local, localAddress = ta) + var transp2 {.used.} = await connect(server2.local, localAddress = server3.local) expect(TransportOsError): - var transp3 {.used.} = await connect(server3.local, localAddress = initTAddress("::", transp1.localAddress.port)) + var transp3 {.used.} = await connect(server2.local, localAddress = initTAddress("::", server3.local.port)) await transp1.closeWait() await transp2.closeWait() - await transp3.closeWait() server1.stop() await server1.closeWait() From 
6c2ea675123ed0bf5c5d76c92ed4985bacd1a9ec Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 9 Aug 2023 10:57:49 +0300 Subject: [PATCH 059/146] Unroll `defer`s and remove `break`s. (#440) * Unpack `finally/defer` blocks and introduce explicit cleaning of objects. Add request query to debug information. * Unroll one more loop to avoid `break`. Add test for query debug string. * Fix cancellation behavior. * Address review comments. --- chronos/apps/http/httpdebug.nim | 9 ++++ chronos/apps/http/httpserver.nim | 88 +++++++++++++++++-------------- chronos/apps/http/httptable.nim | 4 ++ chronos/apps/http/shttpserver.nim | 12 ++++- tests/testhttpserver.nim | 8 +-- tests/testshttpserver.nim | 3 +- 6 files changed, 77 insertions(+), 47 deletions(-) diff --git a/chronos/apps/http/httpdebug.nim b/chronos/apps/http/httpdebug.nim index 2f40674e6..a1dc02287 100644 --- a/chronos/apps/http/httpdebug.nim +++ b/chronos/apps/http/httpdebug.nim @@ -29,6 +29,7 @@ type handle*: SocketHandle connectionType*: ConnectionType connectionState*: ConnectionState + query*: Opt[string] remoteAddress*: Opt[TransportAddress] localAddress*: Opt[TransportAddress] acceptMoment*: Moment @@ -85,6 +86,12 @@ proc getConnectionState*(holder: HttpConnectionHolderRef): ConnectionState = else: ConnectionState.Accepted +proc getQueryString*(holder: HttpConnectionHolderRef): Opt[string] = + if not(isNil(holder.connection)): + holder.connection.currentRawQuery + else: + Opt.none(string) + proc init*(t: typedesc[ServerConnectionInfo], holder: HttpConnectionHolderRef): ServerConnectionInfo = let @@ -98,6 +105,7 @@ proc init*(t: typedesc[ServerConnectionInfo], Opt.some(holder.transp.remoteAddress()) except CatchableError: Opt.none(TransportAddress) + queryString = holder.getQueryString() ServerConnectionInfo( handle: SocketHandle(holder.transp.fd), @@ -106,6 +114,7 @@ proc init*(t: typedesc[ServerConnectionInfo], remoteAddress: remoteAddress, localAddress: localAddress, acceptMoment: holder.acceptMoment, + query: queryString, createMoment: if not(isNil(holder.connection)): Opt.some(holder.connection.createMoment) diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index c1e45c03e..eafa27c66 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -148,6 +148,7 @@ type writer*: AsyncStreamWriter closeCb*: HttpCloseConnectionCallback createMoment*: Moment + currentRawQuery*: Opt[string] buffer: seq[byte] HttpConnectionRef* = ref HttpConnection @@ -813,6 +814,7 @@ proc closeUnsecureConnection(conn: HttpConnectionRef) {.async.} = except CancelledError: await allFutures(pending) untrackCounter(HttpServerUnsecureConnectionTrackerName) + reset(conn[]) conn.state = HttpState.Closed proc new(ht: typedesc[HttpConnectionRef], server: HttpServerRef, @@ -844,7 +846,9 @@ proc closeWait*(req: HttpRequestRef) {.async.} = await writer except CancelledError: await writer + reset(resp[]) untrackCounter(HttpServerRequestTrackerName) + reset(req[]) req.state = HttpState.Closed proc createConnection(server: HttpServerRef, @@ -931,6 +935,7 @@ proc getRequestFence*(server: HttpServerRef, await connection.getRequest() else: await connection.getRequest().wait(server.headersTimeout) + connection.currentRawQuery = Opt.some(res.rawPath) RequestFence.ok(res) except CancelledError: RequestFence.err(HttpProcessError.init(HttpServerError.InterruptError)) @@ -962,13 +967,17 @@ proc getConnectionFence*(server: HttpServerRef, let res = await server.createConnCallback(server, transp) ConnectionFence.ok(res) 
except CancelledError: - await transp.closeWait() ConnectionFence.err(HttpProcessError.init(HttpServerError.InterruptError)) except HttpCriticalError as exc: - await transp.closeWait() - let address = transp.getRemoteAddress() + # On error `transp` will be closed by `createConnCallback()` call. + let address = Opt.none(TransportAddress) ConnectionFence.err(HttpProcessError.init( HttpServerError.CriticalError, exc, address, exc.code)) + except CatchableError as exc: + # On error `transp` will be closed by `createConnCallback()` call. + let address = Opt.none(TransportAddress) + ConnectionFence.err(HttpProcessError.init( + HttpServerError.CriticalError, exc, address, Http503)) proc processRequest(server: HttpServerRef, connection: HttpConnectionRef, @@ -984,19 +993,23 @@ proc processRequest(server: HttpServerRef, else: discard - defer: - if requestFence.isOk(): - await requestFence.get().closeWait() - let responseFence = await getResponseFence(connection, requestFence) if responseFence.isErr() and (responseFence.error.kind == HttpServerError.InterruptError): + if requestFence.isOk(): + await requestFence.get().closeWait() return HttpProcessExitType.Immediate - if responseFence.isErr(): - await connection.sendErrorResponse(requestFence, responseFence.error) - else: - await connection.sendDefaultResponse(requestFence, responseFence.get()) + let res = + if responseFence.isErr(): + await connection.sendErrorResponse(requestFence, responseFence.error) + else: + await connection.sendDefaultResponse(requestFence, responseFence.get()) + + if requestFence.isOk(): + await requestFence.get().closeWait() + + res proc processLoop(holder: HttpConnectionHolderRef) {.async.} = let @@ -1016,23 +1029,27 @@ proc processLoop(holder: HttpConnectionHolderRef) {.async.} = holder.connection = connection var runLoop = HttpProcessExitType.KeepAlive - - defer: - server.connections.del(connectionId) - case runLoop - of HttpProcessExitType.KeepAlive: - # This could happened only on CancelledError. - await connection.closeWait() - of HttpProcessExitType.Immediate: - await connection.closeWait() - of HttpProcessExitType.Graceful: - await connection.gracefulCloseWait() - while runLoop == HttpProcessExitType.KeepAlive: - runLoop = await server.processRequest(connection, connectionId) + runLoop = + try: + await server.processRequest(connection, connectionId) + except CancelledError: + HttpProcessExitType.Immediate + except CatchableError as exc: + raiseAssert "Unexpected error [" & $exc.name & "] happens: " & $exc.msg + + server.connections.del(connectionId) + case runLoop + of HttpProcessExitType.KeepAlive: + await connection.closeWait() + of HttpProcessExitType.Immediate: + await connection.closeWait() + of HttpProcessExitType.Graceful: + await connection.gracefulCloseWait() proc acceptClientLoop(server: HttpServerRef) {.async.} = - while true: + var runLoop = true + while runLoop: try: # if server.maxConnections > 0: # await server.semaphore.acquire() @@ -1042,27 +1059,18 @@ proc acceptClientLoop(server: HttpServerRef) {.async.} = # We are unable to identify remote peer, it means that remote peer # disconnected before identification. await transp.closeWait() - break + runLoop = false else: let connId = resId.get() let holder = HttpConnectionHolderRef.new(server, transp, resId.get()) server.connections[connId] = holder holder.future = processLoop(holder) - except CancelledError: - # Server was stopped - break - except TransportOsError: - # This is some critical unrecoverable error. 
- break - except TransportTooManyError: - # Non critical error - discard - except TransportAbortedError: - # Non critical error + except TransportTooManyError, TransportAbortedError: + # Non-critical error discard - except CatchableError: - # Unexpected error - break + except CancelledError, TransportOsError, CatchableError: + # Critical, cancellation or unexpected error + runLoop = false proc state*(server: HttpServerRef): HttpServerState {.raises: [].} = ## Returns current HTTP server's state. diff --git a/chronos/apps/http/httptable.nim b/chronos/apps/http/httptable.nim index 86060de30..f44765aed 100644 --- a/chronos/apps/http/httptable.nim +++ b/chronos/apps/http/httptable.nim @@ -197,3 +197,7 @@ proc toList*(ht: HttpTables, normKey = false): auto = for key, value in ht.stringItems(normKey): res.add((key, value)) res + +proc clear*(ht: var HttpTables) = + ## Resets the HtppTable so that it is empty. + ht.table.clear() diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim index b993cb5fb..bc5c3fbeb 100644 --- a/chronos/apps/http/shttpserver.nim +++ b/chronos/apps/http/shttpserver.nim @@ -43,6 +43,7 @@ proc closeSecConnection(conn: HttpConnectionRef) {.async.} = await allFutures(pending) except CancelledError: await allFutures(pending) + reset(cast[SecureHttpConnectionRef](conn)[]) untrackCounter(HttpServerSecureConnectionTrackerName) conn.state = HttpState.Closed @@ -74,9 +75,16 @@ proc createSecConnection(server: HttpServerRef, except CancelledError as exc: await HttpConnectionRef(sconn).closeWait() raise exc - except TLSStreamError: + except TLSStreamError as exc: await HttpConnectionRef(sconn).closeWait() - raiseHttpCriticalError("Unable to establish secure connection") + let msg = "Unable to establish secure connection, reason [" & + $exc.msg & "]" + raiseHttpCriticalError(msg) + except CatchableError as exc: + await HttpConnectionRef(sconn).closeWait() + let msg = "Unexpected error while trying to establish secure connection, " & + "reason [" & $exc.msg & "]" + raiseHttpCriticalError(msg) proc new*(htype: typedesc[SecureHttpServerRef], address: TransportAddress, diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index 83372ea5a..0ecc9aa4f 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -7,9 +7,8 @@ # MIT license (LICENSE-MIT) import std/[strutils, algorithm] import ".."/chronos/unittest2/asynctests, - ".."/chronos, ".."/chronos/apps/http/httpserver, - ".."/chronos/apps/http/httpcommon, - ".."/chronos/apps/http/httpdebug + ".."/chronos, + ".."/chronos/apps/http/[httpserver, httpcommon, httpdebug] import stew/base10 {.used.} @@ -1357,7 +1356,7 @@ suite "HTTP server testing suite": asyncTest "HTTP debug tests": const TestsCount = 10 - TestRequest = "GET / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n" + TestRequest = "GET /httpdebug HTTP/1.1\r\nConnection: keep-alive\r\n\r\n" proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = if r.isOk(): @@ -1417,6 +1416,7 @@ suite "HTTP server testing suite": connection.localAddress.get() == transp.remoteAddress() connection.connectionType == ConnectionType.NonSecure connection.connectionState == ConnectionState.Alive + connection.query.get("") == "/httpdebug" (currentTime - connection.createMoment.get()) != ZeroDuration (currentTime - connection.acceptMoment) != ZeroDuration var pending: seq[Future[void]] diff --git a/tests/testshttpserver.nim b/tests/testshttpserver.nim index a83d0b29f..8aacb8e43 100644 --- a/tests/testshttpserver.nim +++ b/tests/testshttpserver.nim @@ 
-7,7 +7,8 @@ # MIT license (LICENSE-MIT) import std/strutils import ".."/chronos/unittest2/asynctests -import ".."/chronos, ".."/chronos/apps/http/shttpserver +import ".."/chronos, + ".."/chronos/apps/http/shttpserver import stew/base10 {.used.} From a7f708bea897ab81ee57aab66628c62f12aa213a Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 9 Aug 2023 16:27:17 +0200 Subject: [PATCH 060/146] futures: lentify (#413) sometimes avoid copies when reading from `Future` --- chronos/asyncfutures2.nim | 18 ++++++++++++------ chronos/asyncmacro2.nim | 2 +- chronos/futures.nim | 17 ++++++++++++++--- tests/testfut.nim | 2 ++ 4 files changed, 29 insertions(+), 10 deletions(-) diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index d170f0825..d3954bab1 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -451,19 +451,25 @@ proc internalCheckComplete*(fut: FutureBase) {.raises: [CatchableError].} = injectStacktrace(fut.internalError) raise fut.internalError -proc internalRead*[T](fut: Future[T]): T {.inline.} = - # For internal use only. Used in asyncmacro - when T isnot void: - return fut.internalValue +proc read*[T: not void](future: Future[T] ): lent T {.raises: [CatchableError].} = + ## Retrieves the value of ``future``. Future must be finished otherwise + ## this function will fail with a ``ValueError`` exception. + ## + ## If the result of the future is an error then that error will be raised. + if not future.finished(): + # TODO: Make a custom exception type for this? + raise newException(ValueError, "Future still in progress.") + + internalCheckComplete(future) + future.internalValue -proc read*[T](future: Future[T] ): T {.raises: [CatchableError].} = +proc read*(future: Future[void] ) {.raises: [CatchableError].} = ## Retrieves the value of ``future``. Future must be finished otherwise ## this function will fail with a ``ValueError`` exception. ## ## If the result of the future is an error then that error will be raised. if future.finished(): internalCheckComplete(future) - internalRead(future) else: # TODO: Make a custom exception type for this? raise newException(ValueError, "Future still in progress.") diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim index 45146a300..8e7407309 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/asyncmacro2.nim @@ -309,7 +309,7 @@ template await*[T](f: Future[T]): untyped = # `child` released by `futureContinue` chronosInternalRetFuture.internalChild.internalCheckComplete() when T isnot void: - cast[type(f)](chronosInternalRetFuture.internalChild).internalRead() + cast[type(f)](chronosInternalRetFuture.internalChild).value() else: unsupported "await is only available within {.async.}" diff --git a/chronos/futures.nim b/chronos/futures.nim index edfae328b..9b2667b62 100644 --- a/chronos/futures.nim +++ b/chronos/futures.nim @@ -184,7 +184,7 @@ func completed*(future: FutureBase): bool {.inline.} = func location*(future: FutureBase): array[LocationKind, ptr SrcLoc] = future.internalLocation -func value*[T](future: Future[T]): T = +func value*[T: not void](future: Future[T]): lent T = ## Return the value in a completed future - raises Defect when ## `fut.completed()` is `false`. ## @@ -196,8 +196,19 @@ func value*[T](future: Future[T]): T = msg: "Future not completed while accessing value", cause: future) - when T isnot void: - future.internalValue + future.internalValue + +func value*(future: Future[void]) = + ## Return the value in a completed future - raises Defect when + ## `fut.completed()` is `false`. 
+ ## + ## See `read` for a version that raises an catchable error when future + ## has not completed. + when chronosStrictFutureAccess: + if not future.completed(): + raise (ref FutureDefect)( + msg: "Future not completed while accessing value", + cause: future) func error*(future: FutureBase): ref CatchableError = ## Return the error of `future`, or `nil` if future did not fail. diff --git a/tests/testfut.nim b/tests/testfut.nim index af92354b5..a9fba0539 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -1237,12 +1237,14 @@ suite "Future[T] behavior test suite": fut2.complete() # LINE POSITION 4 fut3.complete() # LINE POSITION 6 + {.push warning[Deprecated]: off.} # testing backwards compatibility interface let loc10 = fut1.location[0] let loc11 = fut1.location[1] let loc20 = fut2.location[0] let loc21 = fut2.location[1] let loc30 = fut3.location[0] let loc31 = fut3.location[1] + {.pop.} proc chk(loc: ptr SrcLoc, file: string, line: int, procedure: string): bool = From 60e6fc55bf93895f71816046284d55c1ec42a6ac Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 11 Aug 2023 00:31:47 +0300 Subject: [PATCH 061/146] Fix #431. (#441) --- chronos/asyncloop.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index a644b778c..c6d69fd75 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -1548,7 +1548,7 @@ proc isCounterLeaked*(name: string): bool {.noinit.} = ## number of `closed` requests. let tracker = TrackerCounter(opened: 0'u64, closed: 0'u64) let res = getThreadDispatcher().counters.getOrDefault(name, tracker) - res.opened == res.closed + res.opened != res.closed iterator trackerCounters*( loop: PDispatcher From 300fbaaf09cf8cc8d3798daa328d90d015906623 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 4 Sep 2023 21:49:45 +0300 Subject: [PATCH 062/146] HttpAddress errors should be not only critical. (#446) * Distinguish between resolve errors and check errors. * Fix issues and add test for getHttpAddress() procedure. * Address review comments. 
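The patch below splits address failures into critical errors (malformed scheme, port or hostname) and recoverable ones (DNS lookup problems). A minimal caller-side sketch of how that classification might be used; `getHttpAddress`, `HttpAddressResult`, `isRecoverableError` and `toString` come from the diff that follows, while the retry helper and the URL are purely illustrative:

import chronos/apps/http/httpclient

proc resolveWithRetry(url: string, attempts: int): HttpAddressResult =
  ## Retry resolution a few times, but give up immediately on critical
  ## errors such as a bad scheme, an invalid port or a missing hostname.
  var res = getHttpAddress(url)
  var left = attempts - 1
  while res.isErr() and res.error.isRecoverableError() and left > 0:
    res = getHttpAddress(url)
    dec(left)
  res

let address = resolveWithRetry("http://www.google.com", 3)
if address.isErr():
  echo "Address error: ", address.error.toString()
else:
  echo "Resolved ", address.get().hostname, " to ",
       len(address.get().addresses), " address(es)"

Keeping the classification in `HttpAddressErrorType` rather than raising lets callers choose their own retry policy without exception handling.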
--- chronos/apps/http/httpclient.nim | 85 ++++++++++++++++++++++++++++++++ chronos/apps/http/httpcommon.nim | 42 ++++++++++++++++ tests/testhttpclient.nim | 83 +++++++++++++++++++++++++++++++ 3 files changed, 210 insertions(+) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 63ffc37b2..b4b32025d 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -195,6 +195,8 @@ type name*: string data*: string + HttpAddressResult* = Result[HttpAddress, HttpAddressErrorType] + # HttpClientRequestRef valid states are: # Ready -> Open -> (Finished, Error) -> (Closing, Closed) # @@ -298,6 +300,89 @@ proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] {.raises: [] .} = res.incl(TLSFlags.NoVerifyServerName) res +proc getHttpAddress*( + url: Uri, + flags: HttpClientFlags = {} + ): HttpAddressResult {.raises: [].} = + let + scheme = + if len(url.scheme) == 0: + HttpClientScheme.NonSecure + else: + case toLowerAscii(url.scheme) + of "http": + HttpClientScheme.NonSecure + of "https": + HttpClientScheme.Secure + else: + return err(HttpAddressErrorType.InvalidUrlScheme) + port = + if len(url.port) == 0: + case scheme + of HttpClientScheme.NonSecure: + 80'u16 + of HttpClientScheme.Secure: + 443'u16 + else: + Base10.decode(uint16, url.port).valueOr: + return err(HttpAddressErrorType.InvalidPortNumber) + hostname = + block: + if len(url.hostname) == 0: + return err(HttpAddressErrorType.MissingHostname) + url.hostname + id = hostname & ":" & Base10.toString(port) + addresses = + if (HttpClientFlag.NoInet4Resolution in flags) and + (HttpClientFlag.NoInet6Resolution in flags): + # DNS resolution is disabled. + try: + @[initTAddress(hostname, Port(port))] + except TransportAddressError: + return err(HttpAddressErrorType.InvalidIpHostname) + else: + try: + if (HttpClientFlag.NoInet4Resolution notin flags) and + (HttpClientFlag.NoInet6Resolution notin flags): + # DNS resolution for both IPv4 and IPv6 addresses. + resolveTAddress(hostname, Port(port)) + else: + if HttpClientFlag.NoInet6Resolution in flags: + # DNS resolution only for IPv4 addresses. + resolveTAddress(hostname, Port(port), AddressFamily.IPv4) + else: + # DNS resolution only for IPv6 addresses + resolveTAddress(hostname, Port(port), AddressFamily.IPv6) + except TransportAddressError: + return err(HttpAddressErrorType.NameLookupFailed) + + if len(addresses) == 0: + return err(HttpAddressErrorType.NoAddressResolved) + + ok(HttpAddress(id: id, scheme: scheme, hostname: hostname, port: port, + path: url.path, query: url.query, anchor: url.anchor, + username: url.username, password: url.password, + addresses: addresses)) + +proc getHttpAddress*( + url: string, + flags: HttpClientFlags = {} + ): HttpAddressResult {.raises: [].} = + getHttpAddress(parseUri(url), flags) + +proc getHttpAddress*( + session: HttpSessionRef, + url: Uri + ): HttpAddressResult {.raises: [].} = + getHttpAddress(url, session.flags) + +proc getHttpAddress*( + session: HttpSessionRef, + url: string + ): HttpAddressResult {.raises: [].} = + ## Create new HTTP address using URL string ``url`` and . + getHttpAddress(parseUri(url), session.flags) + proc getAddress*(session: HttpSessionRef, url: Uri): HttpResult[HttpAddress] {. 
raises: [] .} = let scheme = diff --git a/chronos/apps/http/httpcommon.nim b/chronos/apps/http/httpcommon.nim index 5a4a628c0..c01c1c3cf 100644 --- a/chronos/apps/http/httpcommon.nim +++ b/chronos/apps/http/httpcommon.nim @@ -82,6 +82,48 @@ type HttpState* {.pure.} = enum Alive, Closing, Closed + HttpAddressErrorType* {.pure.} = enum + InvalidUrlScheme, + InvalidPortNumber, + MissingHostname, + InvalidIpHostname, + NameLookupFailed, + NoAddressResolved + +const + CriticalHttpAddressError* = { + HttpAddressErrorType.InvalidUrlScheme, + HttpAddressErrorType.InvalidPortNumber, + HttpAddressErrorType.MissingHostname, + HttpAddressErrorType.InvalidIpHostname + } + + RecoverableHttpAddressError* = { + HttpAddressErrorType.NameLookupFailed, + HttpAddressErrorType.NoAddressResolved + } + +func isCriticalError*(error: HttpAddressErrorType): bool = + error in CriticalHttpAddressError + +func isRecoverableError*(error: HttpAddressErrorType): bool = + error in RecoverableHttpAddressError + +func toString*(error: HttpAddressErrorType): string = + case error + of HttpAddressErrorType.InvalidUrlScheme: + "URL scheme not supported" + of HttpAddressErrorType.InvalidPortNumber: + "Invalid URL port number" + of HttpAddressErrorType.MissingHostname: + "Missing URL hostname" + of HttpAddressErrorType.InvalidIpHostname: + "Invalid IPv4/IPv6 address in hostname" + of HttpAddressErrorType.NameLookupFailed: + "Could not resolve remote address" + of HttpAddressErrorType.NoAddressResolved: + "No address has been resolved" + proc raiseHttpCriticalError*(msg: string, code = Http400) {.noinline, noreturn.} = raise (ref HttpCriticalError)(code: code, msg: msg) diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index 1eacc2155..4daaf87aa 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -1262,5 +1262,88 @@ suite "HTTP client testing suite": test "HTTP client server-sent events test": check waitFor(testServerSentEvents(false)) == true + test "HTTP getHttpAddress() test": + block: + # HTTP client supports only `http` and `https` schemes in URL. 
+ let res = getHttpAddress("ftp://ftp.scene.org") + check: + res.isErr() + res.error == HttpAddressErrorType.InvalidUrlScheme + res.error.isCriticalError() + block: + # HTTP URL default ports and custom ports test + let + res1 = getHttpAddress("http://www.google.com") + res2 = getHttpAddress("https://www.google.com") + res3 = getHttpAddress("http://www.google.com:35000") + res4 = getHttpAddress("https://www.google.com:25000") + check: + res1.isOk() + res2.isOk() + res3.isOk() + res4.isOk() + res1.get().port == 80 + res2.get().port == 443 + res3.get().port == 35000 + res4.get().port == 25000 + block: + # HTTP URL invalid port values test + let + res1 = getHttpAddress("http://www.google.com:-80") + res2 = getHttpAddress("http://www.google.com:0") + res3 = getHttpAddress("http://www.google.com:65536") + res4 = getHttpAddress("http://www.google.com:65537") + res5 = getHttpAddress("https://www.google.com:-443") + res6 = getHttpAddress("https://www.google.com:0") + res7 = getHttpAddress("https://www.google.com:65536") + res8 = getHttpAddress("https://www.google.com:65537") + check: + res1.isErr() and res1.error == HttpAddressErrorType.InvalidPortNumber + res1.error.isCriticalError() + res2.isOk() + res2.get().port == 0 + res3.isErr() and res3.error == HttpAddressErrorType.InvalidPortNumber + res3.error.isCriticalError() + res4.isErr() and res4.error == HttpAddressErrorType.InvalidPortNumber + res4.error.isCriticalError() + res5.isErr() and res5.error == HttpAddressErrorType.InvalidPortNumber + res5.error.isCriticalError() + res6.isOk() + res6.get().port == 0 + res7.isErr() and res7.error == HttpAddressErrorType.InvalidPortNumber + res7.error.isCriticalError() + res8.isErr() and res8.error == HttpAddressErrorType.InvalidPortNumber + res8.error.isCriticalError() + block: + # HTTP URL missing hostname + let + res1 = getHttpAddress("http://") + res2 = getHttpAddress("https://") + check: + res1.isErr() and res1.error == HttpAddressErrorType.MissingHostname + res1.error.isCriticalError() + res2.isErr() and res2.error == HttpAddressErrorType.MissingHostname + res2.error.isCriticalError() + block: + # No resolution flags and incorrect URL + let + flags = {HttpClientFlag.NoInet4Resolution, + HttpClientFlag.NoInet6Resolution} + res1 = getHttpAddress("http://256.256.256.256", flags) + res2 = getHttpAddress( + "http://[FFFFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]", flags) + check: + res1.isErr() and res1.error == HttpAddressErrorType.InvalidIpHostname + res1.error.isCriticalError() + res2.isErr() and res2.error == HttpAddressErrorType.InvalidIpHostname + res2.error.isCriticalError() + block: + # Resolution of non-existent hostname + let res = getHttpAddress("http://eYr6bdBo.com") + check: + res.isErr() and res.error == HttpAddressErrorType.NameLookupFailed + res.error.isRecoverableError() + not(res.error.isCriticalError()) + test "Leaks test": checkLeaks() From e706167a532cbb0ba4346e3dde7fd3f7f5c16f4f Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 5 Sep 2023 12:41:52 +0200 Subject: [PATCH 063/146] add connect cancellation test (#444) --- chronos/unittest2/asynctests.nim | 6 +- tests/testserver.nim | 105 ++++++++++++++++++------------- 2 files changed, 64 insertions(+), 47 deletions(-) diff --git a/chronos/unittest2/asynctests.nim b/chronos/unittest2/asynctests.nim index bc703b7e9..758e0a6b0 100644 --- a/chronos/unittest2/asynctests.nim +++ b/chronos/unittest2/asynctests.nim @@ -21,9 +21,9 @@ template asyncTest*(name: string, body: untyped): untyped = template checkLeaks*(name: string): untyped = let 
counter = getTrackerCounter(name) - if counter.opened != counter.closed: - echo "[" & name & "] opened = ", counter.opened, - ", closed = ", counter.closed + checkpoint: + "[" & name & "] opened = " & $counter.opened & + ", closed = " & $ counter.closed check counter.opened == counter.closed template checkLeaks*(): untyped = diff --git a/tests/testserver.nim b/tests/testserver.nim index e7e834e2d..a63c9df70 100644 --- a/tests/testserver.nim +++ b/tests/testserver.nim @@ -5,8 +5,8 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import unittest2 -import ../chronos + +import ../chronos/unittest2/asynctests {.used.} @@ -23,6 +23,9 @@ suite "Server's test suite": CustomData = ref object test: string + teardown: + checkLeaks() + proc serveStreamClient(server: StreamServer, transp: StreamTransport) {.async.} = discard @@ -54,37 +57,47 @@ suite "Server's test suite": transp.test = "CUSTOM" result = cast[StreamTransport](transp) - proc test1(): bool = + asyncTest "Stream Server start/stop test": var ta = initTAddress("127.0.0.1:31354") var server1 = createStreamServer(ta, serveStreamClient, {ReuseAddr}) server1.start() server1.stop() server1.close() - waitFor server1.join() + await server1.join() + var server2 = createStreamServer(ta, serveStreamClient, {ReuseAddr}) server2.start() server2.stop() server2.close() - waitFor server2.join() - result = true + await server2.join() - proc test5(): bool = - var ta = initTAddress("127.0.0.1:31354") + asyncTest "Stream Server stop without start test": + var ta = initTAddress("127.0.0.1:0") var server1 = createStreamServer(ta, serveStreamClient, {ReuseAddr}) + ta = server1.localAddress() server1.stop() server1.close() - waitFor server1.join() + + await server1.join() var server2 = createStreamServer(ta, serveStreamClient, {ReuseAddr}) server2.stop() server2.close() - waitFor server2.join() - result = true + await server2.join() + + asyncTest "Stream Server inherited object test": + var server = CustomServer() + server.test1 = "TEST" + var ta = initTAddress("127.0.0.1:0") + var pserver = createStreamServer(ta, serveCustomStreamClient, {ReuseAddr}, + child = server, + init = customServerTransport) + check: + pserver == server - proc client1(server: CustomServer, ta: TransportAddress) {.async.} = var transp = CustomTransport() transp.test = "CLIENT" server.start() - var ptransp = await connect(ta, child = transp) + var ptransp = await connect(server.localAddress(), child = transp) var etransp = cast[CustomTransport](ptransp) doAssert(etransp.test == "CLIENT") var msg = "TEST\r\n" @@ -96,44 +109,48 @@ suite "Server's test suite": server.close() await server.join() - proc client2(server: StreamServer, - ta: TransportAddress): Future[bool] {.async.} = + check: + server.test1 == "CONNECTION" + server.test2 == "CUSTOM" + + asyncTest "StreamServer[T] test": + var co = CustomData() + co.test = "CUSTOMDATA" + var ta = initTAddress("127.0.0.1:0") + var server = createStreamServer(ta, serveUdataStreamClient, {ReuseAddr}, + udata = co) + server.start() - var transp = await connect(ta) + var transp = await connect(server.localAddress()) var msg = "TEST\r\n" discard await transp.write(msg) var line = await transp.readLine() - result = (line == "TESTCUSTOMDATA") + check: + line == "TESTCUSTOMDATA" transp.close() server.stop() server.close() await server.join() - proc test3(): bool = - var server = CustomServer() - server.test1 = "TEST" - var ta = initTAddress("127.0.0.1:31354") - var pserver = createStreamServer(ta, 
serveCustomStreamClient, {ReuseAddr}, - child = cast[StreamServer](server), - init = customServerTransport) - doAssert(not isNil(pserver)) - waitFor client1(server, ta) - result = (server.test1 == "CONNECTION") and (server.test2 == "CUSTOM") - - proc test4(): bool = - var co = CustomData() - co.test = "CUSTOMDATA" - var ta = initTAddress("127.0.0.1:31354") - var server = createStreamServer(ta, serveUdataStreamClient, {ReuseAddr}, - udata = co) - result = waitFor client2(server, ta) - + asyncTest "Backlog and connect cancellation": + var ta = initTAddress("127.0.0.1:0") + var server1 = createStreamServer(ta, serveStreamClient, {ReuseAddr}, backlog = 1) + ta = server1.localAddress() + + var clients: seq[Future[StreamTransport]] + for i in 0..<10: + clients.add(connect(server1.localAddress)) + + # Check for leaks in cancellation / connect when server is not accepting + for c in clients: + if not c.finished: + await c.cancelAndWait() + else: + # The backlog connection "should" end up here + try: + await c.read().closeWait() + except CatchableError: + discard - test "Stream Server start/stop test": - check test1() == true - test "Stream Server stop without start test": - check test5() == true - test "Stream Server inherited object test": - check test3() == true - test "StreamServer[T] test": - check test4() == true + server1.close() + await server1.join() From db6410f835c51676f78002a9aa786630e16fbb08 Mon Sep 17 00:00:00 2001 From: cheatfate Date: Tue, 5 Sep 2023 13:48:09 +0300 Subject: [PATCH 064/146] Fix CI badge status. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c0cc2309f..3772c125f 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Chronos - An efficient library for asynchronous programming -[![Github action](https://github.com/status-im/nim-chronos/workflows/nim-chronos%20CI/badge.svg)](https://github.com/status-im/nim-chronos/actions/workflows/ci.yml) +[![Github action](https://github.com/status-im/nim-chronos/workflows/CI/badge.svg)](https://github.com/status-im/nim-chronos/actions/workflows/ci.yml) [![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) ![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg) From 00614476c68f0553432b4bb505e24d6ad5586ae4 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 7 Sep 2023 16:25:25 +0300 Subject: [PATCH 065/146] Address issue #443. (#447) * Address issue #443. * Address review comments. 
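The change below makes `redirect()` rewrite the `Host` header from the redirected address, so a request that gets bounced to a different hostname no longer presents the original host to the new server. A rough usage sketch, assuming chronos's session-level `fetch` helper and placeholder hostnames; the server issuing the `301 Location:` response is assumed and not shown:

import std/uri
import chronos
import chronos/apps/http/httpclient

proc follow() {.async.} =
  let session = HttpSessionRef.new(maxRedirections = 5)
  try:
    # If this URL answers with "301 Location: http://other.example.org/new",
    # the follow-up request now carries "Host: other.example.org" instead of
    # the original hostname.
    let response = await session.fetch(parseUri("http://origin.example.org/old"))
    echo "status = ", response.status
  finally:
    await session.closeWait()

waitFor follow()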
--- chronos/apps/http/httpclient.nim | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index b4b32025d..1815d2876 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -13,7 +13,7 @@ import ../../streams/[asyncstream, tlsstream, chunkstream, boundstream] import httptable, httpcommon, httpagent, httpbodyrw, multipart export results, asyncloop, asyncsync, asyncstream, tlsstream, chunkstream, boundstream, httptable, httpcommon, httpagent, httpbodyrw, multipart, - httputils + httputils, uri export SocketFlags const @@ -1420,8 +1420,13 @@ proc redirect*(request: HttpClientRequestRef, if redirectCount > request.session.maxRedirections: err("Maximum number of redirects exceeded") else: + let headers = + block: + var res = request.headers + res.set(HostHeader, ha.hostname) + res var res = HttpClientRequestRef.new(request.session, ha, request.meth, - request.version, request.flags, request.headers.toList(), request.buffer) + request.version, request.flags, headers.toList(), request.buffer) res.redirectCount = redirectCount ok(res) @@ -1438,8 +1443,14 @@ proc redirect*(request: HttpClientRequestRef, err("Maximum number of redirects exceeded") else: let address = ? request.session.redirect(request.address, uri) + # Update Host header to redirected URL hostname + let headers = + block: + var res = request.headers + res.set(HostHeader, address.hostname) + res var res = HttpClientRequestRef.new(request.session, address, request.meth, - request.version, request.flags, request.headers.toList(), request.buffer) + request.version, request.flags, headers.toList(), request.buffer) res.redirectCount = redirectCount ok(res) From 2e8551b0d973cfbebfab3be7f3329e11b9049007 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 15 Sep 2023 19:38:39 +0300 Subject: [PATCH 066/146] Cancellation fixes and tests. (#445) * Add callTick and stream cancellation tests. * Fix stepsAsync() test. * Cancellation changes. * Update and add more cancellation tests. * Fix Posix shutdown call to handle ENOTCONN error. * With new changes to to cancellation its now possible. * Refactor testsoon.nim to not produce artifacts after tests are finished. * Debugging MacOS issue. * Adjust flaky test times. * Fix issue. * Add test for issue #334 which was also addressed in this PR. Avoid `break` in problematic test. * Add noCancelWait() call which prohibits cancellation. Fix closeWait() calls to use noCancelWait() predicate. Adding sleep to flaky MacOS test. * Remove all debugging echoes. * Fix cancelAndWait() which now could perform multiple attempts to cancel target Future (mustCancel behavior). * Fix issues revealed by switch to different cancelAndWait(). * Address review comments. * Fix testutils compilation warning. * Rename callTick() to internalCallTick(). * Add some documentation comments. * Disable flaky ratelimit test. * Rename noCancelWait() to noCancel(). Address review comments. 
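Taken together, the new primitives give three levels of control: `tryCancel()` makes a single best-effort cancellation attempt, `cancelSoon()` and `cancelAndWait()` keep pressing until the target future is actually finished, and `noCancel()` shields an operation from cancellation altogether. A short sketch of the intended usage in a cleanup path, assuming a plain `StreamTransport`; the proc names are illustrative:

import chronos

proc serve(transp: StreamTransport) {.async.} =
  try:
    while true:
      let line = await transp.readLine()
      if line.len == 0:
        break
      # ... handle the request line ...
  finally:
    # Even if `serve` itself is being cancelled, the close must run to
    # completion, so it is shielded with `noCancel`, mirroring the
    # `closeWait()` changes in this patch.
    await noCancel(transp.closeWait())

proc stop(loopFut: Future[void]) {.async.} =
  # Unlike the old single-shot `cancel()`, `cancelAndWait()` retries the
  # cancellation request until `loopFut` reaches a finished state.
  await cancelAndWait(loopFut)

`tryCancel()` remains available for callers that only want to signal cancellation without waiting for the outcome.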
--- chronos/apps/http/httpbodyrw.nim | 6 +- chronos/apps/http/httpclient.nim | 31 +- chronos/apps/http/httpserver.nim | 18 +- chronos/apps/http/shttpserver.nim | 17 +- chronos/asyncfutures2.nim | 201 ++++++++---- chronos/asyncloop.nim | 171 +++++----- chronos/asyncmacro2.nim | 7 - chronos/asyncproc.nim | 2 +- chronos/asyncsync.nim | 12 +- chronos/futures.nim | 41 ++- chronos/ratelimit.nim | 4 +- chronos/streams/asyncstream.nim | 30 +- chronos/streams/tlsstream.nim | 18 +- chronos/transports/datagram.nim | 22 +- chronos/transports/stream.nim | 32 +- tests/testbugs.nim | 2 +- tests/testfut.nim | 496 +++++++++++++++++++++++++++++- tests/testhttpclient.nim | 113 +++++++ tests/testhttpserver.nim | 101 +++--- tests/testratelimit.nim | 19 +- tests/testsoon.nim | 140 +++++---- tests/teststream.nim | 95 +++++- tests/testsync.nim | 4 +- tests/testtime.nim | 24 +- tests/testutils.nim | 2 +- 25 files changed, 1241 insertions(+), 367 deletions(-) diff --git a/chronos/apps/http/httpbodyrw.nim b/chronos/apps/http/httpbodyrw.nim index b948fbd3e..bb28ea643 100644 --- a/chronos/apps/http/httpbodyrw.nim +++ b/chronos/apps/http/httpbodyrw.nim @@ -45,8 +45,8 @@ proc closeWait*(bstream: HttpBodyReader) {.async.} = # data from stream at position [1]. for index in countdown((len(bstream.streams) - 1), 0): res.add(bstream.streams[index].closeWait()) - await allFutures(res) - await procCall(closeWait(AsyncStreamReader(bstream))) + res.add(procCall(closeWait(AsyncStreamReader(bstream)))) + await noCancel(allFutures(res)) bstream.bstate = HttpState.Closed untrackCounter(HttpBodyReaderTrackerName) @@ -68,7 +68,7 @@ proc closeWait*(bstream: HttpBodyWriter) {.async.} = var res = newSeq[Future[void]]() for index in countdown(len(bstream.streams) - 1, 0): res.add(bstream.streams[index].closeWait()) - await allFutures(res) + await noCancel(allFutures(res)) await procCall(closeWait(AsyncStreamWriter(bstream))) bstream.bstate = HttpState.Closed untrackCounter(HttpBodyWriterTrackerName) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 1815d2876..01e2bab12 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -600,14 +600,12 @@ proc closeWait(conn: HttpClientConnectionRef) {.async.} = res.add(conn.reader.closeWait()) if not(isNil(conn.writer)) and not(conn.writer.closed()): res.add(conn.writer.closeWait()) + if conn.kind == HttpClientScheme.Secure: + res.add(conn.treader.closeWait()) + res.add(conn.twriter.closeWait()) + res.add(conn.transp.closeWait()) res - if len(pending) > 0: await allFutures(pending) - case conn.kind - of HttpClientScheme.Secure: - await allFutures(conn.treader.closeWait(), conn.twriter.closeWait()) - of HttpClientScheme.NonSecure: - discard - await conn.transp.closeWait() + if len(pending) > 0: await noCancel(allFutures(pending)) conn.state = HttpClientConnectionState.Closed untrackCounter(HttpClientConnectionTrackerName) @@ -631,8 +629,7 @@ proc connect(session: HttpSessionRef, let conn = block: let res = HttpClientConnectionRef.new(session, ha, transp) - case res.kind - of HttpClientScheme.Secure: + if res.kind == HttpClientScheme.Secure: try: await res.tls.handshake() res.state = HttpClientConnectionState.Ready @@ -647,7 +644,7 @@ proc connect(session: HttpSessionRef, await res.closeWait() res.state = HttpClientConnectionState.Error lastError = $exc.msg - of HttpClientScheme.Nonsecure: + else: res.state = HttpClientConnectionState.Ready res if conn.state == HttpClientConnectionState.Ready: @@ -785,7 +782,7 @@ proc 
closeWait*(session: HttpSessionRef) {.async.} = for connections in session.connections.values(): for conn in connections: pending.add(closeWait(conn)) - await allFutures(pending) + await noCancel(allFutures(pending)) proc sessionWatcher(session: HttpSessionRef) {.async.} = while true: @@ -830,26 +827,30 @@ proc sessionWatcher(session: HttpSessionRef) {.async.} = break proc closeWait*(request: HttpClientRequestRef) {.async.} = + var pending: seq[FutureBase] if request.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}: request.state = HttpReqRespState.Closing if not(isNil(request.writer)): if not(request.writer.closed()): - await request.writer.closeWait() + pending.add(FutureBase(request.writer.closeWait())) request.writer = nil - await request.releaseConnection() + pending.add(FutureBase(request.releaseConnection())) + await noCancel(allFutures(pending)) request.session = nil request.error = nil request.state = HttpReqRespState.Closed untrackCounter(HttpClientRequestTrackerName) proc closeWait*(response: HttpClientResponseRef) {.async.} = + var pending: seq[FutureBase] if response.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}: response.state = HttpReqRespState.Closing if not(isNil(response.reader)): if not(response.reader.closed()): - await response.reader.closeWait() + pending.add(FutureBase(response.reader.closeWait())) response.reader = nil - await response.releaseConnection() + pending.add(FutureBase(response.releaseConnection())) + await noCancel(allFutures(pending)) response.session = nil response.error = nil response.state = HttpReqRespState.Closed diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index eafa27c66..f0788e2ea 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -809,10 +809,7 @@ proc closeUnsecureConnection(conn: HttpConnectionRef) {.async.} = pending.add(conn.mainReader.closeWait()) pending.add(conn.mainWriter.closeWait()) pending.add(conn.transp.closeWait()) - try: - await allFutures(pending) - except CancelledError: - await allFutures(pending) + await noCancel(allFutures(pending)) untrackCounter(HttpServerUnsecureConnectionTrackerName) reset(conn[]) conn.state = HttpState.Closed @@ -829,7 +826,7 @@ proc new(ht: typedesc[HttpConnectionRef], server: HttpServerRef, res proc gracefulCloseWait*(conn: HttpConnectionRef) {.async.} = - await conn.transp.shutdownWait() + await noCancel(conn.transp.shutdownWait()) await conn.closeCb(conn) proc closeWait*(conn: HttpConnectionRef): Future[void] = @@ -841,11 +838,7 @@ proc closeWait*(req: HttpRequestRef) {.async.} = req.state = HttpState.Closing let resp = req.response.get() if (HttpResponseFlags.Stream in resp.flags) and not(isNil(resp.writer)): - var writer = resp.writer.closeWait() - try: - await writer - except CancelledError: - await writer + await closeWait(resp.writer) reset(resp[]) untrackCounter(HttpServerRequestTrackerName) reset(req[]) @@ -1038,7 +1031,6 @@ proc processLoop(holder: HttpConnectionHolderRef) {.async.} = except CatchableError as exc: raiseAssert "Unexpected error [" & $exc.name & "] happens: " & $exc.msg - server.connections.del(connectionId) case runLoop of HttpProcessExitType.KeepAlive: await connection.closeWait() @@ -1047,6 +1039,8 @@ proc processLoop(holder: HttpConnectionHolderRef) {.async.} = of HttpProcessExitType.Graceful: await connection.gracefulCloseWait() + server.connections.del(connectionId) + proc acceptClientLoop(server: HttpServerRef) {.async.} = var runLoop = true while runLoop: @@ 
-1102,7 +1096,7 @@ proc drop*(server: HttpServerRef) {.async.} = for holder in server.connections.values(): if not(isNil(holder.future)) and not(holder.future.finished()): pending.add(holder.future.cancelAndWait()) - await allFutures(pending) + await noCancel(allFutures(pending)) server.connections.clear() proc closeWait*(server: HttpServerRef) {.async.} = diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim index bc5c3fbeb..6d321a02d 100644 --- a/chronos/apps/http/shttpserver.nim +++ b/chronos/apps/http/shttpserver.nim @@ -30,19 +30,10 @@ proc closeSecConnection(conn: HttpConnectionRef) {.async.} = var pending: seq[Future[void]] pending.add(conn.writer.closeWait()) pending.add(conn.reader.closeWait()) - try: - await allFutures(pending) - except CancelledError: - await allFutures(pending) - # After we going to close everything else. - pending.setLen(3) - pending[0] = conn.mainReader.closeWait() - pending[1] = conn.mainWriter.closeWait() - pending[2] = conn.transp.closeWait() - try: - await allFutures(pending) - except CancelledError: - await allFutures(pending) + pending.add(conn.mainReader.closeWait()) + pending.add(conn.mainWriter.closeWait()) + pending.add(conn.transp.closeWait()) + await noCancel(allFutures(pending)) reset(cast[SecureHttpConnectionRef](conn)[]) untrackCounter(HttpServerSecureConnectionTrackerName) conn.state = HttpState.Closed diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index d3954bab1..ee6e8e0d1 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -12,6 +12,7 @@ import std/sequtils import stew/base10 when chronosStackTrace: + import std/strutils when defined(nimHasStacktracesModule): import system/stacktraces else: @@ -26,7 +27,8 @@ template LocFinishIndex*: auto {.deprecated: "LocationKind.Finish".} = template LocCompleteIndex*: untyped {.deprecated: "LocationKind.Finish".} = LocationKind.Finish -func `[]`*(loc: array[LocationKind, ptr SrcLoc], v: int): ptr SrcLoc {.deprecated: "use LocationKind".} = +func `[]`*(loc: array[LocationKind, ptr SrcLoc], v: int): ptr SrcLoc {. + deprecated: "use LocationKind".} = case v of 0: loc[LocationKind.Create] of 1: loc[LocationKind.Finish] @@ -43,29 +45,37 @@ type # Backwards compatibility for old FutureState name template Finished* {.deprecated: "Use Completed instead".} = Completed -template Finished*(T: type FutureState): FutureState {.deprecated: "Use FutureState.Completed instead".} = FutureState.Completed +template Finished*(T: type FutureState): FutureState {. 
+ deprecated: "Use FutureState.Completed instead".} = + FutureState.Completed proc newFutureImpl[T](loc: ptr SrcLoc): Future[T] = let fut = Future[T]() - internalInitFutureBase(fut, loc, FutureState.Pending) + internalInitFutureBase(fut, loc, FutureState.Pending, {}) + fut + +proc newFutureImpl[T](loc: ptr SrcLoc, flags: FutureFlags): Future[T] = + let fut = Future[T]() + internalInitFutureBase(fut, loc, FutureState.Pending, flags) fut proc newFutureSeqImpl[A, B](loc: ptr SrcLoc): FutureSeq[A, B] = let fut = FutureSeq[A, B]() - internalInitFutureBase(fut, loc, FutureState.Pending) + internalInitFutureBase(fut, loc, FutureState.Pending, {}) fut proc newFutureStrImpl[T](loc: ptr SrcLoc): FutureStr[T] = let fut = FutureStr[T]() - internalInitFutureBase(fut, loc, FutureState.Pending) + internalInitFutureBase(fut, loc, FutureState.Pending, {}) fut -template newFuture*[T](fromProc: static[string] = ""): Future[T] = +template newFuture*[T](fromProc: static[string] = "", + flags: static[FutureFlags] = {}): Future[T] = ## Creates a new future. ## ## Specifying ``fromProc``, which is a string specifying the name of the proc ## that this future belongs to, is a good habit as it helps with debugging. - newFutureImpl[T](getSrcLocation(fromProc)) + newFutureImpl[T](getSrcLocation(fromProc), flags) template newFutureSeq*[A, B](fromProc: static[string] = ""): FutureSeq[A, B] = ## Create a new future which can hold/preserve GC sequence until future will @@ -132,8 +142,6 @@ proc finish(fut: FutureBase, state: FutureState) = # 1. `finish()` is a private procedure and `state` is under our control. # 2. `fut.state` is checked by `checkFinished()`. fut.internalState = state - when chronosStrictFutureAccess: - doAssert fut.internalCancelcb == nil or state != FutureState.Cancelled fut.internalCancelcb = nil # release cancellation callback memory for item in fut.internalCallbacks.mitems(): if not(isNil(item.function)): @@ -194,21 +202,23 @@ proc cancelAndSchedule(future: FutureBase, loc: ptr SrcLoc) = template cancelAndSchedule*(future: FutureBase) = cancelAndSchedule(future, getSrcLocation()) -proc cancel(future: FutureBase, loc: ptr SrcLoc): bool = - ## Request that Future ``future`` cancel itself. +proc tryCancel(future: FutureBase, loc: ptr SrcLoc): bool = + ## Perform an attempt to cancel ``future``. ## - ## This arranges for a `CancelledError` to be thrown into procedure which - ## waits for ``future`` on the next cycle through the event loop. - ## The procedure then has a chance to clean up or even deny the request - ## using `try/except/finally`. + ## NOTE: This procedure does not guarantee that cancellation will actually + ## happened. ## - ## This call do not guarantee that the ``future`` will be cancelled: the - ## exception might be caught and acted upon, delaying cancellation of the - ## ``future`` or preventing cancellation completely. The ``future`` may also - ## return value or raise different exception. + ## Cancellation is the process which starts from the last ``future`` + ## descendent and moves step by step to the parent ``future``. To initiate + ## this process procedure iterates through all non-finished ``future`` + ## descendents and tries to find the last one. If last descendent is still + ## pending it will become cancelled and process will be initiated. In such + ## case this procedure returns ``true``. ## - ## Immediately after this procedure is called, ``future.cancelled()`` will - ## not return ``true`` (unless the Future was already cancelled). 
+ ## If last descendent future is not pending, this procedure will be unable to + ## initiate cancellation process and so it returns ``false``. + if future.cancelled(): + return true if future.finished(): return false @@ -217,23 +227,18 @@ proc cancel(future: FutureBase, loc: ptr SrcLoc): bool = # mechanism and/or use a regular `addCallback` when chronosStrictFutureAccess: doAssert future.internalCancelcb.isNil, - "futures returned from `{.async.}` functions must not use `cancelCallback`" - - if cancel(future.internalChild, getSrcLocation()): - return true - + "futures returned from `{.async.}` functions must not use " & + "`cancelCallback`" + tryCancel(future.internalChild, loc) else: if not(isNil(future.internalCancelcb)): future.internalCancelcb(cast[pointer](future)) - future.internalCancelcb = nil - cancelAndSchedule(future, getSrcLocation()) - - future.internalMustCancel = true - return true + if FutureFlag.OwnCancelSchedule notin future.internalFlags: + cancelAndSchedule(future, loc) + future.cancelled() -template cancel*(future: FutureBase) = - ## Cancel ``future``. - discard cancel(future, getSrcLocation()) +template tryCancel*(future: FutureBase): bool = + tryCancel(future, getSrcLocation()) proc clearCallbacks(future: FutureBase) = future.internalCallbacks = default(seq[AsyncCallback]) @@ -778,27 +783,117 @@ proc oneValue*[T](futs: varargs[Future[T]]): Future[T] {. return retFuture -proc cancelAndWait*(fut: FutureBase): Future[void] = - ## Initiate cancellation process for Future ``fut`` and wait until ``fut`` is - ## done e.g. changes its state (become completed, failed or cancelled). +proc cancelSoon(future: FutureBase, aftercb: CallbackFunc, udata: pointer, + loc: ptr SrcLoc) = + ## Perform cancellation ``future`` and call ``aftercb`` callback when + ## ``future`` become finished (completed with value, failed or cancelled). ## - ## If ``fut`` is already finished (completed, failed or cancelled) result - ## Future[void] object will be returned complete. - var retFuture = newFuture[void]("chronos.cancelAndWait(T)") - proc continuation(udata: pointer) = - if not(retFuture.finished()): - retFuture.complete() - proc cancellation(udata: pointer) = - if not(fut.finished()): - fut.removeCallback(continuation) - if fut.finished(): + ## NOTE: Compared to the `tryCancel()` call, this procedure call guarantees + ## that ``future``will be finished (completed with value, failed or cancelled) + ## as quickly as possible. + proc checktick(udata: pointer) {.gcsafe.} = + # We trying to cancel Future on more time, and if `cancel()` succeeds we + # return early. + if tryCancel(future, loc): + return + # Cancellation signal was not delivered, so we trying to deliver it one + # more time after one tick. But we need to check situation when child + # future was finished but our completion callback is not yet invoked. + if not(future.finished()): + internalCallTick(checktick) + + proc continuation(udata: pointer) {.gcsafe.} = + # We do not use `callSoon` here because we was just scheduled from `poll()`. + if not(isNil(aftercb)): + aftercb(udata) + + if future.finished(): + # We could not schedule callback directly otherwise we could fall into + # recursion problem. + if not(isNil(aftercb)): + let loop = getThreadDispatcher() + loop.callbacks.addLast(AsyncCallback(function: aftercb, udata: udata)) + return + + future.addCallback(continuation) + # Initiate cancellation process. 
+ if not(tryCancel(future, loc)): + # Cancellation signal was not delivered, so we trying to deliver it one + # more time after async tick. But we need to check case, when future was + # finished but our completion callback is not yet invoked. + if not(future.finished()): + internalCallTick(checktick) + +template cancelSoon*(fut: FutureBase, cb: CallbackFunc, udata: pointer) = + cancelSoon(fut, cb, udata, getSrcLocation()) + +template cancelSoon*(fut: FutureBase, cb: CallbackFunc) = + cancelSoon(fut, cb, nil, getSrcLocation()) + +template cancelSoon*(fut: FutureBase, acb: AsyncCallback) = + cancelSoon(fut, acb.function, acb.udata, getSrcLocation()) + +template cancelSoon*(fut: FutureBase) = + cancelSoon(fut, nil, nil, getSrcLocation()) + +template cancel*(future: FutureBase) {. + deprecated: "Please use cancelSoon() or cancelAndWait() instead".} = + ## Cancel ``future``. + cancelSoon(future, nil, nil, getSrcLocation()) + +proc cancelAndWait*(future: FutureBase, loc: ptr SrcLoc): Future[void] = + ## Perform cancellation ``future`` return Future which will be completed when + ## ``future`` become finished (completed with value, failed or cancelled). + ## + ## NOTE: Compared to the `tryCancel()` call, this procedure call guarantees + ## that ``future``will be finished (completed with value, failed or cancelled) + ## as quickly as possible. + let retFuture = newFuture[void]("chronos.cancelAndWait(FutureBase)", + {FutureFlag.OwnCancelSchedule}) + + proc continuation(udata: pointer) {.gcsafe.} = + retFuture.complete() + + if future.finished(): retFuture.complete() else: - fut.addCallback(continuation) - retFuture.cancelCallback = cancellation - # Initiate cancellation process. - fut.cancel() - return retFuture + cancelSoon(future, continuation, cast[pointer](retFuture), loc) + + retFuture + +template cancelAndWait*(future: FutureBase): Future[void] = + ## Cancel ``future``. + cancelAndWait(future, getSrcLocation()) + +proc noCancel*[T](future: Future[T]): Future[T] = + ## Prevent cancellation requests from propagating to ``future`` while + ## forwarding its value or error when it finishes. + ## + ## This procedure should be used when you need to perform operations which + ## should not be cancelled at all cost, for example closing sockets, pipes, + ## connections or servers. Usually it become useful in exception or finally + ## blocks. 
+ let retFuture = newFuture[T]("chronos.noCancel(T)", + {FutureFlag.OwnCancelSchedule}) + template completeFuture() = + if future.completed(): + when T is void: + retFuture.complete() + else: + retFuture.complete(future.value) + elif future.failed(): + retFuture.fail(future.error) + else: + raiseAssert("Unexpected future state [" & $future.state & "]") + + proc continuation(udata: pointer) {.gcsafe.} = + completeFuture() + + if future.finished(): + completeFuture() + else: + future.addCallback(continuation) + retFuture proc allFutures*(futs: varargs[FutureBase]): Future[void] = ## Returns a future which will complete only when all futures in ``futs`` @@ -836,7 +931,7 @@ proc allFutures*(futs: varargs[FutureBase]): Future[void] = if len(nfuts) == 0 or len(nfuts) == finishedFutures: retFuture.complete() - return retFuture + retFuture proc allFutures*[T](futs: varargs[Future[T]]): Future[void] = ## Returns a future which will complete only when all futures in ``futs`` diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index c6d69fd75..fecec39e8 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -11,7 +11,7 @@ {.push raises: [].} from nativesockets import Port -import std/[tables, strutils, heapqueue, deques] +import std/[tables, heapqueue, deques] import stew/results import "."/[config, futures, osdefs, oserrno, osutils, timer] @@ -179,10 +179,11 @@ type timers*: HeapQueue[TimerCallback] callbacks*: Deque[AsyncCallback] idlers*: Deque[AsyncCallback] + ticks*: Deque[AsyncCallback] trackers*: Table[string, TrackerBase] counters*: Table[string, TrackerCounter] -proc sentinelCallbackImpl(arg: pointer) {.gcsafe.} = +proc sentinelCallbackImpl(arg: pointer) {.gcsafe, noreturn.} = raiseAssert "Sentinel callback MUST not be scheduled" const @@ -254,6 +255,10 @@ template processIdlers(loop: untyped) = if len(loop.idlers) > 0: loop.callbacks.addLast(loop.idlers.popFirst()) +template processTicks(loop: untyped) = + while len(loop.ticks) > 0: + loop.callbacks.addLast(loop.ticks.popFirst()) + template processCallbacks(loop: untyped) = while true: let callable = loop.callbacks.popFirst() # len must be > 0 due to sentinel @@ -417,6 +422,7 @@ when defined(windows): timers: initHeapQueue[TimerCallback](), callbacks: initDeque[AsyncCallback](64), idlers: initDeque[AsyncCallback](), + ticks: initDeque[AsyncCallback](), trackers: initTable[string, TrackerBase](), counters: initTable[string, TrackerCounter]() ) @@ -746,6 +752,9 @@ when defined(windows): if networkEventsCount == 0: loop.processIdlers() + # We move tick callbacks to `loop.callbacks` always. + processTicks(loop) + # All callbacks which will be added during `processCallbacks` will be # scheduled after the sentinel and are processed on next `poll()` call. loop.callbacks.addLast(SentinelCallback) @@ -1138,6 +1147,9 @@ elif defined(macosx) or defined(freebsd) or defined(netbsd) or if count == 0: loop.processIdlers() + # We move tick callbacks to `loop.callbacks` always. + processTicks(loop) + # All callbacks which will be added during `processCallbacks` will be # scheduled after the sentinel and are processed on next `poll()` call. loop.callbacks.addLast(SentinelCallback) @@ -1255,6 +1267,20 @@ proc callIdle*(cbproc: CallbackFunc, data: pointer) = proc callIdle*(cbproc: CallbackFunc) = callIdle(cbproc, nil) +proc internalCallTick*(acb: AsyncCallback) = + ## Schedule ``cbproc`` to be called after all scheduled callbacks, but only + ## when OS system queue finished processing events. 
+ getThreadDispatcher().ticks.addLast(acb) + +proc internalCallTick*(cbproc: CallbackFunc, data: pointer) = + ## Schedule ``cbproc`` to be called after all scheduled callbacks when + ## OS system queue processing is done. + doAssert(not isNil(cbproc)) + internalCallTick(AsyncCallback(function: cbproc, udata: data)) + +proc internalCallTick*(cbproc: CallbackFunc) = + internalCallTick(AsyncCallback(function: cbproc, udata: nil)) + include asyncfutures2 when (chronosEventEngine in ["epoll", "kqueue"]) or defined(windows): @@ -1322,30 +1348,24 @@ proc stepsAsync*(number: int): Future[void] = ## ## This primitive can be useful when you need to create more deterministic ## tests and cases. - ## - ## WARNING! Do not use this primitive to perform switch between tasks, because - ## this can lead to 100% CPU load in the moments when there are no I/O - ## events. Usually when there no I/O events CPU consumption should be near 0%. - var retFuture = newFuture[void]("chronos.stepsAsync(int)") - var counter = 0 + doAssert(number > 0, "Number should be positive integer") + var + retFuture = newFuture[void]("chronos.stepsAsync(int)") + counter = 0 + continuation: proc(data: pointer) {.gcsafe, raises: [].} - var continuation: proc(data: pointer) {.gcsafe, raises: [].} continuation = proc(data: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): inc(counter) if counter < number: - callSoon(continuation, nil) + internalCallTick(continuation) else: retFuture.complete() - proc cancellation(udata: pointer) = - discard - if number <= 0: retFuture.complete() else: - retFuture.cancelCallback = cancellation - callSoon(continuation, nil) + internalCallTick(continuation) retFuture @@ -1374,37 +1394,46 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] = ## If ``fut`` completes first the returned future will hold true, ## otherwise, if ``timeout`` milliseconds has elapsed first, the returned ## future will hold false. - var retFuture = newFuture[bool]("chronos.`withTimeout`") - var moment: Moment - var timer: TimerCallback - var cancelling = false + var + retFuture = newFuture[bool]("chronos.withTimeout", + {FutureFlag.OwnCancelSchedule}) + moment: Moment + timer: TimerCallback + timeouted = false + + template completeFuture(fut: untyped): untyped = + if fut.failed() or fut.completed(): + retFuture.complete(true) + else: + retFuture.cancelAndSchedule() # TODO: raises annotation shouldn't be needed, but likely similar issue as # https://github.com/nim-lang/Nim/issues/17369 proc continuation(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): - if not(cancelling): - if not(fut.finished()): - # Timer exceeded first, we going to cancel `fut` and wait until it - # not completes. - cancelling = true - fut.cancel() - else: - # Future `fut` completed/failed/cancelled first. - if not(isNil(timer)): - clearTimer(timer) - retFuture.complete(true) - else: + if timeouted: retFuture.complete(false) + return + if not(fut.finished()): + # Timer exceeded first, we going to cancel `fut` and wait until it + # not completes. + timeouted = true + fut.cancelSoon() + else: + # Future `fut` completed/failed/cancelled first. 
+ if not(isNil(timer)): + clearTimer(timer) + fut.completeFuture() # TODO: raises annotation shouldn't be needed, but likely similar issue as # https://github.com/nim-lang/Nim/issues/17369 proc cancellation(udata: pointer) {.gcsafe, raises: [].} = - if not isNil(timer): - clearTimer(timer) if not(fut.finished()): - fut.removeCallback(continuation) - fut.cancel() + if not isNil(timer): + clearTimer(timer) + fut.cancelSoon() + else: + fut.completeFuture() if fut.finished(): retFuture.complete(true) @@ -1420,11 +1449,11 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] = timer = setTimer(moment, continuation, nil) fut.addCallback(continuation) - return retFuture + retFuture proc withTimeout*[T](fut: Future[T], timeout: int): Future[bool] {. inline, deprecated: "Use withTimeout(Future[T], Duration)".} = - result = withTimeout(fut, timeout.milliseconds()) + withTimeout(fut, timeout.milliseconds()) proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] = ## Returns a future which will complete once future ``fut`` completes @@ -1435,49 +1464,49 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] = ## ## TODO: In case when ``fut`` got cancelled, what result Future[T] ## should return, because it can't be cancelled too. - var retFuture = newFuture[T]("chronos.wait()") - var moment: Moment - var timer: TimerCallback - var cancelling = false + var + retFuture = newFuture[T]("chronos.wait()", {FutureFlag.OwnCancelSchedule}) + moment: Moment + timer: TimerCallback + timeouted = false + + template completeFuture(fut: untyped): untyped = + if fut.failed(): + retFuture.fail(fut.error) + elif fut.cancelled(): + retFuture.cancelAndSchedule() + else: + when T is void: + retFuture.complete() + else: + retFuture.complete(fut.value) proc continuation(udata: pointer) {.raises: [].} = if not(retFuture.finished()): - if not(cancelling): - if not(fut.finished()): - # Timer exceeded first. - cancelling = true - fut.cancel() - else: - # Future `fut` completed/failed/cancelled first. - if not isNil(timer): - clearTimer(timer) - - if fut.failed(): - retFuture.fail(fut.error) - else: - when T is void: - retFuture.complete() - else: - retFuture.complete(fut.value) - else: + if timeouted: retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) + return + if not(fut.finished()): + # Timer exceeded first. + timeouted = true + fut.cancelSoon() + else: + # Future `fut` completed/failed/cancelled first. + if not(isNil(timer)): + clearTimer(timer) + fut.completeFuture() var cancellation: proc(udata: pointer) {.gcsafe, raises: [].} cancellation = proc(udata: pointer) {.gcsafe, raises: [].} = - if not isNil(timer): - clearTimer(timer) if not(fut.finished()): - fut.removeCallback(continuation) - fut.cancel() + if not(isNil(timer)): + clearTimer(timer) + fut.cancelSoon() + else: + fut.completeFuture() if fut.finished(): - if fut.failed(): - retFuture.fail(fut.error) - else: - when T is void: - retFuture.complete() - else: - retFuture.complete(fut.value) + fut.completeFuture() else: if timeout.isZero(): retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) @@ -1490,7 +1519,7 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] = timer = setTimer(moment, continuation, nil) fut.addCallback(continuation) - return retFuture + retFuture proc wait*[T](fut: Future[T], timeout = -1): Future[T] {. 
inline, deprecated: "Use wait(Future[T], Duration)".} = diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim index 8e7407309..a86147c6e 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/asyncmacro2.nim @@ -301,11 +301,6 @@ template await*[T](f: Future[T]): untyped = # transformation - `yield` gives control back to `futureContinue` which is # responsible for resuming execution once the yielded future is finished yield chronosInternalRetFuture.internalChild - - # `child` is guaranteed to have been `finished` after the yield - if chronosInternalRetFuture.internalMustCancel: - raise newCancelledError() - # `child` released by `futureContinue` chronosInternalRetFuture.internalChild.internalCheckComplete() when T isnot void: @@ -317,8 +312,6 @@ template awaitne*[T](f: Future[T]): Future[T] = when declared(chronosInternalRetFuture): chronosInternalRetFuture.internalChild = f yield chronosInternalRetFuture.internalChild - if chronosInternalRetFuture.internalMustCancel: - raise newCancelledError() cast[type(f)](chronosInternalRetFuture.internalChild) else: unsupported "awaitne is only available within {.async.}" diff --git a/chronos/asyncproc.nim b/chronos/asyncproc.nim index 8df8e33e5..3e2df88c6 100644 --- a/chronos/asyncproc.nim +++ b/chronos/asyncproc.nim @@ -1241,7 +1241,7 @@ proc closeWait*(p: AsyncProcessRef) {.async.} = # Here we ignore all possible errrors, because we do not want to raise # exceptions. discard closeProcessHandles(p.pipes, p.options, OSErrorCode(0)) - await p.pipes.closeProcessStreams(p.options) + await noCancel(p.pipes.closeProcessStreams(p.options)) discard p.closeThreadAndProcessHandle() untrackCounter(AsyncProcessTrackerName) diff --git a/chronos/asyncsync.nim b/chronos/asyncsync.nim index 530984682..0feb51e17 100644 --- a/chronos/asyncsync.nim +++ b/chronos/asyncsync.nim @@ -736,13 +736,19 @@ proc close*(ab: AsyncEventQueue) {.raises: [].} = ab.queue.clear() proc closeWait*(ab: AsyncEventQueue): Future[void] {.raises: [].} = - var retFuture = newFuture[void]("AsyncEventQueue.closeWait()") + let retFuture = newFuture[void]("AsyncEventQueue.closeWait()", + {FutureFlag.OwnCancelSchedule}) proc continuation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): - retFuture.complete() + retFuture.complete() + proc cancellation(udata: pointer) {.gcsafe.} = + # We are not going to change the state of `retFuture` to cancelled, so we + # will prevent the entire sequence of Futures from being cancelled. + discard + ab.close() # Schedule `continuation` to be called only after all the `reader` # notifications will be scheduled and processed. 
+ retFuture.cancelCallback = cancellation callSoon(continuation) retFuture diff --git a/chronos/futures.nim b/chronos/futures.nim index 9b2667b62..5f96867e5 100644 --- a/chronos/futures.nim +++ b/chronos/futures.nim @@ -37,6 +37,11 @@ type FutureState* {.pure.} = enum Pending, Completed, Cancelled, Failed + FutureFlag* {.pure.} = enum + OwnCancelSchedule + + FutureFlags* = set[FutureFlag] + InternalFutureBase* = object of RootObj # Internal untyped future representation - the fields are not part of the # public API and neither is `InternalFutureBase`, ie the inheritance @@ -47,8 +52,8 @@ type internalCancelcb*: CallbackFunc internalChild*: FutureBase internalState*: FutureState + internalFlags*: FutureFlags internalError*: ref CatchableError ## Stored exception - internalMustCancel*: bool internalClosure*: iterator(f: FutureBase): FutureBase {.closureIter.} when chronosFutureId: @@ -94,12 +99,11 @@ when chronosFutureTracking: var futureList* {.threadvar.}: FutureList # Internal utilities - these are not part of the stable API -proc internalInitFutureBase*( - fut: FutureBase, - loc: ptr SrcLoc, - state: FutureState) = +proc internalInitFutureBase*(fut: FutureBase, loc: ptr SrcLoc, + state: FutureState, flags: FutureFlags) = fut.internalState = state fut.internalLocation[LocationKind.Create] = loc + fut.internalFlags = flags if state != FutureState.Pending: fut.internalLocation[LocationKind.Finish] = loc @@ -128,21 +132,34 @@ template init*[T](F: type Future[T], fromProc: static[string] = ""): Future[T] = ## Specifying ``fromProc``, which is a string specifying the name of the proc ## that this future belongs to, is a good habit as it helps with debugging. let res = Future[T]() - internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending) + internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending, {}) + res + +template init*[T](F: type Future[T], fromProc: static[string] = "", + flags: static[FutureFlags]): Future[T] = + ## Creates a new pending future. + ## + ## Specifying ``fromProc``, which is a string specifying the name of the proc + ## that this future belongs to, is a good habit as it helps with debugging. 
+ let res = Future[T]() + internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending, + flags) res template completed*( F: type Future, fromProc: static[string] = ""): Future[void] = ## Create a new completed future - let res = Future[T]() - internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Completed) + let res = Future[void]() + internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Completed, + {}) res template completed*[T: not void]( F: type Future, valueParam: T, fromProc: static[string] = ""): Future[T] = ## Create a new completed future let res = Future[T](internalValue: valueParam) - internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Completed) + internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Completed, + {}) res template failed*[T]( @@ -150,19 +167,21 @@ template failed*[T]( fromProc: static[string] = ""): Future[T] = ## Create a new failed future let res = Future[T](internalError: errorParam) - internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Failed) + internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Failed, {}) when chronosStackTrace: res.internalErrorStackTrace = if getStackTrace(res.error) == "": getStackTrace() else: getStackTrace(res.error) - res func state*(future: FutureBase): FutureState = future.internalState +func flags*(future: FutureBase): FutureFlags = + future.internalFlags + func finished*(future: FutureBase): bool {.inline.} = ## Determines whether ``future`` has finished, i.e. ``future`` state changed ## from state ``Pending`` to one of the states (``Finished``, ``Cancelled``, diff --git a/chronos/ratelimit.nim b/chronos/ratelimit.nim index 4147db788..ad66c067e 100644 --- a/chronos/ratelimit.nim +++ b/chronos/ratelimit.nim @@ -88,8 +88,8 @@ proc worker(bucket: TokenBucket) {.async.} = #buckets sleeper = sleepAsync(milliseconds(timeToTarget)) await sleeper or eventWaiter - sleeper.cancel() - eventWaiter.cancel() + sleeper.cancelSoon() + eventWaiter.cancelSoon() else: await eventWaiter diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 191b36aea..4698e8358 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -913,7 +913,7 @@ proc close*(rw: AsyncStreamRW) = callSoon(continuation) else: rw.future.addCallback(continuation) - rw.future.cancel() + rw.future.cancelSoon() elif rw is AsyncStreamWriter: if isNil(rw.wsource) or isNil(rw.writerLoop) or isNil(rw.future): callSoon(continuation) @@ -922,12 +922,36 @@ proc close*(rw: AsyncStreamRW) = callSoon(continuation) else: rw.future.addCallback(continuation) - rw.future.cancel() + rw.future.cancelSoon() proc closeWait*(rw: AsyncStreamRW): Future[void] = ## Close and frees resources of stream ``rw``. + const FutureName = + when rw is AsyncStreamReader: + "async.stream.reader.closeWait" + else: + "async.stream.writer.closeWait" + + if rw.closed(): + return Future.completed(FutureName) + + let retFuture = newFuture[void](FutureName, {FutureFlag.OwnCancelSchedule}) + + proc continuation(udata: pointer) {.gcsafe, raises:[].} = + retFuture.complete() + + proc cancellation(udata: pointer) {.gcsafe, raises:[].} = + # We are not going to change the state of `retFuture` to cancelled, so we + # will prevent the entire sequence of Futures from being cancelled. 
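    # (Editor's illustration, not part of the patch.) `OwnCancelSchedule`
    # keeps the cancellation machinery from automatically moving `retFuture`
    # into the `Cancelled` state; only this `cancelCallback` runs. With the
    # no-op callback below, `cancelAndWait(retFuture)` simply keeps waiting
    # until the close operation finishes by itself. A minimal sketch of the
    # same pattern (names here are illustrative only):
    #
    #   let retFuture = newFuture[void]("example.closeWait",
    #                                   {FutureFlag.OwnCancelSchedule})
    #   proc cancellation(udata: pointer) {.gcsafe, raises: [].} = discard
    #   retFuture.cancelCallback = cancellation
    #   # cancelAndWait(retFuture) now only returns once `retFuture` has been
    #   # completed manually.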
+ discard + rw.close() - rw.join() + if rw.future.finished(): + retFuture.complete() + else: + rw.future.addCallback(continuation, cast[pointer](retFuture)) + retFuture.cancelCallback = cancellation + retFuture proc startReader(rstream: AsyncStreamReader) = rstream.state = Running diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 2999f7af6..6432a10d4 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -267,19 +267,15 @@ template readAndReset(fut: untyped) = break proc cancelAndWait*(a, b, c, d: Future[TLSResult]): Future[void] = - var waiting: seq[Future[TLSResult]] + var waiting: seq[FutureBase] if not(isNil(a)) and not(a.finished()): - a.cancel() - waiting.add(a) + waiting.add(a.cancelAndWait()) if not(isNil(b)) and not(b.finished()): - b.cancel() - waiting.add(b) + waiting.add(b.cancelAndWait()) if not(isNil(c)) and not(c.finished()): - c.cancel() - waiting.add(c) + waiting.add(c.cancelAndWait()) if not(isNil(d)) and not(d.finished()): - d.cancel() - waiting.add(d) + waiting.add(d.cancelAndWait()) allFutures(waiting) proc dumpState*(state: cuint): string = @@ -432,7 +428,7 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} = proc tlsWriteLoop(stream: AsyncStreamWriter) {.async.} = var wstream = TLSStreamWriter(stream) wstream.state = AsyncStreamState.Running - await stepsAsync(1) + await sleepAsync(0.milliseconds) if isNil(wstream.stream.mainLoop): wstream.stream.mainLoop = tlsLoop(wstream.stream) await wstream.stream.mainLoop @@ -440,7 +436,7 @@ proc tlsWriteLoop(stream: AsyncStreamWriter) {.async.} = proc tlsReadLoop(stream: AsyncStreamReader) {.async.} = var rstream = TLSStreamReader(stream) rstream.state = AsyncStreamState.Running - await stepsAsync(1) + await sleepAsync(0.milliseconds) if isNil(rstream.stream.mainLoop): rstream.stream.mainLoop = tlsLoop(rstream.stream) await rstream.stream.mainLoop diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index 665bc0ed5..af29c2acc 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -690,8 +690,28 @@ proc join*(transp: DatagramTransport): Future[void] = proc closeWait*(transp: DatagramTransport): Future[void] = ## Close transport ``transp`` and release all resources. + const FutureName = "datagram.transport.closeWait" + + if {ReadClosed, WriteClosed} * transp.state != {}: + return Future.completed(FutureName) + + let retFuture = newFuture[void](FutureName, {FutureFlag.OwnCancelSchedule}) + + proc continuation(udata: pointer) {.gcsafe.} = + retFuture.complete() + + proc cancellation(udata: pointer) {.gcsafe.} = + # We are not going to change the state of `retFuture` to cancelled, so we + # will prevent the entire sequence of Futures from being cancelled. + discard + transp.close() - transp.join() + if transp.future.finished(): + retFuture.complete() + else: + transp.future.addCallback(continuation, cast[pointer](retFuture)) + retFuture.cancelCallback = cancellation + retFuture proc send*(transp: DatagramTransport, pbytes: pointer, nbytes: int): Future[void] = diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 44a39b292..f96650c7c 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -2588,15 +2588,34 @@ proc close*(transp: StreamTransport) = proc closeWait*(transp: StreamTransport): Future[void] = ## Close and frees resources of transport ``transp``. 
+ const FutureName = "stream.transport.closeWait" + + if {ReadClosed, WriteClosed} * transp.state != {}: + return Future.completed(FutureName) + + let retFuture = newFuture[void](FutureName, {FutureFlag.OwnCancelSchedule}) + + proc continuation(udata: pointer) {.gcsafe.} = + retFuture.complete() + + proc cancellation(udata: pointer) {.gcsafe.} = + # We are not going to change the state of `retFuture` to cancelled, so we + # will prevent the entire sequence of Futures from being cancelled. + discard + transp.close() - transp.join() + if transp.future.finished(): + retFuture.complete() + else: + transp.future.addCallback(continuation, cast[pointer](retFuture)) + retFuture.cancelCallback = cancellation + retFuture proc shutdownWait*(transp: StreamTransport): Future[void] = ## Perform graceful shutdown of TCP connection backed by transport ``transp``. doAssert(transp.kind == TransportKind.Socket) let retFuture = newFuture[void]("stream.transport.shutdown") transp.checkClosed(retFuture) - transp.checkWriteEof(retFuture) when defined(windows): let loop = getThreadDispatcher() @@ -2636,7 +2655,14 @@ proc shutdownWait*(transp: StreamTransport): Future[void] = let res = osdefs.shutdown(SocketHandle(transp.fd), SHUT_WR) if res < 0: let err = osLastError() - retFuture.fail(getTransportOsError(err)) + case err + of ENOTCONN: + # The specified socket is not connected, it means that our initial + # goal is already happened. + transp.state.incl({WriteEof}) + callSoon(continuation, nil) + else: + retFuture.fail(getTransportOsError(err)) else: transp.state.incl({WriteEof}) callSoon(continuation, nil) diff --git a/tests/testbugs.nim b/tests/testbugs.nim index cf18a13c9..1f2a932d0 100644 --- a/tests/testbugs.nim +++ b/tests/testbugs.nim @@ -14,7 +14,7 @@ suite "Asynchronous issues test suite": const HELLO_PORT = 45679 const TEST_MSG = "testmsg" const MSG_LEN = TEST_MSG.len() - const TestsCount = 500 + const TestsCount = 100 type CustomData = ref object diff --git a/tests/testfut.nim b/tests/testfut.nim index a9fba0539..bc61594b8 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -6,10 +6,15 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import unittest2 +import stew/results import ../chronos, ../chronos/unittest2/asynctests {.used.} +type + TestFooConnection* = ref object + id*: int + suite "Future[T] behavior test suite": proc testFuture1(): Future[int] {.async.} = await sleepAsync(0.milliseconds) @@ -960,7 +965,7 @@ suite "Future[T] behavior test suite": let discarded {.used.} = await fut1 check res - asyncTest "cancel() async procedure test": + asyncTest "tryCancel() async procedure test": var completed = 0 proc client1() {.async.} = @@ -980,7 +985,7 @@ suite "Future[T] behavior test suite": inc(completed) var fut = client4() - fut.cancel() + discard fut.tryCancel() # Future must not be cancelled immediately, because it has many nested # futures. 
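# Editor's aside (illustrative sketch, not part of the patch; assumes
# `import chronos` and execution inside `waitFor`/`poll`). The tests in this
# file exercise the three cancellation entry points used throughout this
# series: `tryCancel()` requests cancellation and reports the immediate
# outcome (often just discarded), `cancelSoon()` requests cancellation without
# waiting for the result, and `cancelAndWait()` suspends until the target
# future reaches a final state (which, as the tests show, may be Completed or
# Failed rather than Cancelled).
proc cancellationSketch() {.async.} =
  let fut1 = sleepAsync(100.milliseconds)
  fut1.cancelSoon()             # fire-and-forget cancellation request
  await cancelAndWait(fut1)     # wait until the cancellation takes effect
  let fut2 = sleepAsync(100.milliseconds)
  discard fut2.tryCancel()      # request cancellation, ignore the result here
  await cancelAndWait(fut2)     # resumes only once `fut2` has finished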
@@ -1031,7 +1036,7 @@ suite "Future[T] behavior test suite": var fut1 = client2() var fut2 = client2() - fut1.cancel() + discard fut1.tryCancel() await fut1 await cancelAndWait(fut2) check: @@ -1054,17 +1059,17 @@ suite "Future[T] behavior test suite": if not(retFuture.finished()): retFuture.complete() - proc cancel(udata: pointer) {.gcsafe.} = + proc cancellation(udata: pointer) {.gcsafe.} = inc(cancelled) if not(retFuture.finished()): removeTimer(moment, completion, cast[pointer](retFuture)) - retFuture.cancelCallback = cancel + retFuture.cancelCallback = cancellation discard setTimer(moment, completion, cast[pointer](retFuture)) return retFuture var fut = client1(100.milliseconds) - fut.cancel() + discard fut.tryCancel() await sleepAsync(500.milliseconds) check: fut.cancelled() @@ -1112,8 +1117,8 @@ suite "Future[T] behavior test suite": neverFlag3 = true res.addCallback(continuation) res.cancelCallback = cancellation - result = res neverFlag1 = true + res proc withTimeoutProc() {.async.} = try: @@ -1149,12 +1154,12 @@ suite "Future[T] behavior test suite": someFut = newFuture[void]() var raceFut3 = raceProc() - someFut.cancel() + discard someFut.tryCancel() await cancelAndWait(raceFut3) check: - raceFut1.state == FutureState.Cancelled - raceFut2.state == FutureState.Cancelled + raceFut1.state == FutureState.Completed + raceFut2.state == FutureState.Failed raceFut3.state == FutureState.Cancelled asyncTest "asyncSpawn() test": @@ -1255,12 +1260,12 @@ suite "Future[T] behavior test suite": (loc.procedure == procedure) check: - chk(loc10, "testfut.nim", 1221, "macroFuture") - chk(loc11, "testfut.nim", 1222, "") - chk(loc20, "testfut.nim", 1234, "template") - chk(loc21, "testfut.nim", 1237, "") - chk(loc30, "testfut.nim", 1231, "procedure") - chk(loc31, "testfut.nim", 1238, "") + chk(loc10, "testfut.nim", 1226, "macroFuture") + chk(loc11, "testfut.nim", 1227, "") + chk(loc20, "testfut.nim", 1239, "template") + chk(loc21, "testfut.nim", 1242, "") + chk(loc30, "testfut.nim", 1236, "procedure") + chk(loc31, "testfut.nim", 1243, "") asyncTest "withTimeout(fut) should wait cancellation test": proc futureNeverEnds(): Future[void] = @@ -1535,3 +1540,462 @@ suite "Future[T] behavior test suite": check: v1_u == 0'u v2_u + 1'u == 0'u + + asyncTest "wait() cancellation undefined behavior test #1": + proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. + async.} = + await fooFut + return TestFooConnection() + + proc testFoo(fooFut: Future[void]) {.async.} = + let connection = + try: + let res = await testInnerFoo(fooFut).wait(10.seconds) + Result[TestFooConnection, int].ok(res) + except CancelledError: + Result[TestFooConnection, int].err(0) + except CatchableError: + Result[TestFooConnection, int].err(1) + check connection.isOk() + + var future = newFuture[void]("last.child.future") + var someFut = testFoo(future) + future.complete() + discard someFut.tryCancel() + await someFut + + asyncTest "wait() cancellation undefined behavior test #2": + proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. + async.} = + await fooFut + return TestFooConnection() + + proc testMiddleFoo(fooFut: Future[void]): Future[TestFooConnection] {. 
+ async.} = + await testInnerFoo(fooFut) + + proc testFoo(fooFut: Future[void]) {.async.} = + let connection = + try: + let res = await testMiddleFoo(fooFut).wait(10.seconds) + Result[TestFooConnection, int].ok(res) + except CancelledError: + Result[TestFooConnection, int].err(0) + except CatchableError: + Result[TestFooConnection, int].err(1) + check connection.isOk() + + var future = newFuture[void]("last.child.future") + var someFut = testFoo(future) + future.complete() + discard someFut.tryCancel() + await someFut + + asyncTest "withTimeout() cancellation undefined behavior test #1": + proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. + async.} = + await fooFut + return TestFooConnection() + + proc testFoo(fooFut: Future[void]) {.async.} = + let connection = + try: + let + checkFut = testInnerFoo(fooFut) + res = await withTimeout(checkFut, 10.seconds) + if res: + Result[TestFooConnection, int].ok(checkFut.value) + else: + Result[TestFooConnection, int].err(0) + except CancelledError: + Result[TestFooConnection, int].err(1) + except CatchableError: + Result[TestFooConnection, int].err(2) + check connection.isOk() + + var future = newFuture[void]("last.child.future") + var someFut = testFoo(future) + future.complete() + discard someFut.tryCancel() + await someFut + + asyncTest "withTimeout() cancellation undefined behavior test #2": + proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. + async.} = + await fooFut + return TestFooConnection() + + proc testMiddleFoo(fooFut: Future[void]): Future[TestFooConnection] {. + async.} = + await testInnerFoo(fooFut) + + proc testFoo(fooFut: Future[void]) {.async.} = + let connection = + try: + let + checkFut = testMiddleFoo(fooFut) + res = await withTimeout(checkFut, 10.seconds) + if res: + Result[TestFooConnection, int].ok(checkFut.value) + else: + Result[TestFooConnection, int].err(0) + except CancelledError: + Result[TestFooConnection, int].err(1) + except CatchableError: + Result[TestFooConnection, int].err(2) + check connection.isOk() + + var future = newFuture[void]("last.child.future") + var someFut = testFoo(future) + future.complete() + discard someFut.tryCancel() + await someFut + + asyncTest "Cancellation behavior test": + proc testInnerFoo(fooFut: Future[void]) {.async.} = + await fooFut + + proc testMiddleFoo(fooFut: Future[void]) {.async.} = + await testInnerFoo(fooFut) + + proc testOuterFoo(fooFut: Future[void]) {.async.} = + await testMiddleFoo(fooFut) + + block: + # Cancellation of pending Future + let future = newFuture[void]("last.child.pending.future") + await cancelAndWait(future) + check: + future.cancelled() == true + + block: + # Cancellation of completed Future + let future = newFuture[void]("last.child.completed.future") + future.complete() + await cancelAndWait(future) + check: + future.cancelled() == false + future.completed() == true + + block: + # Cancellation of failed Future + let future = newFuture[void]("last.child.failed.future") + future.fail(newException(ValueError, "ABCD")) + await cancelAndWait(future) + check: + future.cancelled() == false + future.failed() == true + + block: + # Cancellation of already cancelled Future + let future = newFuture[void]("last.child.cancelled.future") + future.cancelAndSchedule() + await cancelAndWait(future) + check: + future.cancelled() == true + + block: + # Cancellation of Pending->Pending->Pending->Pending sequence + let future = newFuture[void]("last.child.pending.future") + let testFut = testOuterFoo(future) + await 
cancelAndWait(testFut) + check: + testFut.cancelled() == true + + block: + # Cancellation of Pending->Pending->Pending->Completed sequence + let future = newFuture[void]("last.child.completed.future") + let testFut = testOuterFoo(future) + future.complete() + await cancelAndWait(testFut) + check: + testFut.cancelled() == false + testFut.completed() == true + + block: + # Cancellation of Pending->Pending->Pending->Failed sequence + let future = newFuture[void]("last.child.failed.future") + let testFut = testOuterFoo(future) + future.fail(newException(ValueError, "ABCD")) + await cancelAndWait(testFut) + check: + testFut.cancelled() == false + testFut.failed() == true + + block: + # Cancellation of Pending->Pending->Pending->Cancelled sequence + let future = newFuture[void]("last.child.cancelled.future") + let testFut = testOuterFoo(future) + future.cancelAndSchedule() + await cancelAndWait(testFut) + check: + testFut.cancelled() == true + + block: + # Cancellation of pending Future, when automatic scheduling disabled + let future = newFuture[void]("last.child.pending.future", + {FutureFlag.OwnCancelSchedule}) + proc cancellation(udata: pointer) {.gcsafe.} = + discard + future.cancelCallback = cancellation + # Note, future will never be finished in such case, until we manually not + # finish it + let cancelFut = cancelAndWait(future) + await sleepAsync(100.milliseconds) + check: + cancelFut.finished() == false + future.cancelled() == false + # Now we manually changing Future's state, so `cancelAndWait` could + # finish + future.complete() + await cancelFut + check: + cancelFut.finished() == true + future.cancelled() == false + future.finished() == true + + block: + # Cancellation of pending Future, which will fail Future on cancellation, + # when automatic scheduling disabled + let future = newFuture[void]("last.child.completed.future", + {FutureFlag.OwnCancelSchedule}) + proc cancellation(udata: pointer) {.gcsafe.} = + future.complete() + future.cancelCallback = cancellation + # Note, future will never be finished in such case, until we manually not + # finish it + await cancelAndWait(future) + check: + future.cancelled() == false + future.completed() == true + + block: + # Cancellation of pending Future, which will fail Future on cancellation, + # when automatic scheduling disabled + let future = newFuture[void]("last.child.failed.future", + {FutureFlag.OwnCancelSchedule}) + proc cancellation(udata: pointer) {.gcsafe.} = + future.fail(newException(ValueError, "ABCD")) + future.cancelCallback = cancellation + # Note, future will never be finished in such case, until we manually not + # finish it + await cancelAndWait(future) + check: + future.cancelled() == false + future.failed() == true + + block: + # Cancellation of pending Future, which will fail Future on cancellation, + # when automatic scheduling disabled + let future = newFuture[void]("last.child.cancelled.future", + {FutureFlag.OwnCancelSchedule}) + proc cancellation(udata: pointer) {.gcsafe.} = + future.cancelAndSchedule() + future.cancelCallback = cancellation + # Note, future will never be finished in such case, until we manually not + # finish it + await cancelAndWait(future) + check: + future.cancelled() == true + + block: + # Cancellation of pending Pending->Pending->Pending->Pending, when + # automatic scheduling disabled and Future do nothing in cancellation + # callback + let future = newFuture[void]("last.child.pending.future", + {FutureFlag.OwnCancelSchedule}) + proc cancellation(udata: pointer) {.gcsafe.} = + discard + 
future.cancelCallback = cancellation + # Note, future will never be finished in such case, until we manually not + # finish it + let testFut = testOuterFoo(future) + let cancelFut = cancelAndWait(testFut) + await sleepAsync(100.milliseconds) + check: + cancelFut.finished() == false + testFut.cancelled() == false + future.cancelled() == false + # Now we manually changing Future's state, so `cancelAndWait` could + # finish + future.complete() + await cancelFut + check: + cancelFut.finished() == true + future.cancelled() == false + future.finished() == true + testFut.cancelled() == false + testFut.finished() == true + + block: + # Cancellation of pending Pending->Pending->Pending->Pending, when + # automatic scheduling disabled and Future completes in cancellation + # callback + let future = newFuture[void]("last.child.pending.future", + {FutureFlag.OwnCancelSchedule}) + proc cancellation(udata: pointer) {.gcsafe.} = + future.complete() + future.cancelCallback = cancellation + # Note, future will never be finished in such case, until we manually not + # finish it + let testFut = testOuterFoo(future) + await cancelAndWait(testFut) + await sleepAsync(100.milliseconds) + check: + testFut.cancelled() == false + testFut.finished() == true + future.cancelled() == false + future.finished() == true + + block: + # Cancellation of pending Pending->Pending->Pending->Pending, when + # automatic scheduling disabled and Future fails in cancellation callback + let future = newFuture[void]("last.child.pending.future", + {FutureFlag.OwnCancelSchedule}) + proc cancellation(udata: pointer) {.gcsafe.} = + future.fail(newException(ValueError, "ABCD")) + future.cancelCallback = cancellation + # Note, future will never be finished in such case, until we manually not + # finish it + let testFut = testOuterFoo(future) + await cancelAndWait(testFut) + await sleepAsync(100.milliseconds) + check: + testFut.cancelled() == false + testFut.failed() == true + future.cancelled() == false + future.failed() == true + + block: + # Cancellation of pending Pending->Pending->Pending->Pending, when + # automatic scheduling disabled and Future fails in cancellation callback + let future = newFuture[void]("last.child.pending.future", + {FutureFlag.OwnCancelSchedule}) + proc cancellation(udata: pointer) {.gcsafe.} = + future.cancelAndSchedule() + future.cancelCallback = cancellation + # Note, future will never be finished in such case, until we manually not + # finish it + let testFut = testOuterFoo(future) + await cancelAndWait(testFut) + await sleepAsync(100.milliseconds) + check: + testFut.cancelled() == true + future.cancelled() == true + + test "Issue #334 test": + proc test(): bool = + var testres = "" + + proc a() {.async.} = + try: + await sleepAsync(seconds(1)) + except CatchableError as exc: + testres.add("A") + raise exc + + proc b() {.async.} = + try: + await a() + except CatchableError as exc: + testres.add("B") + raise exc + + proc c() {.async.} = + try: + echo $(await b().withTimeout(seconds(2))) + except CatchableError as exc: + testres.add("C") + raise exc + + let x = c() + x.cancelSoon() + + try: + waitFor x + except CatchableError: + testres.add("D") + + testres.add("E") + + waitFor sleepAsync(milliseconds(100)) + + testres == "ABCDE" + + check test() == true + + asyncTest "cancelAndWait() should be able to cancel test": + proc test1() {.async.} = + await noCancel sleepAsync(100.milliseconds) + await noCancel sleepAsync(100.milliseconds) + await sleepAsync(100.milliseconds) + + proc test2() {.async.} = + await 
noCancel sleepAsync(100.milliseconds) + await sleepAsync(100.milliseconds) + await noCancel sleepAsync(100.milliseconds) + + proc test3() {.async.} = + await sleepAsync(100.milliseconds) + await noCancel sleepAsync(100.milliseconds) + await noCancel sleepAsync(100.milliseconds) + + proc test4() {.async.} = + while true: + await noCancel sleepAsync(50.milliseconds) + await sleepAsync(0.milliseconds) + + proc test5() {.async.} = + while true: + await sleepAsync(0.milliseconds) + await noCancel sleepAsync(50.milliseconds) + + block: + let future1 = test1() + await cancelAndWait(future1) + let future2 = test1() + await sleepAsync(10.milliseconds) + await cancelAndWait(future2) + check: + future1.cancelled() == true + future2.cancelled() == true + + block: + let future1 = test2() + await cancelAndWait(future1) + let future2 = test2() + await sleepAsync(10.milliseconds) + await cancelAndWait(future2) + check: + future1.cancelled() == true + future2.cancelled() == true + + block: + let future1 = test3() + await cancelAndWait(future1) + let future2 = test3() + await sleepAsync(10.milliseconds) + await cancelAndWait(future2) + check: + future1.cancelled() == true + future2.cancelled() == true + + block: + let future1 = test4() + await cancelAndWait(future1) + let future2 = test4() + await sleepAsync(333.milliseconds) + await cancelAndWait(future2) + check: + future1.cancelled() == true + future2.cancelled() == true + + block: + let future1 = test5() + await cancelAndWait(future1) + let future2 = test5() + await sleepAsync(333.milliseconds) + await cancelAndWait(future2) + check: + future1.cancelled() == true + future2.cancelled() == true diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index 4daaf87aa..e10892ebc 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -704,6 +704,107 @@ suite "HTTP client testing suite": await server.closeWait() return "redirect-" & $res + proc testSendCancelLeaksTest(secure: bool): Future[bool] {.async.} = + proc process(r: RequestFence): Future[HttpResponseRef] {. + async.} = + return defaultResponse() + + var server = createServer(initTAddress("127.0.0.1:0"), process, secure) + server.start() + let address = server.instance.localAddress() + + let ha = + if secure: + getAddress(address, HttpClientScheme.Secure, "/") + else: + getAddress(address, HttpClientScheme.NonSecure, "/") + + var counter = 0 + while true: + let + session = createSession(secure) + request = HttpClientRequestRef.new(session, ha, MethodGet) + requestFut = request.send() + + if counter > 0: + await stepsAsync(counter) + let exitLoop = + if not(requestFut.finished()): + await cancelAndWait(requestFut) + doAssert(cancelled(requestFut) or completed(requestFut), + "Future should be Cancelled or Completed at this point") + if requestFut.completed(): + let response = await requestFut + await response.closeWait() + + inc(counter) + false + else: + let response = await requestFut + await response.closeWait() + true + + await request.closeWait() + await session.closeWait() + + if exitLoop: + break + + await server.stop() + await server.closeWait() + return true + + proc testOpenCancelLeaksTest(secure: bool): Future[bool] {.async.} = + proc process(r: RequestFence): Future[HttpResponseRef] {. 
+ async.} = + return defaultResponse() + + var server = createServer(initTAddress("127.0.0.1:0"), process, secure) + server.start() + let address = server.instance.localAddress() + + let ha = + if secure: + getAddress(address, HttpClientScheme.Secure, "/") + else: + getAddress(address, HttpClientScheme.NonSecure, "/") + + var counter = 0 + while true: + let + session = createSession(secure) + request = HttpClientRequestRef.new(session, ha, MethodPost) + bodyFut = request.open() + + if counter > 0: + await stepsAsync(counter) + let exitLoop = + if not(bodyFut.finished()): + await cancelAndWait(bodyFut) + doAssert(cancelled(bodyFut) or completed(bodyFut), + "Future should be Cancelled or Completed at this point") + + if bodyFut.completed(): + let bodyWriter = await bodyFut + await bodyWriter.closeWait() + + inc(counter) + false + else: + let bodyWriter = await bodyFut + await bodyWriter.closeWait() + true + + await request.closeWait() + await session.closeWait() + + if exitLoop: + break + + await server.stop() + await server.closeWait() + return true + # proc testBasicAuthorization(): Future[bool] {.async.} = # let session = HttpSessionRef.new({HttpClientFlag.NoVerifyHost}, # maxRedirections = 10) @@ -1243,6 +1344,18 @@ suite "HTTP client testing suite": test "HTTP(S) client maximum redirections test": check waitFor(testRequestRedirectTest(true, 4)) == "redirect-true" + test "HTTP send() cancellation leaks test": + check waitFor(testSendCancelLeaksTest(false)) == true + + test "HTTP(S) send() cancellation leaks test": + check waitFor(testSendCancelLeaksTest(true)) == true + + test "HTTP open() cancellation leaks test": + check waitFor(testOpenCancelLeaksTest(false)) == true + + test "HTTP(S) open() cancellation leaks test": + check waitFor(testOpenCancelLeaksTest(true)) == true + test "HTTPS basic authorization test": skip() # This test disabled because remote service is pretty flaky and fails pretty diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index 0ecc9aa4f..85aeee5b6 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -1326,32 +1326,31 @@ suite "HTTP server testing suite": server.start() var transp: StreamTransport - try: - transp = await connect(address) - block: - let response = await transp.httpClient2(test[0], 7) - check: - response.data == "TEST_OK" - response.headers.getString("connection") == test[3] - # We do this sleeping here just because we running both server and - # client in single process, so when we received response from server - # it does not mean that connection has been immediately closed - it - # takes some more calls, so we trying to get this calls happens. - await sleepAsync(50.milliseconds) - let connectionStillAvailable = - try: - let response {.used.} = await transp.httpClient2(test[0], 7) - true - except CatchableError: - false - - check connectionStillAvailable == test[2] - - finally: - if not(isNil(transp)): - await transp.closeWait() - await server.stop() - await server.closeWait() + + transp = await connect(address) + block: + let response = await transp.httpClient2(test[0], 7) + check: + response.data == "TEST_OK" + response.headers.getString("connection") == test[3] + # We do this sleeping here just because we running both server and + # client in single process, so when we received response from server + # it does not mean that connection has been immediately closed - it + # takes some more calls, so we trying to get this calls happens. 
+ await sleepAsync(50.milliseconds) + let connectionStillAvailable = + try: + let response {.used.} = await transp.httpClient2(test[0], 7) + true + except CatchableError: + false + + check connectionStillAvailable == test[2] + + if not(isNil(transp)): + await transp.closeWait() + await server.stop() + await server.closeWait() asyncTest "HTTP debug tests": const @@ -1400,32 +1399,30 @@ suite "HTTP server testing suite": info.flags == {HttpServerFlags.Http11Pipeline} info.socketFlags == socketFlags - try: - var clientFutures: seq[Future[StreamTransport]] - for i in 0 ..< TestsCount: - clientFutures.add(client(address, TestRequest)) - await allFutures(clientFutures) - - let connections = server.getConnections() - check len(connections) == TestsCount - let currentTime = Moment.now() - for index, connection in connections.pairs(): - let transp = clientFutures[index].read() - check: - connection.remoteAddress.get() == transp.localAddress() - connection.localAddress.get() == transp.remoteAddress() - connection.connectionType == ConnectionType.NonSecure - connection.connectionState == ConnectionState.Alive - connection.query.get("") == "/httpdebug" - (currentTime - connection.createMoment.get()) != ZeroDuration - (currentTime - connection.acceptMoment) != ZeroDuration - var pending: seq[Future[void]] - for transpFut in clientFutures: - pending.add(closeWait(transpFut.read())) - await allFutures(pending) - finally: - await server.stop() - await server.closeWait() + var clientFutures: seq[Future[StreamTransport]] + for i in 0 ..< TestsCount: + clientFutures.add(client(address, TestRequest)) + await allFutures(clientFutures) + + let connections = server.getConnections() + check len(connections) == TestsCount + let currentTime = Moment.now() + for index, connection in connections.pairs(): + let transp = clientFutures[index].read() + check: + connection.remoteAddress.get() == transp.localAddress() + connection.localAddress.get() == transp.remoteAddress() + connection.connectionType == ConnectionType.NonSecure + connection.connectionState == ConnectionState.Alive + connection.query.get("") == "/httpdebug" + (currentTime - connection.createMoment.get()) != ZeroDuration + (currentTime - connection.acceptMoment) != ZeroDuration + var pending: seq[Future[void]] + for transpFut in clientFutures: + pending.add(closeWait(transpFut.read())) + await allFutures(pending) + await server.stop() + await server.closeWait() test "Leaks test": checkLeaks() diff --git a/tests/testratelimit.nim b/tests/testratelimit.nim index bf281eec7..d28492874 100644 --- a/tests/testratelimit.nim +++ b/tests/testratelimit.nim @@ -49,7 +49,7 @@ suite "Token Bucket": # Consume 10* the budget cap let beforeStart = Moment.now() waitFor(bucket.consume(1000).wait(5.seconds)) - check Moment.now() - beforeStart in 900.milliseconds .. 1500.milliseconds + check Moment.now() - beforeStart in 900.milliseconds .. 
2200.milliseconds test "Sync manual replenish": var bucket = TokenBucket.new(1000, 0.seconds) @@ -96,7 +96,7 @@ suite "Token Bucket": futBlocker.finished == false fut2.finished == false - futBlocker.cancel() + futBlocker.cancelSoon() waitFor(fut2.wait(10.milliseconds)) test "Very long replenish": @@ -117,9 +117,14 @@ suite "Token Bucket": check bucket.tryConsume(1, fakeNow) == true test "Short replenish": - var bucket = TokenBucket.new(15000, 1.milliseconds) - let start = Moment.now() - check bucket.tryConsume(15000, start) - check bucket.tryConsume(1, start) == false + skip() + # TODO (cheatfate): This test was disabled, because it continuosly fails in + # Github Actions Windows x64 CI when using Nim 1.6.14 version. + # Unable to reproduce failure locally. + + # var bucket = TokenBucket.new(15000, 1.milliseconds) + # let start = Moment.now() + # check bucket.tryConsume(15000, start) + # check bucket.tryConsume(1, start) == false - check bucket.tryConsume(15000, start + 1.milliseconds) == true + # check bucket.tryConsume(15000, start + 1.milliseconds) == true diff --git a/tests/testsoon.nim b/tests/testsoon.nim index 88072c267..41a6e4ec1 100644 --- a/tests/testsoon.nim +++ b/tests/testsoon.nim @@ -11,75 +11,83 @@ import ../chronos {.used.} suite "callSoon() tests suite": - const CallSoonTests = 10 - var soonTest1 = 0'u - var timeoutsTest1 = 0 - var timeoutsTest2 = 0 - var soonTest2 = 0 - - proc callback1(udata: pointer) {.gcsafe.} = - soonTest1 = soonTest1 xor cast[uint](udata) - - proc test1(): uint = - callSoon(callback1, cast[pointer](0x12345678'u)) - callSoon(callback1, cast[pointer](0x23456789'u)) - callSoon(callback1, cast[pointer](0x3456789A'u)) - callSoon(callback1, cast[pointer](0x456789AB'u)) - callSoon(callback1, cast[pointer](0x56789ABC'u)) - callSoon(callback1, cast[pointer](0x6789ABCD'u)) - callSoon(callback1, cast[pointer](0x789ABCDE'u)) - callSoon(callback1, cast[pointer](0x89ABCDEF'u)) - callSoon(callback1, cast[pointer](0x9ABCDEF1'u)) - callSoon(callback1, cast[pointer](0xABCDEF12'u)) - callSoon(callback1, cast[pointer](0xBCDEF123'u)) - callSoon(callback1, cast[pointer](0xCDEF1234'u)) - callSoon(callback1, cast[pointer](0xDEF12345'u)) - callSoon(callback1, cast[pointer](0xEF123456'u)) - callSoon(callback1, cast[pointer](0xF1234567'u)) - callSoon(callback1, cast[pointer](0x12345678'u)) - ## All callbacks must be processed exactly with 1 poll() call. 
- poll() - result = soonTest1 - - proc testProc() {.async.} = - for i in 1..CallSoonTests: - await sleepAsync(100.milliseconds) - timeoutsTest1 += 1 - - var callbackproc: proc(udata: pointer) {.gcsafe, raises: [].} - callbackproc = proc (udata: pointer) {.gcsafe, raises: [].} = - timeoutsTest2 += 1 - {.gcsafe.}: - callSoon(callbackproc) + test "User-defined callback argument test": + proc test(): bool = + var soonTest = 0'u - proc test2(timers, callbacks: var int) = - callSoon(callbackproc) - waitFor(testProc()) - timers = timeoutsTest1 - callbacks = timeoutsTest2 + proc callback(udata: pointer) {.gcsafe.} = + soonTest = soonTest xor cast[uint](udata) - proc testCallback(udata: pointer) = - soonTest2 = 987654321 + callSoon(callback, cast[pointer](0x12345678'u)) + callSoon(callback, cast[pointer](0x23456789'u)) + callSoon(callback, cast[pointer](0x3456789A'u)) + callSoon(callback, cast[pointer](0x456789AB'u)) + callSoon(callback, cast[pointer](0x56789ABC'u)) + callSoon(callback, cast[pointer](0x6789ABCD'u)) + callSoon(callback, cast[pointer](0x789ABCDE'u)) + callSoon(callback, cast[pointer](0x89ABCDEF'u)) + callSoon(callback, cast[pointer](0x9ABCDEF1'u)) + callSoon(callback, cast[pointer](0xABCDEF12'u)) + callSoon(callback, cast[pointer](0xBCDEF123'u)) + callSoon(callback, cast[pointer](0xCDEF1234'u)) + callSoon(callback, cast[pointer](0xDEF12345'u)) + callSoon(callback, cast[pointer](0xEF123456'u)) + callSoon(callback, cast[pointer](0xF1234567'u)) + callSoon(callback, cast[pointer](0x12345678'u)) + ## All callbacks must be processed exactly with 1 poll() call. + poll() - proc test3(): bool = - callSoon(testCallback) - poll() - result = soonTest2 == 987654321 + var values = [0x12345678'u, 0x23456789'u, 0x3456789A'u, 0x456789AB'u, + 0x56789ABC'u, 0x6789ABCD'u, 0x789ABCDE'u, 0x89ABCDEF'u, + 0x9ABCDEF1'u, 0xABCDEF12'u, 0xBCDEF123'u, 0xCDEF1234'u, + 0xDEF12345'u, 0xEF123456'u, 0xF1234567'u, 0x12345678'u] + var expect = 0'u + for item in values: + expect = expect xor item + + soonTest == expect + + check test() == true - test "User-defined callback argument test": - var values = [0x12345678'u, 0x23456789'u, 0x3456789A'u, 0x456789AB'u, - 0x56789ABC'u, 0x6789ABCD'u, 0x789ABCDE'u, 0x89ABCDEF'u, - 0x9ABCDEF1'u, 0xABCDEF12'u, 0xBCDEF123'u, 0xCDEF1234'u, - 0xDEF12345'u, 0xEF123456'u, 0xF1234567'u, 0x12345678'u] - var expect = 0'u - for item in values: - expect = expect xor item - check test1() == expect test "`Asynchronous dead end` #7193 test": - var timers, callbacks: int - test2(timers, callbacks) - check: - timers == CallSoonTests - callbacks > CallSoonTests * 2 + const CallSoonTests = 5 + proc test() = + var + timeoutsTest1 = 0 + timeoutsTest2 = 0 + stopFlag = false + + var callbackproc: proc(udata: pointer) {.gcsafe, raises: [].} + callbackproc = proc (udata: pointer) {.gcsafe, raises: [].} = + timeoutsTest2 += 1 + if not(stopFlag): + callSoon(callbackproc) + + proc testProc() {.async.} = + for i in 1 .. 
CallSoonTests: + await sleepAsync(10.milliseconds) + timeoutsTest1 += 1 + + callSoon(callbackproc) + waitFor(testProc()) + stopFlag = true + poll() + + check: + timeoutsTest1 == CallSoonTests + timeoutsTest2 > CallSoonTests * 2 + + test() + test "`callSoon() is not working prior getGlobalDispatcher()` #7192 test": - check test3() == true + proc test(): bool = + var soonTest = 0 + + proc testCallback(udata: pointer) = + soonTest = 987654321 + + callSoon(testCallback) + poll() + soonTest == 987654321 + + check test() == true diff --git a/tests/teststream.nim b/tests/teststream.nim index 9e1ce557c..762e99629 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -1271,15 +1271,23 @@ suite "Stream Transport test suite": server2.start() server3.start() - # It works cause even though there's an active listening socket bound to dst3, we are using ReusePort - var transp1 = await connect(server1.local, localAddress = server3.local, flags={SocketFlags.ReusePort}) - var transp2 = await connect(server2.local, localAddress = server3.local, flags={SocketFlags.ReusePort}) + # It works cause even though there's an active listening socket bound to + # dst3, we are using ReusePort + var transp1 = await connect( + server1.localAddress(), localAddress = server3.localAddress(), + flags = {SocketFlags.ReusePort}) + var transp2 = await connect( + server2.localAddress(), localAddress = server3.localAddress(), + flags = {SocketFlags.ReusePort}) expect(TransportOsError): - var transp2 {.used.} = await connect(server2.local, localAddress = server3.local) + var transp2 {.used.} = await connect( + server2.localAddress(), localAddress = server3.localAddress()) expect(TransportOsError): - var transp3 {.used.} = await connect(server2.local, localAddress = initTAddress("::", server3.local.port)) + var transp3 {.used.} = await connect( + server2.localAddress(), + localAddress = initTAddress("::", server3.localAddress().port)) await transp1.closeWait() await transp2.closeWait() @@ -1293,6 +1301,77 @@ suite "Stream Transport test suite": server3.stop() await server3.closeWait() + proc testConnectCancelLeaksTest() {.async.} = + proc client(server: StreamServer, transp: StreamTransport) {.async.} = + await transp.closeWait() + + let + server = createStreamServer(initTAddress("127.0.0.1:0"), client) + address = server.localAddress() + + var counter = 0 + while true: + let transpFut = connect(address) + if counter > 0: + await stepsAsync(counter) + if not(transpFut.finished()): + await cancelAndWait(transpFut) + doAssert(cancelled(transpFut), + "Future should be Cancelled at this point") + inc(counter) + else: + let transp = await transpFut + await transp.closeWait() + break + server.stop() + await server.closeWait() + + proc testAcceptCancelLeaksTest() {.async.} = + var + counter = 0 + exitLoop = false + + # This timer will help to awake events poll in case its going to stuck + # usually happens on MacOS. 
+ let sleepFut = sleepAsync(1.seconds) + + while not(exitLoop): + let + server = createStreamServer(initTAddress("127.0.0.1:0")) + address = server.localAddress() + + let + transpFut = connect(address) + acceptFut = server.accept() + + if counter > 0: + await stepsAsync(counter) + + exitLoop = + if not(acceptFut.finished()): + await cancelAndWait(acceptFut) + doAssert(cancelled(acceptFut), + "Future should be Cancelled at this point") + inc(counter) + false + else: + let transp = await acceptFut + await transp.closeWait() + true + + if not(transpFut.finished()): + await transpFut.cancelAndWait() + + if transpFut.completed(): + let transp = transpFut.value + await transp.closeWait() + + server.stop() + await server.closeWait() + + if not(sleepFut.finished()): + await cancelAndWait(sleepFut) + markFD = getCurrentFD() for i in 0.. Date: Mon, 16 Oct 2023 10:38:11 +0200 Subject: [PATCH 067/146] Complete futures in closure finally (fix #415) (#449) * Complete in closure finally * cleanup tests, add comment * handle defects * don't complete future on defect * complete future in test to avoid failure * fix with strict exceptions * fix regressions * fix nim 1.6 --- chronos/asyncfutures2.nim | 69 +++++--------- chronos/asyncmacro2.nim | 183 +++++++++++++++++++++++++------------- chronos/futures.nim | 7 +- tests/testmacro.nim | 130 +++++++++++++++++++++++++++ 4 files changed, 275 insertions(+), 114 deletions(-) diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index ee6e8e0d1..9674888c1 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -311,57 +311,30 @@ proc internalContinue(fut: pointer) {.raises: [], gcsafe.} = proc futureContinue*(fut: FutureBase) {.raises: [], gcsafe.} = # This function is responsible for calling the closure iterator generated by # the `{.async.}` transformation either until it has completed its iteration - # or raised and error / been cancelled. # # Every call to an `{.async.}` proc is redirected to call this function # instead with its original body captured in `fut.closure`. - var next: FutureBase - template iterate = - while true: - # Call closure to make progress on `fut` until it reaches `yield` (inside - # `await` typically) or completes / fails / is cancelled - next = fut.internalClosure(fut) - if fut.internalClosure.finished(): # Reached the end of the transformed proc - break - - if next == nil: - raiseAssert "Async procedure (" & ($fut.location[LocationKind.Create]) & - ") yielded `nil`, are you await'ing a `nil` Future?" - - if not next.finished(): - # We cannot make progress on `fut` until `next` has finished - schedule - # `fut` to continue running when that happens - GC_ref(fut) - next.addCallback(CallbackFunc(internalContinue), cast[pointer](fut)) - - # return here so that we don't remove the closure below - return - - # Continue while the yielded future is already finished. 
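    # (Editor's note, not part of the patch.) The exception handling removed
    # just below no longer lives in `futureContinue`: as of this commit the
    # async macro completes the future from inside the generated closure
    # itself (see `wrapInTryFinally` in the asyncmacro2.nim hunk further
    # down), wrapping the user's body roughly like:
    #
    #   var closureSucceeded = true
    #   try:
    #     setResult(<user body>)
    #   except CancelledError:
    #     closureSucceeded = false; fut.cancelAndSchedule()
    #   except CatchableError as exc:
    #     closureSucceeded = false; fut.fail(exc)
    #   except Defect as exc:
    #     closureSucceeded = false; raise exc
    #   finally:
    #     if closureSucceeded:
    #       fut.complete(result)   # or fut.complete() when the type is void
    #
    # so the future is always finished, even when the body returns early.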
- - when chronosStrictException: - try: - iterate - except CancelledError: - fut.cancelAndSchedule() - except CatchableError as exc: - fut.fail(exc) - finally: - next = nil # GC hygiene - else: - try: - iterate - except CancelledError: - fut.cancelAndSchedule() - except CatchableError as exc: - fut.fail(exc) - except Exception as exc: - if exc of Defect: - raise (ref Defect)(exc) - - fut.fail((ref ValueError)(msg: exc.msg, parent: exc)) - finally: - next = nil # GC hygiene + while true: + # Call closure to make progress on `fut` until it reaches `yield` (inside + # `await` typically) or completes / fails / is cancelled + let next: FutureBase = fut.internalClosure(fut) + if fut.internalClosure.finished(): # Reached the end of the transformed proc + break + + if next == nil: + raiseAssert "Async procedure (" & ($fut.location[LocationKind.Create]) & + ") yielded `nil`, are you await'ing a `nil` Future?" + + if not next.finished(): + # We cannot make progress on `fut` until `next` has finished - schedule + # `fut` to continue running when that happens + GC_ref(fut) + next.addCallback(CallbackFunc(internalContinue), cast[pointer](fut)) + + # return here so that we don't remove the closure below + return + + # Continue while the yielded future is already finished. # `futureContinue` will not be called any more for this future so we can # clean it up diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim index a86147c6e..d05940432 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/asyncmacro2.nim @@ -9,60 +9,14 @@ import std/[macros] -# `quote do` will ruin line numbers so we avoid it using these helpers -proc completeWithResult(fut, baseType: NimNode): NimNode {.compileTime.} = - # when `baseType` is void: - # complete(`fut`) - # else: - # complete(`fut`, result) - if baseType.eqIdent("void"): - # Shortcut if we know baseType at macro expansion time - newCall(ident "complete", fut) - else: - # `baseType` might be generic and resolve to `void` - nnkWhenStmt.newTree( - nnkElifExpr.newTree( - nnkInfix.newTree(ident "is", baseType, ident "void"), - newCall(ident "complete", fut) - ), - nnkElseExpr.newTree( - newCall(ident "complete", fut, ident "result") - ) - ) - -proc completeWithNode(fut, baseType, node: NimNode): NimNode {.compileTime.} = - # when typeof(`node`) is void: - # `node` # statement / explicit return - # -> completeWithResult(fut, baseType) - # else: # expression / implicit return - # complete(`fut`, `node`) - if node.kind == nnkEmpty: # shortcut when known at macro expanstion time - completeWithResult(fut, baseType) - else: - # Handle both expressions and statements - since the type is not know at - # macro expansion time, we delegate this choice to a later compilation stage - # with `when`. 
- nnkWhenStmt.newTree( - nnkElifExpr.newTree( - nnkInfix.newTree( - ident "is", nnkTypeOfExpr.newTree(node), ident "void"), - newStmtList( - node, - completeWithResult(fut, baseType) - ) - ), - nnkElseExpr.newTree( - newCall(ident "complete", fut, node) - ) - ) - -proc processBody(node, fut, baseType: NimNode): NimNode {.compileTime.} = +proc processBody(node, setResultSym, baseType: NimNode): NimNode {.compileTime.} = #echo(node.treeRepr) case node.kind of nnkReturnStmt: let res = newNimNode(nnkStmtList, node) - res.add completeWithNode(fut, baseType, processBody(node[0], fut, baseType)) + if node[0].kind != nnkEmpty: + res.add newCall(setResultSym, processBody(node[0], setResultSym, baseType)) res.add newNimNode(nnkReturnStmt, node).add(newNilLit()) res @@ -71,12 +25,89 @@ proc processBody(node, fut, baseType: NimNode): NimNode {.compileTime.} = node else: for i in 0 ..< node.len: - # We must not transform nested procedures of any form, otherwise - # `fut` will be used for all nested procedures as their own - # `retFuture`. - node[i] = processBody(node[i], fut, baseType) + # We must not transform nested procedures of any form, since their + # returns are not meant for our futures + node[i] = processBody(node[i], setResultSym, baseType) node +proc wrapInTryFinally(fut, baseType, body: NimNode): NimNode {.compileTime.} = + # creates: + # var closureSucceeded = true + # try: `body` + # except CancelledError: closureSucceeded = false; `castFutureSym`.cancelAndSchedule() + # except CatchableError as exc: closureSucceeded = false; `castFutureSym`.fail(exc) + # except Defect as exc: + # closureSucceeded = false + # raise exc + # finally: + # if closureSucceeded: + # `castFutureSym`.complete(result) + + # we are completing inside finally to make sure the completion happens even + # after a `return` + let closureSucceeded = genSym(nskVar, "closureSucceeded") + var nTry = nnkTryStmt.newTree(body) + nTry.add nnkExceptBranch.newTree( + ident"CancelledError", + nnkStmtList.newTree( + nnkAsgn.newTree(closureSucceeded, ident"false"), + newCall(ident "cancelAndSchedule", fut) + ) + ) + + nTry.add nnkExceptBranch.newTree( + nnkInfix.newTree(ident"as", ident"CatchableError", ident"exc"), + nnkStmtList.newTree( + nnkAsgn.newTree(closureSucceeded, ident"false"), + newCall(ident "fail", fut, ident"exc") + ) + ) + + nTry.add nnkExceptBranch.newTree( + nnkInfix.newTree(ident"as", ident"Defect", ident"exc"), + nnkStmtList.newTree( + nnkAsgn.newTree(closureSucceeded, ident"false"), + nnkRaiseStmt.newTree(ident"exc") + ) + ) + + when not chronosStrictException: + # adds + # except Exception as exc: + # closureSucceeded = false + # fut.fail((ref ValueError)(msg: exc.msg, parent: exc)) + let excName = ident"exc" + + nTry.add nnkExceptBranch.newTree( + nnkInfix.newTree(ident"as", ident"Exception", ident"exc"), + nnkStmtList.newTree( + nnkAsgn.newTree(closureSucceeded, ident"false"), + newCall(ident "fail", fut, + quote do: (ref ValueError)(msg: `excName`.msg, parent: `excName`)), + ) + ) + + nTry.add nnkFinally.newTree( + nnkIfStmt.newTree( + nnkElifBranch.newTree( + closureSucceeded, + nnkWhenStmt.newTree( + nnkElifExpr.newTree( + nnkInfix.newTree(ident "is", baseType, ident "void"), + newCall(ident "complete", fut) + ), + nnkElseExpr.newTree( + newCall(ident "complete", fut, ident "result") + ) + ) + ) + ) + ) + return nnkStmtList.newTree( + newVarStmt(closureSucceeded, ident"true"), + nTry + ) + proc getName(node: NimNode): string {.compileTime.} = case node.kind of nnkSym: @@ -153,8 +184,9 @@ proc 
asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = if baseTypeIsVoid: futureVoidType else: returnType castFutureSym = nnkCast.newTree(internalFutureType, internalFutureSym) + setResultSym = ident"setResult" - procBody = prc.body.processBody(castFutureSym, baseType) + procBody = prc.body.processBody(setResultSym, baseType) # don't do anything with forward bodies (empty) if procBody.kind != nnkEmpty: @@ -199,9 +231,44 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = ) ) - completeDecl = completeWithNode(castFutureSym, baseType, procBodyBlck) + # generates: + # template `setResultSym`(code: untyped) {.used.} = + # when typeof(code) is void: code + # else: result = code + # + # this is useful to handle implicit returns, but also + # to bind the `result` to the one we declare here + setResultDecl = + nnkTemplateDef.newTree( + setResultSym, + newEmptyNode(), newEmptyNode(), + nnkFormalParams.newTree( + newEmptyNode(), + nnkIdentDefs.newTree( + ident"code", + ident"untyped", + newEmptyNode(), + ) + ), + nnkPragma.newTree(ident"used"), + newEmptyNode(), + nnkWhenStmt.newTree( + nnkElifBranch.newTree( + nnkInfix.newTree(ident"is", nnkTypeOfExpr.newTree(ident"code"), ident"void"), + ident"code" + ), + nnkElse.newTree( + newAssignment(ident"result", ident"code") + ) + ) + ) + + completeDecl = wrapInTryFinally( + castFutureSym, baseType, + newCall(setResultSym, procBodyBlck) + ) - closureBody = newStmtList(resultDecl, completeDecl) + closureBody = newStmtList(resultDecl, setResultDecl, completeDecl) internalFutureParameter = nnkIdentDefs.newTree( internalFutureSym, newIdentNode("FutureBase"), newEmptyNode()) @@ -225,10 +292,6 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = # here the possibility of transporting more specific error types here # for example by casting exceptions coming out of `await`.. 
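      # (Editor's note, not part of the patch.) With the `try`/`finally`
      # wrapping introduced above in `wrapInTryFinally`, user exceptions are
      # routed into the future instead of escaping the iterator, so the empty
      # `raises` bracket below annotates the generated closure with
      # `{.raises: [].}`, matching the field type in futures.nim:
      #
      #   internalClosure*: iterator(f: FutureBase): FutureBase {.raises: [], gcsafe.}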
let raises = nnkBracket.newTree() - when chronosStrictException: - raises.add(newIdentNode("CatchableError")) - else: - raises.add(newIdentNode("Exception")) closureIterator.addPragma(nnkExprColonExpr.newTree( newIdentNode("raises"), diff --git a/chronos/futures.nim b/chronos/futures.nim index 5f96867e5..0af635f5b 100644 --- a/chronos/futures.nim +++ b/chronos/futures.nim @@ -17,11 +17,6 @@ export srcloc when chronosStackTrace: type StackTrace = string -when chronosStrictException: - {.pragma: closureIter, raises: [CatchableError], gcsafe.} -else: - {.pragma: closureIter, raises: [Exception], gcsafe.} - type LocationKind* {.pure.} = enum Create @@ -54,7 +49,7 @@ type internalState*: FutureState internalFlags*: FutureFlags internalError*: ref CatchableError ## Stored exception - internalClosure*: iterator(f: FutureBase): FutureBase {.closureIter.} + internalClosure*: iterator(f: FutureBase): FutureBase {.raises: [], gcsafe.} when chronosFutureId: internalId*: uint diff --git a/tests/testmacro.nim b/tests/testmacro.nim index ad4c22f37..bd53078ad 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -94,6 +94,11 @@ proc testAwaitne(): Future[bool] {.async.} = return true +template returner = + # can't use `return 5` + result = 5 + return + suite "Macro transformations test suite": test "`await` command test": check waitFor(testAwait()) == true @@ -136,6 +141,131 @@ suite "Macro transformations test suite": check: waitFor(gen(int)) == default(int) + test "Nested return": + proc nr: Future[int] {.async.} = + return + if 1 == 1: + return 42 + else: + 33 + + check waitFor(nr()) == 42 + +suite "Macro transformations - completions": + test "Run closure to completion on return": # issue #415 + var x = 0 + proc test415 {.async.} = + try: + return + finally: + await sleepAsync(1.milliseconds) + x = 5 + waitFor(test415()) + check: x == 5 + + test "Run closure to completion on defer": + var x = 0 + proc testDefer {.async.} = + defer: + await sleepAsync(1.milliseconds) + x = 5 + return + waitFor(testDefer()) + check: x == 5 + + test "Run closure to completion with exceptions": + var x = 0 + proc testExceptionHandling {.async.} = + try: + return + finally: + try: + await sleepAsync(1.milliseconds) + raise newException(ValueError, "") + except ValueError: + await sleepAsync(1.milliseconds) + await sleepAsync(1.milliseconds) + x = 5 + waitFor(testExceptionHandling()) + check: x == 5 + + test "Correct return value when updating result after return": + proc testWeirdCase: int = + try: return 33 + finally: result = 55 + proc testWeirdCaseAsync: Future[int] {.async.} = + try: + await sleepAsync(1.milliseconds) + return 33 + finally: result = 55 + + check: + testWeirdCase() == waitFor(testWeirdCaseAsync()) + testWeirdCase() == 55 + + test "Generic & finally calling async": + proc testGeneric(T: type): Future[T] {.async.} = + try: + try: + await sleepAsync(1.milliseconds) + return + finally: + await sleepAsync(1.milliseconds) + await sleepAsync(1.milliseconds) + result = 11 + finally: + await sleepAsync(1.milliseconds) + await sleepAsync(1.milliseconds) + result = 12 + check waitFor(testGeneric(int)) == 12 + + proc testFinallyCallsAsync(T: type): Future[T] {.async.} = + try: + await sleepAsync(1.milliseconds) + return + finally: + result = await testGeneric(T) + check waitFor(testFinallyCallsAsync(int)) == 12 + + test "templates returning": + proc testReturner: Future[int] {.async.} = + returner + doAssert false + check waitFor(testReturner()) == 5 + + proc testReturner2: Future[int] {.async.} = + 
template returner2 = + return 6 + returner2 + doAssert false + check waitFor(testReturner2()) == 6 + + test "raising defects": + proc raiser {.async.} = + # sleeping to make sure our caller is the poll loop + await sleepAsync(0.milliseconds) + raise newException(Defect, "uh-oh") + + let fut = raiser() + expect(Defect): waitFor(fut) + check not fut.completed() + fut.complete() + + test "return result": + proc returnResult: Future[int] {.async.} = + var result: int + result = 12 + return result + check waitFor(returnResult()) == 12 + + test "async in async": + proc asyncInAsync: Future[int] {.async.} = + proc a2: Future[int] {.async.} = + result = 12 + result = await a2() + check waitFor(asyncInAsync()) == 12 + +suite "Macro transformations - implicit returns": test "Implicit return": proc implicit(): Future[int] {.async.} = 42 From a759c11ce4e9fd780f73d9264be1331a763e0fd8 Mon Sep 17 00:00:00 2001 From: Tanguy Date: Tue, 17 Oct 2023 14:18:14 +0200 Subject: [PATCH 068/146] Raise tracking (#251) * Exception tracking v2 * some fixes * Nim 1.2 compat * simpler things * Fixes for libp2p * Fixes for strictException * better await exception check * Fix for template async proc * make async work with procTy * FuturEx is now a ref object type * add tests * update test * update readme * Switch to asyncraises pragma * Address tests review comments * Rename FuturEx to RaiseTrackingFuture * Fix typo * Split asyncraises into async, asyncraises * Add -d:chronosWarnMissingRaises * Add comment to RaiseTrackingFuture * Allow standalone asyncraises * CheckedFuture.fail type checking * First cleanup * Remove useless line * Review comments * nimble: Remove #head from unittest2 * Remove implict raises: CancelledError * Move checkFutureExceptions to asyncfutures2 * Small refacto * small cleanup * Complete in closure finally * cleanup tests, add comment * bump * chronos is not compatible with nim 1.2 anymore * re-add readme modifications * fix special exception handlers * also propagate excetion type in `read` * `RaiseTrackingFuture` -> `InternalRaisesFuture` Use internal naming scheme for RTF (this type should only be accessed via asyncraises) * use `internalError` for error reading * oops * 2.0 workarounds * again * remove try/finally for non-raising functions * Revert "remove try/finally for non-raising functions" This reverts commit 86bfeb5c972ef379a3bd34e4a16cd158a7455721. `finally` is needed if code returns early :/ * fixes * avoid exposing `newInternalRaisesFuture` in manual macro code * avoid unnecessary codegen for `Future[void]` * avoid reduntant block around async proc body * simplify body generation for forward declarations with comment but no body * avoid duplicate `gcsafe` annotiations * line info for return at end of async proc * expand tests * fix comments, add defer test --------- Co-authored-by: Jacek Sieka --- README.md | 35 +++ chronos.nimble | 2 +- chronos/asyncfutures2.nim | 146 ++++++++++- chronos/asyncmacro2.nim | 536 +++++++++++++++++++++++--------------- tests/testfut.nim | 10 +- tests/testmacro.nim | 115 ++++++++ 6 files changed, 628 insertions(+), 216 deletions(-) diff --git a/README.md b/README.md index 3772c125f..c06cfa935 100644 --- a/README.md +++ b/README.md @@ -212,6 +212,41 @@ originating from tasks on the dispatcher queue. It is however possible that `Defect` that happen in tasks bubble up through `poll` as these are not caught by the transformation. 
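For example, a minimal sketch mirroring the `raiser` test added in this series:

```nim
proc raiser() {.async.} =
  await sleepAsync(0.milliseconds) # make sure the body runs from the poll loop
  raise newException(Defect, "uh-oh")

let fut = raiser()
try:
  waitFor(fut)                  # the Defect escapes `poll`/`waitFor` ...
except Defect:
  doAssert not fut.completed()  # ... and the future itself is left unfinished
```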
+#### Checked exceptions + +By specifying a `asyncraises` list to an async procedure, you can check which +exceptions can be thrown by it. +```nim +proc p1(): Future[void] {.async, asyncraises: [IOError].} = + assert not (compiles do: raise newException(ValueError, "uh-uh")) + raise newException(IOError, "works") # Or any child of IOError +``` + +Under the hood, the return type of `p1` will be rewritten to another type, +which will convey raises informations to await. + +```nim +proc p2(): Future[void] {.async, asyncraises: [IOError].} = + await p1() # Works, because await knows that p1 + # can only raise IOError +``` + +The hidden type (`RaiseTrackingFuture`) is implicitely convertible into a Future. +However, it may causes issues when creating callback or methods +```nim +proc p3(): Future[void] {.async, asyncraises: [IOError].} = + let fut: Future[void] = p1() # works + assert not compiles(await fut) # await lost informations about raises, + # so it can raise anything + # Callbacks + assert not(compiles do: let cb1: proc(): Future[void] = p1) # doesn't work + let cb2: proc(): Future[void] {.async, asyncraises: [IOError].} = p1 # works + assert not(compiles do: + type c = proc(): Future[void] {.async, asyncraises: [IOError, ValueError].} + let cb3: c = p1 # doesn't work, the raises must match _exactly_ + ) +``` + ### Platform independence Several functions in `chronos` are backed by the operating system, such as diff --git a/chronos.nimble b/chronos.nimble index e9c1b11db..f9e261700 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -7,7 +7,7 @@ description = "Networking framework with async/await support" license = "MIT or Apache License 2.0" skipDirs = @["tests"] -requires "nim >= 1.2.0", +requires "nim >= 1.6.0", "stew", "bearssl", "httputils", diff --git a/chronos/asyncfutures2.nim b/chronos/asyncfutures2.nim index 9674888c1..5a1383a84 100644 --- a/chronos/asyncfutures2.nim +++ b/chronos/asyncfutures2.nim @@ -8,7 +8,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import std/sequtils +import std/[sequtils, macros] import stew/base10 when chronosStackTrace: @@ -35,6 +35,12 @@ func `[]`*(loc: array[LocationKind, ptr SrcLoc], v: int): ptr SrcLoc {. else: raiseAssert("Unknown source location " & $v) type + InternalRaisesFuture*[T, E] = ref object of Future[T] + ## Future with a tuple of possible exception types + ## eg InternalRaisesFuture[void, (ValueError, OSError)] + ## Will be injected by `asyncraises`, should generally + ## not be used manually + FutureStr*[T] = ref object of Future[T] ## Future to hold GC strings gcholder*: string @@ -59,6 +65,11 @@ proc newFutureImpl[T](loc: ptr SrcLoc, flags: FutureFlags): Future[T] = internalInitFutureBase(fut, loc, FutureState.Pending, flags) fut +proc newInternalRaisesFutureImpl[T, E](loc: ptr SrcLoc): InternalRaisesFuture[T, E] = + let fut = InternalRaisesFuture[T, E]() + internalInitFutureBase(fut, loc, FutureState.Pending, {}) + fut + proc newFutureSeqImpl[A, B](loc: ptr SrcLoc): FutureSeq[A, B] = let fut = FutureSeq[A, B]() internalInitFutureBase(fut, loc, FutureState.Pending, {}) @@ -70,12 +81,28 @@ proc newFutureStrImpl[T](loc: ptr SrcLoc): FutureStr[T] = fut template newFuture*[T](fromProc: static[string] = "", - flags: static[FutureFlags] = {}): Future[T] = + flags: static[FutureFlags] = {}): auto = ## Creates a new future. ## ## Specifying ``fromProc``, which is a string specifying the name of the proc ## that this future belongs to, is a good habit as it helps with debugging. 
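  # (Sketch of the dispatch below, based on the `asyncraises` handling in
  #  asyncmacro2: a proc marked `asyncraises: [IOError]` *without* `async`
  #  gets a local
  #    type InternalRaisesFutureRaises = (IOError,)
  #  injected into its body, so this template resolves to an
  #  `InternalRaisesFuture`; in all other contexts it falls back to a plain
  #  `Future[T]`.)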
- newFutureImpl[T](getSrcLocation(fromProc), flags) + when declared(InternalRaisesFutureRaises): # injected by `asyncraises` + newInternalRaisesFutureImpl[T, InternalRaisesFutureRaises](getSrcLocation(fromProc)) + else: + newFutureImpl[T](getSrcLocation(fromProc), flags) + +macro getFutureExceptions(T: typedesc): untyped = + if getTypeInst(T)[1].len > 2: + getTypeInst(T)[1][2] + else: + ident"void" + +template newInternalRaisesFuture*[T](fromProc: static[string] = ""): auto = + ## Creates a new future. + ## + ## Specifying ``fromProc``, which is a string specifying the name of the proc + ## that this future belongs to, is a good habit as it helps with debugging. + newInternalRaisesFutureImpl[T, getFutureExceptions(typeof(result))](getSrcLocation(fromProc)) template newFutureSeq*[A, B](fromProc: static[string] = ""): FutureSeq[A, B] = ## Create a new future which can hold/preserve GC sequence until future will @@ -188,6 +215,49 @@ template fail*(future: FutureBase, error: ref CatchableError) = ## Completes ``future`` with ``error``. fail(future, error, getSrcLocation()) +macro checkFailureType(future, error: typed): untyped = + let e = getTypeInst(future)[2] + let types = getType(e) + + if types.eqIdent("void"): + error("Can't raise exceptions on this Future") + + expectKind(types, nnkBracketExpr) + expectKind(types[0], nnkSym) + assert types[0].strVal == "tuple" + assert types.len > 1 + + expectKind(getTypeInst(error), nnkRefTy) + let toMatch = getTypeInst(error)[0] + + # Can't find a way to check `is` in the macro. (sameType doesn't + # work for inherited objects). Dirty hack here, for [IOError, OSError], + # this will generate: + # + # static: + # if not((`toMatch` is IOError) or (`toMatch` is OSError) + # or (`toMatch` is CancelledError) or false): + # raiseAssert("Can't fail with `toMatch`, only [IOError, OSError] is allowed") + var typeChecker = ident"false" + + for errorType in types[1..^1]: + typeChecker = newCall("or", typeChecker, newCall("is", toMatch, errorType)) + typeChecker = newCall( + "or", typeChecker, + newCall("is", toMatch, ident"CancelledError")) + + let errorMsg = "Can't fail with " & repr(toMatch) & ". 
Only " & repr(types[1..^1]) & " allowed" + + result = nnkStaticStmt.newNimNode(lineInfoFrom=error).add( + quote do: + if not(`typeChecker`): + raiseAssert(`errorMsg`) + ) + +template fail*[T, E](future: InternalRaisesFuture[T, E], error: ref CatchableError) = + checkFailureType(future, error) + fail(future, error, getSrcLocation()) + template newCancelledError(): ref CancelledError = (ref CancelledError)(msg: "Future operation cancelled!") @@ -429,6 +499,53 @@ proc internalCheckComplete*(fut: FutureBase) {.raises: [CatchableError].} = injectStacktrace(fut.internalError) raise fut.internalError +macro internalCheckComplete*(f: InternalRaisesFuture): untyped = + # For InternalRaisesFuture[void, (ValueError, OSError), will do: + # {.cast(raises: [ValueError, OSError]).}: + # if isNil(f.error): discard + # else: raise f.error + let e = getTypeInst(f)[2] + let types = getType(e) + + if types.eqIdent("void"): + return quote do: + if not(isNil(`f`.internalError)): + raiseAssert("Unhandled future exception: " & `f`.error.msg) + + expectKind(types, nnkBracketExpr) + expectKind(types[0], nnkSym) + assert types[0].strVal == "tuple" + assert types.len > 1 + + let ifRaise = nnkIfExpr.newTree( + nnkElifExpr.newTree( + quote do: isNil(`f`.internalError), + quote do: discard + ), + nnkElseExpr.newTree( + nnkRaiseStmt.newNimNode(lineInfoFrom=f).add( + quote do: (`f`.internalError) + ) + ) + ) + + nnkPragmaBlock.newTree( + nnkPragma.newTree( + nnkCast.newTree( + newEmptyNode(), + nnkExprColonExpr.newTree( + ident"raises", + block: + var res = nnkBracket.newTree() + for r in types[1..^1]: + res.add(r) + res + ) + ), + ), + ifRaise + ) + proc read*[T: not void](future: Future[T] ): lent T {.raises: [CatchableError].} = ## Retrieves the value of ``future``. Future must be finished otherwise ## this function will fail with a ``ValueError`` exception. @@ -452,6 +569,29 @@ proc read*(future: Future[void] ) {.raises: [CatchableError].} = # TODO: Make a custom exception type for this? raise newException(ValueError, "Future still in progress.") +proc read*[T: not void, E](future: InternalRaisesFuture[T, E] ): lent T = + ## Retrieves the value of ``future``. Future must be finished otherwise + ## this function will fail with a ``ValueError`` exception. + ## + ## If the result of the future is an error then that error will be raised. + if not future.finished(): + # TODO: Make a custom exception type for this? + raise newException(ValueError, "Future still in progress.") + + internalCheckComplete(future) + future.internalValue + +proc read*[E](future: InternalRaisesFuture[void, E]) = + ## Retrieves the value of ``future``. Future must be finished otherwise + ## this function will fail with a ``ValueError`` exception. + ## + ## If the result of the future is an error then that error will be raised. + if future.finished(): + internalCheckComplete(future) + else: + # TODO: Make a custom exception type for this? + raise newException(ValueError, "Future still in progress.") + proc readError*(future: FutureBase): ref CatchableError {.raises: [ValueError].} = ## Retrieves the exception stored in ``future``. ## diff --git a/chronos/asyncmacro2.nim b/chronos/asyncmacro2.nim index d05940432..499f847e8 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/asyncmacro2.nim @@ -2,108 +2,144 @@ # # Nim's Runtime Library # (c) Copyright 2015 Dominik Picheta +# (c) Copyright 2018-Present Status Research & Development GmbH # # See the file "copying.txt", included in this # distribution, for details about the copyright. 
# -import std/[macros] +import std/algorithm proc processBody(node, setResultSym, baseType: NimNode): NimNode {.compileTime.} = - #echo(node.treeRepr) case node.kind of nnkReturnStmt: + # `return ...` -> `setResult(...); return` let res = newNimNode(nnkStmtList, node) if node[0].kind != nnkEmpty: res.add newCall(setResultSym, processBody(node[0], setResultSym, baseType)) - res.add newNimNode(nnkReturnStmt, node).add(newNilLit()) + res.add newNimNode(nnkReturnStmt, node).add(newEmptyNode()) res of RoutineNodes-{nnkTemplateDef}: - # skip all the nested procedure definitions + # Skip nested routines since they have their own return value distinct from + # the Future we inject node else: for i in 0 ..< node.len: - # We must not transform nested procedures of any form, since their - # returns are not meant for our futures node[i] = processBody(node[i], setResultSym, baseType) node -proc wrapInTryFinally(fut, baseType, body: NimNode): NimNode {.compileTime.} = +proc wrapInTryFinally(fut, baseType, body, raisesTuple: NimNode): NimNode {.compileTime.} = # creates: - # var closureSucceeded = true # try: `body` - # except CancelledError: closureSucceeded = false; `castFutureSym`.cancelAndSchedule() - # except CatchableError as exc: closureSucceeded = false; `castFutureSym`.fail(exc) - # except Defect as exc: - # closureSucceeded = false - # raise exc + # [for raise in raisesTuple]: + # except `raise`: closureSucceeded = false; `castFutureSym`.fail(exc) # finally: # if closureSucceeded: # `castFutureSym`.complete(result) - - # we are completing inside finally to make sure the completion happens even - # after a `return` - let closureSucceeded = genSym(nskVar, "closureSucceeded") - var nTry = nnkTryStmt.newTree(body) - nTry.add nnkExceptBranch.newTree( - ident"CancelledError", - nnkStmtList.newTree( - nnkAsgn.newTree(closureSucceeded, ident"false"), - newCall(ident "cancelAndSchedule", fut) - ) - ) - - nTry.add nnkExceptBranch.newTree( - nnkInfix.newTree(ident"as", ident"CatchableError", ident"exc"), - nnkStmtList.newTree( - nnkAsgn.newTree(closureSucceeded, ident"false"), - newCall(ident "fail", fut, ident"exc") - ) - ) - - nTry.add nnkExceptBranch.newTree( - nnkInfix.newTree(ident"as", ident"Defect", ident"exc"), + # + # Calling `complete` inside `finally` ensures that all success paths + # (including early returns and code inside nested finally statements and + # defer) are completed with the final contents of `result` + let + closureSucceeded = genSym(nskVar, "closureSucceeded") + nTry = nnkTryStmt.newTree(body) + excName = ident"exc" + + # Depending on the exception type, we must have at most one of each of these + # "special" exception handlers that are needed to implement cancellation and + # Defect propagation + var + hasDefect = false + hasCancelledError = false + hasCatchableError = false + + template addDefect = + if not hasDefect: + hasDefect = true + # When a Defect is raised, the program is in an undefined state and + # continuing running other tasks while the Future completion sits on the + # callback queue may lead to further damage so we re-raise them eagerly. 
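      # (Illustration, not emitted verbatim: the branch added below
      #  corresponds to
      #    except Defect as exc:
      #      closureSucceeded = false
      #      raise exc
      #  i.e. the Defect propagates out of the iterator instead of being
      #  stored in the future.)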
+ nTry.add nnkExceptBranch.newTree( + nnkInfix.newTree(ident"as", ident"Defect", excName), nnkStmtList.newTree( nnkAsgn.newTree(closureSucceeded, ident"false"), - nnkRaiseStmt.newTree(ident"exc") + nnkRaiseStmt.newTree(excName) ) ) + template addCancelledError = + if not hasCancelledError: + hasCancelledError = true + nTry.add nnkExceptBranch.newTree( + ident"CancelledError", + nnkStmtList.newTree( + nnkAsgn.newTree(closureSucceeded, ident"false"), + newCall(ident "cancelAndSchedule", fut) + ) + ) - when not chronosStrictException: - # adds - # except Exception as exc: - # closureSucceeded = false - # fut.fail((ref ValueError)(msg: exc.msg, parent: exc)) - let excName = ident"exc" - - nTry.add nnkExceptBranch.newTree( - nnkInfix.newTree(ident"as", ident"Exception", ident"exc"), - nnkStmtList.newTree( - nnkAsgn.newTree(closureSucceeded, ident"false"), + template addCatchableError = + if not hasCatchableError: + hasCatchableError = true + nTry.add nnkExceptBranch.newTree( + nnkInfix.newTree(ident"as", ident"CatchableError", excName), + nnkStmtList.newTree( + nnkAsgn.newTree(closureSucceeded, ident"false"), + newCall(ident "fail", fut, excName) + )) + + for exc in raisesTuple: + if exc.eqIdent("Exception"): + addCancelledError + addCatchableError + addDefect + + # Because we store `CatchableError` in the Future, we cannot re-raise the + # original exception + nTry.add nnkExceptBranch.newTree( + nnkInfix.newTree(ident"as", ident"Exception", excName), newCall(ident "fail", fut, - quote do: (ref ValueError)(msg: `excName`.msg, parent: `excName`)), + nnkStmtList.newTree( + nnkAsgn.newTree(closureSucceeded, ident"false"), + quote do: (ref ValueError)(msg: `excName`.msg, parent: `excName`))) ) - ) + elif exc.eqIdent("CancelledError"): + addCancelledError + elif exc.eqIdent("CatchableError"): + # Ensure cancellations are re-routed to the cancellation handler even if + # not explicitly specified in the raises list + addCancelledError + addCatchableError + else: + nTry.add nnkExceptBranch.newTree( + nnkInfix.newTree(ident"as", exc, excName), + nnkStmtList.newTree( + nnkAsgn.newTree(closureSucceeded, ident"false"), + newCall(ident "fail", fut, excName) + )) nTry.add nnkFinally.newTree( - nnkIfStmt.newTree( - nnkElifBranch.newTree( - closureSucceeded, - nnkWhenStmt.newTree( - nnkElifExpr.newTree( - nnkInfix.newTree(ident "is", baseType, ident "void"), - newCall(ident "complete", fut) - ), - nnkElseExpr.newTree( - newCall(ident "complete", fut, ident "result") - ) - ) - ) + nnkIfStmt.newTree( + nnkElifBranch.newTree( + closureSucceeded, + if baseType.eqIdent("void"): # shortcut for non-generic void + newCall(ident "complete", fut) + else: + nnkWhenStmt.newTree( + nnkElifExpr.newTree( + nnkInfix.newTree(ident "is", baseType, ident "void"), + newCall(ident "complete", fut) + ), + nnkElseExpr.newTree( + newCall(ident "complete", fut, ident "result") ) ) - return nnkStmtList.newTree( + ) + ) + ) + + nnkStmtList.newTree( newVarStmt(closureSucceeded, ident"true"), nTry ) @@ -144,6 +180,54 @@ proc cleanupOpenSymChoice(node: NimNode): NimNode {.compileTime.} = for child in node: result.add(cleanupOpenSymChoice(child)) +proc getAsyncCfg(prc: NimNode): tuple[raises: bool, async: bool, raisesTuple: NimNode] = + # reads the pragmas to extract the useful data + # and removes them + var + foundRaises = -1 + foundAsync = -1 + + for index, pragma in pragma(prc): + if pragma.kind == nnkExprColonExpr and pragma[0] == ident "asyncraises": + foundRaises = index + elif pragma.eqIdent("async"): + foundAsync = index + elif 
pragma.kind == nnkExprColonExpr and pragma[0] == ident "raises": + warning("The raises pragma doesn't work on async procedure. " & + "Please remove it or use asyncraises instead") + + result.raises = foundRaises >= 0 + result.async = foundAsync >= 0 + result.raisesTuple = nnkTupleConstr.newTree() + + if foundRaises >= 0: + for possibleRaise in pragma(prc)[foundRaises][1]: + result.raisesTuple.add(possibleRaise) + if result.raisesTuple.len == 0: + result.raisesTuple = ident("void") + else: + when defined(chronosWarnMissingRaises): + warning("Async proc miss asyncraises") + const defaultException = + when defined(chronosStrictException): "CatchableError" + else: "Exception" + result.raisesTuple.add(ident(defaultException)) + + let toRemoveList = @[foundRaises, foundAsync].filterIt(it >= 0).sorted().reversed() + for toRemove in toRemoveList: + pragma(prc).del(toRemove) + +proc isEmpty(n: NimNode): bool {.compileTime.} = + # true iff node recursively contains only comments or empties + case n.kind + of nnkEmpty, nnkCommentStmt: true + of nnkStmtList: + for child in n: + if not isEmpty(child): return false + true + else: + false + proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = ## This macro transforms a single procedure into a closure iterator. ## The ``async`` macro supports a stmtList holding multiple async procedures. @@ -158,7 +242,8 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = if returnType.kind == nnkEmpty: ident "void" elif not ( - returnType.kind == nnkBracketExpr and eqIdent(returnType[0], "Future")): + returnType.kind == nnkBracketExpr and + (eqIdent(returnType[0], "Future") or eqIdent(returnType[0], "InternalRaisesFuture"))): error( "Expected return type of 'Future' got '" & repr(returnType) & "'", prc) return @@ -168,77 +253,111 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = let baseTypeIsVoid = baseType.eqIdent("void") futureVoidType = nnkBracketExpr.newTree(ident "Future", ident "void") + (hasRaises, isAsync, raisesTuple) = getAsyncCfg(prc) + + if hasRaises: + # Store `asyncraises` types in InternalRaisesFuture + prc.params2[0] = nnkBracketExpr.newTree( + newIdentNode("InternalRaisesFuture"), + baseType, + raisesTuple + ) + elif baseTypeIsVoid: + # Adds the implicit Future[void] + prc.params2[0] = + newNimNode(nnkBracketExpr, prc). + add(newIdentNode("Future")). + add(newIdentNode("void")) - if prc.kind in {nnkProcDef, nnkLambda, nnkMethodDef, nnkDo}: - let - prcName = prc.name.getName - outerProcBody = newNimNode(nnkStmtList, prc.body) + if prc.kind notin {nnkProcTy, nnkLambda}: # TODO: Nim bug? 
+ prc.addPragma(newColonExpr(ident "stackTrace", ident "off")) - # Copy comment for nimdoc - if prc.body.len > 0 and prc.body[0].kind == nnkCommentStmt: - outerProcBody.add(prc.body[0]) + # The proc itself doesn't raise + prc.addPragma( + nnkExprColonExpr.newTree(newIdentNode("raises"), nnkBracket.newTree())) + + # `gcsafe` isn't deduced even though we require async code to be gcsafe + # https://github.com/nim-lang/RFCs/issues/435 + prc.addPragma(newIdentNode("gcsafe")) + + if isAsync == false: # `asyncraises` without `async` + # type InternalRaisesFutureRaises = `raisesTuple` + # `body` + prc.body = nnkStmtList.newTree( + nnkTypeSection.newTree( + nnkTypeDef.newTree( + ident"InternalRaisesFutureRaises", + newEmptyNode(), + raisesTuple + ) + ), + prc.body + ) + return prc + + if prc.kind in {nnkProcDef, nnkLambda, nnkMethodDef, nnkDo} and + not isEmpty(prc.body): + # don't do anything with forward bodies (empty) let + prcName = prc.name.getName + setResultSym = ident "setResult" + procBody = prc.body.processBody(setResultSym, baseType) internalFutureSym = ident "chronosInternalRetFuture" internalFutureType = if baseTypeIsVoid: futureVoidType else: returnType castFutureSym = nnkCast.newTree(internalFutureType, internalFutureSym) - setResultSym = ident"setResult" - - procBody = prc.body.processBody(setResultSym, baseType) - - # don't do anything with forward bodies (empty) - if procBody.kind != nnkEmpty: - let - # fix #13899, `defer` should not escape its original scope - procBodyBlck = nnkBlockStmt.newTree(newEmptyNode(), procBody) - - resultDecl = nnkWhenStmt.newTree( - # when `baseType` is void: - nnkElifExpr.newTree( - nnkInfix.newTree(ident "is", baseType, ident "void"), - quote do: - template result: auto {.used.} = - {.fatal: "You should not reference the `result` variable inside" & - " a void async proc".} - ), - # else: - nnkElseExpr.newTree( - newStmtList( - quote do: {.push warning[resultshadowed]: off.}, - # var result {.used.}: `baseType` - # In the proc body, result may or may not end up being used - # depending on how the body is written - with implicit returns / - # expressions in particular, it is likely but not guaranteed that - # it is not used. Ideally, we would avoid emitting it in this - # case to avoid the default initializaiton. {.used.} typically - # works better than {.push.} which has a tendency to leak out of - # scope. - # TODO figure out if there's a way to detect `result` usage in - # the proc body _after_ template exapnsion, and therefore - # avoid creating this variable - one option is to create an - # addtional when branch witha fake `result` and check - # `compiles(procBody)` - this is not without cost though - nnkVarSection.newTree(nnkIdentDefs.newTree( - nnkPragmaExpr.newTree( - ident "result", - nnkPragma.newTree(ident "used")), - baseType, newEmptyNode()) - ), - quote do: {.pop.}, - ) + resultIdent = ident "result" + + resultDecl = nnkWhenStmt.newTree( + # when `baseType` is void: + nnkElifExpr.newTree( + nnkInfix.newTree(ident "is", baseType, ident "void"), + quote do: + template result: auto {.used.} = + {.fatal: "You should not reference the `result` variable inside" & + " a void async proc".} + ), + # else: + nnkElseExpr.newTree( + newStmtList( + quote do: {.push warning[resultshadowed]: off.}, + # var result {.used.}: `baseType` + # In the proc body, result may or may not end up being used + # depending on how the body is written - with implicit returns / + # expressions in particular, it is likely but not guaranteed that + # it is not used. 
Ideally, we would avoid emitting it in this + # case to avoid the default initializaiton. {.used.} typically + # works better than {.push.} which has a tendency to leak out of + # scope. + # TODO figure out if there's a way to detect `result` usage in + # the proc body _after_ template exapnsion, and therefore + # avoid creating this variable - one option is to create an + # addtional when branch witha fake `result` and check + # `compiles(procBody)` - this is not without cost though + nnkVarSection.newTree(nnkIdentDefs.newTree( + nnkPragmaExpr.newTree( + resultIdent, + nnkPragma.newTree(ident "used")), + baseType, newEmptyNode()) + ), + quote do: {.pop.}, ) ) + ) - # generates: - # template `setResultSym`(code: untyped) {.used.} = - # when typeof(code) is void: code - # else: result = code - # - # this is useful to handle implicit returns, but also - # to bind the `result` to the one we declare here - setResultDecl = + # generates: + # template `setResultSym`(code: untyped) {.used.} = + # when typeof(code) is void: code + # else: `resultIdent` = code + # + # this is useful to handle implicit returns, but also + # to bind the `result` to the one we declare here + setResultDecl = + if baseTypeIsVoid: # shortcut for non-generic void + newEmptyNode() + else: nnkTemplateDef.newTree( setResultSym, newEmptyNode(), newEmptyNode(), @@ -254,107 +373,91 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = newEmptyNode(), nnkWhenStmt.newTree( nnkElifBranch.newTree( - nnkInfix.newTree(ident"is", nnkTypeOfExpr.newTree(ident"code"), ident"void"), + nnkInfix.newTree( + ident"is", nnkTypeOfExpr.newTree(ident"code"), ident"void"), ident"code" ), nnkElse.newTree( - newAssignment(ident"result", ident"code") + newAssignment(resultIdent, ident"code") ) ) ) - completeDecl = wrapInTryFinally( - castFutureSym, baseType, - newCall(setResultSym, procBodyBlck) - ) - - closureBody = newStmtList(resultDecl, setResultDecl, completeDecl) - - internalFutureParameter = nnkIdentDefs.newTree( - internalFutureSym, newIdentNode("FutureBase"), newEmptyNode()) - iteratorNameSym = genSym(nskIterator, $prcName) - closureIterator = newProc( - iteratorNameSym, - [newIdentNode("FutureBase"), internalFutureParameter], - closureBody, nnkIteratorDef) + # Wrapping in try/finally ensures that early returns are handled properly + # and that `defer` is processed in the right scope + completeDecl = wrapInTryFinally( + castFutureSym, baseType, + if baseTypeIsVoid: procBody # shortcut for non-generic `void` + else: newCall(setResultSym, procBody), + raisesTuple + ) - iteratorNameSym.copyLineInfo(prc) + closureBody = newStmtList(resultDecl, setResultDecl, completeDecl) - closureIterator.pragma = newNimNode(nnkPragma, lineInfoFrom=prc.body) - closureIterator.addPragma(newIdentNode("closure")) + internalFutureParameter = nnkIdentDefs.newTree( + internalFutureSym, newIdentNode("FutureBase"), newEmptyNode()) + iteratorNameSym = genSym(nskIterator, $prcName) + closureIterator = newProc( + iteratorNameSym, + [newIdentNode("FutureBase"), internalFutureParameter], + closureBody, nnkIteratorDef) - # `async` code must be gcsafe - closureIterator.addPragma(newIdentNode("gcsafe")) + outerProcBody = newNimNode(nnkStmtList, prc.body) - # TODO when push raises is active in a module, the iterator here inherits - # that annotation - here we explicitly disable it again which goes - # against the spirit of the raises annotation - one should investigate - # here the possibility of transporting more specific error types here - # for example by casting 
exceptions coming out of `await`.. - let raises = nnkBracket.newTree() + # Copy comment for nimdoc + if prc.body.len > 0 and prc.body[0].kind == nnkCommentStmt: + outerProcBody.add(prc.body[0]) - closureIterator.addPragma(nnkExprColonExpr.newTree( - newIdentNode("raises"), - raises - )) + iteratorNameSym.copyLineInfo(prc) - # If proc has an explicit gcsafe pragma, we add it to iterator as well. - # TODO if these lines are not here, srcloc tests fail (!) - if prc.pragma.findChild(it.kind in {nnkSym, nnkIdent} and - it.strVal == "gcsafe") != nil: - closureIterator.addPragma(newIdentNode("gcsafe")) - - outerProcBody.add(closureIterator) - - # -> let resultFuture = newFuture[T]() - # declared at the end to be sure that the closure - # doesn't reference it, avoid cyclic ref (#203) - let - retFutureSym = ident "resultFuture" - retFutureSym.copyLineInfo(prc) - # Do not change this code to `quote do` version because `instantiationInfo` - # will be broken for `newFuture()` call. - outerProcBody.add( - newLetStmt( - retFutureSym, - newCall(newTree(nnkBracketExpr, ident "newFuture", baseType), - newLit(prcName)) - ) - ) - # -> resultFuture.internalClosure = iterator - outerProcBody.add( - newAssignment( - newDotExpr(retFutureSym, newIdentNode("internalClosure")), - iteratorNameSym) - ) + closureIterator.pragma = newNimNode(nnkPragma, lineInfoFrom=prc.body) + closureIterator.addPragma(newIdentNode("closure")) - # -> futureContinue(resultFuture)) - outerProcBody.add( - newCall(newIdentNode("futureContinue"), retFutureSym) - ) + # `async` code must be gcsafe + closureIterator.addPragma(newIdentNode("gcsafe")) - # -> return resultFuture - outerProcBody.add newNimNode(nnkReturnStmt, prc.body[^1]).add(retFutureSym) + # Exceptions are caught inside the iterator and stored in the future + closureIterator.addPragma(nnkExprColonExpr.newTree( + newIdentNode("raises"), + nnkBracket.newTree() + )) - prc.body = outerProcBody + outerProcBody.add(closureIterator) - if prc.kind notin {nnkProcTy, nnkLambda}: # TODO: Nim bug? - prc.addPragma(newColonExpr(ident "stackTrace", ident "off")) + # -> let resultFuture = newInternalRaisesFuture[T]() + # declared at the end to be sure that the closure + # doesn't reference it, avoid cyclic ref (#203) + let + retFutureSym = ident "resultFuture" + retFutureSym.copyLineInfo(prc) + # Do not change this code to `quote do` version because `instantiationInfo` + # will be broken for `newFuture()` call. + outerProcBody.add( + newLetStmt( + retFutureSym, + newCall(newTree(nnkBracketExpr, ident "newInternalRaisesFuture", baseType), + newLit(prcName)) + ) + ) + # -> resultFuture.internalClosure = iterator + outerProcBody.add( + newAssignment( + newDotExpr(retFutureSym, newIdentNode("internalClosure")), + iteratorNameSym) + ) - # See **Remark 435** in this file. 
- # https://github.com/nim-lang/RFCs/issues/435 - prc.addPragma(newIdentNode("gcsafe")) + # -> futureContinue(resultFuture)) + outerProcBody.add( + newCall(newIdentNode("futureContinue"), retFutureSym) + ) - prc.addPragma(nnkExprColonExpr.newTree( - newIdentNode("raises"), - nnkBracket.newTree() - )) + # -> return resultFuture + outerProcBody.add newNimNode(nnkReturnStmt, prc.body[^1]).add(retFutureSym) - if baseTypeIsVoid: - if returnType.kind == nnkEmpty: - # Add Future[void] - prc.params2[0] = futureVoidType + prc.body = outerProcBody + when chronosDumpAsync: + echo repr prc prc template await*[T](f: Future[T]): untyped = @@ -365,7 +468,8 @@ template await*[T](f: Future[T]): untyped = # responsible for resuming execution once the yielded future is finished yield chronosInternalRetFuture.internalChild # `child` released by `futureContinue` - chronosInternalRetFuture.internalChild.internalCheckComplete() + cast[type(f)](chronosInternalRetFuture.internalChild).internalCheckComplete() + when T isnot void: cast[type(f)](chronosInternalRetFuture.internalChild).value() else: @@ -385,8 +489,26 @@ macro async*(prc: untyped): untyped = if prc.kind == nnkStmtList: result = newStmtList() for oneProc in prc: + oneProc.addPragma(ident"async") result.add asyncSingleProc(oneProc) else: + prc.addPragma(ident"async") + result = asyncSingleProc(prc) + +macro asyncraises*(possibleExceptions, prc: untyped): untyped = + # Add back the pragma and let asyncSingleProc handle it + # Exerimental / subject to change and/or removal + if prc.kind == nnkStmtList: + result = newStmtList() + for oneProc in prc: + oneProc.addPragma(nnkExprColonExpr.newTree( + ident"asyncraises", + possibleExceptions + )) + result.add asyncSingleProc(oneProc) + else: + prc.addPragma(nnkExprColonExpr.newTree( + ident"asyncraises", + possibleExceptions + )) result = asyncSingleProc(prc) - when chronosDumpAsync: - echo repr result diff --git a/tests/testfut.nim b/tests/testfut.nim index bc61594b8..fc9d48288 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -1223,11 +1223,11 @@ suite "Future[T] behavior test suite": test "location test": # WARNING: This test is very sensitive to line numbers and module name. 
- proc macroFuture() {.async.} = # LINE POSITION 1 - let someVar {.used.} = 5 # LINE POSITION 2 + proc macroFuture() {.async.} = + let someVar {.used.} = 5 # LINE POSITION 1 let someOtherVar {.used.} = 4 if true: - let otherVar {.used.} = 3 + let otherVar {.used.} = 3 # LINE POSITION 2 template templateFuture(): untyped = newFuture[void]("template") @@ -1260,8 +1260,8 @@ suite "Future[T] behavior test suite": (loc.procedure == procedure) check: - chk(loc10, "testfut.nim", 1226, "macroFuture") - chk(loc11, "testfut.nim", 1227, "") + chk(loc10, "testfut.nim", 1227, "macroFuture") + chk(loc11, "testfut.nim", 1230, "") chk(loc20, "testfut.nim", 1239, "template") chk(loc21, "testfut.nim", 1242, "") chk(loc30, "testfut.nim", 1236, "procedure") diff --git a/tests/testmacro.nim b/tests/testmacro.nim index bd53078ad..2d95a7fad 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -151,6 +151,10 @@ suite "Macro transformations test suite": check waitFor(nr()) == 42 +# There are a few unreacheable statements to ensure that we don't regress in +# generated code +{.push warning[UnreachableCode]: off.} + suite "Macro transformations - completions": test "Run closure to completion on return": # issue #415 var x = 0 @@ -203,6 +207,21 @@ suite "Macro transformations - completions": testWeirdCase() == waitFor(testWeirdCaseAsync()) testWeirdCase() == 55 + test "Correct return value with result assignment in defer": + proc testWeirdCase: int = + defer: + result = 55 + result = 33 + proc testWeirdCaseAsync: Future[int] {.async.} = + defer: + result = 55 + await sleepAsync(1.milliseconds) + return 33 + + check: + testWeirdCase() == waitFor(testWeirdCaseAsync()) + testWeirdCase() == 55 + test "Generic & finally calling async": proc testGeneric(T: type): Future[T] {.async.} = try: @@ -264,6 +283,7 @@ suite "Macro transformations - completions": result = 12 result = await a2() check waitFor(asyncInAsync()) == 12 +{.pop.} suite "Macro transformations - implicit returns": test "Implicit return": @@ -362,3 +382,98 @@ suite "Closure iterator's exception transformation issues": waitFor(x()) +suite "Exceptions tracking": + template checkNotCompiles(body: untyped) = + check (not compiles(body)) + test "Can raise valid exception": + proc test1 {.async.} = raise newException(ValueError, "hey") + proc test2 {.async, asyncraises: [ValueError].} = raise newException(ValueError, "hey") + proc test3 {.async, asyncraises: [IOError, ValueError].} = + if 1 == 2: + raise newException(ValueError, "hey") + else: + raise newException(IOError, "hey") + + proc test4 {.async, asyncraises: [], used.} = raise newException(Defect, "hey") + proc test5 {.async, asyncraises: [].} = discard + proc test6 {.async, asyncraises: [].} = await test5() + + expect(ValueError): waitFor test1() + expect(ValueError): waitFor test2() + expect(IOError): waitFor test3() + waitFor test6() + + test "Cannot raise invalid exception": + checkNotCompiles: + proc test3 {.async, asyncraises: [IOError].} = raise newException(ValueError, "hey") + + test "Explicit return in non-raising proc": + proc test(): Future[int] {.async, asyncraises: [].} = return 12 + check: + waitFor(test()) == 12 + + test "Non-raising compatibility": + proc test1 {.async, asyncraises: [ValueError].} = raise newException(ValueError, "hey") + let testVar: Future[void] = test1() + + proc test2 {.async.} = raise newException(ValueError, "hey") + let testVar2: proc: Future[void] = test2 + + # Doesn't work unfortunately + #let testVar3: proc: Future[void] = test1 + + test "Cannot store 
invalid future types": + proc test1 {.async, asyncraises: [ValueError].} = raise newException(ValueError, "hey") + proc test2 {.async, asyncraises: [IOError].} = raise newException(IOError, "hey") + + var a = test1() + checkNotCompiles: + a = test2() + + test "Await raises the correct types": + proc test1 {.async, asyncraises: [ValueError].} = raise newException(ValueError, "hey") + proc test2 {.async, asyncraises: [ValueError, CancelledError].} = await test1() + checkNotCompiles: + proc test3 {.async, asyncraises: [CancelledError].} = await test1() + + test "Can create callbacks": + proc test1 {.async, asyncraises: [ValueError].} = raise newException(ValueError, "hey") + let callback: proc() {.async, asyncraises: [ValueError].} = test1 + + test "Can return values": + proc test1: Future[int] {.async, asyncraises: [ValueError].} = + if 1 == 0: raise newException(ValueError, "hey") + return 12 + proc test2: Future[int] {.async, asyncraises: [ValueError, IOError, CancelledError].} = + return await test1() + + checkNotCompiles: + proc test3: Future[int] {.async, asyncraises: [CancelledError].} = await test1() + + check waitFor(test2()) == 12 + + test "Manual tracking": + proc test1: Future[int] {.asyncraises: [ValueError].} = + result = newFuture[int]() + result.complete(12) + check waitFor(test1()) == 12 + + proc test2: Future[int] {.asyncraises: [IOError, OSError].} = + result = newFuture[int]() + result.fail(newException(IOError, "fail")) + result.fail(newException(OSError, "fail")) + checkNotCompiles: + result.fail(newException(ValueError, "fail")) + + proc test3: Future[void] {.asyncraises: [].} = + checkNotCompiles: + result.fail(newException(ValueError, "fail")) + + # Inheritance + proc test4: Future[void] {.asyncraises: [CatchableError].} = + result.fail(newException(IOError, "fail")) + + test "Reversed async, asyncraises": + proc test44 {.asyncraises: [ValueError], async.} = raise newException(ValueError, "hey") + checkNotCompiles: + proc test33 {.asyncraises: [IOError], async.} = raise newException(ValueError, "hey") From be9eef7a091da00720c8cf5c03f77d418c7c2b8a Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 17 Oct 2023 14:19:20 +0200 Subject: [PATCH 069/146] move test data to c file (#448) * move test data to c file allows compiling with nlvm * more nlvm compat --- chronos/ioselects/ioselectors_epoll.nim | 2 +- tests/testasyncstream.c | 63 ++++++++++++++++++++++ tests/testasyncstream.nim | 69 ++----------------------- 3 files changed, 68 insertions(+), 66 deletions(-) create mode 100644 tests/testasyncstream.c diff --git a/chronos/ioselects/ioselectors_epoll.nim b/chronos/ioselects/ioselectors_epoll.nim index 161a5dfbe..2156a390c 100644 --- a/chronos/ioselects/ioselectors_epoll.nim +++ b/chronos/ioselects/ioselectors_epoll.nim @@ -411,7 +411,7 @@ proc registerProcess*[T](s: Selector, pid: int, data: T): SelectResult[cint] = s.freeKey(fdi32) s.freeProcess(int32(pid)) return err(res.error()) - s.pidFd = Opt.some(cast[cint](res.get())) + s.pidFd = Opt.some(res.get()) ok(cint(fdi32)) diff --git a/tests/testasyncstream.c b/tests/testasyncstream.c new file mode 100644 index 000000000..ecab9a9cf --- /dev/null +++ b/tests/testasyncstream.c @@ -0,0 +1,63 @@ +#include + +// This is the X509TrustAnchor for the SelfSignedRsaCert above +// Generate by doing the following: +// 1. Compile `brssl` from BearSSL +// 2. Run `brssl ta filewithSelfSignedRsaCert.pem` +// 3. Paste the output in the emit block below +// 4. 
Rename `TAs` to `SelfSignedTAs` + +static const unsigned char TA0_DN[] = { + 0x30, 0x5F, 0x31, 0x0B, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, + 0x0C, 0x0A, 0x53, 0x6F, 0x6D, 0x65, 0x2D, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x31, 0x21, 0x30, 0x1F, 0x06, 0x03, 0x55, 0x04, 0x0A, 0x0C, 0x18, 0x49, + 0x6E, 0x74, 0x65, 0x72, 0x6E, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, + 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4C, 0x74, 0x64, 0x31, + 0x18, 0x30, 0x16, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0C, 0x0F, 0x31, 0x32, + 0x37, 0x2E, 0x30, 0x2E, 0x30, 0x2E, 0x31, 0x3A, 0x34, 0x33, 0x38, 0x30, + 0x38 +}; + +static const unsigned char TA0_RSA_N[] = { + 0xA7, 0xEE, 0xD5, 0xC6, 0x2C, 0xA3, 0x08, 0x33, 0x33, 0x86, 0xB5, 0x5C, + 0xD4, 0x8B, 0x16, 0xB1, 0xD7, 0xF7, 0xED, 0x95, 0x22, 0xDC, 0xA4, 0x40, + 0x24, 0x64, 0xC3, 0x91, 0xBA, 0x20, 0x82, 0x9D, 0x88, 0xED, 0x20, 0x98, + 0x46, 0x65, 0xDC, 0xD1, 0x15, 0x90, 0xBC, 0x7C, 0x19, 0x5F, 0x00, 0x96, + 0x69, 0x2C, 0x80, 0x0E, 0x7D, 0x7D, 0x8B, 0xD9, 0xFD, 0x49, 0x66, 0xEC, + 0x29, 0xC0, 0x39, 0x0E, 0x22, 0xF3, 0x6A, 0x28, 0xC0, 0x6B, 0x97, 0x93, + 0x2F, 0x92, 0x5E, 0x5A, 0xCC, 0xF4, 0xF4, 0xAE, 0xD9, 0xE3, 0xBB, 0x0A, + 0xDC, 0xA8, 0xDE, 0x4D, 0x16, 0xD6, 0xE6, 0x64, 0xF2, 0x85, 0x62, 0xF6, + 0xE3, 0x7B, 0x1D, 0x9A, 0x5C, 0x6A, 0xA3, 0x97, 0x93, 0x16, 0x9D, 0x02, + 0x2C, 0xFD, 0x90, 0x3E, 0xF8, 0x35, 0x44, 0x5E, 0x66, 0x8D, 0xF6, 0x80, + 0xF1, 0x71, 0x9B, 0x2F, 0x44, 0xC0, 0xCA, 0x7E, 0xB1, 0x90, 0x7F, 0xD8, + 0x8B, 0x7A, 0x85, 0x4B, 0xE3, 0xB1, 0xB1, 0xF4, 0xAA, 0x6A, 0x36, 0xA0, + 0xFF, 0x24, 0xB2, 0x27, 0xE0, 0xBA, 0x62, 0x7A, 0xE9, 0x95, 0xC9, 0x88, + 0x9D, 0x9B, 0xAB, 0xA4, 0x4C, 0xEA, 0x87, 0x46, 0xFA, 0xD6, 0x9B, 0x7E, + 0xB2, 0xE9, 0x5B, 0xCA, 0x5B, 0x84, 0xC4, 0xF7, 0xB4, 0xC7, 0x69, 0xC5, + 0x0B, 0x9A, 0x47, 0x9A, 0x86, 0xD4, 0xDF, 0xF3, 0x30, 0xC9, 0x6D, 0xB8, + 0x78, 0x10, 0xEF, 0xA0, 0x89, 0xF8, 0x30, 0x80, 0x9D, 0x96, 0x05, 0x44, + 0xB4, 0xFB, 0x98, 0x4C, 0x71, 0x6B, 0xBC, 0xD7, 0x5D, 0x66, 0x5E, 0x66, + 0xA7, 0x94, 0xE5, 0x65, 0x72, 0x85, 0xBC, 0x7C, 0x7F, 0x11, 0x98, 0xF8, + 0xCB, 0xD5, 0xE2, 0xB5, 0x67, 0x78, 0xF7, 0x49, 0x51, 0xC4, 0x7F, 0xBA, + 0x16, 0x66, 0xD2, 0x15, 0x5B, 0x98, 0x06, 0x03, 0x48, 0xD0, 0x9D, 0xF0, + 0x38, 0x2B, 0x9D, 0x51 +}; + +static const unsigned char TA0_RSA_E[] = { + 0x01, 0x00, 0x01 +}; + +const br_x509_trust_anchor SelfSignedTAs[1] = { + { + { (unsigned char *)TA0_DN, sizeof TA0_DN }, + BR_X509_TA_CA, + { + BR_KEYTYPE_RSA, + { .rsa = { + (unsigned char *)TA0_RSA_N, sizeof TA0_RSA_N, + (unsigned char *)TA0_RSA_E, sizeof TA0_RSA_E, + } } + } + } +}; diff --git a/tests/testasyncstream.nim b/tests/testasyncstream.nim index d90b6887a..c5701bb97 100644 --- a/tests/testasyncstream.nim +++ b/tests/testasyncstream.nim @@ -73,69 +73,8 @@ N8r5CwGcIX/XPC3lKazzbZ8baA== -----END CERTIFICATE----- """ -# This is the X509TrustAnchor for the SelfSignedRsaCert above -# Generate by doing the following: -# 1. Compile `brssl` from BearSSL -# 2. Run `brssl ta filewithSelfSignedRsaCert.pem` -# 3. Paste the output in the emit block below -# 4. 
Rename `TAs` to `SelfSignedTAs` -{.emit: """ -static const unsigned char TA0_DN[] = { - 0x30, 0x5F, 0x31, 0x0B, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, - 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, - 0x0C, 0x0A, 0x53, 0x6F, 0x6D, 0x65, 0x2D, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x31, 0x21, 0x30, 0x1F, 0x06, 0x03, 0x55, 0x04, 0x0A, 0x0C, 0x18, 0x49, - 0x6E, 0x74, 0x65, 0x72, 0x6E, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, - 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4C, 0x74, 0x64, 0x31, - 0x18, 0x30, 0x16, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0C, 0x0F, 0x31, 0x32, - 0x37, 0x2E, 0x30, 0x2E, 0x30, 0x2E, 0x31, 0x3A, 0x34, 0x33, 0x38, 0x30, - 0x38 -}; - -static const unsigned char TA0_RSA_N[] = { - 0xA7, 0xEE, 0xD5, 0xC6, 0x2C, 0xA3, 0x08, 0x33, 0x33, 0x86, 0xB5, 0x5C, - 0xD4, 0x8B, 0x16, 0xB1, 0xD7, 0xF7, 0xED, 0x95, 0x22, 0xDC, 0xA4, 0x40, - 0x24, 0x64, 0xC3, 0x91, 0xBA, 0x20, 0x82, 0x9D, 0x88, 0xED, 0x20, 0x98, - 0x46, 0x65, 0xDC, 0xD1, 0x15, 0x90, 0xBC, 0x7C, 0x19, 0x5F, 0x00, 0x96, - 0x69, 0x2C, 0x80, 0x0E, 0x7D, 0x7D, 0x8B, 0xD9, 0xFD, 0x49, 0x66, 0xEC, - 0x29, 0xC0, 0x39, 0x0E, 0x22, 0xF3, 0x6A, 0x28, 0xC0, 0x6B, 0x97, 0x93, - 0x2F, 0x92, 0x5E, 0x5A, 0xCC, 0xF4, 0xF4, 0xAE, 0xD9, 0xE3, 0xBB, 0x0A, - 0xDC, 0xA8, 0xDE, 0x4D, 0x16, 0xD6, 0xE6, 0x64, 0xF2, 0x85, 0x62, 0xF6, - 0xE3, 0x7B, 0x1D, 0x9A, 0x5C, 0x6A, 0xA3, 0x97, 0x93, 0x16, 0x9D, 0x02, - 0x2C, 0xFD, 0x90, 0x3E, 0xF8, 0x35, 0x44, 0x5E, 0x66, 0x8D, 0xF6, 0x80, - 0xF1, 0x71, 0x9B, 0x2F, 0x44, 0xC0, 0xCA, 0x7E, 0xB1, 0x90, 0x7F, 0xD8, - 0x8B, 0x7A, 0x85, 0x4B, 0xE3, 0xB1, 0xB1, 0xF4, 0xAA, 0x6A, 0x36, 0xA0, - 0xFF, 0x24, 0xB2, 0x27, 0xE0, 0xBA, 0x62, 0x7A, 0xE9, 0x95, 0xC9, 0x88, - 0x9D, 0x9B, 0xAB, 0xA4, 0x4C, 0xEA, 0x87, 0x46, 0xFA, 0xD6, 0x9B, 0x7E, - 0xB2, 0xE9, 0x5B, 0xCA, 0x5B, 0x84, 0xC4, 0xF7, 0xB4, 0xC7, 0x69, 0xC5, - 0x0B, 0x9A, 0x47, 0x9A, 0x86, 0xD4, 0xDF, 0xF3, 0x30, 0xC9, 0x6D, 0xB8, - 0x78, 0x10, 0xEF, 0xA0, 0x89, 0xF8, 0x30, 0x80, 0x9D, 0x96, 0x05, 0x44, - 0xB4, 0xFB, 0x98, 0x4C, 0x71, 0x6B, 0xBC, 0xD7, 0x5D, 0x66, 0x5E, 0x66, - 0xA7, 0x94, 0xE5, 0x65, 0x72, 0x85, 0xBC, 0x7C, 0x7F, 0x11, 0x98, 0xF8, - 0xCB, 0xD5, 0xE2, 0xB5, 0x67, 0x78, 0xF7, 0x49, 0x51, 0xC4, 0x7F, 0xBA, - 0x16, 0x66, 0xD2, 0x15, 0x5B, 0x98, 0x06, 0x03, 0x48, 0xD0, 0x9D, 0xF0, - 0x38, 0x2B, 0x9D, 0x51 -}; - -static const unsigned char TA0_RSA_E[] = { - 0x01, 0x00, 0x01 -}; - -static const br_x509_trust_anchor SelfSignedTAs[1] = { - { - { (unsigned char *)TA0_DN, sizeof TA0_DN }, - BR_X509_TA_CA, - { - BR_KEYTYPE_RSA, - { .rsa = { - (unsigned char *)TA0_RSA_N, sizeof TA0_RSA_N, - (unsigned char *)TA0_RSA_E, sizeof TA0_RSA_E, - } } - } - } -}; -""".} -var SelfSignedTrustAnchors {.importc: "SelfSignedTAs", nodecl.}: array[1, X509TrustAnchor] +let SelfSignedTrustAnchors {.importc: "SelfSignedTAs".}: array[1, X509TrustAnchor] +{.compile: "testasyncstream.c".} proc createBigMessage(message: string, size: int): seq[byte] = var res = newSeq[byte](size) @@ -983,7 +922,7 @@ suite "TLSStream test suite": test "Simple server with RSA self-signed certificate": let res = waitFor(checkSSLServer(SelfSignedRsaKey, SelfSignedRsaCert)) check res == true - + test "Custom TrustAnchors test": proc checkTrustAnchors(testMessage: string): Future[string] {.async.} = var key = TLSPrivateKey.init(SelfSignedRsaKey) @@ -1025,7 +964,7 @@ suite "TLSStream test suite": return cast[string](res) let res = waitFor checkTrustAnchors("Some message") check res == "Some message\r\n" - + test "TLSStream leaks test": checkLeaks() From 
e3c5a86a14ac39f4092ea399b491d15f844bc8dd Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 17 Oct 2023 20:25:25 +0200 Subject: [PATCH 070/146] Introduce chronos/internals, move some code (#453) * Introduce chronos/internals, move some code This PR breaks the include dependencies between `asyncfutures2` and `asyncmacros2` by moving the dispatcher and some other code to a new module. This step makes it easier to implement `asyncraises` support for future utilities like `allFutures` etc avoiding the need to play tricks with include order etc. Future PR:s may further articulate the difference between "internal" stuff subject to API breakage and regular public API intended for end users (rather than advanced integrators). * names * windows fix --- chronos/asyncloop.nim | 1543 +---------------- chronos/internal/asyncengine.nim | 1232 +++++++++++++ .../asyncfutures.nim} | 322 +++- .../asyncmacro.nim} | 4 +- chronos/internal/errors.nim | 5 + 5 files changed, 1564 insertions(+), 1542 deletions(-) create mode 100644 chronos/internal/asyncengine.nim rename chronos/{asyncfutures2.nim => internal/asyncfutures.nim} (80%) rename chronos/{asyncmacro2.nim => internal/asyncmacro.nim} (99%) create mode 100644 chronos/internal/errors.nim diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index fecec39e8..b4d48af4e 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -10,16 +10,6 @@ {.push raises: [].} -from nativesockets import Port -import std/[tables, heapqueue, deques] -import stew/results -import "."/[config, futures, osdefs, oserrno, osutils, timer] - -export Port -export futures, timer, results - -#{.injectStmt: newGcInvariant().} - ## Chronos ## ************* ## @@ -138,1534 +128,7 @@ export futures, timer, results ## ## * The effect system (``raises: []``) does not work with async procedures. 
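# Note: for exception tracking on async procs, this patch series adds the
# `asyncraises` pragma instead (see the "Raise tracking" commit above), e.g.
#   proc f(): Future[void] {.async, asyncraises: [IOError].} =
#     raise newException(IOError, "works")
# whereas a plain `raises` annotation on an async proc only triggers a
# warning suggesting `asyncraises`.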
-# TODO: Check if yielded future is nil and throw a more meaningful exception - -const - MaxEventsCount* = 64 - -when defined(windows): - import std/[sets, hashes] -elif defined(macosx) or defined(freebsd) or defined(netbsd) or - defined(openbsd) or defined(dragonfly) or defined(macos) or - defined(linux) or defined(android) or defined(solaris): - import "."/selectors2 - export SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, - SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, - SIGPIPE, SIGALRM, SIGTERM, SIGPIPE - export oserrno - -type - AsyncCallback = InternalAsyncCallback - - AsyncError* = object of CatchableError - ## Generic async exception - AsyncTimeoutError* = object of AsyncError - ## Timeout exception - - TimerCallback* = ref object - finishAt*: Moment - function*: AsyncCallback - - TrackerBase* = ref object of RootRef - id*: string - dump*: proc(): string {.gcsafe, raises: [].} - isLeaked*: proc(): bool {.gcsafe, raises: [].} - - TrackerCounter* = object - opened*: uint64 - closed*: uint64 - - PDispatcherBase = ref object of RootRef - timers*: HeapQueue[TimerCallback] - callbacks*: Deque[AsyncCallback] - idlers*: Deque[AsyncCallback] - ticks*: Deque[AsyncCallback] - trackers*: Table[string, TrackerBase] - counters*: Table[string, TrackerCounter] - -proc sentinelCallbackImpl(arg: pointer) {.gcsafe, noreturn.} = - raiseAssert "Sentinel callback MUST not be scheduled" - -const - SentinelCallback = AsyncCallback(function: sentinelCallbackImpl, - udata: nil) - -proc isSentinel(acb: AsyncCallback): bool = - acb == SentinelCallback - -proc `<`(a, b: TimerCallback): bool = - result = a.finishAt < b.finishAt - -func getAsyncTimestamp*(a: Duration): auto {.inline.} = - ## Return rounded up value of duration with milliseconds resolution. - ## - ## This function also take care on int32 overflow, because Linux and Windows - ## accepts signed 32bit integer as timeout. 
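  # (Worked example for the rounding below: 2.5ms is 2_500_000 ns, i.e.
  #  2 whole milliseconds plus a non-zero remainder, so the returned timeout
  #  is rounded up to 3; values are also clamped so the result never exceeds
  #  high(int32).)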
- let milsec = Millisecond.nanoseconds() - let nansec = a.nanoseconds() - var res = nansec div milsec - let mid = nansec mod milsec - when defined(windows): - res = min(int64(high(int32) - 1), res) - result = cast[DWORD](res) - result += DWORD(min(1'i32, cast[int32](mid))) - else: - res = min(int64(high(int32) - 1), res) - result = cast[int32](res) - result += min(1, cast[int32](mid)) - -template processTimersGetTimeout(loop, timeout: untyped) = - var lastFinish = curTime - while loop.timers.len > 0: - if loop.timers[0].function.function.isNil: - discard loop.timers.pop() - continue - - lastFinish = loop.timers[0].finishAt - if curTime < lastFinish: - break - - loop.callbacks.addLast(loop.timers.pop().function) - - if loop.timers.len > 0: - timeout = (lastFinish - curTime).getAsyncTimestamp() - - if timeout == 0: - if (len(loop.callbacks) == 0) and (len(loop.idlers) == 0): - when defined(windows): - timeout = INFINITE - else: - timeout = -1 - else: - if (len(loop.callbacks) != 0) or (len(loop.idlers) != 0): - timeout = 0 - -template processTimers(loop: untyped) = - var curTime = Moment.now() - while loop.timers.len > 0: - if loop.timers[0].function.function.isNil: - discard loop.timers.pop() - continue - - if curTime < loop.timers[0].finishAt: - break - loop.callbacks.addLast(loop.timers.pop().function) - -template processIdlers(loop: untyped) = - if len(loop.idlers) > 0: - loop.callbacks.addLast(loop.idlers.popFirst()) - -template processTicks(loop: untyped) = - while len(loop.ticks) > 0: - loop.callbacks.addLast(loop.ticks.popFirst()) - -template processCallbacks(loop: untyped) = - while true: - let callable = loop.callbacks.popFirst() # len must be > 0 due to sentinel - if isSentinel(callable): - break - if not(isNil(callable.function)): - callable.function(callable.udata) - -proc raiseAsDefect*(exc: ref Exception, msg: string) {.noreturn, noinline.} = - # Reraise an exception as a Defect, where it's unexpected and can't be handled - # We include the stack trace in the message because otherwise, it's easily - # lost - Nim doesn't print it for `parent` exceptions for example (!) - raise (ref Defect)( - msg: msg & "\n" & exc.msg & "\n" & exc.getStackTrace(), parent: exc) - -proc raiseOsDefect*(error: OSErrorCode, msg = "") {.noreturn, noinline.} = - # Reraise OS error code as a Defect, where it's unexpected and can't be - # handled. We include the stack trace in the message because otherwise, - # it's easily lost. - raise (ref Defect)(msg: msg & "\n[" & $int(error) & "] " & osErrorMsg(error) & - "\n" & getStackTrace()) - -func toPointer(error: OSErrorCode): pointer = - when sizeof(int) == 8: - cast[pointer](uint64(uint32(error))) - else: - cast[pointer](uint32(error)) - -func toException*(v: OSErrorCode): ref OSError = newOSError(v) - # This helper will allow to use `tryGet()` and raise OSError for - # Result[T, OSErrorCode] values. 
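# (Illustration: with this `toException` hook in scope, a Result-based call
#  such as
#    register2(fd).tryGet()
#  raises OSError when the result carries an error code - this is exactly how
#  the exception-raising `register` wrapper below is implemented.)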
- -when defined(windows): - {.pragma: stdcallbackFunc, stdcall, gcsafe, raises: [].} - - export SIGINT, SIGQUIT, SIGTERM - type - CompletionKey = ULONG_PTR - - CompletionData* = object - cb*: CallbackFunc - errCode*: OSErrorCode - bytesCount*: uint32 - udata*: pointer - - CustomOverlapped* = object of OVERLAPPED - data*: CompletionData - - DispatcherFlag* = enum - SignalHandlerInstalled - - PDispatcher* = ref object of PDispatcherBase - ioPort: HANDLE - handles: HashSet[AsyncFD] - connectEx*: WSAPROC_CONNECTEX - acceptEx*: WSAPROC_ACCEPTEX - getAcceptExSockAddrs*: WSAPROC_GETACCEPTEXSOCKADDRS - transmitFile*: WSAPROC_TRANSMITFILE - getQueuedCompletionStatusEx*: LPFN_GETQUEUEDCOMPLETIONSTATUSEX - disconnectEx*: WSAPROC_DISCONNECTEX - flags: set[DispatcherFlag] - - PtrCustomOverlapped* = ptr CustomOverlapped - - RefCustomOverlapped* = ref CustomOverlapped - - PostCallbackData = object - ioPort: HANDLE - handleFd: AsyncFD - waitFd: HANDLE - udata: pointer - ovlref: RefCustomOverlapped - ovl: pointer - - WaitableHandle* = ref PostCallbackData - ProcessHandle* = distinct WaitableHandle - SignalHandle* = distinct WaitableHandle - - WaitableResult* {.pure.} = enum - Ok, Timeout - - AsyncFD* = distinct int - - proc hash(x: AsyncFD): Hash {.borrow.} - proc `==`*(x: AsyncFD, y: AsyncFD): bool {.borrow, gcsafe.} - - proc getFunc(s: SocketHandle, fun: var pointer, guid: GUID): bool = - var bytesRet: DWORD - fun = nil - wsaIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, unsafeAddr(guid), - DWORD(sizeof(GUID)), addr fun, DWORD(sizeof(pointer)), - addr(bytesRet), nil, nil) == 0 - - proc globalInit() = - var wsa = WSAData() - let res = wsaStartup(0x0202'u16, addr wsa) - if res != 0: - raiseOsDefect(osLastError(), - "globalInit(): Unable to initialize Windows Sockets API") - - proc initAPI(loop: PDispatcher) = - var funcPointer: pointer = nil - - let kernel32 = getModuleHandle(newWideCString("kernel32.dll")) - loop.getQueuedCompletionStatusEx = cast[LPFN_GETQUEUEDCOMPLETIONSTATUSEX]( - getProcAddress(kernel32, "GetQueuedCompletionStatusEx")) - - let sock = osdefs.socket(osdefs.AF_INET, 1, 6) - if sock == osdefs.INVALID_SOCKET: - raiseOsDefect(osLastError(), "initAPI(): Unable to create control socket") - - block: - let res = getFunc(sock, funcPointer, WSAID_CONNECTEX) - if not(res): - raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & - "dispatcher's ConnectEx()") - loop.connectEx = cast[WSAPROC_CONNECTEX](funcPointer) - - block: - let res = getFunc(sock, funcPointer, WSAID_ACCEPTEX) - if not(res): - raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & - "dispatcher's AcceptEx()") - loop.acceptEx = cast[WSAPROC_ACCEPTEX](funcPointer) - - block: - let res = getFunc(sock, funcPointer, WSAID_GETACCEPTEXSOCKADDRS) - if not(res): - raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & - "dispatcher's GetAcceptExSockAddrs()") - loop.getAcceptExSockAddrs = - cast[WSAPROC_GETACCEPTEXSOCKADDRS](funcPointer) - - block: - let res = getFunc(sock, funcPointer, WSAID_TRANSMITFILE) - if not(res): - raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & - "dispatcher's TransmitFile()") - loop.transmitFile = cast[WSAPROC_TRANSMITFILE](funcPointer) - - block: - let res = getFunc(sock, funcPointer, WSAID_DISCONNECTEX) - if not(res): - raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & - "dispatcher's DisconnectEx()") - loop.disconnectEx = cast[WSAPROC_DISCONNECTEX](funcPointer) - - if closeFd(sock) != 0: - raiseOsDefect(osLastError(), "initAPI(): Unable to close 
control socket") - - proc newDispatcher*(): PDispatcher = - ## Creates a new Dispatcher instance. - let port = createIoCompletionPort(osdefs.INVALID_HANDLE_VALUE, - HANDLE(0), 0, 1) - if port == osdefs.INVALID_HANDLE_VALUE: - raiseOsDefect(osLastError(), "newDispatcher(): Unable to create " & - "IOCP port") - var res = PDispatcher( - ioPort: port, - handles: initHashSet[AsyncFD](), - timers: initHeapQueue[TimerCallback](), - callbacks: initDeque[AsyncCallback](64), - idlers: initDeque[AsyncCallback](), - ticks: initDeque[AsyncCallback](), - trackers: initTable[string, TrackerBase](), - counters: initTable[string, TrackerCounter]() - ) - res.callbacks.addLast(SentinelCallback) - initAPI(res) - res - - var gDisp{.threadvar.}: PDispatcher ## Global dispatcher - - proc setThreadDispatcher*(disp: PDispatcher) {.gcsafe, raises: [].} - proc getThreadDispatcher*(): PDispatcher {.gcsafe, raises: [].} - - proc getIoHandler*(disp: PDispatcher): HANDLE = - ## Returns the underlying IO Completion Port handle (Windows) or selector - ## (Unix) for the specified dispatcher. - disp.ioPort - - proc register2*(fd: AsyncFD): Result[void, OSErrorCode] = - ## Register file descriptor ``fd`` in thread's dispatcher. - let loop = getThreadDispatcher() - if createIoCompletionPort(HANDLE(fd), loop.ioPort, cast[CompletionKey](fd), - 1) == osdefs.INVALID_HANDLE_VALUE: - return err(osLastError()) - loop.handles.incl(fd) - ok() - - proc register*(fd: AsyncFD) {.raises: [OSError].} = - ## Register file descriptor ``fd`` in thread's dispatcher. - register2(fd).tryGet() - - proc unregister*(fd: AsyncFD) = - ## Unregisters ``fd``. - getThreadDispatcher().handles.excl(fd) - - {.push stackTrace: off.} - proc waitableCallback(param: pointer, timerOrWaitFired: WINBOOL) {. - stdcallbackFunc.} = - # This procedure will be executed in `wait thread`, so it must not use - # GC related objects. - # We going to ignore callbacks which was spawned when `isNil(param) == true` - # because we unable to indicate this error. - if isNil(param): return - var wh = cast[ptr PostCallbackData](param) - # We ignore result of postQueueCompletionStatus() call because we unable to - # indicate error. - discard postQueuedCompletionStatus(wh[].ioPort, DWORD(timerOrWaitFired), - ULONG_PTR(wh[].handleFd), - wh[].ovl) - {.pop.} - - proc registerWaitable( - handle: HANDLE, - flags: ULONG, - timeout: Duration, - cb: CallbackFunc, - udata: pointer - ): Result[WaitableHandle, OSErrorCode] = - ## Register handle of (Change notification, Console input, Event, - ## Memory resource notification, Mutex, Process, Semaphore, Thread, - ## Waitable timer) for waiting, using specific Windows' ``flags`` and - ## ``timeout`` value. - ## - ## Callback ``cb`` will be scheduled with ``udata`` parameter when - ## ``handle`` become signaled. - ## - ## Result of this procedure call ``WaitableHandle`` should be closed using - ## closeWaitable() call. - ## - ## NOTE: This is private procedure, not supposed to be publicly available, - ## please use ``waitForSingleObject()``. 
- let loop = getThreadDispatcher() - var ovl = RefCustomOverlapped(data: CompletionData(cb: cb)) - - var whandle = (ref PostCallbackData)( - ioPort: loop.getIoHandler(), - handleFd: AsyncFD(handle), - udata: udata, - ovlref: ovl, - ovl: cast[pointer](ovl) - ) - - ovl.data.udata = cast[pointer](whandle) - - let dwordTimeout = - if timeout == InfiniteDuration: - DWORD(INFINITE) - else: - DWORD(timeout.milliseconds) - - if registerWaitForSingleObject(addr(whandle[].waitFd), handle, - cast[WAITORTIMERCALLBACK](waitableCallback), - cast[pointer](whandle), - dwordTimeout, - flags) == WINBOOL(0): - ovl.data.udata = nil - whandle.ovlref = nil - whandle.ovl = nil - return err(osLastError()) - - ok(WaitableHandle(whandle)) - - proc closeWaitable(wh: WaitableHandle): Result[void, OSErrorCode] = - ## Close waitable handle ``wh`` and clear all the resources. It is safe - ## to close this handle, even if wait operation is pending. - ## - ## NOTE: This is private procedure, not supposed to be publicly available, - ## please use ``waitForSingleObject()``. - doAssert(not(isNil(wh))) - - let pdata = (ref PostCallbackData)(wh) - # We are not going to clear `ref` fields in PostCallbackData object because - # it possible that callback is already scheduled. - if unregisterWait(pdata.waitFd) == 0: - let res = osLastError() - if res != ERROR_IO_PENDING: - return err(res) - ok() - - proc addProcess2*(pid: int, cb: CallbackFunc, - udata: pointer = nil): Result[ProcessHandle, OSErrorCode] = - ## Registers callback ``cb`` to be called when process with process - ## identifier ``pid`` exited. Returns process identifier, which can be - ## used to clear process callback via ``removeProcess``. - doAssert(pid > 0, "Process identifier must be positive integer") - let - hProcess = openProcess(SYNCHRONIZE, WINBOOL(0), DWORD(pid)) - flags = WT_EXECUTEINWAITTHREAD or WT_EXECUTEONLYONCE - - var wh: WaitableHandle = nil - - if hProcess == HANDLE(0): - return err(osLastError()) - - proc continuation(udata: pointer) {.gcsafe.} = - doAssert(not(isNil(udata))) - doAssert(not(isNil(wh))) - discard closeFd(hProcess) - cb(wh[].udata) - - wh = - block: - let res = registerWaitable(hProcess, flags, InfiniteDuration, - continuation, udata) - if res.isErr(): - discard closeFd(hProcess) - return err(res.error()) - res.get() - ok(ProcessHandle(wh)) - - proc removeProcess2*(procHandle: ProcessHandle): Result[void, OSErrorCode] = - ## Remove process' watching using process' descriptor ``procHandle``. - let waitableHandle = WaitableHandle(procHandle) - doAssert(not(isNil(waitableHandle))) - ? closeWaitable(waitableHandle) - ok() - - proc addProcess*(pid: int, cb: CallbackFunc, - udata: pointer = nil): ProcessHandle {. - raises: [OSError].} = - ## Registers callback ``cb`` to be called when process with process - ## identifier ``pid`` exited. Returns process identifier, which can be - ## used to clear process callback via ``removeProcess``. - addProcess2(pid, cb, udata).tryGet() - - proc removeProcess*(procHandle: ProcessHandle) {. - raises: [ OSError].} = - ## Remove process' watching using process' descriptor ``procHandle``. - removeProcess2(procHandle).tryGet() - - {.push stackTrace: off.} - proc consoleCtrlEventHandler(dwCtrlType: DWORD): uint32 {.stdcallbackFunc.} = - ## This procedure will be executed in different thread, so it MUST not use - ## any GC related features (strings, seqs, echo etc.). 
- case dwCtrlType - of CTRL_C_EVENT: - return - (if raiseSignal(SIGINT).valueOr(false): TRUE else: FALSE) - of CTRL_BREAK_EVENT: - return - (if raiseSignal(SIGINT).valueOr(false): TRUE else: FALSE) - of CTRL_CLOSE_EVENT: - return - (if raiseSignal(SIGTERM).valueOr(false): TRUE else: FALSE) - of CTRL_LOGOFF_EVENT: - return - (if raiseSignal(SIGQUIT).valueOr(false): TRUE else: FALSE) - else: - FALSE - {.pop.} - - proc addSignal2*(signal: int, cb: CallbackFunc, - udata: pointer = nil): Result[SignalHandle, OSErrorCode] = - ## Start watching signal ``signal``, and when signal appears, call the - ## callback ``cb`` with specified argument ``udata``. Returns signal - ## identifier code, which can be used to remove signal callback - ## via ``removeSignal``. - ## - ## NOTE: On Windows only subset of signals are supported: SIGINT, SIGTERM, - ## SIGQUIT - const supportedSignals = [SIGINT, SIGTERM, SIGQUIT] - doAssert(cint(signal) in supportedSignals, "Signal is not supported") - let loop = getThreadDispatcher() - var hWait: WaitableHandle = nil - - proc continuation(ucdata: pointer) {.gcsafe.} = - doAssert(not(isNil(ucdata))) - doAssert(not(isNil(hWait))) - cb(hWait[].udata) - - if SignalHandlerInstalled notin loop.flags: - if getConsoleCP() != 0'u32: - # Console application, we going to cleanup Nim default signal handlers. - if setConsoleCtrlHandler(consoleCtrlEventHandler, TRUE) == FALSE: - return err(osLastError()) - loop.flags.incl(SignalHandlerInstalled) - else: - return err(ERROR_NOT_SUPPORTED) - - let - flags = WT_EXECUTEINWAITTHREAD - hEvent = ? openEvent($getSignalName(signal)) - - hWait = registerWaitable(hEvent, flags, InfiniteDuration, - continuation, udata).valueOr: - discard closeFd(hEvent) - return err(error) - ok(SignalHandle(hWait)) - - proc removeSignal2*(signalHandle: SignalHandle): Result[void, OSErrorCode] = - ## Remove watching signal ``signal``. - ? closeWaitable(WaitableHandle(signalHandle)) - ok() - - proc addSignal*(signal: int, cb: CallbackFunc, - udata: pointer = nil): SignalHandle {. - raises: [ValueError].} = - ## Registers callback ``cb`` to be called when signal ``signal`` will be - ## raised. Returns signal identifier, which can be used to clear signal - ## callback via ``removeSignal``. - addSignal2(signal, cb, udata).valueOr: - raise newException(ValueError, osErrorMsg(error)) - - proc removeSignal*(signalHandle: SignalHandle) {. - raises: [ValueError].} = - ## Remove signal's watching using signal descriptor ``signalfd``. - let res = removeSignal2(signalHandle) - if res.isErr(): - raise newException(ValueError, osErrorMsg(res.error())) - - proc poll*() = - ## Perform single asynchronous step, processing timers and completing - ## tasks. Blocks until at least one event has completed. - ## - ## Exceptions raised here indicate that waiting for tasks to be unblocked - ## failed - exceptions from within tasks are instead propagated through - ## their respective futures and not allowed to interrrupt the poll call. - let loop = getThreadDispatcher() - var - curTime = Moment.now() - curTimeout = DWORD(0) - events: array[MaxEventsCount, osdefs.OVERLAPPED_ENTRY] - - # On reentrant `poll` calls from `processCallbacks`, e.g., `waitFor`, - # complete pending work of the outer `processCallbacks` call. - # On non-reentrant `poll` calls, this only removes sentinel element. 
- processCallbacks(loop) - - # Moving expired timers to `loop.callbacks` and calculate timeout - loop.processTimersGetTimeout(curTimeout) - - let networkEventsCount = - if isNil(loop.getQueuedCompletionStatusEx): - let res = getQueuedCompletionStatus( - loop.ioPort, - addr events[0].dwNumberOfBytesTransferred, - addr events[0].lpCompletionKey, - cast[ptr POVERLAPPED](addr events[0].lpOverlapped), - curTimeout - ) - if res == FALSE: - let errCode = osLastError() - if not(isNil(events[0].lpOverlapped)): - 1 - else: - if uint32(errCode) != WAIT_TIMEOUT: - raiseOsDefect(errCode, "poll(): Unable to get OS events") - 0 - else: - 1 - else: - var eventsReceived = ULONG(0) - let res = loop.getQueuedCompletionStatusEx( - loop.ioPort, - addr events[0], - ULONG(len(events)), - eventsReceived, - curTimeout, - WINBOOL(0) - ) - if res == FALSE: - let errCode = osLastError() - if uint32(errCode) != WAIT_TIMEOUT: - raiseOsDefect(errCode, "poll(): Unable to get OS events") - 0 - else: - int(eventsReceived) - - for i in 0 ..< networkEventsCount: - var customOverlapped = PtrCustomOverlapped(events[i].lpOverlapped) - customOverlapped.data.errCode = - block: - let res = cast[uint64](customOverlapped.internal) - if res == 0'u64: - OSErrorCode(-1) - else: - OSErrorCode(rtlNtStatusToDosError(res)) - customOverlapped.data.bytesCount = events[i].dwNumberOfBytesTransferred - let acb = AsyncCallback(function: customOverlapped.data.cb, - udata: cast[pointer](customOverlapped)) - loop.callbacks.addLast(acb) - - # Moving expired timers to `loop.callbacks`. - loop.processTimers() - - # We move idle callbacks to `loop.callbacks` only if there no pending - # network events. - if networkEventsCount == 0: - loop.processIdlers() - - # We move tick callbacks to `loop.callbacks` always. - processTicks(loop) - - # All callbacks which will be added during `processCallbacks` will be - # scheduled after the sentinel and are processed on next `poll()` call. - loop.callbacks.addLast(SentinelCallback) - processCallbacks(loop) - - # All callbacks done, skip `processCallbacks` at start. - loop.callbacks.addFirst(SentinelCallback) - - proc closeSocket*(fd: AsyncFD, aftercb: CallbackFunc = nil) = - ## Closes a socket and ensures that it is unregistered. - let loop = getThreadDispatcher() - loop.handles.excl(fd) - let - param = toPointer( - if closeFd(SocketHandle(fd)) == 0: - OSErrorCode(0) - else: - osLastError() - ) - if not(isNil(aftercb)): - loop.callbacks.addLast(AsyncCallback(function: aftercb, udata: param)) - - proc closeHandle*(fd: AsyncFD, aftercb: CallbackFunc = nil) = - ## Closes a (pipe/file) handle and ensures that it is unregistered. - let loop = getThreadDispatcher() - loop.handles.excl(fd) - let - param = toPointer( - if closeFd(HANDLE(fd)) == 0: - OSErrorCode(0) - else: - osLastError() - ) - - if not(isNil(aftercb)): - loop.callbacks.addLast(AsyncCallback(function: aftercb, udata: param)) - - proc contains*(disp: PDispatcher, fd: AsyncFD): bool = - ## Returns ``true`` if ``fd`` is registered in thread's dispatcher. 
- fd in disp.handles - -elif defined(macosx) or defined(freebsd) or defined(netbsd) or - defined(openbsd) or defined(dragonfly) or defined(macos) or - defined(linux) or defined(android) or defined(solaris): - const - SIG_IGN = cast[proc(x: cint) {.raises: [], noconv, gcsafe.}](1) - - type - AsyncFD* = distinct cint - - SelectorData* = object - reader*: AsyncCallback - writer*: AsyncCallback - - PDispatcher* = ref object of PDispatcherBase - selector: Selector[SelectorData] - keys: seq[ReadyKey] - - proc `==`*(x, y: AsyncFD): bool {.borrow, gcsafe.} - - proc globalInit() = - # We are ignoring SIGPIPE signal, because we are working with EPIPE. - signal(cint(SIGPIPE), SIG_IGN) - - proc initAPI(disp: PDispatcher) = - discard - - proc newDispatcher*(): PDispatcher = - ## Create new dispatcher. - let selector = - block: - let res = Selector.new(SelectorData) - if res.isErr(): raiseOsDefect(res.error(), - "Could not initialize selector") - res.get() - - var res = PDispatcher( - selector: selector, - timers: initHeapQueue[TimerCallback](), - callbacks: initDeque[AsyncCallback](chronosEventsCount), - idlers: initDeque[AsyncCallback](), - keys: newSeq[ReadyKey](chronosEventsCount), - trackers: initTable[string, TrackerBase](), - counters: initTable[string, TrackerCounter]() - ) - res.callbacks.addLast(SentinelCallback) - initAPI(res) - res - - var gDisp{.threadvar.}: PDispatcher ## Global dispatcher - - proc setThreadDispatcher*(disp: PDispatcher) {.gcsafe, raises: [].} - proc getThreadDispatcher*(): PDispatcher {.gcsafe, raises: [].} - - proc getIoHandler*(disp: PDispatcher): Selector[SelectorData] = - ## Returns system specific OS queue. - disp.selector - - proc contains*(disp: PDispatcher, fd: AsyncFD): bool {.inline.} = - ## Returns ``true`` if ``fd`` is registered in thread's dispatcher. - cint(fd) in disp.selector - - proc register2*(fd: AsyncFD): Result[void, OSErrorCode] = - ## Register file descriptor ``fd`` in thread's dispatcher. - var data: SelectorData - getThreadDispatcher().selector.registerHandle2(cint(fd), {}, data) - - proc unregister2*(fd: AsyncFD): Result[void, OSErrorCode] = - ## Unregister file descriptor ``fd`` from thread's dispatcher. - getThreadDispatcher().selector.unregister2(cint(fd)) - - proc addReader2*(fd: AsyncFD, cb: CallbackFunc, - udata: pointer = nil): Result[void, OSErrorCode] = - ## Start watching the file descriptor ``fd`` for read availability and then - ## call the callback ``cb`` with specified argument ``udata``. - let loop = getThreadDispatcher() - var newEvents = {Event.Read} - withData(loop.selector, cint(fd), adata) do: - let acb = AsyncCallback(function: cb, udata: udata) - adata.reader = acb - if not(isNil(adata.writer.function)): - newEvents.incl(Event.Write) - do: - return err(osdefs.EBADF) - loop.selector.updateHandle2(cint(fd), newEvents) - - proc removeReader2*(fd: AsyncFD): Result[void, OSErrorCode] = - ## Stop watching the file descriptor ``fd`` for read availability. 
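addReader2() and removeReader2() are the raw edges of the Unix readiness API; the raising addReader()/removeReader() wrappers follow below. A hedged sketch of the usual lifecycle, where the raw descriptor and the callback body are hypothetical:

proc onReadable(udata: pointer) {.gcsafe.} =
  echo "descriptor is readable"          # scheduled from poll() on readiness

proc watch(rawFd: cint): AsyncFD =
  let fd = AsyncFD(rawFd)                # rawFd: e.g. a pipe read end (hypothetical)
  register2(fd).tryGet()                 # raises OSError on failure
  addReader2(fd, onReadable).tryGet()
  fd

proc unwatch(fd: AsyncFD) =
  removeReader2(fd).tryGet()
  unregister2(fd).tryGet()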
- let loop = getThreadDispatcher() - var newEvents: set[Event] - withData(loop.selector, cint(fd), adata) do: - # We need to clear `reader` data, because `selectors` don't do it - adata.reader = default(AsyncCallback) - if not(isNil(adata.writer.function)): - newEvents.incl(Event.Write) - do: - return err(osdefs.EBADF) - loop.selector.updateHandle2(cint(fd), newEvents) - - proc addWriter2*(fd: AsyncFD, cb: CallbackFunc, - udata: pointer = nil): Result[void, OSErrorCode] = - ## Start watching the file descriptor ``fd`` for write availability and then - ## call the callback ``cb`` with specified argument ``udata``. - let loop = getThreadDispatcher() - var newEvents = {Event.Write} - withData(loop.selector, cint(fd), adata) do: - let acb = AsyncCallback(function: cb, udata: udata) - adata.writer = acb - if not(isNil(adata.reader.function)): - newEvents.incl(Event.Read) - do: - return err(osdefs.EBADF) - loop.selector.updateHandle2(cint(fd), newEvents) - - proc removeWriter2*(fd: AsyncFD): Result[void, OSErrorCode] = - ## Stop watching the file descriptor ``fd`` for write availability. - let loop = getThreadDispatcher() - var newEvents: set[Event] - withData(loop.selector, cint(fd), adata) do: - # We need to clear `writer` data, because `selectors` don't do it - adata.writer = default(AsyncCallback) - if not(isNil(adata.reader.function)): - newEvents.incl(Event.Read) - do: - return err(osdefs.EBADF) - loop.selector.updateHandle2(cint(fd), newEvents) - - proc register*(fd: AsyncFD) {.raises: [OSError].} = - ## Register file descriptor ``fd`` in thread's dispatcher. - register2(fd).tryGet() - - proc unregister*(fd: AsyncFD) {.raises: [OSError].} = - ## Unregister file descriptor ``fd`` from thread's dispatcher. - unregister2(fd).tryGet() - - proc addReader*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil) {. - raises: [OSError].} = - ## Start watching the file descriptor ``fd`` for read availability and then - ## call the callback ``cb`` with specified argument ``udata``. - addReader2(fd, cb, udata).tryGet() - - proc removeReader*(fd: AsyncFD) {.raises: [OSError].} = - ## Stop watching the file descriptor ``fd`` for read availability. - removeReader2(fd).tryGet() - - proc addWriter*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil) {. - raises: [OSError].} = - ## Start watching the file descriptor ``fd`` for write availability and then - ## call the callback ``cb`` with specified argument ``udata``. - addWriter2(fd, cb, udata).tryGet() - - proc removeWriter*(fd: AsyncFD) {.raises: [OSError].} = - ## Stop watching the file descriptor ``fd`` for write availability. - removeWriter2(fd).tryGet() - - proc unregisterAndCloseFd*(fd: AsyncFD): Result[void, OSErrorCode] = - ## Unregister from system queue and close asynchronous socket. - ## - ## NOTE: Use this function to close temporary sockets/pipes only (which - ## are not exposed to the public and not supposed to be used/reused). - ## Please use closeSocket(AsyncFD) and closeHandle(AsyncFD) instead. - doAssert(fd != AsyncFD(osdefs.INVALID_SOCKET)) - ? unregister2(fd) - if closeFd(cint(fd)) != 0: - err(osLastError()) - else: - ok() - - proc closeSocket*(fd: AsyncFD, aftercb: CallbackFunc = nil) = - ## Close asynchronous socket. - ## - ## Please note, that socket is not closed immediately. To avoid bugs with - ## closing socket, while operation pending, socket will be closed as - ## soon as all pending operations will be notified. 
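closeSocket() defers the actual close until the pending reader/writer callbacks have been scheduled, and reports the outcome (an OS error code packed into a pointer) through aftercb. A hedged sketch; the teardown proc and callback are hypothetical:

proc onClosed(udata: pointer) {.gcsafe.} =
  # udata holds the OS error code from the close; 0 means success.
  echo "socket closed"

proc teardown(fd: AsyncFD) =
  closeSocket(fd, onClosed)              # fd: a previously registered AsyncFD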
- let loop = getThreadDispatcher() - - proc continuation(udata: pointer) = - let - param = toPointer( - if SocketHandle(fd) in loop.selector: - let ures = unregister2(fd) - if ures.isErr(): - discard closeFd(cint(fd)) - ures.error() - else: - if closeFd(cint(fd)) != 0: - osLastError() - else: - OSErrorCode(0) - else: - osdefs.EBADF - ) - if not(isNil(aftercb)): aftercb(param) - - withData(loop.selector, cint(fd), adata) do: - # We are scheduling reader and writer callbacks to be called - # explicitly, so they can get an error and continue work. - # Callbacks marked as deleted so we don't need to get REAL notifications - # from system queue for this reader and writer. - - if not(isNil(adata.reader.function)): - loop.callbacks.addLast(adata.reader) - adata.reader = default(AsyncCallback) - - if not(isNil(adata.writer.function)): - loop.callbacks.addLast(adata.writer) - adata.writer = default(AsyncCallback) - - # We can't unregister file descriptor from system queue here, because - # in such case processing queue will stuck on poll() call, because there - # can be no file descriptors registered in system queue. - var acb = AsyncCallback(function: continuation) - loop.callbacks.addLast(acb) - - proc closeHandle*(fd: AsyncFD, aftercb: CallbackFunc = nil) = - ## Close asynchronous file/pipe handle. - ## - ## Please note, that socket is not closed immediately. To avoid bugs with - ## closing socket, while operation pending, socket will be closed as - ## soon as all pending operations will be notified. - ## You can execute ``aftercb`` before actual socket close operation. - closeSocket(fd, aftercb) - - when chronosEventEngine in ["epoll", "kqueue"]: - type - ProcessHandle* = distinct int - SignalHandle* = distinct int - - proc addSignal2*( - signal: int, - cb: CallbackFunc, - udata: pointer = nil - ): Result[SignalHandle, OSErrorCode] = - ## Start watching signal ``signal``, and when signal appears, call the - ## callback ``cb`` with specified argument ``udata``. Returns signal - ## identifier code, which can be used to remove signal callback - ## via ``removeSignal``. - let loop = getThreadDispatcher() - var data: SelectorData - let sigfd = ? loop.selector.registerSignal(signal, data) - withData(loop.selector, sigfd, adata) do: - adata.reader = AsyncCallback(function: cb, udata: udata) - do: - return err(osdefs.EBADF) - ok(SignalHandle(sigfd)) - - proc addProcess2*( - pid: int, - cb: CallbackFunc, - udata: pointer = nil - ): Result[ProcessHandle, OSErrorCode] = - ## Registers callback ``cb`` to be called when process with process - ## identifier ``pid`` exited. Returns process' descriptor, which can be - ## used to clear process callback via ``removeProcess``. - let loop = getThreadDispatcher() - var data: SelectorData - let procfd = ? loop.selector.registerProcess(pid, data) - withData(loop.selector, procfd, adata) do: - adata.reader = AsyncCallback(function: cb, udata: udata) - do: - return err(osdefs.EBADF) - ok(ProcessHandle(procfd)) - - proc removeSignal2*(signalHandle: SignalHandle): Result[void, OSErrorCode] = - ## Remove watching signal ``signal``. - getThreadDispatcher().selector.unregister2(cint(signalHandle)) - - proc removeProcess2*(procHandle: ProcessHandle): Result[void, OSErrorCode] = - ## Remove process' watching using process' descriptor ``procfd``. - getThreadDispatcher().selector.unregister2(cint(procHandle)) - - proc addSignal*(signal: int, cb: CallbackFunc, - udata: pointer = nil): SignalHandle {. 
- raises: [OSError].} = - ## Start watching signal ``signal``, and when signal appears, call the - ## callback ``cb`` with specified argument ``udata``. Returns signal - ## identifier code, which can be used to remove signal callback - ## via ``removeSignal``. - addSignal2(signal, cb, udata).tryGet() - - proc removeSignal*(signalHandle: SignalHandle) {. - raises: [OSError].} = - ## Remove watching signal ``signal``. - removeSignal2(signalHandle).tryGet() - - proc addProcess*(pid: int, cb: CallbackFunc, - udata: pointer = nil): ProcessHandle {. - raises: [OSError].} = - ## Registers callback ``cb`` to be called when process with process - ## identifier ``pid`` exited. Returns process identifier, which can be - ## used to clear process callback via ``removeProcess``. - addProcess2(pid, cb, udata).tryGet() - - proc removeProcess*(procHandle: ProcessHandle) {. - raises: [OSError].} = - ## Remove process' watching using process' descriptor ``procHandle``. - removeProcess2(procHandle).tryGet() - - proc poll*() {.gcsafe.} = - ## Perform single asynchronous step. - let loop = getThreadDispatcher() - var curTime = Moment.now() - var curTimeout = 0 - - # On reentrant `poll` calls from `processCallbacks`, e.g., `waitFor`, - # complete pending work of the outer `processCallbacks` call. - # On non-reentrant `poll` calls, this only removes sentinel element. - processCallbacks(loop) - - # Moving expired timers to `loop.callbacks` and calculate timeout. - loop.processTimersGetTimeout(curTimeout) - - # Processing IO descriptors and all hardware events. - let count = - block: - let res = loop.selector.selectInto2(curTimeout, loop.keys) - if res.isErr(): - raiseOsDefect(res.error(), "poll(): Unable to get OS events") - res.get() - - for i in 0 ..< count: - let fd = loop.keys[i].fd - let events = loop.keys[i].events - - withData(loop.selector, cint(fd), adata) do: - if (Event.Read in events) or (events == {Event.Error}): - if not isNil(adata.reader.function): - loop.callbacks.addLast(adata.reader) - - if (Event.Write in events) or (events == {Event.Error}): - if not isNil(adata.writer.function): - loop.callbacks.addLast(adata.writer) - - if Event.User in events: - if not isNil(adata.reader.function): - loop.callbacks.addLast(adata.reader) - - when chronosEventEngine in ["epoll", "kqueue"]: - let customSet = {Event.Timer, Event.Signal, Event.Process, - Event.Vnode} - if customSet * events != {}: - if not isNil(adata.reader.function): - loop.callbacks.addLast(adata.reader) - - # Moving expired timers to `loop.callbacks`. - loop.processTimers() - - # We move idle callbacks to `loop.callbacks` only if there no pending - # network events. - if count == 0: - loop.processIdlers() - - # We move tick callbacks to `loop.callbacks` always. - processTicks(loop) - - # All callbacks which will be added during `processCallbacks` will be - # scheduled after the sentinel and are processed on next `poll()` call. - loop.callbacks.addLast(SentinelCallback) - processCallbacks(loop) - - # All callbacks done, skip `processCallbacks` at start. - loop.callbacks.addFirst(SentinelCallback) - -else: - proc initAPI() = discard - proc globalInit() = discard - -proc setThreadDispatcher*(disp: PDispatcher) = - ## Set current thread's dispatcher instance to ``disp``. - if not(gDisp.isNil()): - doAssert gDisp.callbacks.len == 0 - gDisp = disp - -proc getThreadDispatcher*(): PDispatcher = - ## Returns current thread's dispatcher instance. 
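Each thread owns at most one dispatcher: getThreadDispatcher() creates it lazily on first use, while setThreadDispatcher() lets a spawned thread install one explicitly. A hedged sketch of a thread entry point driving its own loop (the sleep stands in for real work):

proc threadMain() =
  # Intended to run at the top of a freshly spawned thread (hypothetical).
  setThreadDispatcher(newDispatcher())   # optional; first use would create one anyway
  waitFor sleepAsync(100.milliseconds)   # drive this thread's event loop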
- if gDisp.isNil(): - setThreadDispatcher(newDispatcher()) - gDisp - -proc setGlobalDispatcher*(disp: PDispatcher) {. - gcsafe, deprecated: "Use setThreadDispatcher() instead".} = - setThreadDispatcher(disp) - -proc getGlobalDispatcher*(): PDispatcher {. - gcsafe, deprecated: "Use getThreadDispatcher() instead".} = - getThreadDispatcher() - -proc setTimer*(at: Moment, cb: CallbackFunc, - udata: pointer = nil): TimerCallback = - ## Arrange for the callback ``cb`` to be called at the given absolute - ## timestamp ``at``. You can also pass ``udata`` to callback. - let loop = getThreadDispatcher() - result = TimerCallback(finishAt: at, - function: AsyncCallback(function: cb, udata: udata)) - loop.timers.push(result) - -proc clearTimer*(timer: TimerCallback) {.inline.} = - timer.function = default(AsyncCallback) - -proc addTimer*(at: Moment, cb: CallbackFunc, udata: pointer = nil) {. - inline, deprecated: "Use setTimer/clearTimer instead".} = - ## Arrange for the callback ``cb`` to be called at the given absolute - ## timestamp ``at``. You can also pass ``udata`` to callback. - discard setTimer(at, cb, udata) - -proc addTimer*(at: int64, cb: CallbackFunc, udata: pointer = nil) {. - inline, deprecated: "Use addTimer(Duration, cb, udata)".} = - discard setTimer(Moment.init(at, Millisecond), cb, udata) - -proc addTimer*(at: uint64, cb: CallbackFunc, udata: pointer = nil) {. - inline, deprecated: "Use addTimer(Duration, cb, udata)".} = - discard setTimer(Moment.init(int64(at), Millisecond), cb, udata) - -proc removeTimer*(at: Moment, cb: CallbackFunc, udata: pointer = nil) = - ## Remove timer callback ``cb`` with absolute timestamp ``at`` from waiting - ## queue. - let loop = getThreadDispatcher() - var list = cast[seq[TimerCallback]](loop.timers) - var index = -1 - for i in 0.. 0, "Number should be positive integer") - var - retFuture = newFuture[void]("chronos.stepsAsync(int)") - counter = 0 - continuation: proc(data: pointer) {.gcsafe, raises: [].} - - continuation = proc(data: pointer) {.gcsafe, raises: [].} = - if not(retFuture.finished()): - inc(counter) - if counter < number: - internalCallTick(continuation) - else: - retFuture.complete() - - if number <= 0: - retFuture.complete() - else: - internalCallTick(continuation) - - retFuture - -proc idleAsync*(): Future[void] = - ## Suspends the execution of the current asynchronous task until "idle" time. - ## - ## "idle" time its moment of time, when no network events were processed by - ## ``poll()`` call. - var retFuture = newFuture[void]("chronos.idleAsync()") - - proc continuation(data: pointer) {.gcsafe.} = - if not(retFuture.finished()): - retFuture.complete() - - proc cancellation(udata: pointer) {.gcsafe.} = - discard - - retFuture.cancelCallback = cancellation - callIdle(continuation, nil) - retFuture - -proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] = - ## Returns a future which will complete once ``fut`` completes or after - ## ``timeout`` milliseconds has elapsed. - ## - ## If ``fut`` completes first the returned future will hold true, - ## otherwise, if ``timeout`` milliseconds has elapsed first, the returned - ## future will hold false. 
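A hedged usage sketch for withTimeout(): bound an operation to two seconds and read the value only when it completed in time. fetch() is hypothetical; sleepAsync() stands in for real I/O:

proc fetch(): Future[string] {.async.} =
  await sleepAsync(100.milliseconds)     # stand-in for real work
  return "payload"

proc run() {.async.} =
  let fut = fetch()
  if await fut.withTimeout(2.seconds):
    echo "got: ", fut.read()
  else:
    echo "timed out; fetch() has been cancelled"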
- var - retFuture = newFuture[bool]("chronos.withTimeout", - {FutureFlag.OwnCancelSchedule}) - moment: Moment - timer: TimerCallback - timeouted = false - - template completeFuture(fut: untyped): untyped = - if fut.failed() or fut.completed(): - retFuture.complete(true) - else: - retFuture.cancelAndSchedule() - - # TODO: raises annotation shouldn't be needed, but likely similar issue as - # https://github.com/nim-lang/Nim/issues/17369 - proc continuation(udata: pointer) {.gcsafe, raises: [].} = - if not(retFuture.finished()): - if timeouted: - retFuture.complete(false) - return - if not(fut.finished()): - # Timer exceeded first, we going to cancel `fut` and wait until it - # not completes. - timeouted = true - fut.cancelSoon() - else: - # Future `fut` completed/failed/cancelled first. - if not(isNil(timer)): - clearTimer(timer) - fut.completeFuture() - - # TODO: raises annotation shouldn't be needed, but likely similar issue as - # https://github.com/nim-lang/Nim/issues/17369 - proc cancellation(udata: pointer) {.gcsafe, raises: [].} = - if not(fut.finished()): - if not isNil(timer): - clearTimer(timer) - fut.cancelSoon() - else: - fut.completeFuture() - - if fut.finished(): - retFuture.complete(true) - else: - if timeout.isZero(): - retFuture.complete(false) - elif timeout.isInfinite(): - retFuture.cancelCallback = cancellation - fut.addCallback(continuation) - else: - moment = Moment.fromNow(timeout) - retFuture.cancelCallback = cancellation - timer = setTimer(moment, continuation, nil) - fut.addCallback(continuation) - - retFuture - -proc withTimeout*[T](fut: Future[T], timeout: int): Future[bool] {. - inline, deprecated: "Use withTimeout(Future[T], Duration)".} = - withTimeout(fut, timeout.milliseconds()) - -proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] = - ## Returns a future which will complete once future ``fut`` completes - ## or if timeout of ``timeout`` milliseconds has been expired. - ## - ## If ``timeout`` is ``-1``, then statement ``await wait(fut)`` is - ## equal to ``await fut``. - ## - ## TODO: In case when ``fut`` got cancelled, what result Future[T] - ## should return, because it can't be cancelled too. - var - retFuture = newFuture[T]("chronos.wait()", {FutureFlag.OwnCancelSchedule}) - moment: Moment - timer: TimerCallback - timeouted = false - - template completeFuture(fut: untyped): untyped = - if fut.failed(): - retFuture.fail(fut.error) - elif fut.cancelled(): - retFuture.cancelAndSchedule() - else: - when T is void: - retFuture.complete() - else: - retFuture.complete(fut.value) - - proc continuation(udata: pointer) {.raises: [].} = - if not(retFuture.finished()): - if timeouted: - retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) - return - if not(fut.finished()): - # Timer exceeded first. - timeouted = true - fut.cancelSoon() - else: - # Future `fut` completed/failed/cancelled first. 
- if not(isNil(timer)): - clearTimer(timer) - fut.completeFuture() - - var cancellation: proc(udata: pointer) {.gcsafe, raises: [].} - cancellation = proc(udata: pointer) {.gcsafe, raises: [].} = - if not(fut.finished()): - if not(isNil(timer)): - clearTimer(timer) - fut.cancelSoon() - else: - fut.completeFuture() - - if fut.finished(): - fut.completeFuture() - else: - if timeout.isZero(): - retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) - elif timeout.isInfinite(): - retFuture.cancelCallback = cancellation - fut.addCallback(continuation) - else: - moment = Moment.fromNow(timeout) - retFuture.cancelCallback = cancellation - timer = setTimer(moment, continuation, nil) - fut.addCallback(continuation) - - retFuture - -proc wait*[T](fut: Future[T], timeout = -1): Future[T] {. - inline, deprecated: "Use wait(Future[T], Duration)".} = - if timeout == -1: - wait(fut, InfiniteDuration) - elif timeout == 0: - wait(fut, ZeroDuration) - else: - wait(fut, timeout.milliseconds()) - -include asyncmacro2 - -proc runForever*() = - ## Begins a never ending global dispatcher poll loop. - ## Raises different exceptions depending on the platform. - while true: - poll() - -proc waitFor*[T](fut: Future[T]): T {.raises: [CatchableError].} = - ## **Blocks** the current thread until the specified future completes. - ## There's no way to tell if poll or read raised the exception - while not(fut.finished()): - poll() - - fut.read() - -proc addTracker*[T](id: string, tracker: T) {. - deprecated: "Please use trackCounter facility instead".} = - ## Add new ``tracker`` object to current thread dispatcher with identifier - ## ``id``. - getThreadDispatcher().trackers[id] = tracker - -proc getTracker*(id: string): TrackerBase {. - deprecated: "Please use getTrackerCounter() instead".} = - ## Get ``tracker`` from current thread dispatcher using identifier ``id``. - getThreadDispatcher().trackers.getOrDefault(id, nil) - -proc trackCounter*(name: string) {.noinit.} = - ## Increase tracker counter with name ``name`` by 1. - let tracker = TrackerCounter(opened: 0'u64, closed: 0'u64) - inc(getThreadDispatcher().counters.mgetOrPut(name, tracker).opened) - -proc untrackCounter*(name: string) {.noinit.} = - ## Decrease tracker counter with name ``name`` by 1. - let tracker = TrackerCounter(opened: 0'u64, closed: 0'u64) - inc(getThreadDispatcher().counters.mgetOrPut(name, tracker).closed) - -proc getTrackerCounter*(name: string): TrackerCounter {.noinit.} = - ## Return value of counter with name ``name``. - let tracker = TrackerCounter(opened: 0'u64, closed: 0'u64) - getThreadDispatcher().counters.getOrDefault(name, tracker) - -proc isCounterLeaked*(name: string): bool {.noinit.} = - ## Returns ``true`` if leak is detected, number of `opened` not equal to - ## number of `closed` requests. - let tracker = TrackerCounter(opened: 0'u64, closed: 0'u64) - let res = getThreadDispatcher().counters.getOrDefault(name, tracker) - res.opened != res.closed - -iterator trackerCounters*( - loop: PDispatcher - ): tuple[name: string, value: TrackerCounter] = - ## Iterates over `loop` thread dispatcher tracker counter table, returns all - ## the tracker counter's names and values. - doAssert(not(isNil(loop))) - for key, value in loop.counters.pairs(): - yield (key, value) - -iterator trackerCounterKeys*(loop: PDispatcher): string = - doAssert(not(isNil(loop))) - ## Iterates over `loop` thread dispatcher tracker counter table, returns all - ## tracker names. 
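Counter-based tracking replaces the deprecated TrackerBase registry: pair each acquire with trackCounter() and each release with untrackCounter(), then check the balance with isCounterLeaked(). A hedged sketch using a hypothetical tracker name:

const HypotheticalTrackerName = "example.resource"

proc acquireResource() = trackCounter(HypotheticalTrackerName)
proc releaseResource() = untrackCounter(HypotheticalTrackerName)

# e.g. after a test run:
doAssert not isCounterLeaked(HypotheticalTrackerName)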
- for key in loop.counters.keys(): - yield key - -when chronosFutureTracking: - iterator pendingFutures*(): FutureBase = - ## Iterates over the list of pending Futures (Future[T] objects which not - ## yet completed, cancelled or failed). - var slider = futureList.head - while not(isNil(slider)): - yield slider - slider = slider.next - - proc pendingFuturesCount*(): uint = - ## Returns number of pending Futures (Future[T] objects which not yet - ## completed, cancelled or failed). - futureList.count - -when defined(windows): - proc waitForSingleObject*(handle: HANDLE, - timeout: Duration): Future[WaitableResult] {. - raises: [].} = - ## Waits until the specified object is in the signaled state or the - ## time-out interval elapses. WaitForSingleObject() for asynchronous world. - let flags = WT_EXECUTEONLYONCE - - var - retFuture = newFuture[WaitableResult]("chronos.waitForSingleObject()") - waitHandle: WaitableHandle = nil - - proc continuation(udata: pointer) {.gcsafe.} = - doAssert(not(isNil(waitHandle))) - if not(retFuture.finished()): - let - ovl = cast[PtrCustomOverlapped](udata) - returnFlag = WINBOOL(ovl.data.bytesCount) - res = closeWaitable(waitHandle) - if res.isErr(): - retFuture.fail(newException(AsyncError, osErrorMsg(res.error()))) - else: - if returnFlag == TRUE: - retFuture.complete(WaitableResult.Timeout) - else: - retFuture.complete(WaitableResult.Ok) - - proc cancellation(udata: pointer) {.gcsafe.} = - doAssert(not(isNil(waitHandle))) - if not(retFuture.finished()): - discard closeWaitable(waitHandle) - - let wres = uint32(waitForSingleObject(handle, DWORD(0))) - if wres == WAIT_OBJECT_0: - retFuture.complete(WaitableResult.Ok) - return retFuture - elif wres == WAIT_ABANDONED: - retFuture.fail(newException(AsyncError, "Handle was abandoned")) - return retFuture - elif wres == WAIT_FAILED: - retFuture.fail(newException(AsyncError, osErrorMsg(osLastError()))) - return retFuture - - if timeout == ZeroDuration: - retFuture.complete(WaitableResult.Timeout) - return retFuture - - waitHandle = - block: - let res = registerWaitable(handle, flags, timeout, continuation, nil) - if res.isErr(): - retFuture.fail(newException(AsyncError, osErrorMsg(res.error()))) - return retFuture - res.get() - - retFuture.cancelCallback = cancellation - return retFuture +import ./internal/[asyncengine, asyncfutures, asyncmacro, errors] -# Perform global per-module initialization. 
-globalInit() +export asyncfutures, asyncengine, errors +export asyncmacro.async, asyncmacro.await, asyncmacro.awaitne, asyncraises diff --git a/chronos/internal/asyncengine.nim b/chronos/internal/asyncengine.nim new file mode 100644 index 000000000..5a46f0489 --- /dev/null +++ b/chronos/internal/asyncengine.nim @@ -0,0 +1,1232 @@ +# +# Chronos +# +# (c) Copyright 2015 Dominik Picheta +# (c) Copyright 2018-Present Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +{.push raises: [].} + +from nativesockets import Port +import std/[tables, heapqueue, deques] +import stew/results +import ".."/[config, futures, osdefs, oserrno, osutils, timer] + +import ./[asyncmacro, errors] + +export Port +export deques, errors, futures, timer, results + +export + asyncmacro.async, asyncmacro.await, asyncmacro.awaitne, asyncmacro.asyncraises + +const + MaxEventsCount* = 64 + +when defined(windows): + import std/[sets, hashes] +elif defined(macosx) or defined(freebsd) or defined(netbsd) or + defined(openbsd) or defined(dragonfly) or defined(macos) or + defined(linux) or defined(android) or defined(solaris): + import ../selectors2 + export SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, + SIGBUS, SIGFPE, SIGKILL, SIGUSR1, SIGSEGV, SIGUSR2, + SIGPIPE, SIGALRM, SIGTERM, SIGPIPE + export oserrno + +type + AsyncCallback* = InternalAsyncCallback + + TimerCallback* = ref object + finishAt*: Moment + function*: AsyncCallback + + TrackerBase* = ref object of RootRef + id*: string + dump*: proc(): string {.gcsafe, raises: [].} + isLeaked*: proc(): bool {.gcsafe, raises: [].} + + TrackerCounter* = object + opened*: uint64 + closed*: uint64 + + PDispatcherBase = ref object of RootRef + timers*: HeapQueue[TimerCallback] + callbacks*: Deque[AsyncCallback] + idlers*: Deque[AsyncCallback] + ticks*: Deque[AsyncCallback] + trackers*: Table[string, TrackerBase] + counters*: Table[string, TrackerCounter] + +proc sentinelCallbackImpl(arg: pointer) {.gcsafe, noreturn.} = + raiseAssert "Sentinel callback MUST not be scheduled" + +const + SentinelCallback = AsyncCallback(function: sentinelCallbackImpl, + udata: nil) + +proc isSentinel(acb: AsyncCallback): bool = + acb == SentinelCallback + +proc `<`(a, b: TimerCallback): bool = + result = a.finishAt < b.finishAt + +func getAsyncTimestamp*(a: Duration): auto {.inline.} = + ## Return rounded up value of duration with milliseconds resolution. + ## + ## This function also take care on int32 overflow, because Linux and Windows + ## accepts signed 32bit integer as timeout. 
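A few hedged spot checks of the rounding behaviour described above and implemented just below (the result is expressed in milliseconds; its concrete type differs per platform):

doAssert getAsyncTimestamp(2.milliseconds) == 2      # exact multiples are kept
doAssert getAsyncTimestamp(1500.microseconds) == 2   # fractional parts round up
doAssert getAsyncTimestamp(999.nanoseconds) == 1     # sub-millisecond rounds up to 1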
+ let milsec = Millisecond.nanoseconds() + let nansec = a.nanoseconds() + var res = nansec div milsec + let mid = nansec mod milsec + when defined(windows): + res = min(int64(high(int32) - 1), res) + result = cast[DWORD](res) + result += DWORD(min(1'i32, cast[int32](mid))) + else: + res = min(int64(high(int32) - 1), res) + result = cast[int32](res) + result += min(1, cast[int32](mid)) + +template processTimersGetTimeout(loop, timeout: untyped) = + var lastFinish = curTime + while loop.timers.len > 0: + if loop.timers[0].function.function.isNil: + discard loop.timers.pop() + continue + + lastFinish = loop.timers[0].finishAt + if curTime < lastFinish: + break + + loop.callbacks.addLast(loop.timers.pop().function) + + if loop.timers.len > 0: + timeout = (lastFinish - curTime).getAsyncTimestamp() + + if timeout == 0: + if (len(loop.callbacks) == 0) and (len(loop.idlers) == 0): + when defined(windows): + timeout = INFINITE + else: + timeout = -1 + else: + if (len(loop.callbacks) != 0) or (len(loop.idlers) != 0): + timeout = 0 + +template processTimers(loop: untyped) = + var curTime = Moment.now() + while loop.timers.len > 0: + if loop.timers[0].function.function.isNil: + discard loop.timers.pop() + continue + + if curTime < loop.timers[0].finishAt: + break + loop.callbacks.addLast(loop.timers.pop().function) + +template processIdlers(loop: untyped) = + if len(loop.idlers) > 0: + loop.callbacks.addLast(loop.idlers.popFirst()) + +template processTicks(loop: untyped) = + while len(loop.ticks) > 0: + loop.callbacks.addLast(loop.ticks.popFirst()) + +template processCallbacks(loop: untyped) = + while true: + let callable = loop.callbacks.popFirst() # len must be > 0 due to sentinel + if isSentinel(callable): + break + if not(isNil(callable.function)): + callable.function(callable.udata) + +proc raiseAsDefect*(exc: ref Exception, msg: string) {.noreturn, noinline.} = + # Reraise an exception as a Defect, where it's unexpected and can't be handled + # We include the stack trace in the message because otherwise, it's easily + # lost - Nim doesn't print it for `parent` exceptions for example (!) + raise (ref Defect)( + msg: msg & "\n" & exc.msg & "\n" & exc.getStackTrace(), parent: exc) + +proc raiseOsDefect*(error: OSErrorCode, msg = "") {.noreturn, noinline.} = + # Reraise OS error code as a Defect, where it's unexpected and can't be + # handled. We include the stack trace in the message because otherwise, + # it's easily lost. + raise (ref Defect)(msg: msg & "\n[" & $int(error) & "] " & osErrorMsg(error) & + "\n" & getStackTrace()) + +func toPointer(error: OSErrorCode): pointer = + when sizeof(int) == 8: + cast[pointer](uint64(uint32(error))) + else: + cast[pointer](uint32(error)) + +func toException*(v: OSErrorCode): ref OSError = newOSError(v) + # This helper will allow to use `tryGet()` and raise OSError for + # Result[T, OSErrorCode] values. 
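A minimal sketch of what this hook buys callers: with toException() in scope, tryGet() on a Result[T, OSErrorCode] raises an ordinary OSError instead of handing back a raw error code. register2() used here is defined further down; the descriptor is hypothetical:

proc setup(fd: AsyncFD) =
  register2(fd).tryGet()                 # raises OSError on failure via toException()

proc setupByHand(fd: AsyncFD) =
  let res = register2(fd)                # the same conversion, spelled out
  if res.isErr():
    raise res.error().toException()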
+ +when defined(windows): + {.pragma: stdcallbackFunc, stdcall, gcsafe, raises: [].} + + export SIGINT, SIGQUIT, SIGTERM + type + CompletionKey = ULONG_PTR + + CompletionData* = object + cb*: CallbackFunc + errCode*: OSErrorCode + bytesCount*: uint32 + udata*: pointer + + CustomOverlapped* = object of OVERLAPPED + data*: CompletionData + + DispatcherFlag* = enum + SignalHandlerInstalled + + PDispatcher* = ref object of PDispatcherBase + ioPort: HANDLE + handles: HashSet[AsyncFD] + connectEx*: WSAPROC_CONNECTEX + acceptEx*: WSAPROC_ACCEPTEX + getAcceptExSockAddrs*: WSAPROC_GETACCEPTEXSOCKADDRS + transmitFile*: WSAPROC_TRANSMITFILE + getQueuedCompletionStatusEx*: LPFN_GETQUEUEDCOMPLETIONSTATUSEX + disconnectEx*: WSAPROC_DISCONNECTEX + flags: set[DispatcherFlag] + + PtrCustomOverlapped* = ptr CustomOverlapped + + RefCustomOverlapped* = ref CustomOverlapped + + PostCallbackData = object + ioPort: HANDLE + handleFd: AsyncFD + waitFd: HANDLE + udata: pointer + ovlref: RefCustomOverlapped + ovl: pointer + + WaitableHandle* = ref PostCallbackData + ProcessHandle* = distinct WaitableHandle + SignalHandle* = distinct WaitableHandle + + WaitableResult* {.pure.} = enum + Ok, Timeout + + AsyncFD* = distinct int + + proc hash(x: AsyncFD): Hash {.borrow.} + proc `==`*(x: AsyncFD, y: AsyncFD): bool {.borrow, gcsafe.} + + proc getFunc(s: SocketHandle, fun: var pointer, guid: GUID): bool = + var bytesRet: DWORD + fun = nil + wsaIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, unsafeAddr(guid), + DWORD(sizeof(GUID)), addr fun, DWORD(sizeof(pointer)), + addr(bytesRet), nil, nil) == 0 + + proc globalInit() = + var wsa = WSAData() + let res = wsaStartup(0x0202'u16, addr wsa) + if res != 0: + raiseOsDefect(osLastError(), + "globalInit(): Unable to initialize Windows Sockets API") + + proc initAPI(loop: PDispatcher) = + var funcPointer: pointer = nil + + let kernel32 = getModuleHandle(newWideCString("kernel32.dll")) + loop.getQueuedCompletionStatusEx = cast[LPFN_GETQUEUEDCOMPLETIONSTATUSEX]( + getProcAddress(kernel32, "GetQueuedCompletionStatusEx")) + + let sock = osdefs.socket(osdefs.AF_INET, 1, 6) + if sock == osdefs.INVALID_SOCKET: + raiseOsDefect(osLastError(), "initAPI(): Unable to create control socket") + + block: + let res = getFunc(sock, funcPointer, WSAID_CONNECTEX) + if not(res): + raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & + "dispatcher's ConnectEx()") + loop.connectEx = cast[WSAPROC_CONNECTEX](funcPointer) + + block: + let res = getFunc(sock, funcPointer, WSAID_ACCEPTEX) + if not(res): + raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & + "dispatcher's AcceptEx()") + loop.acceptEx = cast[WSAPROC_ACCEPTEX](funcPointer) + + block: + let res = getFunc(sock, funcPointer, WSAID_GETACCEPTEXSOCKADDRS) + if not(res): + raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & + "dispatcher's GetAcceptExSockAddrs()") + loop.getAcceptExSockAddrs = + cast[WSAPROC_GETACCEPTEXSOCKADDRS](funcPointer) + + block: + let res = getFunc(sock, funcPointer, WSAID_TRANSMITFILE) + if not(res): + raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & + "dispatcher's TransmitFile()") + loop.transmitFile = cast[WSAPROC_TRANSMITFILE](funcPointer) + + block: + let res = getFunc(sock, funcPointer, WSAID_DISCONNECTEX) + if not(res): + raiseOsDefect(osLastError(), "initAPI(): Unable to initialize " & + "dispatcher's DisconnectEx()") + loop.disconnectEx = cast[WSAPROC_DISCONNECTEX](funcPointer) + + if closeFd(sock) != 0: + raiseOsDefect(osLastError(), "initAPI(): Unable to close 
control socket") + + proc newDispatcher*(): PDispatcher = + ## Creates a new Dispatcher instance. + let port = createIoCompletionPort(osdefs.INVALID_HANDLE_VALUE, + HANDLE(0), 0, 1) + if port == osdefs.INVALID_HANDLE_VALUE: + raiseOsDefect(osLastError(), "newDispatcher(): Unable to create " & + "IOCP port") + var res = PDispatcher( + ioPort: port, + handles: initHashSet[AsyncFD](), + timers: initHeapQueue[TimerCallback](), + callbacks: initDeque[AsyncCallback](64), + idlers: initDeque[AsyncCallback](), + ticks: initDeque[AsyncCallback](), + trackers: initTable[string, TrackerBase](), + counters: initTable[string, TrackerCounter]() + ) + res.callbacks.addLast(SentinelCallback) + initAPI(res) + res + + var gDisp{.threadvar.}: PDispatcher ## Global dispatcher + + proc setThreadDispatcher*(disp: PDispatcher) {.gcsafe, raises: [].} + proc getThreadDispatcher*(): PDispatcher {.gcsafe, raises: [].} + + proc getIoHandler*(disp: PDispatcher): HANDLE = + ## Returns the underlying IO Completion Port handle (Windows) or selector + ## (Unix) for the specified dispatcher. + disp.ioPort + + proc register2*(fd: AsyncFD): Result[void, OSErrorCode] = + ## Register file descriptor ``fd`` in thread's dispatcher. + let loop = getThreadDispatcher() + if createIoCompletionPort(HANDLE(fd), loop.ioPort, cast[CompletionKey](fd), + 1) == osdefs.INVALID_HANDLE_VALUE: + return err(osLastError()) + loop.handles.incl(fd) + ok() + + proc register*(fd: AsyncFD) {.raises: [OSError].} = + ## Register file descriptor ``fd`` in thread's dispatcher. + register2(fd).tryGet() + + proc unregister*(fd: AsyncFD) = + ## Unregisters ``fd``. + getThreadDispatcher().handles.excl(fd) + + {.push stackTrace: off.} + proc waitableCallback(param: pointer, timerOrWaitFired: WINBOOL) {. + stdcallbackFunc.} = + # This procedure will be executed in `wait thread`, so it must not use + # GC related objects. + # We going to ignore callbacks which was spawned when `isNil(param) == true` + # because we unable to indicate this error. + if isNil(param): return + var wh = cast[ptr PostCallbackData](param) + # We ignore result of postQueueCompletionStatus() call because we unable to + # indicate error. + discard postQueuedCompletionStatus(wh[].ioPort, DWORD(timerOrWaitFired), + ULONG_PTR(wh[].handleFd), + wh[].ovl) + {.pop.} + + proc registerWaitable*( + handle: HANDLE, + flags: ULONG, + timeout: Duration, + cb: CallbackFunc, + udata: pointer + ): Result[WaitableHandle, OSErrorCode] = + ## Register handle of (Change notification, Console input, Event, + ## Memory resource notification, Mutex, Process, Semaphore, Thread, + ## Waitable timer) for waiting, using specific Windows' ``flags`` and + ## ``timeout`` value. + ## + ## Callback ``cb`` will be scheduled with ``udata`` parameter when + ## ``handle`` become signaled. + ## + ## Result of this procedure call ``WaitableHandle`` should be closed using + ## closeWaitable() call. + ## + ## NOTE: This is private procedure, not supposed to be publicly available, + ## please use ``waitForSingleObject()``. 
+ let loop = getThreadDispatcher() + var ovl = RefCustomOverlapped(data: CompletionData(cb: cb)) + + var whandle = (ref PostCallbackData)( + ioPort: loop.getIoHandler(), + handleFd: AsyncFD(handle), + udata: udata, + ovlref: ovl, + ovl: cast[pointer](ovl) + ) + + ovl.data.udata = cast[pointer](whandle) + + let dwordTimeout = + if timeout == InfiniteDuration: + DWORD(INFINITE) + else: + DWORD(timeout.milliseconds) + + if registerWaitForSingleObject(addr(whandle[].waitFd), handle, + cast[WAITORTIMERCALLBACK](waitableCallback), + cast[pointer](whandle), + dwordTimeout, + flags) == WINBOOL(0): + ovl.data.udata = nil + whandle.ovlref = nil + whandle.ovl = nil + return err(osLastError()) + + ok(WaitableHandle(whandle)) + + proc closeWaitable*(wh: WaitableHandle): Result[void, OSErrorCode] = + ## Close waitable handle ``wh`` and clear all the resources. It is safe + ## to close this handle, even if wait operation is pending. + ## + ## NOTE: This is private procedure, not supposed to be publicly available, + ## please use ``waitForSingleObject()``. + doAssert(not(isNil(wh))) + + let pdata = (ref PostCallbackData)(wh) + # We are not going to clear `ref` fields in PostCallbackData object because + # it possible that callback is already scheduled. + if unregisterWait(pdata.waitFd) == 0: + let res = osLastError() + if res != ERROR_IO_PENDING: + return err(res) + ok() + + proc addProcess2*(pid: int, cb: CallbackFunc, + udata: pointer = nil): Result[ProcessHandle, OSErrorCode] = + ## Registers callback ``cb`` to be called when process with process + ## identifier ``pid`` exited. Returns process identifier, which can be + ## used to clear process callback via ``removeProcess``. + doAssert(pid > 0, "Process identifier must be positive integer") + let + hProcess = openProcess(SYNCHRONIZE, WINBOOL(0), DWORD(pid)) + flags = WT_EXECUTEINWAITTHREAD or WT_EXECUTEONLYONCE + + var wh: WaitableHandle = nil + + if hProcess == HANDLE(0): + return err(osLastError()) + + proc continuation(udata: pointer) {.gcsafe.} = + doAssert(not(isNil(udata))) + doAssert(not(isNil(wh))) + discard closeFd(hProcess) + cb(wh[].udata) + + wh = + block: + let res = registerWaitable(hProcess, flags, InfiniteDuration, + continuation, udata) + if res.isErr(): + discard closeFd(hProcess) + return err(res.error()) + res.get() + ok(ProcessHandle(wh)) + + proc removeProcess2*(procHandle: ProcessHandle): Result[void, OSErrorCode] = + ## Remove process' watching using process' descriptor ``procHandle``. + let waitableHandle = WaitableHandle(procHandle) + doAssert(not(isNil(waitableHandle))) + ? closeWaitable(waitableHandle) + ok() + + proc addProcess*(pid: int, cb: CallbackFunc, + udata: pointer = nil): ProcessHandle {. + raises: [OSError].} = + ## Registers callback ``cb`` to be called when process with process + ## identifier ``pid`` exited. Returns process identifier, which can be + ## used to clear process callback via ``removeProcess``. + addProcess2(pid, cb, udata).tryGet() + + proc removeProcess*(procHandle: ProcessHandle) {. + raises: [ OSError].} = + ## Remove process' watching using process' descriptor ``procHandle``. + removeProcess2(procHandle).tryGet() + + {.push stackTrace: off.} + proc consoleCtrlEventHandler(dwCtrlType: DWORD): uint32 {.stdcallbackFunc.} = + ## This procedure will be executed in different thread, so it MUST not use + ## any GC related features (strings, seqs, echo etc.). 
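consoleCtrlEventHandler() only forwards console Ctrl events as signals; the callbacks themselves are attached with addSignal2()/removeSignal2(), defined below. A hedged sketch of installing a SIGINT handler (the callback body is hypothetical):

proc onInterrupt(udata: pointer) {.gcsafe.} =
  echo "SIGINT received, shutting down"

let sigHandle = addSignal2(SIGINT, onInterrupt).valueOr:
  raiseOsDefect(error, "Unable to install SIGINT handler")
# ... later, when the handler is no longer needed:
discard removeSignal2(sigHandle)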
+ case dwCtrlType + of CTRL_C_EVENT: + return + (if raiseSignal(SIGINT).valueOr(false): TRUE else: FALSE) + of CTRL_BREAK_EVENT: + return + (if raiseSignal(SIGINT).valueOr(false): TRUE else: FALSE) + of CTRL_CLOSE_EVENT: + return + (if raiseSignal(SIGTERM).valueOr(false): TRUE else: FALSE) + of CTRL_LOGOFF_EVENT: + return + (if raiseSignal(SIGQUIT).valueOr(false): TRUE else: FALSE) + else: + FALSE + {.pop.} + + proc addSignal2*(signal: int, cb: CallbackFunc, + udata: pointer = nil): Result[SignalHandle, OSErrorCode] = + ## Start watching signal ``signal``, and when signal appears, call the + ## callback ``cb`` with specified argument ``udata``. Returns signal + ## identifier code, which can be used to remove signal callback + ## via ``removeSignal``. + ## + ## NOTE: On Windows only subset of signals are supported: SIGINT, SIGTERM, + ## SIGQUIT + const supportedSignals = [SIGINT, SIGTERM, SIGQUIT] + doAssert(cint(signal) in supportedSignals, "Signal is not supported") + let loop = getThreadDispatcher() + var hWait: WaitableHandle = nil + + proc continuation(ucdata: pointer) {.gcsafe.} = + doAssert(not(isNil(ucdata))) + doAssert(not(isNil(hWait))) + cb(hWait[].udata) + + if SignalHandlerInstalled notin loop.flags: + if getConsoleCP() != 0'u32: + # Console application, we going to cleanup Nim default signal handlers. + if setConsoleCtrlHandler(consoleCtrlEventHandler, TRUE) == FALSE: + return err(osLastError()) + loop.flags.incl(SignalHandlerInstalled) + else: + return err(ERROR_NOT_SUPPORTED) + + let + flags = WT_EXECUTEINWAITTHREAD + hEvent = ? openEvent($getSignalName(signal)) + + hWait = registerWaitable(hEvent, flags, InfiniteDuration, + continuation, udata).valueOr: + discard closeFd(hEvent) + return err(error) + ok(SignalHandle(hWait)) + + proc removeSignal2*(signalHandle: SignalHandle): Result[void, OSErrorCode] = + ## Remove watching signal ``signal``. + ? closeWaitable(WaitableHandle(signalHandle)) + ok() + + proc addSignal*(signal: int, cb: CallbackFunc, + udata: pointer = nil): SignalHandle {. + raises: [ValueError].} = + ## Registers callback ``cb`` to be called when signal ``signal`` will be + ## raised. Returns signal identifier, which can be used to clear signal + ## callback via ``removeSignal``. + addSignal2(signal, cb, udata).valueOr: + raise newException(ValueError, osErrorMsg(error)) + + proc removeSignal*(signalHandle: SignalHandle) {. + raises: [ValueError].} = + ## Remove signal's watching using signal descriptor ``signalfd``. + let res = removeSignal2(signalHandle) + if res.isErr(): + raise newException(ValueError, osErrorMsg(res.error())) + + proc poll*() = + ## Perform single asynchronous step, processing timers and completing + ## tasks. Blocks until at least one event has completed. + ## + ## Exceptions raised here indicate that waiting for tasks to be unblocked + ## failed - exceptions from within tasks are instead propagated through + ## their respective futures and not allowed to interrrupt the poll call. + let loop = getThreadDispatcher() + var + curTime = Moment.now() + curTimeout = DWORD(0) + events: array[MaxEventsCount, osdefs.OVERLAPPED_ENTRY] + + # On reentrant `poll` calls from `processCallbacks`, e.g., `waitFor`, + # complete pending work of the outer `processCallbacks` call. + # On non-reentrant `poll` calls, this only removes sentinel element. 
+ processCallbacks(loop) + + # Moving expired timers to `loop.callbacks` and calculate timeout + loop.processTimersGetTimeout(curTimeout) + + let networkEventsCount = + if isNil(loop.getQueuedCompletionStatusEx): + let res = getQueuedCompletionStatus( + loop.ioPort, + addr events[0].dwNumberOfBytesTransferred, + addr events[0].lpCompletionKey, + cast[ptr POVERLAPPED](addr events[0].lpOverlapped), + curTimeout + ) + if res == FALSE: + let errCode = osLastError() + if not(isNil(events[0].lpOverlapped)): + 1 + else: + if uint32(errCode) != WAIT_TIMEOUT: + raiseOsDefect(errCode, "poll(): Unable to get OS events") + 0 + else: + 1 + else: + var eventsReceived = ULONG(0) + let res = loop.getQueuedCompletionStatusEx( + loop.ioPort, + addr events[0], + ULONG(len(events)), + eventsReceived, + curTimeout, + WINBOOL(0) + ) + if res == FALSE: + let errCode = osLastError() + if uint32(errCode) != WAIT_TIMEOUT: + raiseOsDefect(errCode, "poll(): Unable to get OS events") + 0 + else: + int(eventsReceived) + + for i in 0 ..< networkEventsCount: + var customOverlapped = PtrCustomOverlapped(events[i].lpOverlapped) + customOverlapped.data.errCode = + block: + let res = cast[uint64](customOverlapped.internal) + if res == 0'u64: + OSErrorCode(-1) + else: + OSErrorCode(rtlNtStatusToDosError(res)) + customOverlapped.data.bytesCount = events[i].dwNumberOfBytesTransferred + let acb = AsyncCallback(function: customOverlapped.data.cb, + udata: cast[pointer](customOverlapped)) + loop.callbacks.addLast(acb) + + # Moving expired timers to `loop.callbacks`. + loop.processTimers() + + # We move idle callbacks to `loop.callbacks` only if there no pending + # network events. + if networkEventsCount == 0: + loop.processIdlers() + + # We move tick callbacks to `loop.callbacks` always. + processTicks(loop) + + # All callbacks which will be added during `processCallbacks` will be + # scheduled after the sentinel and are processed on next `poll()` call. + loop.callbacks.addLast(SentinelCallback) + processCallbacks(loop) + + # All callbacks done, skip `processCallbacks` at start. + loop.callbacks.addFirst(SentinelCallback) + + proc closeSocket*(fd: AsyncFD, aftercb: CallbackFunc = nil) = + ## Closes a socket and ensures that it is unregistered. + let loop = getThreadDispatcher() + loop.handles.excl(fd) + let + param = toPointer( + if closeFd(SocketHandle(fd)) == 0: + OSErrorCode(0) + else: + osLastError() + ) + if not(isNil(aftercb)): + loop.callbacks.addLast(AsyncCallback(function: aftercb, udata: param)) + + proc closeHandle*(fd: AsyncFD, aftercb: CallbackFunc = nil) = + ## Closes a (pipe/file) handle and ensures that it is unregistered. + let loop = getThreadDispatcher() + loop.handles.excl(fd) + let + param = toPointer( + if closeFd(HANDLE(fd)) == 0: + OSErrorCode(0) + else: + osLastError() + ) + + if not(isNil(aftercb)): + loop.callbacks.addLast(AsyncCallback(function: aftercb, udata: param)) + + proc contains*(disp: PDispatcher, fd: AsyncFD): bool = + ## Returns ``true`` if ``fd`` is registered in thread's dispatcher. 
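+    ##
+    ## Example (illustrative): ``doAssert(fd in getThreadDispatcher())`` right
+    ## after ``fd`` has been registered with this dispatcher.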
+ fd in disp.handles + +elif defined(macosx) or defined(freebsd) or defined(netbsd) or + defined(openbsd) or defined(dragonfly) or defined(macos) or + defined(linux) or defined(android) or defined(solaris): + const + SIG_IGN = cast[proc(x: cint) {.raises: [], noconv, gcsafe.}](1) + + type + AsyncFD* = distinct cint + + SelectorData* = object + reader*: AsyncCallback + writer*: AsyncCallback + + PDispatcher* = ref object of PDispatcherBase + selector: Selector[SelectorData] + keys: seq[ReadyKey] + + proc `==`*(x, y: AsyncFD): bool {.borrow, gcsafe.} + + proc globalInit() = + # We are ignoring SIGPIPE signal, because we are working with EPIPE. + signal(cint(SIGPIPE), SIG_IGN) + + proc initAPI(disp: PDispatcher) = + discard + + proc newDispatcher*(): PDispatcher = + ## Create new dispatcher. + let selector = + block: + let res = Selector.new(SelectorData) + if res.isErr(): raiseOsDefect(res.error(), + "Could not initialize selector") + res.get() + + var res = PDispatcher( + selector: selector, + timers: initHeapQueue[TimerCallback](), + callbacks: initDeque[AsyncCallback](chronosEventsCount), + idlers: initDeque[AsyncCallback](), + keys: newSeq[ReadyKey](chronosEventsCount), + trackers: initTable[string, TrackerBase](), + counters: initTable[string, TrackerCounter]() + ) + res.callbacks.addLast(SentinelCallback) + initAPI(res) + res + + var gDisp{.threadvar.}: PDispatcher ## Global dispatcher + + proc setThreadDispatcher*(disp: PDispatcher) {.gcsafe, raises: [].} + proc getThreadDispatcher*(): PDispatcher {.gcsafe, raises: [].} + + proc getIoHandler*(disp: PDispatcher): Selector[SelectorData] = + ## Returns system specific OS queue. + disp.selector + + proc contains*(disp: PDispatcher, fd: AsyncFD): bool {.inline.} = + ## Returns ``true`` if ``fd`` is registered in thread's dispatcher. + cint(fd) in disp.selector + + proc register2*(fd: AsyncFD): Result[void, OSErrorCode] = + ## Register file descriptor ``fd`` in thread's dispatcher. + var data: SelectorData + getThreadDispatcher().selector.registerHandle2(cint(fd), {}, data) + + proc unregister2*(fd: AsyncFD): Result[void, OSErrorCode] = + ## Unregister file descriptor ``fd`` from thread's dispatcher. + getThreadDispatcher().selector.unregister2(cint(fd)) + + proc addReader2*(fd: AsyncFD, cb: CallbackFunc, + udata: pointer = nil): Result[void, OSErrorCode] = + ## Start watching the file descriptor ``fd`` for read availability and then + ## call the callback ``cb`` with specified argument ``udata``. + let loop = getThreadDispatcher() + var newEvents = {Event.Read} + withData(loop.selector, cint(fd), adata) do: + let acb = AsyncCallback(function: cb, udata: udata) + adata.reader = acb + if not(isNil(adata.writer.function)): + newEvents.incl(Event.Write) + do: + return err(osdefs.EBADF) + loop.selector.updateHandle2(cint(fd), newEvents) + + proc removeReader2*(fd: AsyncFD): Result[void, OSErrorCode] = + ## Stop watching the file descriptor ``fd`` for read availability. 
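+    ##
+    ## A minimal sketch pairing it with ``addReader2`` (``fd`` and the
+    ## surrounding error handling are assumed):
+    ##
+    ##   proc onRead(udata: pointer) {.gcsafe, raises: [].} =
+    ##     discard removeReader2(fd)  # stop watching after the first event
+    ##
+    ##   discard addReader2(fd, onRead, nil)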
+ let loop = getThreadDispatcher() + var newEvents: set[Event] + withData(loop.selector, cint(fd), adata) do: + # We need to clear `reader` data, because `selectors` don't do it + adata.reader = default(AsyncCallback) + if not(isNil(adata.writer.function)): + newEvents.incl(Event.Write) + do: + return err(osdefs.EBADF) + loop.selector.updateHandle2(cint(fd), newEvents) + + proc addWriter2*(fd: AsyncFD, cb: CallbackFunc, + udata: pointer = nil): Result[void, OSErrorCode] = + ## Start watching the file descriptor ``fd`` for write availability and then + ## call the callback ``cb`` with specified argument ``udata``. + let loop = getThreadDispatcher() + var newEvents = {Event.Write} + withData(loop.selector, cint(fd), adata) do: + let acb = AsyncCallback(function: cb, udata: udata) + adata.writer = acb + if not(isNil(adata.reader.function)): + newEvents.incl(Event.Read) + do: + return err(osdefs.EBADF) + loop.selector.updateHandle2(cint(fd), newEvents) + + proc removeWriter2*(fd: AsyncFD): Result[void, OSErrorCode] = + ## Stop watching the file descriptor ``fd`` for write availability. + let loop = getThreadDispatcher() + var newEvents: set[Event] + withData(loop.selector, cint(fd), adata) do: + # We need to clear `writer` data, because `selectors` don't do it + adata.writer = default(AsyncCallback) + if not(isNil(adata.reader.function)): + newEvents.incl(Event.Read) + do: + return err(osdefs.EBADF) + loop.selector.updateHandle2(cint(fd), newEvents) + + proc register*(fd: AsyncFD) {.raises: [OSError].} = + ## Register file descriptor ``fd`` in thread's dispatcher. + register2(fd).tryGet() + + proc unregister*(fd: AsyncFD) {.raises: [OSError].} = + ## Unregister file descriptor ``fd`` from thread's dispatcher. + unregister2(fd).tryGet() + + proc addReader*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil) {. + raises: [OSError].} = + ## Start watching the file descriptor ``fd`` for read availability and then + ## call the callback ``cb`` with specified argument ``udata``. + addReader2(fd, cb, udata).tryGet() + + proc removeReader*(fd: AsyncFD) {.raises: [OSError].} = + ## Stop watching the file descriptor ``fd`` for read availability. + removeReader2(fd).tryGet() + + proc addWriter*(fd: AsyncFD, cb: CallbackFunc, udata: pointer = nil) {. + raises: [OSError].} = + ## Start watching the file descriptor ``fd`` for write availability and then + ## call the callback ``cb`` with specified argument ``udata``. + addWriter2(fd, cb, udata).tryGet() + + proc removeWriter*(fd: AsyncFD) {.raises: [OSError].} = + ## Stop watching the file descriptor ``fd`` for write availability. + removeWriter2(fd).tryGet() + + proc unregisterAndCloseFd*(fd: AsyncFD): Result[void, OSErrorCode] = + ## Unregister from system queue and close asynchronous socket. + ## + ## NOTE: Use this function to close temporary sockets/pipes only (which + ## are not exposed to the public and not supposed to be used/reused). + ## Please use closeSocket(AsyncFD) and closeHandle(AsyncFD) instead. + doAssert(fd != AsyncFD(osdefs.INVALID_SOCKET)) + ? unregister2(fd) + if closeFd(cint(fd)) != 0: + err(osLastError()) + else: + ok() + + proc closeSocket*(fd: AsyncFD, aftercb: CallbackFunc = nil) = + ## Close asynchronous socket. + ## + ## Please note, that socket is not closed immediately. To avoid bugs with + ## closing socket, while operation pending, socket will be closed as + ## soon as all pending operations will be notified. 
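+    ##
+    ## Sketch of waiting for the close to finish (``retFuture`` is an assumed
+    ## ``Future[void]`` owned by the caller; the callback argument carries the
+    ## close error code, if any):
+    ##
+    ##   proc closed(udata: pointer) {.gcsafe, raises: [].} =
+    ##     retFuture.complete()
+    ##
+    ##   closeSocket(fd, closed)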
+ let loop = getThreadDispatcher() + + proc continuation(udata: pointer) = + let + param = toPointer( + if SocketHandle(fd) in loop.selector: + let ures = unregister2(fd) + if ures.isErr(): + discard closeFd(cint(fd)) + ures.error() + else: + if closeFd(cint(fd)) != 0: + osLastError() + else: + OSErrorCode(0) + else: + osdefs.EBADF + ) + if not(isNil(aftercb)): aftercb(param) + + withData(loop.selector, cint(fd), adata) do: + # We are scheduling reader and writer callbacks to be called + # explicitly, so they can get an error and continue work. + # Callbacks marked as deleted so we don't need to get REAL notifications + # from system queue for this reader and writer. + + if not(isNil(adata.reader.function)): + loop.callbacks.addLast(adata.reader) + adata.reader = default(AsyncCallback) + + if not(isNil(adata.writer.function)): + loop.callbacks.addLast(adata.writer) + adata.writer = default(AsyncCallback) + + # We can't unregister file descriptor from system queue here, because + # in such case processing queue will stuck on poll() call, because there + # can be no file descriptors registered in system queue. + var acb = AsyncCallback(function: continuation) + loop.callbacks.addLast(acb) + + proc closeHandle*(fd: AsyncFD, aftercb: CallbackFunc = nil) = + ## Close asynchronous file/pipe handle. + ## + ## Please note, that socket is not closed immediately. To avoid bugs with + ## closing socket, while operation pending, socket will be closed as + ## soon as all pending operations will be notified. + ## You can execute ``aftercb`` before actual socket close operation. + closeSocket(fd, aftercb) + + when chronosEventEngine in ["epoll", "kqueue"]: + type + ProcessHandle* = distinct int + SignalHandle* = distinct int + + proc addSignal2*( + signal: int, + cb: CallbackFunc, + udata: pointer = nil + ): Result[SignalHandle, OSErrorCode] = + ## Start watching signal ``signal``, and when signal appears, call the + ## callback ``cb`` with specified argument ``udata``. Returns signal + ## identifier code, which can be used to remove signal callback + ## via ``removeSignal``. + let loop = getThreadDispatcher() + var data: SelectorData + let sigfd = ? loop.selector.registerSignal(signal, data) + withData(loop.selector, sigfd, adata) do: + adata.reader = AsyncCallback(function: cb, udata: udata) + do: + return err(osdefs.EBADF) + ok(SignalHandle(sigfd)) + + proc addProcess2*( + pid: int, + cb: CallbackFunc, + udata: pointer = nil + ): Result[ProcessHandle, OSErrorCode] = + ## Registers callback ``cb`` to be called when process with process + ## identifier ``pid`` exited. Returns process' descriptor, which can be + ## used to clear process callback via ``removeProcess``. + let loop = getThreadDispatcher() + var data: SelectorData + let procfd = ? loop.selector.registerProcess(pid, data) + withData(loop.selector, procfd, adata) do: + adata.reader = AsyncCallback(function: cb, udata: udata) + do: + return err(osdefs.EBADF) + ok(ProcessHandle(procfd)) + + proc removeSignal2*(signalHandle: SignalHandle): Result[void, OSErrorCode] = + ## Remove watching signal ``signal``. + getThreadDispatcher().selector.unregister2(cint(signalHandle)) + + proc removeProcess2*(procHandle: ProcessHandle): Result[void, OSErrorCode] = + ## Remove process' watching using process' descriptor ``procfd``. + getThreadDispatcher().selector.unregister2(cint(procHandle)) + + proc addSignal*(signal: int, cb: CallbackFunc, + udata: pointer = nil): SignalHandle {. 
+ raises: [OSError].} = + ## Start watching signal ``signal``, and when signal appears, call the + ## callback ``cb`` with specified argument ``udata``. Returns signal + ## identifier code, which can be used to remove signal callback + ## via ``removeSignal``. + addSignal2(signal, cb, udata).tryGet() + + proc removeSignal*(signalHandle: SignalHandle) {. + raises: [OSError].} = + ## Remove watching signal ``signal``. + removeSignal2(signalHandle).tryGet() + + proc addProcess*(pid: int, cb: CallbackFunc, + udata: pointer = nil): ProcessHandle {. + raises: [OSError].} = + ## Registers callback ``cb`` to be called when process with process + ## identifier ``pid`` exited. Returns process identifier, which can be + ## used to clear process callback via ``removeProcess``. + addProcess2(pid, cb, udata).tryGet() + + proc removeProcess*(procHandle: ProcessHandle) {. + raises: [OSError].} = + ## Remove process' watching using process' descriptor ``procHandle``. + removeProcess2(procHandle).tryGet() + + proc poll*() {.gcsafe.} = + ## Perform single asynchronous step. + let loop = getThreadDispatcher() + var curTime = Moment.now() + var curTimeout = 0 + + # On reentrant `poll` calls from `processCallbacks`, e.g., `waitFor`, + # complete pending work of the outer `processCallbacks` call. + # On non-reentrant `poll` calls, this only removes sentinel element. + processCallbacks(loop) + + # Moving expired timers to `loop.callbacks` and calculate timeout. + loop.processTimersGetTimeout(curTimeout) + + # Processing IO descriptors and all hardware events. + let count = + block: + let res = loop.selector.selectInto2(curTimeout, loop.keys) + if res.isErr(): + raiseOsDefect(res.error(), "poll(): Unable to get OS events") + res.get() + + for i in 0 ..< count: + let fd = loop.keys[i].fd + let events = loop.keys[i].events + + withData(loop.selector, cint(fd), adata) do: + if (Event.Read in events) or (events == {Event.Error}): + if not isNil(adata.reader.function): + loop.callbacks.addLast(adata.reader) + + if (Event.Write in events) or (events == {Event.Error}): + if not isNil(adata.writer.function): + loop.callbacks.addLast(adata.writer) + + if Event.User in events: + if not isNil(adata.reader.function): + loop.callbacks.addLast(adata.reader) + + when chronosEventEngine in ["epoll", "kqueue"]: + let customSet = {Event.Timer, Event.Signal, Event.Process, + Event.Vnode} + if customSet * events != {}: + if not isNil(adata.reader.function): + loop.callbacks.addLast(adata.reader) + + # Moving expired timers to `loop.callbacks`. + loop.processTimers() + + # We move idle callbacks to `loop.callbacks` only if there no pending + # network events. + if count == 0: + loop.processIdlers() + + # We move tick callbacks to `loop.callbacks` always. + processTicks(loop) + + # All callbacks which will be added during `processCallbacks` will be + # scheduled after the sentinel and are processed on next `poll()` call. + loop.callbacks.addLast(SentinelCallback) + processCallbacks(loop) + + # All callbacks done, skip `processCallbacks` at start. + loop.callbacks.addFirst(SentinelCallback) + +else: + proc initAPI() = discard + proc globalInit() = discard + +proc setThreadDispatcher*(disp: PDispatcher) = + ## Set current thread's dispatcher instance to ``disp``. + if not(gDisp.isNil()): + doAssert gDisp.callbacks.len == 0 + gDisp = disp + +proc getThreadDispatcher*(): PDispatcher = + ## Returns current thread's dispatcher instance. 
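+  ## The dispatcher is created lazily on the first call, so this is safe to
+  ## use even before any explicit `setThreadDispatcher()`.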
+ if gDisp.isNil(): + setThreadDispatcher(newDispatcher()) + gDisp + +proc setGlobalDispatcher*(disp: PDispatcher) {. + gcsafe, deprecated: "Use setThreadDispatcher() instead".} = + setThreadDispatcher(disp) + +proc getGlobalDispatcher*(): PDispatcher {. + gcsafe, deprecated: "Use getThreadDispatcher() instead".} = + getThreadDispatcher() + +proc setTimer*(at: Moment, cb: CallbackFunc, + udata: pointer = nil): TimerCallback = + ## Arrange for the callback ``cb`` to be called at the given absolute + ## timestamp ``at``. You can also pass ``udata`` to callback. + let loop = getThreadDispatcher() + result = TimerCallback(finishAt: at, + function: AsyncCallback(function: cb, udata: udata)) + loop.timers.push(result) + +proc clearTimer*(timer: TimerCallback) {.inline.} = + timer.function = default(AsyncCallback) + +proc addTimer*(at: Moment, cb: CallbackFunc, udata: pointer = nil) {. + inline, deprecated: "Use setTimer/clearTimer instead".} = + ## Arrange for the callback ``cb`` to be called at the given absolute + ## timestamp ``at``. You can also pass ``udata`` to callback. + discard setTimer(at, cb, udata) + +proc addTimer*(at: int64, cb: CallbackFunc, udata: pointer = nil) {. + inline, deprecated: "Use addTimer(Duration, cb, udata)".} = + discard setTimer(Moment.init(at, Millisecond), cb, udata) + +proc addTimer*(at: uint64, cb: CallbackFunc, udata: pointer = nil) {. + inline, deprecated: "Use addTimer(Duration, cb, udata)".} = + discard setTimer(Moment.init(int64(at), Millisecond), cb, udata) + +proc removeTimer*(at: Moment, cb: CallbackFunc, udata: pointer = nil) = + ## Remove timer callback ``cb`` with absolute timestamp ``at`` from waiting + ## queue. + let loop = getThreadDispatcher() + var list = cast[seq[TimerCallback]](loop.timers) + var index = -1 + for i in 0.. 0, "Number should be positive integer") + var + retFuture = newFuture[void]("chronos.stepsAsync(int)") + counter = 0 + continuation: proc(data: pointer) {.gcsafe, raises: [].} + + continuation = proc(data: pointer) {.gcsafe, raises: [].} = + if not(retFuture.finished()): + inc(counter) + if counter < number: + internalCallTick(continuation) + else: + retFuture.complete() + + if number <= 0: + retFuture.complete() + else: + internalCallTick(continuation) + + retFuture + +proc idleAsync*(): Future[void] = + ## Suspends the execution of the current asynchronous task until "idle" time. + ## + ## "idle" time its moment of time, when no network events were processed by + ## ``poll()`` call. + var retFuture = newFuture[void]("chronos.idleAsync()") + + proc continuation(data: pointer) {.gcsafe.} = + if not(retFuture.finished()): + retFuture.complete() + + proc cancellation(udata: pointer) {.gcsafe.} = + discard + + retFuture.cancelCallback = cancellation + callIdle(continuation, nil) + retFuture + +proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] = + ## Returns a future which will complete once ``fut`` completes or after + ## ``timeout`` milliseconds has elapsed. + ## + ## If ``fut`` completes first the returned future will hold true, + ## otherwise, if ``timeout`` milliseconds has elapsed first, the returned + ## future will hold false. 
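+  ##
+  ## Typical use (sketch):
+  ##
+  ##   if await withTimeout(someFut, 100.milliseconds):
+  ##     discard # `someFut` completed or failed within the allotted time
+  ##   else:
+  ##     discard # timed out - a cancellation request was sent to `someFut`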
+ var + retFuture = newFuture[bool]("chronos.withTimeout", + {FutureFlag.OwnCancelSchedule}) + moment: Moment + timer: TimerCallback + timeouted = false + + template completeFuture(fut: untyped): untyped = + if fut.failed() or fut.completed(): + retFuture.complete(true) + else: + retFuture.cancelAndSchedule() + + # TODO: raises annotation shouldn't be needed, but likely similar issue as + # https://github.com/nim-lang/Nim/issues/17369 + proc continuation(udata: pointer) {.gcsafe, raises: [].} = + if not(retFuture.finished()): + if timeouted: + retFuture.complete(false) + return + if not(fut.finished()): + # Timer exceeded first, we going to cancel `fut` and wait until it + # not completes. + timeouted = true + fut.cancelSoon() + else: + # Future `fut` completed/failed/cancelled first. + if not(isNil(timer)): + clearTimer(timer) + fut.completeFuture() + + # TODO: raises annotation shouldn't be needed, but likely similar issue as + # https://github.com/nim-lang/Nim/issues/17369 + proc cancellation(udata: pointer) {.gcsafe, raises: [].} = + if not(fut.finished()): + if not isNil(timer): + clearTimer(timer) + fut.cancelSoon() + else: + fut.completeFuture() + + if fut.finished(): + retFuture.complete(true) + else: + if timeout.isZero(): + retFuture.complete(false) + elif timeout.isInfinite(): + retFuture.cancelCallback = cancellation + fut.addCallback(continuation) + else: + moment = Moment.fromNow(timeout) + retFuture.cancelCallback = cancellation + timer = setTimer(moment, continuation, nil) + fut.addCallback(continuation) + + retFuture + +proc withTimeout*[T](fut: Future[T], timeout: int): Future[bool] {. + inline, deprecated: "Use withTimeout(Future[T], Duration)".} = + withTimeout(fut, timeout.milliseconds()) + +proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] = + ## Returns a future which will complete once future ``fut`` completes + ## or if timeout of ``timeout`` milliseconds has been expired. + ## + ## If ``timeout`` is ``-1``, then statement ``await wait(fut)`` is + ## equal to ``await fut``. + ## + ## TODO: In case when ``fut`` got cancelled, what result Future[T] + ## should return, because it can't be cancelled too. + var + retFuture = newFuture[T]("chronos.wait()", {FutureFlag.OwnCancelSchedule}) + moment: Moment + timer: TimerCallback + timeouted = false + + template completeFuture(fut: untyped): untyped = + if fut.failed(): + retFuture.fail(fut.error) + elif fut.cancelled(): + retFuture.cancelAndSchedule() + else: + when T is void: + retFuture.complete() + else: + retFuture.complete(fut.value) + + proc continuation(udata: pointer) {.raises: [].} = + if not(retFuture.finished()): + if timeouted: + retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) + return + if not(fut.finished()): + # Timer exceeded first. + timeouted = true + fut.cancelSoon() + else: + # Future `fut` completed/failed/cancelled first. 
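+        # The pending timer is no longer needed - clearing it prevents its
+        # callback from being invoked later.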
+ if not(isNil(timer)): + clearTimer(timer) + fut.completeFuture() + + var cancellation: proc(udata: pointer) {.gcsafe, raises: [].} + cancellation = proc(udata: pointer) {.gcsafe, raises: [].} = + if not(fut.finished()): + if not(isNil(timer)): + clearTimer(timer) + fut.cancelSoon() + else: + fut.completeFuture() + + if fut.finished(): + fut.completeFuture() + else: + if timeout.isZero(): + retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) + elif timeout.isInfinite(): + retFuture.cancelCallback = cancellation + fut.addCallback(continuation) + else: + moment = Moment.fromNow(timeout) + retFuture.cancelCallback = cancellation + timer = setTimer(moment, continuation, nil) + fut.addCallback(continuation) + + retFuture + +proc wait*[T](fut: Future[T], timeout = -1): Future[T] {. + inline, deprecated: "Use wait(Future[T], Duration)".} = + if timeout == -1: + wait(fut, InfiniteDuration) + elif timeout == 0: + wait(fut, ZeroDuration) + else: + wait(fut, timeout.milliseconds()) + + +when defined(windows): + import ../osdefs + + proc waitForSingleObject*(handle: HANDLE, + timeout: Duration): Future[WaitableResult] {. + raises: [].} = + ## Waits until the specified object is in the signaled state or the + ## time-out interval elapses. WaitForSingleObject() for asynchronous world. + let flags = WT_EXECUTEONLYONCE + + var + retFuture = newFuture[WaitableResult]("chronos.waitForSingleObject()") + waitHandle: WaitableHandle = nil + + proc continuation(udata: pointer) {.gcsafe.} = + doAssert(not(isNil(waitHandle))) + if not(retFuture.finished()): + let + ovl = cast[PtrCustomOverlapped](udata) + returnFlag = WINBOOL(ovl.data.bytesCount) + res = closeWaitable(waitHandle) + if res.isErr(): + retFuture.fail(newException(AsyncError, osErrorMsg(res.error()))) + else: + if returnFlag == TRUE: + retFuture.complete(WaitableResult.Timeout) + else: + retFuture.complete(WaitableResult.Ok) + + proc cancellation(udata: pointer) {.gcsafe.} = + doAssert(not(isNil(waitHandle))) + if not(retFuture.finished()): + discard closeWaitable(waitHandle) + + let wres = uint32(waitForSingleObject(handle, DWORD(0))) + if wres == WAIT_OBJECT_0: + retFuture.complete(WaitableResult.Ok) + return retFuture + elif wres == WAIT_ABANDONED: + retFuture.fail(newException(AsyncError, "Handle was abandoned")) + return retFuture + elif wres == WAIT_FAILED: + retFuture.fail(newException(AsyncError, osErrorMsg(osLastError()))) + return retFuture + + if timeout == ZeroDuration: + retFuture.complete(WaitableResult.Timeout) + return retFuture + + waitHandle = + block: + let res = registerWaitable(handle, flags, timeout, continuation, nil) + if res.isErr(): + retFuture.fail(newException(AsyncError, osErrorMsg(res.error()))) + return retFuture + res.get() + + retFuture.cancelCallback = cancellation + return retFuture diff --git a/chronos/asyncmacro2.nim b/chronos/internal/asyncmacro.nim similarity index 99% rename from chronos/asyncmacro2.nim rename to chronos/internal/asyncmacro.nim index 499f847e8..8d99155be 100644 --- a/chronos/asyncmacro2.nim +++ b/chronos/internal/asyncmacro.nim @@ -8,7 +8,9 @@ # distribution, for details about the copyright. 
# -import std/algorithm +import + std/[algorithm, macros, sequtils], + ../[futures, config] proc processBody(node, setResultSym, baseType: NimNode): NimNode {.compileTime.} = case node.kind diff --git a/chronos/internal/errors.nim b/chronos/internal/errors.nim new file mode 100644 index 000000000..083f7a2c3 --- /dev/null +++ b/chronos/internal/errors.nim @@ -0,0 +1,5 @@ +type + AsyncError* = object of CatchableError + ## Generic async exception + AsyncTimeoutError* = object of AsyncError + ## Timeout exception From f56d2866877314ded1179a06fda63c651a7e3d72 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 24 Oct 2023 16:21:07 +0200 Subject: [PATCH 071/146] introduce `asyncraises` to core future utilities (#454) * introduce `asyncraises` to core future utilities Similar to the introduction of `raises` into a codebase, `asyncraises` needs to be introduced gradually across all functionality before deriving benefit. This is a first introduction along with utilities to manage raises lists and transform them at compile time. Several scenarios ensue: * for trivial cases, adding `asyncraises` is enough and the framework deduces the rest * some functions "add" new asyncraises (similar to what `raise` does in "normal" code) - for example `wait` may raise all exceptions of the future passed to it and additionally a few of its own - this requires extending the raises list * som functions "remove" raises (similar to what `try/except` does) such as `nocancel` with blocks cancellations and therefore reduce the raising set Both of the above cases are currently handled by a macro, but depending on the situation lead to code organisation issues around return types and pragma limitations - in particular, to keep `asyncraises` backwards-compatibility, some code needs to exist in two versions which somewhat complicates the implementation. * add `asyncraises` versions for several `asyncfutures` utilities * when assigning exceptions to a `Future` via `fail`, check at compile time if possible and at runtime if not that the exception matches constraints * fix `waitFor` comments * move async raises to separate module, implement `or` --- README.md | 30 ++- chronos/internal/asyncfutures.nim | 288 ++++++++++++++++------------- chronos/internal/asyncmacro.nim | 9 +- chronos/internal/raisesfutures.nim | 124 +++++++++++++ tests/testmacro.nim | 60 ++++++ 5 files changed, 375 insertions(+), 136 deletions(-) create mode 100644 chronos/internal/raisesfutures.nim diff --git a/README.md b/README.md index c06cfa935..0a23ea16c 100644 --- a/README.md +++ b/README.md @@ -222,8 +222,8 @@ proc p1(): Future[void] {.async, asyncraises: [IOError].} = raise newException(IOError, "works") # Or any child of IOError ``` -Under the hood, the return type of `p1` will be rewritten to another type, -which will convey raises informations to await. +Under the hood, the return type of `p1` will be rewritten to an internal type, +which will convey raises informations to `await`. ```nim proc p2(): Future[void] {.async, asyncraises: [IOError].} = @@ -231,8 +231,10 @@ proc p2(): Future[void] {.async, asyncraises: [IOError].} = # can only raise IOError ``` -The hidden type (`RaiseTrackingFuture`) is implicitely convertible into a Future. 
-However, it may causes issues when creating callback or methods +Raw functions and callbacks that don't go through the `async` transformation but +still return a `Future` and interact with the rest of the framework also need to +be annotated with `asyncraises` to participate in the checked exception scheme: + ```nim proc p3(): Future[void] {.async, asyncraises: [IOError].} = let fut: Future[void] = p1() # works @@ -247,6 +249,24 @@ proc p3(): Future[void] {.async, asyncraises: [IOError].} = ) ``` +When `chronos` performs the `async` transformation, all code is placed in a +a special `try/except` clause that re-routes exception handling to the `Future`. + +Beacuse of this re-routing, functions that return a `Future` instance manually +never directly raise exceptions themselves - instead, exceptions are handled +indirectly via `await` or `Future.read`. When writing raw async functions, they +too must not raise exceptions - instead, they must store exceptions in the +future they return: + +```nim +proc p4(): Future[void] {.asyncraises: [ValueError].} = + let fut = newFuture[void] + + # Equivalent of `raise (ref ValueError)()` in raw async functions: + fut.fail((ref ValueError)(msg: "raising in raw async function")) + fut +``` + ### Platform independence Several functions in `chronos` are backed by the operating system, such as @@ -268,7 +288,7 @@ Because of this, the effect system thinks no exceptions are "leaking" because in fact, exception _handling_ is deferred to when the future is being read. Effectively, this means that while code can be compiled with -`{.push raises: [Defect]}`, the intended effect propagation and checking is +`{.push raises: []}`, the intended effect propagation and checking is **disabled** for `async` functions. To enable checking exception effects in `async` code, enable strict mode with diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 860b8b65d..abf28c716 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -8,12 +8,16 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) +{.push raises: [].} + import std/[sequtils, macros] import stew/base10 -import ./asyncengine +import ./[asyncengine, raisesfutures] import ../[config, futures] +export raisesfutures.InternalRaisesFuture + when chronosStackTrace: import std/strutils when defined(nimHasStacktracesModule): @@ -38,12 +42,6 @@ func `[]`*(loc: array[LocationKind, ptr SrcLoc], v: int): ptr SrcLoc {. else: raiseAssert("Unknown source location " & $v) type - InternalRaisesFuture*[T, E] = ref object of Future[T] - ## Future with a tuple of possible exception types - ## eg InternalRaisesFuture[void, (ValueError, OSError)] - ## Will be injected by `asyncraises`, should generally - ## not be used manually - FutureStr*[T] = ref object of Future[T] ## Future to hold GC strings gcholder*: string @@ -52,6 +50,8 @@ type ## Future to hold GC seqs gcholder*: seq[B] + SomeFuture = Future|InternalRaisesFuture + # Backwards compatibility for old FutureState name template Finished* {.deprecated: "Use Completed instead".} = Completed template Finished*(T: type FutureState): FutureState {. 
@@ -68,11 +68,18 @@ proc newFutureImpl[T](loc: ptr SrcLoc, flags: FutureFlags): Future[T] = internalInitFutureBase(fut, loc, FutureState.Pending, flags) fut -proc newInternalRaisesFutureImpl[T, E](loc: ptr SrcLoc): InternalRaisesFuture[T, E] = +proc newInternalRaisesFutureImpl[T, E]( + loc: ptr SrcLoc): InternalRaisesFuture[T, E] = let fut = InternalRaisesFuture[T, E]() internalInitFutureBase(fut, loc, FutureState.Pending, {}) fut +proc newInternalRaisesFutureImpl[T, E]( + loc: ptr SrcLoc, flags: FutureFlags): InternalRaisesFuture[T, E] = + let fut = InternalRaisesFuture[T, E]() + internalInitFutureBase(fut, loc, FutureState.Pending, flags) + fut + proc newFutureSeqImpl[A, B](loc: ptr SrcLoc): FutureSeq[A, B] = let fut = FutureSeq[A, B]() internalInitFutureBase(fut, loc, FutureState.Pending, {}) @@ -90,7 +97,8 @@ template newFuture*[T](fromProc: static[string] = "", ## Specifying ``fromProc``, which is a string specifying the name of the proc ## that this future belongs to, is a good habit as it helps with debugging. when declared(InternalRaisesFutureRaises): # injected by `asyncraises` - newInternalRaisesFutureImpl[T, InternalRaisesFutureRaises](getSrcLocation(fromProc)) + newInternalRaisesFutureImpl[T, InternalRaisesFutureRaises]( + getSrcLocation(fromProc), flags) else: newFutureImpl[T](getSrcLocation(fromProc), flags) @@ -214,53 +222,11 @@ proc fail(future: FutureBase, error: ref CatchableError, loc: ptr SrcLoc) = getStackTrace(error) future.finish(FutureState.Failed) -template fail*(future: FutureBase, error: ref CatchableError) = +template fail*( + future: FutureBase, error: ref CatchableError, warn: static bool = false) = ## Completes ``future`` with ``error``. fail(future, error, getSrcLocation()) -macro checkFailureType(future, error: typed): untyped = - let e = getTypeInst(future)[2] - let types = getType(e) - - if types.eqIdent("void"): - error("Can't raise exceptions on this Future") - - expectKind(types, nnkBracketExpr) - expectKind(types[0], nnkSym) - assert types[0].strVal == "tuple" - assert types.len > 1 - - expectKind(getTypeInst(error), nnkRefTy) - let toMatch = getTypeInst(error)[0] - - # Can't find a way to check `is` in the macro. (sameType doesn't - # work for inherited objects). Dirty hack here, for [IOError, OSError], - # this will generate: - # - # static: - # if not((`toMatch` is IOError) or (`toMatch` is OSError) - # or (`toMatch` is CancelledError) or false): - # raiseAssert("Can't fail with `toMatch`, only [IOError, OSError] is allowed") - var typeChecker = ident"false" - - for errorType in types[1..^1]: - typeChecker = newCall("or", typeChecker, newCall("is", toMatch, errorType)) - typeChecker = newCall( - "or", typeChecker, - newCall("is", toMatch, ident"CancelledError")) - - let errorMsg = "Can't fail with " & repr(toMatch) & ". Only " & repr(types[1..^1]) & " allowed" - - result = nnkStaticStmt.newNimNode(lineInfoFrom=error).add( - quote do: - if not(`typeChecker`): - raiseAssert(`errorMsg`) - ) - -template fail*[T, E](future: InternalRaisesFuture[T, E], error: ref CatchableError) = - checkFailureType(future, error) - fail(future, error, getSrcLocation()) - template newCancelledError(): ref CancelledError = (ref CancelledError)(msg: "Future operation cancelled!") @@ -572,29 +538,6 @@ proc read*(future: Future[void] ) {.raises: [CatchableError].} = # TODO: Make a custom exception type for this? 
raise newException(ValueError, "Future still in progress.") -proc read*[T: not void, E](future: InternalRaisesFuture[T, E] ): lent T = - ## Retrieves the value of ``future``. Future must be finished otherwise - ## this function will fail with a ``ValueError`` exception. - ## - ## If the result of the future is an error then that error will be raised. - if not future.finished(): - # TODO: Make a custom exception type for this? - raise newException(ValueError, "Future still in progress.") - - internalCheckComplete(future) - future.internalValue - -proc read*[E](future: InternalRaisesFuture[void, E]) = - ## Retrieves the value of ``future``. Future must be finished otherwise - ## this function will fail with a ``ValueError`` exception. - ## - ## If the result of the future is an error then that error will be raised. - if future.finished(): - internalCheckComplete(future) - else: - # TODO: Make a custom exception type for this? - raise newException(ValueError, "Future still in progress.") - proc readError*(future: FutureBase): ref CatchableError {.raises: [ValueError].} = ## Retrieves the exception stored in ``future``. ## @@ -621,8 +564,9 @@ template taskCancelMessage(future: FutureBase): string = "Asynchronous task " & taskFutureLocation(future) & " was cancelled!" proc waitFor*[T](fut: Future[T]): T {.raises: [CatchableError].} = - ## **Blocks** the current thread until the specified future completes. - ## There's no way to tell if poll or read raised the exception + ## **Blocks** the current thread until the specified future finishes and + ## reads it, potentially raising an exception if the future failed or was + ## cancelled. while not(fut.finished()): poll() @@ -716,20 +660,7 @@ proc `and`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] {. retFuture.cancelCallback = cancellation return retFuture -proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] = - ## Returns a future which will complete once either ``fut1`` or ``fut2`` - ## finish. - ## - ## If ``fut1`` or ``fut2`` future is failed, the result future will also be - ## failed with an error stored in ``fut1`` or ``fut2`` respectively. - ## - ## If both ``fut1`` and ``fut2`` future are completed or failed, the result - ## future will depend on the state of ``fut1`` future. So if ``fut1`` future - ## is failed, the result future will also be failed, if ``fut1`` future is - ## completed, the result future will also be completed. - ## - ## If cancelled, ``fut1`` and ``fut2`` futures WILL NOT BE cancelled. 
- var retFuture = newFuture[void]("chronos.or") +template orImpl*[T, Y](fut1: Future[T], fut2: Future[Y]): untyped = var cb: proc(udata: pointer) {.gcsafe, raises: [].} cb = proc(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): @@ -739,7 +670,7 @@ proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] = else: fut1.removeCallback(cb) if fut.failed(): - retFuture.fail(fut.error) + retFuture.fail(fut.error, warn = false) else: retFuture.complete() @@ -752,14 +683,14 @@ proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] = if fut1.finished(): if fut1.failed(): - retFuture.fail(fut1.error) + retFuture.fail(fut1.error, warn = false) else: retFuture.complete() return retFuture if fut2.finished(): if fut2.failed(): - retFuture.fail(fut2.error) + retFuture.fail(fut2.error, warn = false) else: retFuture.complete() return retFuture @@ -770,6 +701,23 @@ proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] = retFuture.cancelCallback = cancellation return retFuture +proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] = + ## Returns a future which will complete once either ``fut1`` or ``fut2`` + ## finish. + ## + ## If ``fut1`` or ``fut2`` future is failed, the result future will also be + ## failed with an error stored in ``fut1`` or ``fut2`` respectively. + ## + ## If both ``fut1`` and ``fut2`` future are completed or failed, the result + ## future will depend on the state of ``fut1`` future. So if ``fut1`` future + ## is failed, the result future will also be failed, if ``fut1`` future is + ## completed, the result future will also be completed. + ## + ## If cancelled, ``fut1`` and ``fut2`` futures WILL NOT BE cancelled. + var retFuture = newFuture[void]("chronos.or") + orImpl(fut1, fut2) + + proc all*[T](futs: varargs[Future[T]]): auto {. deprecated: "Use allFutures(varargs[Future[T]])".} = ## Returns a future which will complete once all futures in ``futs`` finish. @@ -908,7 +856,7 @@ proc oneValue*[T](futs: varargs[Future[T]]): Future[T] {. return retFuture proc cancelSoon(future: FutureBase, aftercb: CallbackFunc, udata: pointer, - loc: ptr SrcLoc) = + loc: ptr SrcLoc) {.raises: [].} = ## Perform cancellation ``future`` and call ``aftercb`` callback when ## ``future`` become finished (completed with value, failed or cancelled). ## @@ -965,7 +913,8 @@ template cancel*(future: FutureBase) {. ## Cancel ``future``. cancelSoon(future, nil, nil, getSrcLocation()) -proc cancelAndWait*(future: FutureBase, loc: ptr SrcLoc): Future[void] = +proc cancelAndWait*(future: FutureBase, loc: ptr SrcLoc): Future[void] {. + asyncraises: [CancelledError].} = ## Perform cancellation ``future`` return Future which will be completed when ## ``future`` become finished (completed with value, failed or cancelled). ## @@ -989,7 +938,7 @@ template cancelAndWait*(future: FutureBase): Future[void] = ## Cancel ``future``. cancelAndWait(future, getSrcLocation()) -proc noCancel*[T](future: Future[T]): Future[T] = +proc noCancel*[F: SomeFuture](future: F): auto = # asyncraises: asyncraiseOf(future) - CancelledError ## Prevent cancellation requests from propagating to ``future`` while ## forwarding its value or error when it finishes. ## @@ -997,16 +946,25 @@ proc noCancel*[T](future: Future[T]): Future[T] = ## should not be cancelled at all cost, for example closing sockets, pipes, ## connections or servers. Usually it become useful in exception or finally ## blocks. 
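  ##
  ## Sketch of the typical pattern (``conn`` is an assumed resource exposing an
  ## asynchronous ``closeWait()``):
  ##
  ##   finally:
  ##     await noCancel(conn.closeWait())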
- let retFuture = newFuture[T]("chronos.noCancel(T)", - {FutureFlag.OwnCancelSchedule}) + when F is InternalRaisesFuture: + type + E = F.E + InternalRaisesFutureRaises = E.remove(CancelledError) + + let retFuture = newFuture[F.T]("chronos.noCancel(T)", + {FutureFlag.OwnCancelSchedule}) template completeFuture() = if future.completed(): - when T is void: + when F.T is void: retFuture.complete() else: retFuture.complete(future.value) elif future.failed(): - retFuture.fail(future.error) + when F is Future: + retFuture.fail(future.error, warn = false) + when declared(InternalRaisesFutureRaises): + when InternalRaisesFutureRaises isnot void: + retFuture.fail(future.error, warn = false) else: raiseAssert("Unexpected future state [" & $future.state & "]") @@ -1019,7 +977,8 @@ proc noCancel*[T](future: Future[T]): Future[T] = future.addCallback(continuation) retFuture -proc allFutures*(futs: varargs[FutureBase]): Future[void] = +proc allFutures*(futs: varargs[FutureBase]): Future[void] {. + asyncraises: [CancelledError].} = ## Returns a future which will complete only when all futures in ``futs`` ## will be completed, failed or canceled. ## @@ -1057,7 +1016,8 @@ proc allFutures*(futs: varargs[FutureBase]): Future[void] = retFuture -proc allFutures*[T](futs: varargs[Future[T]]): Future[void] = +proc allFutures*[T](futs: varargs[Future[T]]): Future[void] {. + asyncraises: [CancelledError].} = ## Returns a future which will complete only when all futures in ``futs`` ## will be completed, failed or canceled. ## @@ -1070,7 +1030,8 @@ proc allFutures*[T](futs: varargs[Future[T]]): Future[void] = nfuts.add(future) allFutures(nfuts) -proc allFinished*[T](futs: varargs[Future[T]]): Future[seq[Future[T]]] = +proc allFinished*[F: SomeFuture](futs: varargs[F]): Future[seq[F]] {. + asyncraises: [CancelledError].} = ## Returns a future which will complete only when all futures in ``futs`` ## will be completed, failed or canceled. ## @@ -1080,7 +1041,7 @@ proc allFinished*[T](futs: varargs[Future[T]]): Future[seq[Future[T]]] = ## If the argument is empty, the returned future COMPLETES immediately. ## ## On cancel all the awaited futures ``futs`` WILL NOT BE cancelled. - var retFuture = newFuture[seq[Future[T]]]("chronos.allFinished()") + var retFuture = newFuture[seq[F]]("chronos.allFinished()") let totalFutures = len(futs) var finishedFutures = 0 @@ -1110,7 +1071,8 @@ proc allFinished*[T](futs: varargs[Future[T]]): Future[seq[Future[T]]] = return retFuture -proc one*[T](futs: varargs[Future[T]]): Future[Future[T]] = +proc one*[F: SomeFuture](futs: varargs[F]): Future[F] {. + asyncraises: [ValueError, CancelledError].} = ## Returns a future which will complete and return completed Future[T] inside, ## when one of the futures in ``futs`` will be completed, failed or canceled. ## @@ -1119,7 +1081,7 @@ proc one*[T](futs: varargs[Future[T]]): Future[Future[T]] = ## On success returned Future will hold finished Future[T]. ## ## On cancel futures in ``futs`` WILL NOT BE cancelled. 
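  ##
  ## Sketch: ``let winner = await one(futs)``, where ``futs`` is an assumed
  ## ``seq`` of pending futures and ``winner`` is the first of them to finish.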
- var retFuture = newFuture[Future[T]]("chronos.one()") + var retFuture = newFuture[F]("chronos.one()") if len(futs) == 0: retFuture.fail(newException(ValueError, "Empty Future[T] list")) @@ -1137,7 +1099,7 @@ proc one*[T](futs: varargs[Future[T]]): Future[Future[T]] = var cb: proc(udata: pointer) {.gcsafe, raises: [].} cb = proc(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): - var res: Future[T] + var res: F var rfut = cast[FutureBase](udata) for i in 0..= 1 + + types + +macro checkRaises*[T: CatchableError]( + future: InternalRaisesFuture, error: ref T, warn: static bool = true): untyped = + ## Generate code that checks that the given error is compatible with the + ## raises restrictions of `future`. + ## + ## This check is done either at compile time or runtime depending on the + ## information available at compile time - in particular, if the raises + ## inherit from `error`, we end up with the equivalent of a downcast which + ## raises a Defect if it fails. + let raises = getRaises(future) + + expectKind(getTypeInst(error), nnkRefTy) + let toMatch = getTypeInst(error)[0] + + var + typeChecker = ident"false" + maybeChecker = ident"false" + runtimeChecker = ident"false" + + for errorType in raises[1..^1]: + typeChecker = infix(typeChecker, "or", infix(toMatch, "is", errorType)) + maybeChecker = infix(maybeChecker, "or", infix(errorType, "is", toMatch)) + runtimeChecker = infix( + runtimeChecker, "or", + infix(error, "of", nnkBracketExpr.newTree(ident"typedesc", errorType))) + + let + errorMsg = "`fail`: `" & repr(toMatch) & "` incompatible with `asyncraises: " & repr(raises[1..^1]) & "`" + warningMsg = "Can't verify `fail` exception type at compile time - expected one of " & repr(raises[1..^1]) & ", got `" & repr(toMatch) & "`" + # A warning from this line means exception type will be verified at runtime + warning = if warn: + quote do: {.warning: `warningMsg`.} + else: newEmptyNode() + + # Cannot check inhertance in macro so we let `static` do the heavy lifting + quote do: + when not(`typeChecker`): + when not(`maybeChecker`): + static: + {.error: `errorMsg`.} + else: + `warning` + assert(`runtimeChecker`, `errorMsg`) diff --git a/tests/testmacro.nim b/tests/testmacro.nim index 2d95a7fad..2fc24be98 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -477,3 +477,63 @@ suite "Exceptions tracking": proc test44 {.asyncraises: [ValueError], async.} = raise newException(ValueError, "hey") checkNotCompiles: proc test33 {.asyncraises: [IOError], async.} = raise newException(ValueError, "hey") + + test "or errors": + proc testit {.asyncraises: [ValueError], async.} = + raise (ref ValueError)() + + proc testit2 {.asyncraises: [IOError], async.} = + raise (ref IOError)() + + proc test {.async, asyncraises: [ValueError, IOError].} = + await testit() or testit2() + + proc noraises() {.raises: [].} = + expect(ValueError): + try: + let f = test() + waitFor(f) + except IOError: + doAssert false + + noraises() + + test "Wait errors": + proc testit {.asyncraises: [ValueError], async.} = raise newException(ValueError, "hey") + + proc test {.async, asyncraises: [ValueError, AsyncTimeoutError, CancelledError].} = + await wait(testit(), 1000.milliseconds) + + proc noraises() {.raises: [].} = + try: + expect(ValueError): waitFor(test()) + except CancelledError: doAssert false + except AsyncTimeoutError: doAssert false + + noraises() + + test "Nocancel errors": + proc testit {.asyncraises: [ValueError, CancelledError], async.} = + await sleepAsync(5.milliseconds) + raise (ref 
ValueError)() + + proc test {.async, asyncraises: [ValueError].} = + await noCancel testit() + + proc noraises() {.raises: [].} = + expect(ValueError): + let f = test() + waitFor(f.cancelAndWait()) + waitFor(f) + + noraises() + + test "Defect on wrong exception type at runtime": + {.push warning[User]: off} + let f = InternalRaisesFuture[void, (ValueError,)]() + expect(Defect): f.fail((ref CatchableError)()) + {.pop.} + check: not f.finished() + + expect(Defect): f.fail((ref CatchableError)(), warn = false) + check: not f.finished() From 12dc36cfeee3ac487aef1278c9b324cc082dcfeb Mon Sep 17 00:00:00 2001 From: Tanguy Date: Wed, 25 Oct 2023 15:16:10 +0200 Subject: [PATCH 072/146] Update README regarding cancellation (#450) * Update README regarding cancellation * Apply suggestions from code review Co-authored-by: Eugene Kabanov --------- Co-authored-by: Jacek Sieka Co-authored-by: Eugene Kabanov --- README.md | 89 +++++++++++++++++++++++++++++++++---------------------- 1 file changed, 54 insertions(+), 35 deletions(-) diff --git a/README.md b/README.md index 0a23ea16c..495f9f8b3 100644 --- a/README.md +++ b/README.md @@ -301,45 +301,64 @@ effects on forward declarations, callbacks and methods using ### Cancellation support -Any running `Future` can be cancelled. This can be used to launch multiple -futures, and wait for one of them to finish, and cancel the rest of them, -to add timeout, or to let the user cancel a running task. +Any running `Future` can be cancelled. This can be used for timeouts, +to let a user cancel a running task, to start multiple futures in parallel +and cancel them as soon as one finishes, etc. ```nim -# Simple cancellation -let future = sleepAsync(10.minutes) -future.cancel() - -# Wait for cancellation -let future2 = sleepAsync(10.minutes) -await future2.cancelAndWait() - -# Race between futures -proc retrievePage(uri: string): Future[string] {.async.} = - # requires to import uri, chronos/apps/http/httpclient, stew/byteutils - let httpSession = HttpSessionRef.new() - try: - resp = await httpSession.fetch(parseUri(uri)) - result = string.fromBytes(resp.data) - finally: - # be sure to always close the session - await httpSession.closeWait() - -let - futs = - @[ - retrievePage("https://duckduckgo.com/?q=chronos"), - retrievePage("https://www.google.fr/search?q=chronos") - ] - -let finishedFut = await one(futs) -for fut in futs: - if not fut.finished: - fut.cancel() -echo "Result: ", await finishedFut +import chronos/apps/http/httpclient + +proc cancellationExample() {.async.} = + # Simple cancellation + let future = sleepAsync(10.minutes) + future.cancelSoon() + # `cancelSoon` will not wait for the cancellation + # to be finished, so the Future could still be + # pending at this point. + + # Wait for cancellation + let future2 = sleepAsync(10.minutes) + await future2.cancelAndWait() + # Using `cancelAndWait`, we know that future2 isn't + # pending anymore. 
However, it could have completed + # before cancellation happened (in which case, it + # will hold a value) + + # Race between futures + proc retrievePage(uri: string): Future[string] {.async.} = + let httpSession = HttpSessionRef.new() + try: + let resp = await httpSession.fetch(parseUri(uri)) + return bytesToString(resp.data) + finally: + # be sure to always close the session + # `finally` will run also during cancellation - + # `noCancel` ensures that `closeWait` doesn't get cancelled + await noCancel(httpSession.closeWait()) + + let + futs = + @[ + retrievePage("https://duckduckgo.com/?q=chronos"), + retrievePage("https://www.google.fr/search?q=chronos") + ] + + let finishedFut = await one(futs) + for fut in futs: + if not fut.finished: + fut.cancelSoon() + echo "Result: ", await finishedFut + +waitFor(cancellationExample()) ``` -When an `await` is cancelled, it will raise a `CancelledError`: +Even if cancellation is initiated, it is not guaranteed that +the operation gets cancelled - the future might still be completed +or fail depending on the ordering of events and the specifics of +the operation. + +If the future indeed gets cancelled, `await` will raise a +`CancelledError` as is likely to happen in the following example: ```nim proc c1 {.async.} = echo "Before sleep" From 8375770fe578fb880570838cc558753bd6af6809 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 30 Oct 2023 15:27:25 +0200 Subject: [PATCH 073/146] Fix unreachable code places. (#459) * Fix unreachable code. * Use implicit returns instead. --- chronos/osdefs.nim | 2 ++ chronos/streams/tlsstream.nim | 34 ++++++++++++---------------------- chronos/transports/osnet.nim | 4 ++-- 3 files changed, 16 insertions(+), 24 deletions(-) diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 75ceb6769..78de4b750 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -1617,6 +1617,8 @@ elif defined(linux): # RTA_PRIORITY* = 6'u16 RTA_PREFSRC* = 7'u16 # RTA_METRICS* = 8'u16 + RTM_NEWLINK* = 16'u16 + RTM_NEWROUTE* = 24'u16 RTM_F_LOOKUP_TABLE* = 0x1000 diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 6432a10d4..0c8efb945 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -157,17 +157,15 @@ proc tlsWriteRec(engine: ptr SslEngineContext, doAssert(length != 0 and not isNil(buf)) await writer.wsource.write(buf, int(length)) sslEngineSendrecAck(engine[], length) - return TLSResult.Success + TLSResult.Success except AsyncStreamError as exc: writer.state = AsyncStreamState.Error writer.error = exc - return TLSResult.Error + TLSResult.Error except CancelledError: if writer.state == AsyncStreamState.Running: writer.state = AsyncStreamState.Stopped - return TLSResult.Stopped - - return TLSResult.Error + TLSResult.Stopped proc tlsWriteApp(engine: ptr SslEngineContext, writer: TLSStreamWriter): Future[TLSResult] {.async.} = @@ -182,7 +180,6 @@ proc tlsWriteApp(engine: ptr SslEngineContext, # (and discarded). writer.state = AsyncStreamState.Finished return TLSResult.WriteEof - let toWrite = min(int(length), item.size) copyOut(buf, item, toWrite) if int(length) >= item.size: @@ -190,7 +187,6 @@ proc tlsWriteApp(engine: ptr SslEngineContext, sslEngineSendappAck(engine[], uint(item.size)) sslEngineFlush(engine[], 0) item.future.complete() - return TLSResult.Success else: # BearSSL is not ready to accept whole item, so we will send # only part of item and adjust offset. 
@@ -198,17 +194,15 @@ proc tlsWriteApp(engine: ptr SslEngineContext, item.size = item.size - int(length) writer.queue.addFirstNoWait(item) sslEngineSendappAck(engine[], length) - return TLSResult.Success + TLSResult.Success else: sslEngineClose(engine[]) item.future.complete() - return TLSResult.Success + TLSResult.Success except CancelledError: if writer.state == AsyncStreamState.Running: writer.state = AsyncStreamState.Stopped - return TLSResult.Stopped - - return TLSResult.Error + TLSResult.Stopped proc tlsReadRec(engine: ptr SslEngineContext, reader: TLSStreamReader): Future[TLSResult] {.async.} = @@ -219,19 +213,17 @@ proc tlsReadRec(engine: ptr SslEngineContext, sslEngineRecvrecAck(engine[], uint(res)) if res == 0: sslEngineClose(engine[]) - return TLSResult.ReadEof + TLSResult.ReadEof else: - return TLSResult.Success + TLSResult.Success except AsyncStreamError as exc: reader.state = AsyncStreamState.Error reader.error = exc - return TLSResult.Error + TLSResult.Error except CancelledError: if reader.state == AsyncStreamState.Running: reader.state = AsyncStreamState.Stopped - return TLSResult.Stopped - - return TLSResult.Error + TLSResult.Stopped proc tlsReadApp(engine: ptr SslEngineContext, reader: TLSStreamReader): Future[TLSResult] {.async.} = @@ -240,13 +232,11 @@ proc tlsReadApp(engine: ptr SslEngineContext, var buf = sslEngineRecvappBuf(engine[], length) await upload(addr reader.buffer, buf, int(length)) sslEngineRecvappAck(engine[], length) - return TLSResult.Success + TLSResult.Success except CancelledError: if reader.state == AsyncStreamState.Running: reader.state = AsyncStreamState.Stopped - return TLSResult.Stopped - - return TLSResult.Error + TLSResult.Stopped template readAndReset(fut: untyped) = if fut.finished(): diff --git a/chronos/transports/osnet.nim b/chronos/transports/osnet.nim index 21adb656e..99dabd7e2 100644 --- a/chronos/transports/osnet.nim +++ b/chronos/transports/osnet.nim @@ -677,10 +677,10 @@ when defined(linux): var msg = cast[ptr NlMsgHeader](addr data[0]) var endflag = false while NLMSG_OK(msg, length): - if msg.nlmsg_type == NLMSG_ERROR: + if msg.nlmsg_type in [uint16(NLMSG_DONE), uint16(NLMSG_ERROR)]: endflag = true break - else: + elif msg.nlmsg_type == RTM_NEWROUTE: res = processRoute(msg) endflag = true break From a70b145964dddd64d1d0af567da30d0572f2a10e Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 30 Oct 2023 15:27:50 +0200 Subject: [PATCH 074/146] IPv4/IPv6 dualstack (#456) * Initial commit. * Fix tests. * Fix linux compilation issue. * Add getDomain() implementation. Add getDomain() tests. Add datagram tests. * Fix style errors. * Deprecate NetFlag. Deprecate new flags in ServerFlags. Add isAvailable(). Fix setDualstack() to ignore errors on `Auto`. Updatetests. * Deprecate some old procedures. Improve datagram transport a bit. * Address review comments, and fix tests. * Fix setDescriptorBlocking() issue. Recover connect() dualstack behavior. Add test for connect() IPv6-[IPv4 mapped] addresses. * Fix alignment code issue. Fix TcpNoDelay was not available on Windows. * Add dualstack support to HTTP/HTTPS client/server. 
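A caller-side sketch of the new knob (the `address` and `socketFlags` values
are assumed; `DualStackType.Auto` is the default used throughout this patch):

    let server = createStreamServer(address, flags = socketFlags,
                                    dualstack = DualStackType.Auto)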
--- chronos/apps/http/httpclient.nim | 10 +- chronos/apps/http/httpserver.nim | 5 +- chronos/apps/http/shttpserver.nim | 5 +- chronos/handles.nim | 176 +++++++++++++------- chronos/internal/asyncengine.nim | 13 ++ chronos/osdefs.nim | 92 +++++++---- chronos/oserrno.nim | 1 + chronos/osutils.nim | 4 + chronos/transports/common.nim | 79 ++++++++- chronos/transports/datagram.nim | 193 ++++++++++++---------- chronos/transports/stream.nim | 258 ++++++++++++++++-------------- tests/testdatagram.nim | 126 +++++++++++++++ tests/teststream.nim | 127 +++++++++++++++ 13 files changed, 791 insertions(+), 298 deletions(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 01e2bab12..34089c70d 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -126,6 +126,7 @@ type connectionsCount*: int socketFlags*: set[SocketFlags] flags*: HttpClientFlags + dualstack*: DualStackType HttpAddress* = object id*: string @@ -263,7 +264,8 @@ proc new*(t: typedesc[HttpSessionRef], maxConnections = -1, idleTimeout = HttpConnectionIdleTimeout, idlePeriod = HttpConnectionCheckPeriod, - socketFlags: set[SocketFlags] = {}): HttpSessionRef {. + socketFlags: set[SocketFlags] = {}, + dualstack = DualStackType.Auto): HttpSessionRef {. raises: [] .} = ## Create new HTTP session object. ## @@ -283,7 +285,8 @@ proc new*(t: typedesc[HttpSessionRef], idleTimeout: idleTimeout, idlePeriod: idlePeriod, connections: initTable[string, seq[HttpClientConnectionRef]](), - socketFlags: socketFlags + socketFlags: socketFlags, + dualstack: dualstack ) res.watcherFut = if HttpClientFlag.Http11Pipeline in flags: @@ -620,7 +623,8 @@ proc connect(session: HttpSessionRef, let transp = try: await connect(address, bufferSize = session.connectionBufferSize, - flags = session.socketFlags) + flags = session.socketFlags, + dualstack = session.dualstack) except CancelledError as exc: raise exc except CatchableError: diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index f0788e2ea..2ab53178d 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -191,7 +191,8 @@ proc new*(htype: typedesc[HttpServerRef], backlogSize: int = DefaultBacklogSize, httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, - maxRequestBodySize: int = 1_048_576): HttpResult[HttpServerRef] {. + maxRequestBodySize: int = 1_048_576, + dualstack = DualStackType.Auto): HttpResult[HttpServerRef] {. 
raises: [].} = let serverUri = @@ -206,7 +207,7 @@ proc new*(htype: typedesc[HttpServerRef], let serverInstance = try: createStreamServer(address, flags = socketFlags, bufferSize = bufferSize, - backlog = backlogSize) + backlog = backlogSize, dualstack = dualstack) except TransportOsError as exc: return err(exc.msg) except CatchableError as exc: diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim index 6d321a02d..030059711 100644 --- a/chronos/apps/http/shttpserver.nim +++ b/chronos/apps/http/shttpserver.nim @@ -92,7 +92,8 @@ proc new*(htype: typedesc[SecureHttpServerRef], backlogSize: int = DefaultBacklogSize, httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, - maxRequestBodySize: int = 1_048_576 + maxRequestBodySize: int = 1_048_576, + dualstack = DualStackType.Auto ): HttpResult[SecureHttpServerRef] {.raises: [].} = doAssert(not(isNil(tlsPrivateKey)), "TLS private key must not be nil!") @@ -110,7 +111,7 @@ proc new*(htype: typedesc[SecureHttpServerRef], let serverInstance = try: createStreamServer(address, flags = socketFlags, bufferSize = bufferSize, - backlog = backlogSize) + backlog = backlogSize, dualstack = dualstack) except TransportOsError as exc: return err(exc.msg) except CatchableError as exc: diff --git a/chronos/handles.nim b/chronos/handles.nim index 2348b33cc..afa57fb27 100644 --- a/chronos/handles.nim +++ b/chronos/handles.nim @@ -21,66 +21,113 @@ const asyncInvalidSocket* = AsyncFD(osdefs.INVALID_SOCKET) asyncInvalidPipe* = asyncInvalidSocket -proc setSocketBlocking*(s: SocketHandle, blocking: bool): bool = +proc setSocketBlocking*(s: SocketHandle, blocking: bool): bool {. + deprecated: "Please use setDescriptorBlocking() instead".} = ## Sets blocking mode on socket. - when defined(windows) or defined(nimdoc): - var mode = clong(ord(not blocking)) - if osdefs.ioctlsocket(s, osdefs.FIONBIO, addr(mode)) == -1: - false - else: - true - else: - let x: int = osdefs.fcntl(s, osdefs.F_GETFL, 0) - if x == -1: - false - else: - let mode = - if blocking: x and not osdefs.O_NONBLOCK else: x or osdefs.O_NONBLOCK - if osdefs.fcntl(s, osdefs.F_SETFL, mode) == -1: - false - else: - true + setDescriptorBlocking(s, blocking).isOkOr: + return false + true + +proc setSockOpt2*(socket: AsyncFD, + level, optname, optval: int): Result[void, OSErrorCode] = + var value = cint(optval) + let res = osdefs.setsockopt(SocketHandle(socket), cint(level), cint(optname), + addr(value), SockLen(sizeof(value))) + if res == -1: + return err(osLastError()) + ok() -proc setSockOpt*(socket: AsyncFD, level, optname, optval: int): bool = +proc setSockOpt2*(socket: AsyncFD, level, optname: int, value: pointer, + valuelen: int): Result[void, OSErrorCode] = + ## `setsockopt()` for custom options (pointer and length). + ## Returns ``true`` on success, ``false`` on error. + let res = osdefs.setsockopt(SocketHandle(socket), cint(level), cint(optname), + value, SockLen(valuelen)) + if res == -1: + return err(osLastError()) + ok() + +proc setSockOpt*(socket: AsyncFD, level, optname, optval: int): bool {. + deprecated: "Please use setSockOpt2() instead".} = ## `setsockopt()` for integer options. ## Returns ``true`` on success, ``false`` on error. - var value = cint(optval) - osdefs.setsockopt(SocketHandle(socket), cint(level), cint(optname), - addr(value), SockLen(sizeof(value))) >= cint(0) + setSockOpt2(socket, level, optname, optval).isOk proc setSockOpt*(socket: AsyncFD, level, optname: int, value: pointer, - valuelen: int): bool = + valuelen: int): bool {. 
+ deprecated: "Please use setSockOpt2() instead".} = ## `setsockopt()` for custom options (pointer and length). ## Returns ``true`` on success, ``false`` on error. - osdefs.setsockopt(SocketHandle(socket), cint(level), cint(optname), value, - SockLen(valuelen)) >= cint(0) + setSockOpt2(socket, level, optname, value, valuelen).isOk + +proc getSockOpt2*(socket: AsyncFD, + level, optname: int): Result[cint, OSErrorCode] = + var + value: cint + size = SockLen(sizeof(value)) + let res = osdefs.getsockopt(SocketHandle(socket), cint(level), cint(optname), + addr(value), addr(size)) + if res == -1: + return err(osLastError()) + ok(value) + +proc getSockOpt2*(socket: AsyncFD, level, optname: int, + T: type): Result[T, OSErrorCode] = + var + value = default(T) + size = SockLen(sizeof(value)) + let res = osdefs.getsockopt(SocketHandle(socket), cint(level), cint(optname), + cast[ptr byte](addr(value)), addr(size)) + if res == -1: + return err(osLastError()) + ok(value) -proc getSockOpt*(socket: AsyncFD, level, optname: int, value: var int): bool = +proc getSockOpt*(socket: AsyncFD, level, optname: int, value: var int): bool {. + deprecated: "Please use getSockOpt2() instead".} = ## `getsockopt()` for integer options. ## Returns ``true`` on success, ``false`` on error. - var res: cint - var size = SockLen(sizeof(res)) - if osdefs.getsockopt(SocketHandle(socket), cint(level), cint(optname), - addr(res), addr(size)) >= cint(0): - value = int(res) - true - else: - false + value = getSockOpt2(socket, level, optname).valueOr: + return false + true -proc getSockOpt*(socket: AsyncFD, level, optname: int, value: pointer, - valuelen: var int): bool = +proc getSockOpt*(socket: AsyncFD, level, optname: int, value: var pointer, + valuelen: var int): bool {. + deprecated: "Please use getSockOpt2() instead".} = ## `getsockopt()` for custom options (pointer and length). ## Returns ``true`` on success, ``false`` on error. osdefs.getsockopt(SocketHandle(socket), cint(level), cint(optname), value, cast[ptr SockLen](addr valuelen)) >= cint(0) -proc getSocketError*(socket: AsyncFD, err: var int): bool = +proc getSocketError*(socket: AsyncFD, err: var int): bool {. + deprecated: "Please use getSocketError() instead".} = ## Recover error code associated with socket handle ``socket``. - getSockOpt(socket, cint(osdefs.SOL_SOCKET), cint(osdefs.SO_ERROR), err) + err = getSockOpt2(socket, cint(osdefs.SOL_SOCKET), + cint(osdefs.SO_ERROR)).valueOr: + return false + true + +proc getSocketError2*(socket: AsyncFD): Result[cint, OSErrorCode] = + getSockOpt2(socket, cint(osdefs.SOL_SOCKET), cint(osdefs.SO_ERROR)) + +proc isAvailable*(domain: Domain): bool = + when defined(windows): + let fd = wsaSocket(toInt(domain), toInt(SockType.SOCK_STREAM), + toInt(Protocol.IPPROTO_TCP), nil, GROUP(0), 0'u32) + if fd == osdefs.INVALID_SOCKET: + return if osLastError() == osdefs.WSAEAFNOSUPPORT: false else: true + discard closeFd(fd) + true + else: + let fd = osdefs.socket(toInt(domain), toInt(SockType.SOCK_STREAM), + toInt(Protocol.IPPROTO_TCP)) + if fd == -1: + return if osLastError() == osdefs.EAFNOSUPPORT: false else: true + discard closeFd(fd) + true proc createAsyncSocket2*(domain: Domain, sockType: SockType, - protocol: Protocol, - inherit = true): Result[AsyncFD, OSErrorCode] = + protocol: Protocol, + inherit = true): Result[AsyncFD, OSErrorCode] = ## Creates new asynchronous socket. 
when defined(windows): let flags = @@ -93,15 +140,12 @@ proc createAsyncSocket2*(domain: Domain, sockType: SockType, if fd == osdefs.INVALID_SOCKET: return err(osLastError()) - let bres = setDescriptorBlocking(fd, false) - if bres.isErr(): + setDescriptorBlocking(fd, false).isOkOr: discard closeFd(fd) - return err(bres.error()) - - let res = register2(AsyncFD(fd)) - if res.isErr(): + return err(error) + register2(AsyncFD(fd)).isOkOr: discard closeFd(fd) - return err(res.error()) + return err(error) ok(AsyncFD(fd)) else: @@ -114,23 +158,20 @@ proc createAsyncSocket2*(domain: Domain, sockType: SockType, let fd = osdefs.socket(toInt(domain), socketType, toInt(protocol)) if fd == -1: return err(osLastError()) - let res = register2(AsyncFD(fd)) - if res.isErr(): + register2(AsyncFD(fd)).isOkOr: discard closeFd(fd) - return err(res.error()) + return err(error) ok(AsyncFD(fd)) else: let fd = osdefs.socket(toInt(domain), toInt(sockType), toInt(protocol)) if fd == -1: return err(osLastError()) - let bres = setDescriptorFlags(cint(fd), true, true) - if bres.isErr(): + setDescriptorFlags(cint(fd), true, true).isOkOr: discard closeFd(fd) - return err(bres.error()) - let res = register2(AsyncFD(fd)) - if res.isErr(): + return err(error) + register2(AsyncFD(fd)).isOkOr: discard closeFd(fd) - return err(bres.error()) + return err(error) ok(AsyncFD(fd)) proc wrapAsyncSocket2*(sock: cint|SocketHandle): Result[AsyncFD, OSErrorCode] = @@ -230,3 +271,26 @@ proc createAsyncPipe*(): tuple[read: AsyncFD, write: AsyncFD] = else: let pipes = res.get() (read: AsyncFD(pipes.read), write: AsyncFD(pipes.write)) + +proc getDualstack*(fd: AsyncFD): Result[bool, OSErrorCode] = + ## Returns `true` if `IPV6_V6ONLY` socket option set to `false`. + var + flag = cint(0) + size = SockLen(sizeof(flag)) + let res = osdefs.getsockopt(SocketHandle(fd), cint(osdefs.IPPROTO_IPV6), + cint(osdefs.IPV6_V6ONLY), addr(flag), addr(size)) + if res == -1: + return err(osLastError()) + ok(flag == cint(0)) + +proc setDualstack*(fd: AsyncFD, value: bool): Result[void, OSErrorCode] = + ## Sets `IPV6_V6ONLY` socket option value to `false` if `value == true` and + ## to `true` if `value == false`. + var + flag = cint(if value: 0 else: 1) + size = SockLen(sizeof(flag)) + let res = osdefs.setsockopt(SocketHandle(fd), cint(osdefs.IPPROTO_IPV6), + cint(osdefs.IPV6_V6ONLY), addr(flag), size) + if res == -1: + return err(osLastError()) + ok() diff --git a/chronos/internal/asyncengine.nim b/chronos/internal/asyncengine.nim index 5a46f0489..ebcc27850 100644 --- a/chronos/internal/asyncengine.nim +++ b/chronos/internal/asyncengine.nim @@ -670,6 +670,19 @@ when defined(windows): if not(isNil(aftercb)): loop.callbacks.addLast(AsyncCallback(function: aftercb, udata: param)) + proc unregisterAndCloseFd*(fd: AsyncFD): Result[void, OSErrorCode] = + ## Unregister from system queue and close asynchronous socket. + ## + ## NOTE: Use this function to close temporary sockets/pipes only (which + ## are not exposed to the public and not supposed to be used/reused). + ## Please use closeSocket(AsyncFD) and closeHandle(AsyncFD) instead. + doAssert(fd != AsyncFD(osdefs.INVALID_SOCKET)) + unregister(fd) + if closeFd(SocketHandle(fd)) != 0: + err(osLastError()) + else: + ok() + proc contains*(disp: PDispatcher, fd: AsyncFD): bool = ## Returns ``true`` if ``fd`` is registered in thread's dispatcher. 
fd in disp.handles diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 78de4b750..ab0772112 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -122,6 +122,7 @@ when defined(windows): SO_UPDATE_ACCEPT_CONTEXT* = 0x700B SO_CONNECT_TIME* = 0x700C SO_UPDATE_CONNECT_CONTEXT* = 0x7010 + SO_PROTOCOL_INFOW* = 0x2005 FILE_FLAG_FIRST_PIPE_INSTANCE* = 0x00080000'u32 FILE_FLAG_OPEN_NO_RECALL* = 0x00100000'u32 @@ -258,6 +259,9 @@ when defined(windows): FIONBIO* = WSAIOW(102, 126) HANDLE_FLAG_INHERIT* = 1'u32 + IPV6_V6ONLY* = 27 + MAX_PROTOCOL_CHAIN* = 7 + WSAPROTOCOL_LEN* = 255 type LONG* = int32 @@ -441,6 +445,32 @@ when defined(windows): prefix*: SOCKADDR_INET prefixLength*: uint8 + WSAPROTOCOLCHAIN* {.final, pure.} = object + chainLen*: int32 + chainEntries*: array[MAX_PROTOCOL_CHAIN, DWORD] + + WSAPROTOCOL_INFO* {.final, pure.} = object + dwServiceFlags1*: uint32 + dwServiceFlags2*: uint32 + dwServiceFlags3*: uint32 + dwServiceFlags4*: uint32 + dwProviderFlags*: uint32 + providerId*: GUID + dwCatalogEntryId*: DWORD + protocolChain*: WSAPROTOCOLCHAIN + iVersion*: int32 + iAddressFamily*: int32 + iMaxSockAddr*: int32 + iMinSockAddr*: int32 + iSocketType*: int32 + iProtocol*: int32 + iProtocolMaxOffset*: int32 + iNetworkByteOrder*: int32 + iSecurityScheme*: int32 + dwMessageSize*: uint32 + dwProviderReserved*: uint32 + szProtocol*: array[WSAPROTOCOL_LEN + 1, WCHAR] + MibIpForwardRow2* {.final, pure.} = object interfaceLuid*: uint64 interfaceIndex*: uint32 @@ -890,7 +920,7 @@ elif defined(macos) or defined(macosx): O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM, SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK, AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR, - SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, + SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, SHUT_RD, SHUT_WR, SHUT_RDWR, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, @@ -915,7 +945,7 @@ elif defined(macos) or defined(macosx): O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM, SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK, AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR, - SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, + SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, SHUT_RD, SHUT_WR, SHUT_RDWR, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, @@ -977,7 +1007,8 @@ elif defined(linux): SOL_SOCKET, SO_ERROR, RLIMIT_NOFILE, MSG_NOSIGNAL, MSG_PEEK, AF_INET, AF_INET6, AF_UNIX, SO_REUSEADDR, SO_REUSEPORT, - SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, + SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6, + IPV6_MULTICAST_HOPS, SOCK_DGRAM, SOCK_STREAM, SHUT_RD, SHUT_WR, SHUT_RDWR, POLLIN, POLLOUT, POLLERR, POLLHUP, POLLNVAL, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, @@ -1005,7 +1036,7 @@ elif defined(linux): SOL_SOCKET, SO_ERROR, RLIMIT_NOFILE, MSG_NOSIGNAL, MSG_PEEK, AF_INET, AF_INET6, AF_UNIX, SO_REUSEADDR, SO_REUSEPORT, - SO_BROADCAST, IPPROTO_IP, IPV6_MULTICAST_HOPS, + SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, SOCK_DGRAM, SOCK_STREAM, SHUT_RD, SHUT_WR, SHUT_RDWR, POLLIN, POLLOUT, POLLERR, POLLHUP, POLLNVAL, SIGHUP, SIGINT, SIGQUIT, SIGILL, SIGTRAP, SIGABRT, @@ -1127,7 +1158,7 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM, SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK, AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR, - SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, + SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6, 
IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, SHUT_RD, SHUT_WR, SHUT_RDWR, @@ -1154,7 +1185,7 @@ elif defined(freebsd) or defined(openbsd) or defined(netbsd) or O_NONBLOCK, SOL_SOCKET, SOCK_RAW, SOCK_DGRAM, SOCK_STREAM, MSG_NOSIGNAL, MSG_PEEK, AF_INET, AF_INET6, AF_UNIX, SO_ERROR, SO_REUSEADDR, - SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, + SO_REUSEPORT, SO_BROADCAST, IPPROTO_IP, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, SOCK_DGRAM, RLIMIT_NOFILE, SIG_BLOCK, SIG_UNBLOCK, CLOCK_MONOTONIC, SHUT_RD, SHUT_WR, SHUT_RDWR, @@ -1182,47 +1213,52 @@ when defined(linux): SOCK_CLOEXEC* = 0x80000 TCP_NODELAY* = cint(1) IPPROTO_TCP* = 6 -elif defined(freebsd) or defined(netbsd) or defined(dragonfly): + O_CLOEXEC* = 0x80000 + POSIX_SPAWN_USEVFORK* = 0x40 + IPV6_V6ONLY* = 26 +elif defined(freebsd): const SOCK_NONBLOCK* = 0x20000000 SOCK_CLOEXEC* = 0x10000000 TCP_NODELAY* = cint(1) IPPROTO_TCP* = 6 -elif defined(openbsd): + O_CLOEXEC* = 0x00100000 + POSIX_SPAWN_USEVFORK* = 0x00 + IPV6_V6ONLY* = 27 +elif defined(netbsd): const - SOCK_CLOEXEC* = 0x8000 - SOCK_NONBLOCK* = 0x4000 + SOCK_NONBLOCK* = 0x20000000 + SOCK_CLOEXEC* = 0x10000000 TCP_NODELAY* = cint(1) IPPROTO_TCP* = 6 -elif defined(macos) or defined(macosx): + O_CLOEXEC* = 0x00400000 + POSIX_SPAWN_USEVFORK* = 0x00 + IPV6_V6ONLY* = 27 +elif defined(dragonfly): const + SOCK_NONBLOCK* = 0x20000000 + SOCK_CLOEXEC* = 0x10000000 TCP_NODELAY* = cint(1) - IP_MULTICAST_TTL* = cint(10) IPPROTO_TCP* = 6 - -when defined(linux): - const - O_CLOEXEC* = 0x80000 - POSIX_SPAWN_USEVFORK* = 0x40 -elif defined(freebsd): - const - O_CLOEXEC* = 0x00100000 + O_CLOEXEC* = 0x00020000 POSIX_SPAWN_USEVFORK* = 0x00 + IPV6_V6ONLY* = 27 elif defined(openbsd): const + SOCK_CLOEXEC* = 0x8000 + SOCK_NONBLOCK* = 0x4000 + TCP_NODELAY* = cint(1) + IPPROTO_TCP* = 6 O_CLOEXEC* = 0x10000 POSIX_SPAWN_USEVFORK* = 0x00 -elif defined(netbsd): - const - O_CLOEXEC* = 0x00400000 - POSIX_SPAWN_USEVFORK* = 0x00 -elif defined(dragonfly): - const - O_CLOEXEC* = 0x00020000 - POSIX_SPAWN_USEVFORK* = 0x00 + IPV6_V6ONLY* = 27 elif defined(macos) or defined(macosx): const + TCP_NODELAY* = cint(1) + IP_MULTICAST_TTL* = cint(10) + IPPROTO_TCP* = 6 POSIX_SPAWN_USEVFORK* = 0x00 + IPV6_V6ONLY* = 27 when defined(linux) or defined(macos) or defined(macosx) or defined(freebsd) or defined(openbsd) or defined(netbsd) or defined(dragonfly): diff --git a/chronos/oserrno.nim b/chronos/oserrno.nim index 4f1c7658c..2a9f82ce5 100644 --- a/chronos/oserrno.nim +++ b/chronos/oserrno.nim @@ -1328,6 +1328,7 @@ elif defined(windows): ERROR_CONNECTION_REFUSED* = OSErrorCode(1225) ERROR_CONNECTION_ABORTED* = OSErrorCode(1236) WSAEMFILE* = OSErrorCode(10024) + WSAEAFNOSUPPORT* = OSErrorCode(10047) WSAENETDOWN* = OSErrorCode(10050) WSAENETRESET* = OSErrorCode(10052) WSAECONNABORTED* = OSErrorCode(10053) diff --git a/chronos/osutils.nim b/chronos/osutils.nim index 86505c2b8..f9c09f257 100644 --- a/chronos/osutils.nim +++ b/chronos/osutils.nim @@ -346,6 +346,10 @@ else: return err(osLastError()) ok() + proc setDescriptorBlocking*(s: SocketHandle, + value: bool): Result[void, OSErrorCode] = + setDescriptorBlocking(cint(s), value) + proc setDescriptorInheritance*(s: cint, value: bool): Result[void, OSErrorCode] = let flags = handleEintr(osdefs.fcntl(s, osdefs.F_GETFD)) diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index 4b4be7de3..b7776e535 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -11,7 +11,7 @@ import std/[strutils] import 
stew/[base10, byteutils] -import ".."/[asyncloop, osdefs, oserrno] +import ".."/[asyncloop, osdefs, oserrno, handles] from std/net import Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, SockType, Protocol, Port, `$` @@ -31,6 +31,9 @@ type ReuseAddr, ReusePort, TcpNoDelay, NoAutoRead, GCUserData, FirstPipe, NoPipeFlash, Broadcast + DualStackType* {.pure.} = enum + Auto, Enabled, Disabled, Default + AddressFamily* {.pure.} = enum None, IPv4, IPv6, Unix @@ -76,6 +79,7 @@ when defined(windows) or defined(nimdoc): asock*: AsyncFD # Current AcceptEx() socket errorCode*: OSErrorCode # Current error code abuffer*: array[128, byte] # Windows AcceptEx() buffer + dualstack*: DualStackType # IPv4/IPv6 dualstack parameters when defined(windows): aovl*: CustomOverlapped # AcceptEx OVERLAPPED structure else: @@ -90,6 +94,7 @@ else: bufferSize*: int # Size of internal transports' buffer loopFuture*: Future[void] # Server's main Future errorCode*: OSErrorCode # Current error code + dualstack*: DualStackType # IPv4/IPv6 dualstack parameters type TransportError* = object of AsyncError @@ -720,3 +725,75 @@ proc raiseTransportError*(ecode: OSErrorCode) {. raise getTransportTooManyError(ecode) else: raise getTransportOsError(ecode) + +proc isAvailable*(family: AddressFamily): bool = + case family + of AddressFamily.None: + raiseAssert "Invalid address family" + of AddressFamily.IPv4: + isAvailable(Domain.AF_INET) + of AddressFamily.IPv6: + isAvailable(Domain.AF_INET6) + of AddressFamily.Unix: + isAvailable(Domain.AF_UNIX) + +proc getDomain*(socket: AsyncFD): Result[AddressFamily, OSErrorCode] = + ## Returns address family which is used to create socket ``socket``. + ## + ## Note: `chronos` supports only `AF_INET`, `AF_INET6` and `AF_UNIX` sockets. + ## For all other types of sockets this procedure returns + ## `EAFNOSUPPORT/WSAEAFNOSUPPORT` error. + when defined(windows): + let protocolInfo = ? getSockOpt2(socket, cint(osdefs.SOL_SOCKET), + cint(osdefs.SO_PROTOCOL_INFOW), + WSAPROTOCOL_INFO) + if protocolInfo.iAddressFamily == toInt(Domain.AF_INET): + ok(AddressFamily.IPv4) + elif protocolInfo.iAddressFamily == toInt(Domain.AF_INET6): + ok(AddressFamily.IPv6) + else: + err(WSAEAFNOSUPPORT) + else: + var + saddr = Sockaddr_storage() + slen = SockLen(sizeof(saddr)) + if getsockname(SocketHandle(socket), cast[ptr SockAddr](addr saddr), + addr slen) != 0: + return err(osLastError()) + if int(saddr.ss_family) == toInt(Domain.AF_INET): + ok(AddressFamily.IPv4) + elif int(saddr.ss_family) == toInt(Domain.AF_INET6): + ok(AddressFamily.IPv6) + elif int(saddr.ss_family) == toInt(Domain.AF_UNIX): + ok(AddressFamily.Unix) + else: + err(EAFNOSUPPORT) + +proc setDualstack*(socket: AsyncFD, family: AddressFamily, + flag: DualStackType): Result[void, OSErrorCode] = + if family == AddressFamily.IPv6: + case flag + of DualStackType.Auto: + # In case of `Auto` we going to ignore all the errors. + discard setDualstack(socket, true) + ok() + of DualStackType.Enabled: + ? setDualstack(socket, true) + ok() + of DualStackType.Disabled: + ? setDualstack(socket, false) + ok() + of DualStackType.Default: + ok() + else: + ok() + +proc setDualstack*(socket: AsyncFD, + flag: DualStackType): Result[void, OSErrorCode] = + let family = + case flag + of DualStackType.Auto: + getDomain(socket).get(AddressFamily.IPv6) + else: + ? 
getDomain(socket) + setDualstack(socket, family, flag) diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index af29c2acc..aec18ae32 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -11,7 +11,7 @@ import std/deques when not(defined(windows)): import ".."/selectors2 -import ".."/[asyncloop, osdefs, oserrno, handles] +import ".."/[asyncloop, osdefs, oserrno, osutils, handles] import "."/common type @@ -247,57 +247,65 @@ when defined(windows): udata: pointer, child: DatagramTransport, bufferSize: int, - ttl: int): DatagramTransport {. + ttl: int, + dualstack = DualStackType.Auto + ): DatagramTransport {. raises: [TransportOsError].} = - var localSock: AsyncFD - doAssert(remote.family == local.family) doAssert(not isNil(cbproc)) - doAssert(remote.family in {AddressFamily.IPv4, AddressFamily.IPv6}) - var res = if isNil(child): DatagramTransport() else: child - if sock == asyncInvalidSocket: - localSock = createAsyncSocket(local.getDomain(), SockType.SOCK_DGRAM, - Protocol.IPPROTO_UDP) - - if localSock == asyncInvalidSocket: - raiseTransportOsError(osLastError()) - else: - if not setSocketBlocking(SocketHandle(sock), false): - raiseTransportOsError(osLastError()) - localSock = sock - let bres = register2(localSock) - if bres.isErr(): - raiseTransportOsError(bres.error()) + let localSock = + if sock == asyncInvalidSocket: + let proto = + if local.family == AddressFamily.Unix: + Protocol.IPPROTO_IP + else: + Protocol.IPPROTO_UDP + let res = createAsyncSocket2(local.getDomain(), SockType.SOCK_DGRAM, + proto) + if res.isErr(): + raiseTransportOsError(res.error) + res.get() + else: + setDescriptorBlocking(SocketHandle(sock), false).isOkOr: + raiseTransportOsError(error) + register2(sock).isOkOr: + raiseTransportOsError(error) + sock ## Apply ServerFlags here if ServerFlags.ReuseAddr in flags: - if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_REUSEADDR, 1): - let err = osLastError() + setSockOpt2(localSock, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: if sock == asyncInvalidSocket: closeSocket(localSock) - raiseTransportOsError(err) + raiseTransportOsError(error) if ServerFlags.ReusePort in flags: - if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_REUSEPORT, 1): - let err = osLastError() + setSockOpt2(localSock, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: if sock == asyncInvalidSocket: closeSocket(localSock) - raiseTransportOsError(err) + raiseTransportOsError(error) if ServerFlags.Broadcast in flags: - if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_BROADCAST, 1): - let err = osLastError() + setSockOpt2(localSock, SOL_SOCKET, SO_BROADCAST, 1).isOkOr: if sock == asyncInvalidSocket: closeSocket(localSock) - raiseTransportOsError(err) + raiseTransportOsError(error) if ttl > 0: - if not setSockOpt(localSock, osdefs.IPPROTO_IP, osdefs.IP_TTL, ttl): - let err = osLastError() + setSockOpt2(localSock, osdefs.IPPROTO_IP, osdefs.IP_TTL, ttl).isOkOr: if sock == asyncInvalidSocket: closeSocket(localSock) - raiseTransportOsError(err) + raiseTransportOsError(error) + + ## IPV6_V6ONLY + if sock == asyncInvalidSocket: + setDualstack(localSock, local.family, dualstack).isOkOr: + closeSocket(localSock) + raiseTransportOsError(error) + else: + setDualstack(localSock, dualstack).isOkOr: + raiseTransportOsError(error) ## Fix for Q263823. var bytesRet: DWORD @@ -457,70 +465,75 @@ else: udata: pointer, child: DatagramTransport, bufferSize: int, - ttl: int): DatagramTransport {. 
+ ttl: int, + dualstack = DualStackType.Auto + ): DatagramTransport {. raises: [TransportOsError].} = - var localSock: AsyncFD - doAssert(remote.family == local.family) doAssert(not isNil(cbproc)) - var res = if isNil(child): DatagramTransport() else: child - if sock == asyncInvalidSocket: - let proto = - if local.family == AddressFamily.Unix: - Protocol.IPPROTO_IP - else: - Protocol.IPPROTO_UDP - localSock = createAsyncSocket(local.getDomain(), SockType.SOCK_DGRAM, - proto) - if localSock == asyncInvalidSocket: - raiseTransportOsError(osLastError()) - else: - if not setSocketBlocking(SocketHandle(sock), false): - raiseTransportOsError(osLastError()) - localSock = sock - let bres = register2(localSock) - if bres.isErr(): - raiseTransportOsError(bres.error()) + let localSock = + if sock == asyncInvalidSocket: + let proto = + if local.family == AddressFamily.Unix: + Protocol.IPPROTO_IP + else: + Protocol.IPPROTO_UDP + let res = createAsyncSocket2(local.getDomain(), SockType.SOCK_DGRAM, + proto) + if res.isErr(): + raiseTransportOsError(res.error) + res.get() + else: + setDescriptorBlocking(SocketHandle(sock), false).isOkOr: + raiseTransportOsError(error) + register2(sock).isOkOr: + raiseTransportOsError(error) + sock ## Apply ServerFlags here if ServerFlags.ReuseAddr in flags: - if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_REUSEADDR, 1): - let err = osLastError() + setSockOpt2(localSock, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: if sock == asyncInvalidSocket: closeSocket(localSock) - raiseTransportOsError(err) + raiseTransportOsError(error) if ServerFlags.ReusePort in flags: - if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_REUSEPORT, 1): - let err = osLastError() + setSockOpt2(localSock, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: if sock == asyncInvalidSocket: closeSocket(localSock) - raiseTransportOsError(err) + raiseTransportOsError(error) if ServerFlags.Broadcast in flags: - if not setSockOpt(localSock, osdefs.SOL_SOCKET, osdefs.SO_BROADCAST, 1): - let err = osLastError() + setSockOpt2(localSock, SOL_SOCKET, SO_BROADCAST, 1).isOkOr: if sock == asyncInvalidSocket: closeSocket(localSock) - raiseTransportOsError(err) + raiseTransportOsError(error) if ttl > 0: - let tres = - if local.family == AddressFamily.IPv4: - setSockOpt(localSock, osdefs.IPPROTO_IP, osdefs.IP_MULTICAST_TTL, - cint(ttl)) - elif local.family == AddressFamily.IPv6: - setSockOpt(localSock, osdefs.IPPROTO_IP, osdefs.IPV6_MULTICAST_HOPS, - cint(ttl)) - else: - raiseAssert "Unsupported address bound to local socket" + if local.family == AddressFamily.IPv4: + setSockOpt2(localSock, osdefs.IPPROTO_IP, osdefs.IP_MULTICAST_TTL, + cint(ttl)).isOkOr: + if sock == asyncInvalidSocket: + closeSocket(localSock) + raiseTransportOsError(error) + elif local.family == AddressFamily.IPv6: + setSockOpt2(localSock, osdefs.IPPROTO_IP, osdefs.IPV6_MULTICAST_HOPS, + cint(ttl)).isOkOr: + if sock == asyncInvalidSocket: + closeSocket(localSock) + raiseTransportOsError(error) + else: + raiseAssert "Unsupported address bound to local socket" - if not tres: - let err = osLastError() - if sock == asyncInvalidSocket: - closeSocket(localSock) - raiseTransportOsError(err) + ## IPV6_V6ONLY + if sock == asyncInvalidSocket: + setDualstack(localSock, local.family, dualstack).isOkOr: + closeSocket(localSock) + raiseTransportOsError(error) + else: + setDualstack(localSock, dualstack).isOkOr: + raiseTransportOsError(error) if local.family != AddressFamily.None: var saddr: Sockaddr_storage @@ -594,8 +607,9 @@ proc newDatagramTransport*(cbproc: 
DatagramCallback, udata: pointer = nil, child: DatagramTransport = nil, bufSize: int = DefaultDatagramBufferSize, - ttl: int = 0 - ): DatagramTransport {. + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. raises: [TransportOsError].} = ## Create new UDP datagram transport (IPv4). ## @@ -610,7 +624,7 @@ proc newDatagramTransport*(cbproc: DatagramCallback, ## ``ttl`` - TTL for UDP datagram packet (only usable when flags has ## ``Broadcast`` option). newDatagramTransportCommon(cbproc, remote, local, sock, flags, udata, child, - bufSize, ttl) + bufSize, ttl, dualstack) proc newDatagramTransport*[T](cbproc: DatagramCallback, udata: ref T, @@ -620,13 +634,15 @@ proc newDatagramTransport*[T](cbproc: DatagramCallback, flags: set[ServerFlags] = {}, child: DatagramTransport = nil, bufSize: int = DefaultDatagramBufferSize, - ttl: int = 0 - ): DatagramTransport {. + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. raises: [TransportOsError].} = var fflags = flags + {GCUserData} GC_ref(udata) newDatagramTransportCommon(cbproc, remote, local, sock, fflags, - cast[pointer](udata), child, bufSize, ttl) + cast[pointer](udata), child, bufSize, ttl, + dualstack) proc newDatagramTransport6*(cbproc: DatagramCallback, remote: TransportAddress = AnyAddress6, @@ -636,8 +652,9 @@ proc newDatagramTransport6*(cbproc: DatagramCallback, udata: pointer = nil, child: DatagramTransport = nil, bufSize: int = DefaultDatagramBufferSize, - ttl: int = 0 - ): DatagramTransport {. + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. raises: [TransportOsError].} = ## Create new UDP datagram transport (IPv6). ## @@ -652,7 +669,7 @@ proc newDatagramTransport6*(cbproc: DatagramCallback, ## ``ttl`` - TTL for UDP datagram packet (only usable when flags has ## ``Broadcast`` option). newDatagramTransportCommon(cbproc, remote, local, sock, flags, udata, child, - bufSize, ttl) + bufSize, ttl, dualstack) proc newDatagramTransport6*[T](cbproc: DatagramCallback, udata: ref T, @@ -662,13 +679,15 @@ proc newDatagramTransport6*[T](cbproc: DatagramCallback, flags: set[ServerFlags] = {}, child: DatagramTransport = nil, bufSize: int = DefaultDatagramBufferSize, - ttl: int = 0 - ): DatagramTransport {. + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. raises: [TransportOsError].} = var fflags = flags + {GCUserData} GC_ref(udata) newDatagramTransportCommon(cbproc, remote, local, sock, fflags, - cast[pointer](udata), child, bufSize, ttl) + cast[pointer](udata), child, bufSize, ttl, + dualstack) proc join*(transp: DatagramTransport): Future[void] = ## Wait until the transport ``transp`` will be closed. diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index f96650c7c..8982b998b 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -639,7 +639,8 @@ when defined(windows): child: StreamTransport = nil, localAddress = TransportAddress(), flags: set[SocketFlags] = {}, - ): Future[StreamTransport] = + dualstack = DualStackType.Auto + ): Future[StreamTransport] = ## Open new connection to remote peer with address ``address`` and create ## new transport object ``StreamTransport`` for established connection. ## ``bufferSize`` is size of internal buffer for transport. 
@@ -658,24 +659,33 @@ when defined(windows): toSAddr(raddress, saddr, slen) proto = Protocol.IPPROTO_TCP - sock = createAsyncSocket(raddress.getDomain(), SockType.SOCK_STREAM, - proto) - if sock == asyncInvalidSocket: - retFuture.fail(getTransportOsError(osLastError())) + sock = createAsyncSocket2(raddress.getDomain(), SockType.SOCK_STREAM, + proto).valueOr: + retFuture.fail(getTransportOsError(error)) return retFuture + if address.family in {AddressFamily.IPv4, AddressFamily.IPv6}: + if SocketFlags.TcpNoDelay in flags: + setSockOpt2(sock, osdefs.IPPROTO_TCP, osdefs.TCP_NODELAY, 1).isOkOr: + sock.closeSocket() + retFuture.fail(getTransportOsError(error)) + return retFuture + if SocketFlags.ReuseAddr in flags: - if not(setSockOpt(sock, SOL_SOCKET, SO_REUSEADDR, 1)): - let err = osLastError() + setSockOpt2(sock, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: sock.closeSocket() - retFuture.fail(getTransportOsError(err)) + retFuture.fail(getTransportOsError(error)) return retFuture if SocketFlags.ReusePort in flags: - if not(setSockOpt(sock, SOL_SOCKET, SO_REUSEPORT, 1)): - let err = osLastError() + setSockOpt2(sock, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: sock.closeSocket() - retFuture.fail(getTransportOsError(err)) + retFuture.fail(getTransportOsError(error)) return retFuture + # IPV6_V6ONLY. + setDualstack(sock, address.family, dualstack).isOkOr: + sock.closeSocket() + retFuture.fail(getTransportOsError(error)) + return retFuture if localAddress != TransportAddress(): if localAddress.family != address.family: @@ -966,14 +976,9 @@ when defined(windows): if server.status notin {ServerStatus.Stopped, ServerStatus.Closed}: server.apending = true # TODO No way to report back errors! - server.asock = - block: - let sock = createAsyncSocket(server.domain, SockType.SOCK_STREAM, - Protocol.IPPROTO_TCP) - if sock == asyncInvalidSocket: - raiseOsDefect(osLastError(), - "acceptLoop(): Unablet to create new socket") - sock + server.asock = createAsyncSocket2(server.domain, SockType.SOCK_STREAM, + Protocol.IPPROTO_TCP).valueOr: + raiseOsDefect(error, "acceptLoop(): Unablet to create new socket") var dwBytesReceived = DWORD(0) let dwReceiveDataLength = DWORD(0) @@ -1167,15 +1172,13 @@ when defined(windows): if server.local.family in {AddressFamily.IPv4, AddressFamily.IPv6}: # TCP Sockets part var loop = getThreadDispatcher() - server.asock = createAsyncSocket(server.domain, SockType.SOCK_STREAM, - Protocol.IPPROTO_TCP) - if server.asock == asyncInvalidSocket: - let err = osLastError() - case err + server.asock = createAsyncSocket2(server.domain, SockType.SOCK_STREAM, + Protocol.IPPROTO_TCP).valueOr: + case error of ERROR_TOO_MANY_OPEN_FILES, WSAENOBUFS, WSAEMFILE: - retFuture.fail(getTransportTooManyError(err)) + retFuture.fail(getTransportTooManyError(error)) else: - retFuture.fail(getTransportOsError(err)) + retFuture.fail(getTransportOsError(error)) return retFuture var dwBytesReceived = DWORD(0) @@ -1468,7 +1471,8 @@ else: child: StreamTransport = nil, localAddress = TransportAddress(), flags: set[SocketFlags] = {}, - ): Future[StreamTransport] = + dualstack = DualStackType.Auto, + ): Future[StreamTransport] = ## Open new connection to remote peer with address ``address`` and create ## new transport object ``StreamTransport`` for established connection. ## ``bufferSize`` - size of internal buffer for transport. 
@@ -1483,36 +1487,37 @@ else: else: Protocol.IPPROTO_TCP - let sock = createAsyncSocket(address.getDomain(), SockType.SOCK_STREAM, - proto) - if sock == asyncInvalidSocket: - let err = osLastError() - case err + let sock = createAsyncSocket2(address.getDomain(), SockType.SOCK_STREAM, + proto).valueOr: + case error of oserrno.EMFILE: retFuture.fail(getTransportTooManyError()) else: - retFuture.fail(getTransportOsError(err)) + retFuture.fail(getTransportOsError(error)) return retFuture if address.family in {AddressFamily.IPv4, AddressFamily.IPv6}: if SocketFlags.TcpNoDelay in flags: - if not(setSockOpt(sock, osdefs.IPPROTO_TCP, osdefs.TCP_NODELAY, 1)): - let err = osLastError() + setSockOpt2(sock, osdefs.IPPROTO_TCP, osdefs.TCP_NODELAY, 1).isOkOr: sock.closeSocket() - retFuture.fail(getTransportOsError(err)) + retFuture.fail(getTransportOsError(error)) return retFuture + if SocketFlags.ReuseAddr in flags: - if not(setSockOpt(sock, SOL_SOCKET, SO_REUSEADDR, 1)): - let err = osLastError() + setSockOpt2(sock, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: sock.closeSocket() - retFuture.fail(getTransportOsError(err)) + retFuture.fail(getTransportOsError(error)) return retFuture if SocketFlags.ReusePort in flags: - if not(setSockOpt(sock, SOL_SOCKET, SO_REUSEPORT, 1)): - let err = osLastError() + setSockOpt2(sock, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: sock.closeSocket() - retFuture.fail(getTransportOsError(err)) + retFuture.fail(getTransportOsError(error)) return retFuture + # IPV6_V6ONLY. + setDualstack(sock, address.family, dualstack).isOkOr: + sock.closeSocket() + retFuture.fail(getTransportOsError(error)) + return retFuture if localAddress != TransportAddress(): if localAddress.family != address.family: @@ -1532,17 +1537,14 @@ else: proc continuation(udata: pointer) = if not(retFuture.finished()): - var err = 0 - - let res = removeWriter2(sock) - if res.isErr(): + removeWriter2(sock).isOkOr: discard unregisterAndCloseFd(sock) - retFuture.fail(getTransportOsError(res.error())) + retFuture.fail(getTransportOsError(error)) return - if not(sock.getSocketError(err)): + let err = sock.getSocketError2().valueOr: discard unregisterAndCloseFd(sock) - retFuture.fail(getTransportOsError(res.error())) + retFuture.fail(getTransportOsError(error)) return if err != 0: @@ -1578,10 +1580,9 @@ else: # http://www.madore.org/~david/computers/connect-intr.html case errorCode of oserrno.EINPROGRESS, oserrno.EINTR: - let res = addWriter2(sock, continuation) - if res.isErr(): + addWriter2(sock, continuation).isOkOr: discard unregisterAndCloseFd(sock) - retFuture.fail(getTransportOsError(res.error())) + retFuture.fail(getTransportOsError(error)) return retFuture retFuture.cancelCallback = cancel break @@ -1782,11 +1783,13 @@ proc connect*(address: TransportAddress, bufferSize = DefaultStreamBufferSize, child: StreamTransport = nil, flags: set[TransportFlags], - localAddress = TransportAddress()): Future[StreamTransport] = + localAddress = TransportAddress(), + dualstack = DualStackType.Auto + ): Future[StreamTransport] = # Retro compatibility with TransportFlags var mappedFlags: set[SocketFlags] if TcpNoDelay in flags: mappedFlags.incl(SocketFlags.TcpNoDelay) - address.connect(bufferSize, child, localAddress, mappedFlags) + connect(address, bufferSize, child, localAddress, mappedFlags, dualstack) proc close*(server: StreamServer) = ## Release ``server`` resources. 
@@ -1848,7 +1851,8 @@ proc createStreamServer*(host: TransportAddress, bufferSize: int = DefaultStreamBufferSize, child: StreamServer = nil, init: TransportInitCallback = nil, - udata: pointer = nil): StreamServer {. + udata: pointer = nil, + dualstack = DualStackType.Auto): StreamServer {. raises: [TransportOsError].} = ## Create new TCP stream server. ## @@ -1874,42 +1878,48 @@ proc createStreamServer*(host: TransportAddress, elif defined(windows): # Windows if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}: - if sock == asyncInvalidSocket: - serverSocket = createAsyncSocket(host.getDomain(), - SockType.SOCK_STREAM, - Protocol.IPPROTO_TCP) - - if serverSocket == asyncInvalidSocket: - raiseTransportOsError(osLastError()) - else: - let bres = setDescriptorBlocking(SocketHandle(sock), false) - if bres.isErr(): - raiseTransportOsError(bres.error()) - let wres = register2(sock) - if wres.isErr(): - raiseTransportOsError(wres.error()) - serverSocket = sock - # SO_REUSEADDR is not useful for Unix domain sockets. + serverSocket = + if sock == asyncInvalidSocket: + # TODO (cheatfate): `valueOr` generates weird compile error. + let res = createAsyncSocket2(host.getDomain(), SockType.SOCK_STREAM, + Protocol.IPPROTO_TCP) + if res.isErr(): + raiseTransportOsError(res.error()) + res.get() + else: + setDescriptorBlocking(SocketHandle(sock), false).isOkOr: + raiseTransportOsError(error) + register2(sock).isOkOr: + raiseTransportOsError(error) + sock + # SO_REUSEADDR if ServerFlags.ReuseAddr in flags: - if not(setSockOpt(serverSocket, SOL_SOCKET, SO_REUSEADDR, 1)): - let err = osLastError() + setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: if sock == asyncInvalidSocket: discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(err) + raiseTransportOsError(error) + # SO_REUSEPORT if ServerFlags.ReusePort in flags: - if not(setSockOpt(serverSocket, SOL_SOCKET, SO_REUSEPORT, 1)): - let err = osLastError() + setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: if sock == asyncInvalidSocket: discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(err) - # TCP flags are not useful for Unix domain sockets. + raiseTransportOsError(error) + # TCP_NODELAY if ServerFlags.TcpNoDelay in flags: - if not(setSockOpt(serverSocket, osdefs.IPPROTO_TCP, - osdefs.TCP_NODELAY, 1)): - let err = osLastError() + setSockOpt2(serverSocket, osdefs.IPPROTO_TCP, + osdefs.TCP_NODELAY, 1).isOkOr: if sock == asyncInvalidSocket: discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(err) + raiseTransportOsError(error) + # IPV6_V6ONLY. + if sock == asyncInvalidSocket: + setDualstack(serverSocket, host.family, dualstack).isOkOr: + discard closeFd(SocketHandle(serverSocket)) + raiseTransportOsError(error) + else: + setDualstack(serverSocket, dualstack).isOkOr: + raiseTransportOsError(error) + host.toSAddr(saddr, slen) if bindSocket(SocketHandle(serverSocket), cast[ptr SockAddr](addr saddr), slen) != 0: @@ -1936,47 +1946,54 @@ proc createStreamServer*(host: TransportAddress, serverSocket = AsyncFD(0) else: # Posix - if sock == asyncInvalidSocket: - let proto = if host.family == AddressFamily.Unix: - Protocol.IPPROTO_IP + serverSocket = + if sock == asyncInvalidSocket: + let proto = if host.family == AddressFamily.Unix: + Protocol.IPPROTO_IP + else: + Protocol.IPPROTO_TCP + # TODO (cheatfate): `valueOr` generates weird compile error. 
+ let res = createAsyncSocket2(host.getDomain(), SockType.SOCK_STREAM, + proto) + if res.isErr(): + raiseTransportOsError(res.error()) + res.get() else: - Protocol.IPPROTO_TCP - serverSocket = createAsyncSocket(host.getDomain(), - SockType.SOCK_STREAM, - proto) - if serverSocket == asyncInvalidSocket: - raiseTransportOsError(osLastError()) - else: - let bres = setDescriptorFlags(cint(sock), true, true) - if bres.isErr(): - raiseTransportOsError(osLastError()) - let rres = register2(sock) - if rres.isErr(): - raiseTransportOsError(osLastError()) - serverSocket = sock + setDescriptorFlags(cint(sock), true, true).isOkOr: + raiseTransportOsError(error) + register2(sock).isOkOr: + raiseTransportOsError(error) + sock if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}: - # SO_REUSEADDR and SO_REUSEPORT are not useful for Unix domain sockets. + # SO_REUSEADDR if ServerFlags.ReuseAddr in flags: - if not(setSockOpt(serverSocket, SOL_SOCKET, SO_REUSEADDR, 1)): - let err = osLastError() + setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: if sock == asyncInvalidSocket: discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(err) + raiseTransportOsError(error) + # SO_REUSEPORT if ServerFlags.ReusePort in flags: - if not(setSockOpt(serverSocket, SOL_SOCKET, SO_REUSEPORT, 1)): - let err = osLastError() + setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: if sock == asyncInvalidSocket: discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(err) - # TCP flags are not useful for Unix domain sockets. + raiseTransportOsError(error) + # TCP_NODELAY if ServerFlags.TcpNoDelay in flags: - if not(setSockOpt(serverSocket, osdefs.IPPROTO_TCP, - osdefs.TCP_NODELAY, 1)): - let err = osLastError() + setSockOpt2(serverSocket, osdefs.IPPROTO_TCP, + osdefs.TCP_NODELAY, 1).isOkOr: if sock == asyncInvalidSocket: discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(err) + raiseTransportOsError(error) + # IPV6_V6ONLY + if sock == asyncInvalidSocket: + setDualstack(serverSocket, host.family, dualstack).isOkOr: + discard closeFd(SocketHandle(serverSocket)) + raiseTransportOsError(error) + else: + setDualstack(serverSocket, dualstack).isOkOr: + raiseTransportOsError(error) + elif host.family in {AddressFamily.Unix}: # We do not care about result here, because if file cannot be removed, # `bindSocket` will return EADDRINUSE. @@ -2016,6 +2033,7 @@ proc createStreamServer*(host: TransportAddress, sres.status = Starting sres.loopFuture = newFuture[void]("stream.transport.server") sres.udata = udata + sres.dualstack = dualstack if localAddress.family == AddressFamily.None: sres.local = host else: @@ -2029,8 +2047,7 @@ proc createStreamServer*(host: TransportAddress, cb = acceptPipeLoop if not(isNil(cbproc)): - sres.aovl.data = CompletionData(cb: cb, - udata: cast[pointer](sres)) + sres.aovl.data = CompletionData(cb: cb, udata: cast[pointer](sres)) else: if host.family == AddressFamily.Unix: sres.sock = @@ -2055,10 +2072,11 @@ proc createStreamServer*(host: TransportAddress, bufferSize: int = DefaultStreamBufferSize, child: StreamServer = nil, init: TransportInitCallback = nil, - udata: pointer = nil): StreamServer {. + udata: pointer = nil, + dualstack = DualStackType.Auto): StreamServer {. 
raises: [CatchableError].} = createStreamServer(host, nil, flags, sock, backlog, bufferSize, - child, init, cast[pointer](udata)) + child, init, cast[pointer](udata), dualstack) proc createStreamServer*[T](host: TransportAddress, cbproc: StreamCallback, @@ -2068,12 +2086,13 @@ proc createStreamServer*[T](host: TransportAddress, backlog: int = DefaultBacklogSize, bufferSize: int = DefaultStreamBufferSize, child: StreamServer = nil, - init: TransportInitCallback = nil): StreamServer {. + init: TransportInitCallback = nil, + dualstack = DualStackType.Auto): StreamServer {. raises: [CatchableError].} = var fflags = flags + {GCUserData} GC_ref(udata) createStreamServer(host, cbproc, fflags, sock, backlog, bufferSize, - child, init, cast[pointer](udata)) + child, init, cast[pointer](udata), dualstack) proc createStreamServer*[T](host: TransportAddress, flags: set[ServerFlags] = {}, @@ -2082,12 +2101,13 @@ proc createStreamServer*[T](host: TransportAddress, backlog: int = DefaultBacklogSize, bufferSize: int = DefaultStreamBufferSize, child: StreamServer = nil, - init: TransportInitCallback = nil): StreamServer {. + init: TransportInitCallback = nil, + dualstack = DualStackType.Auto): StreamServer {. raises: [CatchableError].} = var fflags = flags + {GCUserData} GC_ref(udata) createStreamServer(host, nil, fflags, sock, backlog, bufferSize, - child, init, cast[pointer](udata)) + child, init, cast[pointer](udata), dualstack) proc getUserData*[T](server: StreamServer): T {.inline.} = ## Obtain user data stored in ``server`` object. diff --git a/tests/testdatagram.nim b/tests/testdatagram.nim index 7db04f93e..ae7ab23af 100644 --- a/tests/testdatagram.nim +++ b/tests/testdatagram.nim @@ -533,6 +533,54 @@ suite "Datagram Transport test suite": result = res + proc performDualstackTest( + sstack: DualStackType, saddr: TransportAddress, + cstack: DualStackType, caddr: TransportAddress + ): Future[bool] {.async.} = + var + expectStr = "ANYADDRESS MESSAGE" + event = newAsyncEvent() + res = 0 + + proc process1(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {.async.} = + var bmsg = transp.getMessage() + var smsg = cast[string](bmsg) + if smsg == expectStr: + inc(res) + event.fire() + + proc process2(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {.async.} = + discard + + let + sdgram = newDatagramTransport(process1, local = saddr, + dualstack = sstack) + localcaddr = + if caddr.family == AddressFamily.IPv4: + AnyAddress + else: + AnyAddress6 + + cdgram = newDatagramTransport(process2, local = localcaddr, + dualstack = cstack) + + var address = caddr + address.port = sdgram.localAddress().port + + try: + await cdgram.sendTo(address, addr expectStr[0], len(expectStr)) + except CatchableError: + discard + try: + await event.wait().wait(500.milliseconds) + except CatchableError: + discard + + await allFutures(sdgram.closeWait(), cdgram.closeWait()) + res == 1 + test "close(transport) test": check waitFor(testTransportClose()) == true test m1: @@ -557,5 +605,83 @@ suite "Datagram Transport test suite": check waitFor(testBroadcast()) == 1 test "0.0.0.0/::0 (INADDR_ANY) test": check waitFor(testAnyAddress()) == 6 + asyncTest "[IP] getDomain(socket) [SOCK_DGRAM] test": + if isAvailable(AddressFamily.IPv4) and isAvailable(AddressFamily.IPv6): + block: + let res = createAsyncSocket2(Domain.AF_INET, SockType.SOCK_DGRAM, + Protocol.IPPROTO_UDP) + check res.isOk() + let fres = getDomain(res.get()) + check fres.isOk() + discard unregisterAndCloseFd(res.get()) + check fres.get() == 
AddressFamily.IPv4 + + block: + let res = createAsyncSocket2(Domain.AF_INET6, SockType.SOCK_DGRAM, + Protocol.IPPROTO_UDP) + check res.isOk() + let fres = getDomain(res.get()) + check fres.isOk() + discard unregisterAndCloseFd(res.get()) + check fres.get() == AddressFamily.IPv6 + + when not(defined(windows)): + block: + let res = createAsyncSocket2(Domain.AF_UNIX, SockType.SOCK_DGRAM, + Protocol.IPPROTO_IP) + check res.isOk() + let fres = getDomain(res.get()) + check fres.isOk() + discard unregisterAndCloseFd(res.get()) + check fres.get() == AddressFamily.Unix + else: + skip() + asyncTest "[IP] DualStack [UDP] server [DualStackType.Auto] test": + if isAvailable(AddressFamily.IPv4) and isAvailable(AddressFamily.IPv6): + let serverAddress = initTAddress("[::]:0") + check: + (await performDualstackTest( + DualStackType.Auto, serverAddress, + DualStackType.Auto, initTAddress("127.0.0.1:0"))) == true + check: + (await performDualstackTest( + DualStackType.Auto, serverAddress, + DualStackType.Auto, initTAddress("127.0.0.1:0").toIPv6())) == true + check: + (await performDualstackTest( + DualStackType.Auto, serverAddress, + DualStackType.Auto, initTAddress("[::1]:0"))) == true + else: + skip() + asyncTest "[IP] DualStack [UDP] server [DualStackType.Enabled] test": + if isAvailable(AddressFamily.IPv4) and isAvailable(AddressFamily.IPv6): + let serverAddress = initTAddress("[::]:0") + check: + (await performDualstackTest( + DualStackType.Enabled, serverAddress, + DualStackType.Auto, initTAddress("127.0.0.1:0"))) == true + (await performDualstackTest( + DualStackType.Enabled, serverAddress, + DualStackType.Auto, initTAddress("127.0.0.1:0").toIPv6())) == true + (await performDualstackTest( + DualStackType.Enabled, serverAddress, + DualStackType.Auto, initTAddress("[::1]:0"))) == true + else: + skip() + asyncTest "[IP] DualStack [UDP] server [DualStackType.Disabled] test": + if isAvailable(AddressFamily.IPv4) and isAvailable(AddressFamily.IPv6): + let serverAddress = initTAddress("[::]:0") + check: + (await performDualstackTest( + DualStackType.Disabled, serverAddress, + DualStackType.Auto, initTAddress("127.0.0.1:0"))) == false + (await performDualstackTest( + DualStackType.Disabled, serverAddress, + DualStackType.Auto, initTAddress("127.0.0.1:0").toIPv6())) == false + (await performDualstackTest( + DualStackType.Disabled, serverAddress, + DualStackType.Auto, initTAddress("[::1]:0"))) == true + else: + skip() test "Transports leak test": checkLeaks() diff --git a/tests/teststream.nim b/tests/teststream.nim index 762e99629..b0427928c 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -1372,6 +1372,42 @@ suite "Stream Transport test suite": if not(sleepFut.finished()): await cancelAndWait(sleepFut) + proc performDualstackTest( + sstack: DualStackType, saddr: TransportAddress, + cstack: DualStackType, caddr: TransportAddress + ): Future[bool] {.async.} = + let server = createStreamServer(saddr, dualstack = sstack) + var address = caddr + address.port = server.localAddress().port + var acceptFut = server.accept() + let + clientTransp = + try: + let res = await connect(address, + dualstack = cstack).wait(500.milliseconds) + Opt.some(res) + except CatchableError: + Opt.none(StreamTransport) + serverTransp = + if clientTransp.isSome(): + let res = await acceptFut + Opt.some(res) + else: + Opt.none(StreamTransport) + + let testResult = clientTransp.isSome() and serverTransp.isSome() + var pending: seq[FutureBase] + if clientTransp.isSome(): + pending.add(closeWait(clientTransp.get())) + if 
serverTransp.isSome(): + pending.add(closeWait(serverTransp.get())) + else: + pending.add(cancelAndWait(acceptFut)) + await allFutures(pending) + server.stop() + await server.closeWait() + testResult + markFD = getCurrentFD() for i in 0.. Date: Tue, 31 Oct 2023 03:43:58 +0200 Subject: [PATCH 075/146] Consider ERROR_NETNAME_DELETED as ConnectionAbortedError. (#460) --- chronos/transports/stream.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 8982b998b..7471a4468 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -1096,7 +1096,7 @@ when defined(windows): retFuture.fail(getServerUseClosedError()) server.clean() of WSAENETDOWN, WSAENETRESET, WSAECONNABORTED, WSAECONNRESET, - WSAETIMEDOUT: + WSAETIMEDOUT, ERROR_NETNAME_DELETED: server.asock.closeSocket() retFuture.fail(getConnectionAbortedError(ovl.data.errCode)) server.clean() From cd6369c0488e1bc1dd6b6ce2fbc3b372a8adb74f Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 7 Nov 2023 11:12:59 +0100 Subject: [PATCH 076/146] `asyncraises` -> `async: (raises: ..., raw: ...)` (#455) Per discussion in https://github.com/status-im/nim-chronos/pull/251#issuecomment-1559233139, `async: (parameters..)` is introduced as a way to customize the async transformation instead of relying on separate keywords (like asyncraises). Two parameters are available as of now: `raises`: controls the exception effect tracking `raw`: disables body transformation Parameters are added to `async` as a tuple allowing more params to be added easily in the future: ```nim: proc f() {.async: (name: value, ...).}` ``` --- README.md | 104 +++++++++------ chronos/asyncloop.nim | 2 +- chronos/internal/asyncengine.nim | 2 +- chronos/internal/asyncfutures.nim | 43 +++---- chronos/internal/asyncmacro.nim | 199 +++++++++++++++-------------- chronos/internal/raisesfutures.nim | 28 ++-- tests/testmacro.nim | 64 +++++----- 7 files changed, 238 insertions(+), 204 deletions(-) diff --git a/README.md b/README.md index 495f9f8b3..f59a6c83a 100644 --- a/README.md +++ b/README.md @@ -46,18 +46,18 @@ Submit a PR to add yours! ### Concepts -Chronos implements the async/await paradigm in a self-contained library, using -macros, with no specific helpers from the compiler. +Chronos implements the async/await paradigm in a self-contained library using +the macro and closure iterator transformation features provided by Nim. -Our event loop is called a "dispatcher" and a single instance per thread is +The event loop is called a "dispatcher" and a single instance per thread is created, as soon as one is needed. To trigger a dispatcher's processing step, we need to call `poll()` - either -directly or through a wrapper like `runForever()` or `waitFor()`. This step +directly or through a wrapper like `runForever()` or `waitFor()`. Each step handles any file descriptors, timers and callbacks that are ready to be processed. -`Future` objects encapsulate the result of an async procedure, upon successful +`Future` objects encapsulate the result of an `async` procedure upon successful completion, and a list of callbacks to be scheduled after any type of completion - be that success, failure or cancellation. @@ -156,7 +156,7 @@ Exceptions inheriting from `CatchableError` are caught by hidden `try` blocks and placed in the `Future.error` field, changing the future's status to `Failed`. 
-When a future is awaited, that exception is re-raised, only to be caught again +When a future is awaited, that exception is re-raised only to be caught again by a hidden `try` block in the calling async procedure. That's how these exceptions move up the async chain. @@ -214,57 +214,81 @@ by the transformation. #### Checked exceptions -By specifying a `asyncraises` list to an async procedure, you can check which -exceptions can be thrown by it. +By specifying a `raises` list to an async procedure, you can check which +exceptions can be raised by it: + ```nim -proc p1(): Future[void] {.async, asyncraises: [IOError].} = +proc p1(): Future[void] {.async: (raises: [IOError]).} = assert not (compiles do: raise newException(ValueError, "uh-uh")) raise newException(IOError, "works") # Or any child of IOError + +proc p2(): Future[void] {.async, (raises: [IOError]).} = + await p1() # Works, because await knows that p1 + # can only raise IOError ``` -Under the hood, the return type of `p1` will be rewritten to an internal type, +Under the hood, the return type of `p1` will be rewritten to an internal type which will convey raises informations to `await`. +### Raw functions + +Raw functions are those that interact with `chronos` via the `Future` type but +whose body does not go through the async transformation. + +Such functions are created by adding `raw: true` to the `async` parameters: + ```nim -proc p2(): Future[void] {.async, asyncraises: [IOError].} = - await p1() # Works, because await knows that p1 - # can only raise IOError +proc rawAsync(): Future[void] {.async: (raw: true).} = + let future = newFuture[void]("rawAsync") + future.complete() + return future +``` + +Raw functions must not raise exceptions directly - they are implicitly declared +as `raises: []` - instead they should store exceptions in the returned `Future`: + +```nim +proc rawFailure(): Future[void] {.async: (raw: true).} = + let future = newFuture[void]("rawAsync") + future.fail((ref ValueError)(msg: "Oh no!")) + return future +``` + +Raw functions can also use checked exceptions: + +```nim +proc rawAsyncRaises(): Future[void] {.async: (raw: true, raises: [IOError]).} = + let fut = newFuture[void]() + assert not (compiles do: fut.fail((ref ValueError)(msg: "uh-uh"))) + fut.fail((ref IOError)(msg: "IO")) + return fut ``` -Raw functions and callbacks that don't go through the `async` transformation but -still return a `Future` and interact with the rest of the framework also need to -be annotated with `asyncraises` to participate in the checked exception scheme: +### Callbacks and closures + +Callback/closure types are declared using the `async` annotation as usual: ```nim -proc p3(): Future[void] {.async, asyncraises: [IOError].} = - let fut: Future[void] = p1() # works - assert not compiles(await fut) # await lost informations about raises, - # so it can raise anything - # Callbacks - assert not(compiles do: let cb1: proc(): Future[void] = p1) # doesn't work - let cb2: proc(): Future[void] {.async, asyncraises: [IOError].} = p1 # works - assert not(compiles do: - type c = proc(): Future[void] {.async, asyncraises: [IOError, ValueError].} - let cb3: c = p1 # doesn't work, the raises must match _exactly_ - ) +type MyCallback = proc(): Future[void] {.async.} + +proc runCallback(cb: MyCallback) {.async: (raises: []).} = + try: + await cb() + except CatchableError: + discard # handle errors as usual ``` -When `chronos` performs the `async` transformation, all code is placed in a -a special `try/except` clause that re-routes 
exception handling to the `Future`. +When calling a callback, it is important to remember that the given function +may raise and exceptions need to be handled. -Beacuse of this re-routing, functions that return a `Future` instance manually -never directly raise exceptions themselves - instead, exceptions are handled -indirectly via `await` or `Future.read`. When writing raw async functions, they -too must not raise exceptions - instead, they must store exceptions in the -future they return: +Checked exceptions can be used to limit the exceptions that a callback can +raise: ```nim -proc p4(): Future[void] {.asyncraises: [ValueError].} = - let fut = newFuture[void] +type MyEasyCallback = proc: Future[void] {.async: (raises: []).} - # Equivalent of `raise (ref ValueError)()` in raw async functions: - fut.fail((ref ValueError)(msg: "raising in raw async function")) - fut +proc runCallback(cb: MyEasyCallback) {.async: (raises: [])} = + await cb() ``` ### Platform independence @@ -278,7 +302,7 @@ annotated as raising `CatchableError` only raise on _some_ platforms - in order to work on all platforms, calling code must assume that they will raise even when they don't seem to do so on one platform. -### Exception effects +### Strict exception mode `chronos` currently offers minimal support for exception effects and `raises` annotations. In general, during the `async` transformation, a generic diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index b4d48af4e..428252c71 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -131,4 +131,4 @@ import ./internal/[asyncengine, asyncfutures, asyncmacro, errors] export asyncfutures, asyncengine, errors -export asyncmacro.async, asyncmacro.await, asyncmacro.awaitne, asyncraises +export asyncmacro.async, asyncmacro.await, asyncmacro.awaitne diff --git a/chronos/internal/asyncengine.nim b/chronos/internal/asyncengine.nim index ebcc27850..23d7c6add 100644 --- a/chronos/internal/asyncengine.nim +++ b/chronos/internal/asyncengine.nim @@ -21,7 +21,7 @@ export Port export deques, errors, futures, timer, results export - asyncmacro.async, asyncmacro.await, asyncmacro.awaitne, asyncmacro.asyncraises + asyncmacro.async, asyncmacro.await, asyncmacro.awaitne const MaxEventsCount* = 64 diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index abf28c716..b144cea78 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -102,18 +102,12 @@ template newFuture*[T](fromProc: static[string] = "", else: newFutureImpl[T](getSrcLocation(fromProc), flags) -macro getFutureExceptions(T: typedesc): untyped = - if getTypeInst(T)[1].len > 2: - getTypeInst(T)[1][2] - else: - ident"void" - -template newInternalRaisesFuture*[T](fromProc: static[string] = ""): auto = +template newInternalRaisesFuture*[T, E](fromProc: static[string] = ""): auto = ## Creates a new future. ## ## Specifying ``fromProc``, which is a string specifying the name of the proc ## that this future belongs to, is a good habit as it helps with debugging. 
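For context, the `async` macro instantiates this template with the raises tuple spelled out explicitly; for a declaration such as `proc p(): Future[int] {.async: (raises: [IOError]).}` the generated call looks roughly like the sketch below (macro output, not something to write by hand):

```nim
# approximate shape of the code emitted by the async transformation
let resultFuture = newInternalRaisesFuture[int, (IOError,)]("p")
```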
- newInternalRaisesFutureImpl[T, getFutureExceptions(typeof(result))](getSrcLocation(fromProc)) + newInternalRaisesFutureImpl[T, E](getSrcLocation(fromProc)) template newFutureSeq*[A, B](fromProc: static[string] = ""): FutureSeq[A, B] = ## Create a new future which can hold/preserve GC sequence until future will @@ -476,7 +470,7 @@ macro internalCheckComplete*(f: InternalRaisesFuture): untyped = let e = getTypeInst(f)[2] let types = getType(e) - if types.eqIdent("void"): + if isNoRaises(types): return quote do: if not(isNil(`f`.internalError)): raiseAssert("Unhandled future exception: " & `f`.error.msg) @@ -484,7 +478,6 @@ macro internalCheckComplete*(f: InternalRaisesFuture): untyped = expectKind(types, nnkBracketExpr) expectKind(types[0], nnkSym) assert types[0].strVal == "tuple" - assert types.len > 1 let ifRaise = nnkIfExpr.newTree( nnkElifExpr.newTree( @@ -914,7 +907,7 @@ template cancel*(future: FutureBase) {. cancelSoon(future, nil, nil, getSrcLocation()) proc cancelAndWait*(future: FutureBase, loc: ptr SrcLoc): Future[void] {. - asyncraises: [CancelledError].} = + async: (raw: true, raises: [CancelledError]).} = ## Perform cancellation ``future`` return Future which will be completed when ## ``future`` become finished (completed with value, failed or cancelled). ## @@ -938,7 +931,7 @@ template cancelAndWait*(future: FutureBase): Future[void] = ## Cancel ``future``. cancelAndWait(future, getSrcLocation()) -proc noCancel*[F: SomeFuture](future: F): auto = # asyncraises: asyncraiseOf(future) - CancelledError +proc noCancel*[F: SomeFuture](future: F): auto = # async: (raw: true, raises: asyncraiseOf(future) - CancelledError ## Prevent cancellation requests from propagating to ``future`` while ## forwarding its value or error when it finishes. ## @@ -978,7 +971,7 @@ proc noCancel*[F: SomeFuture](future: F): auto = # asyncraises: asyncraiseOf(fut retFuture proc allFutures*(futs: varargs[FutureBase]): Future[void] {. - asyncraises: [CancelledError].} = + async: (raw: true, raises: [CancelledError]).} = ## Returns a future which will complete only when all futures in ``futs`` ## will be completed, failed or canceled. ## @@ -1017,7 +1010,7 @@ proc allFutures*(futs: varargs[FutureBase]): Future[void] {. retFuture proc allFutures*[T](futs: varargs[Future[T]]): Future[void] {. - asyncraises: [CancelledError].} = + async: (raw: true, raises: [CancelledError]).} = ## Returns a future which will complete only when all futures in ``futs`` ## will be completed, failed or canceled. ## @@ -1031,7 +1024,7 @@ proc allFutures*[T](futs: varargs[Future[T]]): Future[void] {. allFutures(nfuts) proc allFinished*[F: SomeFuture](futs: varargs[F]): Future[seq[F]] {. - asyncraises: [CancelledError].} = + async: (raw: true, raises: [CancelledError]).} = ## Returns a future which will complete only when all futures in ``futs`` ## will be completed, failed or canceled. ## @@ -1072,7 +1065,7 @@ proc allFinished*[F: SomeFuture](futs: varargs[F]): Future[seq[F]] {. return retFuture proc one*[F: SomeFuture](futs: varargs[F]): Future[F] {. - asyncraises: [ValueError, CancelledError].} = + async: (raw: true, raises: [ValueError, CancelledError]).} = ## Returns a future which will complete and return completed Future[T] inside, ## when one of the futures in ``futs`` will be completed, failed or canceled. ## @@ -1121,7 +1114,7 @@ proc one*[F: SomeFuture](futs: varargs[F]): Future[F] {. return retFuture proc race*(futs: varargs[FutureBase]): Future[FutureBase] {. 
- asyncraises: [CancelledError].} = + async: (raw: true, raises: [CancelledError]).} = ## Returns a future which will complete and return completed FutureBase, ## when one of the futures in ``futs`` will be completed, failed or canceled. ## @@ -1173,7 +1166,8 @@ proc race*(futs: varargs[FutureBase]): Future[FutureBase] {. when (chronosEventEngine in ["epoll", "kqueue"]) or defined(windows): import std/os - proc waitSignal*(signal: int): Future[void] {.asyncraises: [AsyncError, CancelledError].} = + proc waitSignal*(signal: int): Future[void] {. + async: (raw: true, raises: [AsyncError, CancelledError]).} = var retFuture = newFuture[void]("chronos.waitSignal()") var signalHandle: Opt[SignalHandle] @@ -1208,7 +1202,7 @@ when (chronosEventEngine in ["epoll", "kqueue"]) or defined(windows): retFuture proc sleepAsync*(duration: Duration): Future[void] {. - asyncraises: [CancelledError].} = + async: (raw: true, raises: [CancelledError]).} = ## Suspends the execution of the current async procedure for the next ## ``duration`` time. var retFuture = newFuture[void]("chronos.sleepAsync(Duration)") @@ -1228,10 +1222,12 @@ proc sleepAsync*(duration: Duration): Future[void] {. return retFuture proc sleepAsync*(ms: int): Future[void] {. - inline, deprecated: "Use sleepAsync(Duration)", asyncraises: [CancelledError].} = + inline, deprecated: "Use sleepAsync(Duration)", + async: (raw: true, raises: [CancelledError]).} = result = sleepAsync(ms.milliseconds()) -proc stepsAsync*(number: int): Future[void] {.asyncraises: [CancelledError].} = +proc stepsAsync*(number: int): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = ## Suspends the execution of the current async procedure for the next ## ``number`` of asynchronous steps (``poll()`` calls). ## @@ -1258,7 +1254,8 @@ proc stepsAsync*(number: int): Future[void] {.asyncraises: [CancelledError].} = retFuture -proc idleAsync*(): Future[void] {.asyncraises: [CancelledError].} = +proc idleAsync*(): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = ## Suspends the execution of the current asynchronous task until "idle" time. ## ## "idle" time its moment of time, when no network events were processed by @@ -1277,7 +1274,7 @@ proc idleAsync*(): Future[void] {.asyncraises: [CancelledError].} = retFuture proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. - asyncraises: [CancelledError].} = + async: (raw: true, raises: [CancelledError]).} = ## Returns a future which will complete once ``fut`` completes or after ## ``timeout`` milliseconds has elapsed. 
## diff --git a/chronos/internal/asyncmacro.nim b/chronos/internal/asyncmacro.nim index f313f6f36..c11084703 100644 --- a/chronos/internal/asyncmacro.nim +++ b/chronos/internal/asyncmacro.nim @@ -9,8 +9,9 @@ # import - std/[algorithm, macros, sequtils], - ../[futures, config] + std/[macros], + ../[futures, config], + ./raisesfutures proc processBody(node, setResultSym, baseType: NimNode): NimNode {.compileTime.} = case node.kind @@ -32,10 +33,10 @@ proc processBody(node, setResultSym, baseType: NimNode): NimNode {.compileTime.} node[i] = processBody(node[i], setResultSym, baseType) node -proc wrapInTryFinally(fut, baseType, body, raisesTuple: NimNode): NimNode {.compileTime.} = +proc wrapInTryFinally(fut, baseType, body, raises: NimNode): NimNode {.compileTime.} = # creates: # try: `body` - # [for raise in raisesTuple]: + # [for raise in raises]: # except `raise`: closureSucceeded = false; `castFutureSym`.fail(exc) # finally: # if closureSucceeded: @@ -91,7 +92,17 @@ proc wrapInTryFinally(fut, baseType, body, raisesTuple: NimNode): NimNode {.comp newCall(ident "fail", fut, excName) )) - for exc in raisesTuple: + let raises = if raises == nil: + const defaultException = + when defined(chronosStrictException): "CatchableError" + else: "Exception" + nnkTupleConstr.newTree(ident(defaultException)) + elif isNoRaises(raises): + nnkTupleConstr.newTree() + else: + raises + + for exc in raises: if exc.eqIdent("Exception"): addCancelledError addCatchableError @@ -182,42 +193,33 @@ proc cleanupOpenSymChoice(node: NimNode): NimNode {.compileTime.} = for child in node: result.add(cleanupOpenSymChoice(child)) -proc getAsyncCfg(prc: NimNode): tuple[raises: bool, async: bool, raisesTuple: NimNode] = - # reads the pragmas to extract the useful data - # and removes them +proc decodeParams(params: NimNode): tuple[raw: bool, raises: NimNode] = + # decodes the parameter tuple given in `async: (name: value, ...)` to its + # recognised parts + params.expectKind(nnkTupleConstr) + var - foundRaises = -1 - foundAsync = -1 - - for index, pragma in pragma(prc): - if pragma.kind == nnkExprColonExpr and pragma[0] == ident "asyncraises": - foundRaises = index - elif pragma.eqIdent("async"): - foundAsync = index - elif pragma.kind == nnkExprColonExpr and pragma[0] == ident "raises": - warning("The raises pragma doesn't work on async procedure. " & - "Please remove it or use asyncraises instead") - - result.raises = foundRaises >= 0 - result.async = foundAsync >= 0 - result.raisesTuple = nnkTupleConstr.newTree() - - if foundRaises >= 0: - for possibleRaise in pragma(prc)[foundRaises][1]: - result.raisesTuple.add(possibleRaise) - if result.raisesTuple.len == 0: - result.raisesTuple = ident("void") - else: - when defined(chronosWarnMissingRaises): - warning("Async proc miss asyncraises") - const defaultException = - when defined(chronosStrictException): "CatchableError" - else: "Exception" - result.raisesTuple.add(ident(defaultException)) + raw = false + raises: NimNode = nil + + for param in params: + param.expectKind(nnkExprColonExpr) + + if param[0].eqIdent("raises"): + param[1].expectKind(nnkBracket) + if param[1].len == 0: + raises = makeNoRaises() + else: + raises = nnkTupleConstr.newTree() + for possibleRaise in param[1]: + raises.add(possibleRaise) + elif param[0].eqIdent("raw"): + # boolVal doesn't work in untyped macros it seems.. 
+ raw = param[1].eqIdent("true") + else: + warning("Unrecognised async parameter: " & repr(param[0]), param) - let toRemoveList = @[foundRaises, foundAsync].filterIt(it >= 0).sorted().reversed() - for toRemove in toRemoveList: - pragma(prc).del(toRemove) + (raw, raises) proc isEmpty(n: NimNode): bool {.compileTime.} = # true iff node recursively contains only comments or empties @@ -230,13 +232,18 @@ proc isEmpty(n: NimNode): bool {.compileTime.} = else: false -proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = +proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = ## This macro transforms a single procedure into a closure iterator. ## The ``async`` macro supports a stmtList holding multiple async procedures. if prc.kind notin {nnkProcTy, nnkProcDef, nnkLambda, nnkMethodDef, nnkDo}: error("Cannot transform " & $prc.kind & " into an async proc." & " proc/method definition or lambda node expected.", prc) + for pragma in prc.pragma(): + if pragma.kind == nnkExprColonExpr and pragma[0].eqIdent("raises"): + warning("The raises pragma doesn't work on async procedures - use " & + "`async: (raises: [...]) instead.", prc) + let returnType = cleanupOpenSymChoice(prc.params2[0]) # Verify that the return type is a Future[T] @@ -254,22 +261,24 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = let baseTypeIsVoid = baseType.eqIdent("void") - futureVoidType = nnkBracketExpr.newTree(ident "Future", ident "void") - (hasRaises, isAsync, raisesTuple) = getAsyncCfg(prc) - - if hasRaises: - # Store `asyncraises` types in InternalRaisesFuture - prc.params2[0] = nnkBracketExpr.newTree( - newIdentNode("InternalRaisesFuture"), - baseType, - raisesTuple - ) - elif baseTypeIsVoid: - # Adds the implicit Future[void] - prc.params2[0] = + (raw, raises) = decodeParams(params) + internalFutureType = + if baseTypeIsVoid: newNimNode(nnkBracketExpr, prc). add(newIdentNode("Future")). - add(newIdentNode("void")) + add(baseType) + else: + returnType + internalReturnType = if raises == nil: + internalFutureType + else: + nnkBracketExpr.newTree( + newIdentNode("InternalRaisesFuture"), + baseType, + raises + ) + + prc.params2[0] = internalReturnType if prc.kind notin {nnkProcTy, nnkLambda}: # TODO: Nim bug? 
prc.addPragma(newColonExpr(ident "stackTrace", ident "off")) @@ -282,24 +291,28 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = # https://github.com/nim-lang/RFCs/issues/435 prc.addPragma(newIdentNode("gcsafe")) - if isAsync == false: # `asyncraises` without `async` - # type InternalRaisesFutureRaises {.used.} = `raisesTuple` - # `body` - prc.body = nnkStmtList.newTree( - nnkTypeSection.newTree( - nnkTypeDef.newTree( - nnkPragmaExpr.newTree( - ident"InternalRaisesFutureRaises", - nnkPragma.newTree( - newIdentNode("used") - ) - ), - newEmptyNode(), - raisesTuple - ) - ), - prc.body - ) + if raw: # raw async = body is left as-is + if raises != nil and prc.kind notin {nnkProcTy, nnkLambda} and not isEmpty(prc.body): + # Inject `raises` type marker that causes `newFuture` to return a raise- + # tracking future instead of an ordinary future: + # + # type InternalRaisesFutureRaises = `raisesTuple` + # `body` + prc.body = nnkStmtList.newTree( + nnkTypeSection.newTree( + nnkTypeDef.newTree( + nnkPragmaExpr.newTree( + ident"InternalRaisesFutureRaises", + nnkPragma.newTree(ident "used")), + newEmptyNode(), + raises, + ) + ), + prc.body + ) + + when chronosDumpAsync: + echo repr prc return prc @@ -311,9 +324,6 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = setResultSym = ident "setResult" procBody = prc.body.processBody(setResultSym, baseType) internalFutureSym = ident "chronosInternalRetFuture" - internalFutureType = - if baseTypeIsVoid: futureVoidType - else: returnType castFutureSym = nnkCast.newTree(internalFutureType, internalFutureSym) resultIdent = ident "result" @@ -396,7 +406,7 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = castFutureSym, baseType, if baseTypeIsVoid: procBody # shortcut for non-generic `void` else: newCall(setResultSym, procBody), - raisesTuple + raises ) closureBody = newStmtList(resultDecl, setResultDecl, completeDecl) @@ -431,19 +441,22 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = outerProcBody.add(closureIterator) - # -> let resultFuture = newInternalRaisesFuture[T]() + # -> let resultFuture = newInternalRaisesFuture[T, E]() # declared at the end to be sure that the closure # doesn't reference it, avoid cyclic ref (#203) let retFutureSym = ident "resultFuture" + newFutProc = if raises == nil: + newTree(nnkBracketExpr, ident "newFuture", baseType) + else: + newTree(nnkBracketExpr, ident "newInternalRaisesFuture", baseType, raises) retFutureSym.copyLineInfo(prc) # Do not change this code to `quote do` version because `instantiationInfo` # will be broken for `newFuture()` call. outerProcBody.add( newLetStmt( retFutureSym, - newCall(newTree(nnkBracketExpr, ident "newInternalRaisesFuture", baseType), - newLit(prcName)) + newCall(newFutProc, newLit(prcName)) ) ) # -> resultFuture.internalClosure = iterator @@ -465,6 +478,7 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} = when chronosDumpAsync: echo repr prc + prc template await*[T](f: Future[T]): untyped = @@ -490,32 +504,23 @@ template awaitne*[T](f: Future[T]): Future[T] = else: unsupported "awaitne is only available within {.async.}" -macro async*(prc: untyped): untyped = +macro async*(params, prc: untyped): untyped = ## Macro which processes async procedures into the appropriate ## iterators and yield statements. 
if prc.kind == nnkStmtList: result = newStmtList() for oneProc in prc: - oneProc.addPragma(ident"async") - result.add asyncSingleProc(oneProc) + result.add asyncSingleProc(oneProc, params) else: - prc.addPragma(ident"async") - result = asyncSingleProc(prc) + result = asyncSingleProc(prc, params) + +macro async*(prc: untyped): untyped = + ## Macro which processes async procedures into the appropriate + ## iterators and yield statements. -macro asyncraises*(possibleExceptions, prc: untyped): untyped = - # Add back the pragma and let asyncSingleProc handle it - # Exerimental / subject to change and/or removal if prc.kind == nnkStmtList: result = newStmtList() for oneProc in prc: - oneProc.addPragma(nnkExprColonExpr.newTree( - ident"asyncraises", - possibleExceptions - )) - result.add asyncSingleProc(oneProc) + result.add asyncSingleProc(oneProc, nnkTupleConstr.newTree()) else: - prc.addPragma(nnkExprColonExpr.newTree( - ident"asyncraises", - possibleExceptions - )) - result = asyncSingleProc(prc) + result = asyncSingleProc(prc, nnkTupleConstr.newTree()) diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim index 6a855818b..ad811f72b 100644 --- a/chronos/internal/raisesfutures.nim +++ b/chronos/internal/raisesfutures.nim @@ -7,13 +7,23 @@ type ## Future with a tuple of possible exception types ## eg InternalRaisesFuture[void, (ValueError, OSError)] ## - ## This type gets injected by `asyncraises` and similar utilities and - ## should not be used manually as the internal exception representation is - ## subject to change in future chronos versions. + ## This type gets injected by `async: (raises: ...)` and similar utilities + ## and should not be used manually as the internal exception representation + ## is subject to change in future chronos versions. + +proc makeNoRaises*(): NimNode {.compileTime.} = + # An empty tuple would have been easier but... 
+ # https://github.com/nim-lang/Nim/issues/22863 + # https://github.com/nim-lang/Nim/issues/22865 + + ident"void" + +proc isNoRaises*(n: NimNode): bool {.compileTime.} = + n.eqIdent("void") iterator members(tup: NimNode): NimNode = # Given a typedesc[tuple] = (A, B, C), yields the tuple members (A, B C) - if not tup.eqIdent("void"): + if not isNoRaises(tup): for n in getType(getTypeInst(tup)[1])[1..^1]: yield n @@ -40,7 +50,7 @@ macro prepend*(tup: typedesc[tuple], typs: varargs[typed]): typedesc = result.add err if result.len == 0: - result = ident"void" + result = makeNoRaises() macro remove*(tup: typedesc[tuple], typs: varargs[typed]): typedesc = result = nnkTupleConstr.newTree() @@ -49,7 +59,7 @@ macro remove*(tup: typedesc[tuple], typs: varargs[typed]): typedesc = result.add err if result.len == 0: - result = ident"void" + result = makeNoRaises() macro union*(tup0: typedesc[tuple], tup1: typedesc[tuple]): typedesc = ## Join the types of the two tuples deduplicating the entries @@ -65,11 +75,13 @@ macro union*(tup0: typedesc[tuple], tup1: typedesc[tuple]): typedesc = for err2 in getType(getTypeInst(tup1)[1])[1..^1]: result.add err2 + if result.len == 0: + result = makeNoRaises() proc getRaises*(future: NimNode): NimNode {.compileTime.} = # Given InternalRaisesFuture[T, (A, B, C)], returns (A, B, C) let types = getType(getTypeInst(future)[2]) - if types.eqIdent("void"): + if isNoRaises(types): nnkBracketExpr.newTree(newEmptyNode()) else: expectKind(types, nnkBracketExpr) @@ -106,7 +118,7 @@ macro checkRaises*[T: CatchableError]( infix(error, "of", nnkBracketExpr.newTree(ident"typedesc", errorType))) let - errorMsg = "`fail`: `" & repr(toMatch) & "` incompatible with `asyncraises: " & repr(raises[1..^1]) & "`" + errorMsg = "`fail`: `" & repr(toMatch) & "` incompatible with `raises: " & repr(raises[1..^1]) & "`" warningMsg = "Can't verify `fail` exception type at compile time - expected one of " & repr(raises[1..^1]) & ", got `" & repr(toMatch) & "`" # A warning from this line means exception type will be verified at runtime warning = if warn: diff --git a/tests/testmacro.nim b/tests/testmacro.nim index 2fc24be98..c9b45dd87 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -387,16 +387,16 @@ suite "Exceptions tracking": check (not compiles(body)) test "Can raise valid exception": proc test1 {.async.} = raise newException(ValueError, "hey") - proc test2 {.async, asyncraises: [ValueError].} = raise newException(ValueError, "hey") - proc test3 {.async, asyncraises: [IOError, ValueError].} = + proc test2 {.async: (raises: [ValueError]).} = raise newException(ValueError, "hey") + proc test3 {.async: (raises: [IOError, ValueError]).} = if 1 == 2: raise newException(ValueError, "hey") else: raise newException(IOError, "hey") - proc test4 {.async, asyncraises: [], used.} = raise newException(Defect, "hey") - proc test5 {.async, asyncraises: [].} = discard - proc test6 {.async, asyncraises: [].} = await test5() + proc test4 {.async: (raises: []), used.} = raise newException(Defect, "hey") + proc test5 {.async: (raises: []).} = discard + proc test6 {.async: (raises: []).} = await test5() expect(ValueError): waitFor test1() expect(ValueError): waitFor test2() @@ -405,15 +405,15 @@ suite "Exceptions tracking": test "Cannot raise invalid exception": checkNotCompiles: - proc test3 {.async, asyncraises: [IOError].} = raise newException(ValueError, "hey") + proc test3 {.async: (raises: [IOError]).} = raise newException(ValueError, "hey") test "Explicit return in non-raising proc": - proc 
test(): Future[int] {.async, asyncraises: [].} = return 12 + proc test(): Future[int] {.async: (raises: []).} = return 12 check: waitFor(test()) == 12 test "Non-raising compatibility": - proc test1 {.async, asyncraises: [ValueError].} = raise newException(ValueError, "hey") + proc test1 {.async: (raises: [ValueError]).} = raise newException(ValueError, "hey") let testVar: Future[void] = test1() proc test2 {.async.} = raise newException(ValueError, "hey") @@ -423,69 +423,64 @@ suite "Exceptions tracking": #let testVar3: proc: Future[void] = test1 test "Cannot store invalid future types": - proc test1 {.async, asyncraises: [ValueError].} = raise newException(ValueError, "hey") - proc test2 {.async, asyncraises: [IOError].} = raise newException(IOError, "hey") + proc test1 {.async: (raises: [ValueError]).} = raise newException(ValueError, "hey") + proc test2 {.async: (raises: [IOError]).} = raise newException(IOError, "hey") var a = test1() checkNotCompiles: a = test2() test "Await raises the correct types": - proc test1 {.async, asyncraises: [ValueError].} = raise newException(ValueError, "hey") - proc test2 {.async, asyncraises: [ValueError, CancelledError].} = await test1() + proc test1 {.async: (raises: [ValueError]).} = raise newException(ValueError, "hey") + proc test2 {.async: (raises: [ValueError, CancelledError]).} = await test1() checkNotCompiles: - proc test3 {.async, asyncraises: [CancelledError].} = await test1() + proc test3 {.async: (raises: [CancelledError]).} = await test1() test "Can create callbacks": - proc test1 {.async, asyncraises: [ValueError].} = raise newException(ValueError, "hey") - let callback: proc() {.async, asyncraises: [ValueError].} = test1 + proc test1 {.async: (raises: [ValueError]).} = raise newException(ValueError, "hey") + let callback: proc() {.async: (raises: [ValueError]).} = test1 test "Can return values": - proc test1: Future[int] {.async, asyncraises: [ValueError].} = + proc test1: Future[int] {.async: (raises: [ValueError]).} = if 1 == 0: raise newException(ValueError, "hey") return 12 - proc test2: Future[int] {.async, asyncraises: [ValueError, IOError, CancelledError].} = + proc test2: Future[int] {.async: (raises: [ValueError, IOError, CancelledError]).} = return await test1() checkNotCompiles: - proc test3: Future[int] {.async, asyncraises: [CancelledError].} = await test1() + proc test3: Future[int] {.async: (raises: [CancelledError]).} = await test1() check waitFor(test2()) == 12 test "Manual tracking": - proc test1: Future[int] {.asyncraises: [ValueError].} = + proc test1: Future[int] {.async: (raw: true, raises: [ValueError]).} = result = newFuture[int]() result.complete(12) check waitFor(test1()) == 12 - proc test2: Future[int] {.asyncraises: [IOError, OSError].} = + proc test2: Future[int] {.async: (raw: true, raises: [IOError, OSError]).} = result = newFuture[int]() result.fail(newException(IOError, "fail")) result.fail(newException(OSError, "fail")) checkNotCompiles: result.fail(newException(ValueError, "fail")) - proc test3: Future[void] {.asyncraises: [].} = + proc test3: Future[void] {.async: (raw: true, raises: []).} = checkNotCompiles: result.fail(newException(ValueError, "fail")) # Inheritance - proc test4: Future[void] {.asyncraises: [CatchableError].} = + proc test4: Future[void] {.async: (raw: true, raises: [CatchableError]).} = result.fail(newException(IOError, "fail")) - test "Reversed async, asyncraises": - proc test44 {.asyncraises: [ValueError], async.} = raise newException(ValueError, "hey") - checkNotCompiles: - proc 
test33 {.asyncraises: [IOError], async.} = raise newException(ValueError, "hey") - test "or errors": - proc testit {.asyncraises: [ValueError], async.} = + proc testit {.async: (raises: [ValueError]).} = raise (ref ValueError)() - proc testit2 {.asyncraises: [IOError], async.} = + proc testit2 {.async: (raises: [IOError]).} = raise (ref IOError)() - proc test {.async, asyncraises: [ValueError, IOError].} = + proc test {.async: (raises: [ValueError, IOError]).} = await testit() or testit2() proc noraises() {.raises: [].} = @@ -499,9 +494,10 @@ suite "Exceptions tracking": noraises() test "Wait errors": - proc testit {.asyncraises: [ValueError], async.} = raise newException(ValueError, "hey") + proc testit {.async: (raises: [ValueError]).} = + raise newException(ValueError, "hey") - proc test {.async, asyncraises: [ValueError, AsyncTimeoutError, CancelledError].} = + proc test {.async: (raises: [ValueError, AsyncTimeoutError, CancelledError]).} = await wait(testit(), 1000.milliseconds) proc noraises() {.raises: [].} = @@ -513,11 +509,11 @@ suite "Exceptions tracking": noraises() test "Nocancel errors": - proc testit {.asyncraises: [ValueError, CancelledError], async.} = + proc testit {.async: (raises: [ValueError, CancelledError]).} = await sleepAsync(5.milliseconds) raise (ref ValueError)() - proc test {.async, asyncraises: [ValueError].} = + proc test {.async: (raises: [ValueError]).} = await noCancel testit() proc noraises() {.raises: [].} = From 5ebd771d35464832eb9edf603b616fd34ad158cd Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 8 Nov 2023 15:12:32 +0100 Subject: [PATCH 077/146] per-function `Exception` handling (#457) This PR replaces the global strict exception mode with an option to handle `Exception` per function while at the same time enabling strict exception checking globally by default as has been planned for v4. `handleException` mode raises `AsyncExceptionError` to distinguish it from `ValueError` which may originate from user code. * remove obsolete 1.2 config options --- README.md | 105 ++++++++++++++--------- chronos/config.nim | 145 ++++++++++++++------------------ chronos/internal/asyncmacro.nim | 40 ++++++--- chronos/internal/errors.nim | 4 + tests/testmacro.nim | 11 +++ 5 files changed, 168 insertions(+), 137 deletions(-) diff --git a/README.md b/README.md index f59a6c83a..c80f8267c 100644 --- a/README.md +++ b/README.md @@ -9,12 +9,12 @@ Chronos is an efficient [async/await](https://en.wikipedia.org/wiki/Async/await) framework for Nim. Features include: -* Efficient dispatch pipeline for asynchronous execution +* Asynchronous socket and process I/O * HTTP server with SSL/TLS support out of the box (no OpenSSL needed) -* Cancellation support * Synchronization primitivies like queues, events and locks -* FIFO processing order of dispatch queue -* Minimal exception effect support (see [exception effects](#exception-effects)) +* Cancellation +* Efficient dispatch pipeline with excellent multi-platform support +* Exception effect support (see [exception effects](#exception-effects)) ## Installation @@ -152,16 +152,13 @@ feet, in a certain section, is to not use `await` in it. ### Error handling -Exceptions inheriting from `CatchableError` are caught by hidden `try` blocks -and placed in the `Future.error` field, changing the future's status to -`Failed`. +Exceptions inheriting from [`CatchableError`](https://nim-lang.org/docs/system.html#CatchableError) +interrupt execution of the `async` procedure. 
The exception is placed in the +`Future.error` field while changing the status of the `Future` to `Failed` +and callbacks are scheduled. -When a future is awaited, that exception is re-raised only to be caught again -by a hidden `try` block in the calling async procedure. That's how these -exceptions move up the async chain. - -A failed future's callbacks will still be scheduled, but it's not possible to -resume execution from the point an exception was raised. +When a future is awaited, the exception is re-raised, traversing the `async` +execution chain until handled. ```nim proc p1() {.async.} = @@ -206,11 +203,11 @@ proc p3() {.async.} = await fut2 ``` -Chronos does not allow that future continuations and other callbacks raise -`CatchableError` - as such, calls to `poll` will never raise exceptions caused -originating from tasks on the dispatcher queue. It is however possible that -`Defect` that happen in tasks bubble up through `poll` as these are not caught -by the transformation. +Because `chronos` ensures that all exceptions are re-routed to the `Future`, +`poll` will not itself raise exceptions. + +`poll` may still panic / raise `Defect` if such are raised in user code due to +undefined behavior. #### Checked exceptions @@ -230,6 +227,53 @@ proc p2(): Future[void] {.async, (raises: [IOError]).} = Under the hood, the return type of `p1` will be rewritten to an internal type which will convey raises informations to `await`. +#### The `Exception` type + +Exceptions deriving from `Exception` are not caught by default as these may +include `Defect` and other forms undefined or uncatchable behavior. + +Because exception effect tracking is turned on for `async` functions, this may +sometimes lead to compile errors around forward declarations, methods and +closures as Nim conservatively asssumes that any `Exception` might be raised +from those. + +Make sure to excplicitly annotate these with `{.raises.}`: + +```nim +# Forward declarations need to explicitly include a raises list: +proc myfunction() {.raises: [ValueError].} + +# ... as do `proc` types +type MyClosure = proc() {.raises: [ValueError].} + +proc myfunction() = + raise (ref ValueError)(msg: "Implementation here") + +let closure: MyClosure = myfunction +``` + +For compatibility, `async` functions can be instructed to handle `Exception` as +well, specifying `handleException: true`. `Exception` that is not a `Defect` and +not a `CatchableError` will then be caught and remapped to +`AsyncExceptionError`: + +```nim +proc raiseException() {.async: (handleException: true, raises: [AsyncExceptionError]).} = + raise (ref Exception)(msg: "Raising Exception is UB") + +proc callRaiseException() {.async: (raises: []).} = + try: + raiseException() + except AsyncExceptionError as exc: + # The original Exception is available from the `parent` field + echo exc.parent.msg +``` + +This mode can be enabled globally with `-d:chronosHandleException` as a help +when porting code to `chronos` but should generally be avoided as global +configuration settings may interfere with libraries that use `chronos` leading +to unexpected behavior. + ### Raw functions Raw functions are those that interact with `chronos` via the `Future` type but @@ -302,27 +346,6 @@ annotated as raising `CatchableError` only raise on _some_ platforms - in order to work on all platforms, calling code must assume that they will raise even when they don't seem to do so on one platform. 
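For example, with a hypothetical `maybeRaises()` helper that only fails on one platform:

```nim
proc maybeRaises() {.async.} =
  discard # pretend this raises a transport error on one platform only

proc caller() {.async.} =
  try:
    await maybeRaises()
  except CatchableError as exc:
    # handle the error on every platform, even where it never seems to fire
    echo "handled: ", exc.msg
```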
-### Strict exception mode - -`chronos` currently offers minimal support for exception effects and `raises` -annotations. In general, during the `async` transformation, a generic -`except CatchableError` handler is added around the entire function being -transformed, in order to catch any exceptions and transfer them to the `Future`. -Because of this, the effect system thinks no exceptions are "leaking" because in -fact, exception _handling_ is deferred to when the future is being read. - -Effectively, this means that while code can be compiled with -`{.push raises: []}`, the intended effect propagation and checking is -**disabled** for `async` functions. - -To enable checking exception effects in `async` code, enable strict mode with -`-d:chronosStrictException`. - -In the strict mode, `async` functions are checked such that they only raise -`CatchableError` and thus must make sure to explicitly specify exception -effects on forward declarations, callbacks and methods using -`{.raises: [CatchableError].}` (or more strict) annotations. - ### Cancellation support Any running `Future` can be cancelled. This can be used for timeouts, @@ -379,9 +402,9 @@ waitFor(cancellationExample()) Even if cancellation is initiated, it is not guaranteed that the operation gets cancelled - the future might still be completed or fail depending on the ordering of events and the specifics of -the operation. +the operation. -If the future indeed gets cancelled, `await` will raise a +If the future indeed gets cancelled, `await` will raise a `CancelledError` as is likely to happen in the following example: ```nim proc c1 {.async.} = diff --git a/chronos/config.nim b/chronos/config.nim index bd6c2b9d1..6af3e31bb 100644 --- a/chronos/config.nim +++ b/chronos/config.nim @@ -11,100 +11,77 @@ ## `chronosDebug` can be defined to enable several debugging helpers that come ## with a runtime cost - it is recommeneded to not enable these in production ## code. -when (NimMajor, NimMinor) >= (1, 4): - const - chronosStrictException* {.booldefine.}: bool = defined(chronosPreviewV4) - ## Require that `async` code raises only derivatives of `CatchableError` - ## and not `Exception` - forward declarations, methods and `proc` types - ## used from within `async` code may need to be be explicitly annotated - ## with `raises: [CatchableError]` when this mode is enabled. +const + chronosHandleException* {.booldefine.}: bool = false + ## Remap `Exception` to `AsyncExceptionError` for all `async` functions. + ## + ## This modes provides backwards compatibility when using functions with + ## inaccurate `{.raises.}` effects such as unannotated forward declarations, + ## methods and `proc` types - it is recommened to annotate such code + ## explicitly as the `Exception` handling mode may introduce surprising + ## behavior in exception handlers, should `Exception` actually be raised. + ## + ## The setting provides the default for the per-function-based + ## `handleException` parameter which has precedence over this global setting. + ## + ## `Exception` handling may be removed in future chronos versions. 
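As a sketch of the precedence rule above, a single procedure can override the global default in either direction (the test suite exercises the `true` case; the `false` case is assumed to work symmetrically):

```nim
proc lenient() {.async: (handleException: true, raises: [AsyncExceptionError]).} =
  raise (ref Exception)(msg: "remapped to AsyncExceptionError")

proc strict() {.async: (handleException: false, raises: [ValueError]).} =
  raise newException(ValueError, "only listed exceptions may escape")
```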
- chronosStrictFutureAccess* {.booldefine.}: bool = defined(chronosPreviewV4) + chronosStrictFutureAccess* {.booldefine.}: bool = defined(chronosPreviewV4) - chronosStackTrace* {.booldefine.}: bool = defined(chronosDebug) - ## Include stack traces in futures for creation and completion points + chronosStackTrace* {.booldefine.}: bool = defined(chronosDebug) + ## Include stack traces in futures for creation and completion points - chronosFutureId* {.booldefine.}: bool = defined(chronosDebug) - ## Generate a unique `id` for every future - when disabled, the address of - ## the future will be used instead + chronosFutureId* {.booldefine.}: bool = defined(chronosDebug) + ## Generate a unique `id` for every future - when disabled, the address of + ## the future will be used instead - chronosFutureTracking* {.booldefine.}: bool = defined(chronosDebug) - ## Keep track of all pending futures and allow iterating over them - - ## useful for detecting hung tasks + chronosFutureTracking* {.booldefine.}: bool = defined(chronosDebug) + ## Keep track of all pending futures and allow iterating over them - + ## useful for detecting hung tasks - chronosDumpAsync* {.booldefine.}: bool = defined(nimDumpAsync) - ## Print code generated by {.async.} transformation + chronosDumpAsync* {.booldefine.}: bool = defined(nimDumpAsync) + ## Print code generated by {.async.} transformation - chronosProcShell* {.strdefine.}: string = - when defined(windows): - "cmd.exe" + chronosProcShell* {.strdefine.}: string = + when defined(windows): + "cmd.exe" + else: + when defined(android): + "/system/bin/sh" else: - when defined(android): - "/system/bin/sh" - else: - "/bin/sh" - ## Default shell binary path. - ## - ## The shell is used as command for command line when process started - ## using `AsyncProcessOption.EvalCommand` and API calls such as - ## ``execCommand(command)`` and ``execCommandEx(command)``. + "/bin/sh" + ## Default shell binary path. + ## + ## The shell is used as command for command line when process started + ## using `AsyncProcessOption.EvalCommand` and API calls such as + ## ``execCommand(command)`` and ``execCommandEx(command)``. - chronosEventsCount* {.intdefine.} = 64 - ## Number of OS poll events retrieved by syscall (epoll, kqueue, poll). + chronosEventsCount* {.intdefine.} = 64 + ## Number of OS poll events retrieved by syscall (epoll, kqueue, poll). - chronosInitialSize* {.intdefine.} = 64 - ## Initial size of Selector[T]'s array of file descriptors. + chronosInitialSize* {.intdefine.} = 64 + ## Initial size of Selector[T]'s array of file descriptors. - chronosEventEngine* {.strdefine.}: string = - when defined(linux) and not(defined(android) or defined(emscripten)): - "epoll" - elif defined(macosx) or defined(macos) or defined(ios) or - defined(freebsd) or defined(netbsd) or defined(openbsd) or - defined(dragonfly): - "kqueue" - elif defined(android) or defined(emscripten): - "poll" - elif defined(posix): - "poll" - else: - "" - ## OS polling engine type which is going to be used by chronos. + chronosEventEngine* {.strdefine.}: string = + when defined(linux) and not(defined(android) or defined(emscripten)): + "epoll" + elif defined(macosx) or defined(macos) or defined(ios) or + defined(freebsd) or defined(netbsd) or defined(openbsd) or + defined(dragonfly): + "kqueue" + elif defined(android) or defined(emscripten): + "poll" + elif defined(posix): + "poll" + else: + "" + ## OS polling engine type which is going to be used by chronos. 
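Since these are plain `booldefine`/`intdefine`/`strdefine` constants, the selected values can be inspected at compile time (a sketch, assuming `chronos/config` is imported directly):

```nim
import chronos/config

static:
  echo "event engine: ", chronosEventEngine,
       ", events per syscall: ", chronosEventsCount
```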
-else: - # 1.2 doesn't support `booldefine` in `when` properly - const - chronosStrictException*: bool = - defined(chronosPreviewV4) or defined(chronosStrictException) - chronosStrictFutureAccess*: bool = - defined(chronosPreviewV4) or defined(chronosStrictFutureAccess) - chronosStackTrace*: bool = defined(chronosDebug) or defined(chronosStackTrace) - chronosFutureId*: bool = defined(chronosDebug) or defined(chronosFutureId) - chronosFutureTracking*: bool = - defined(chronosDebug) or defined(chronosFutureTracking) - chronosDumpAsync*: bool = defined(nimDumpAsync) - chronosProcShell* {.strdefine.}: string = - when defined(windows): - "cmd.exe" - else: - when defined(android): - "/system/bin/sh" - else: - "/bin/sh" - chronosEventsCount*: int = 64 - chronosInitialSize*: int = 64 - chronosEventEngine* {.strdefine.}: string = - when defined(linux) and not(defined(android) or defined(emscripten)): - "epoll" - elif defined(macosx) or defined(macos) or defined(ios) or - defined(freebsd) or defined(netbsd) or defined(openbsd) or - defined(dragonfly): - "kqueue" - elif defined(android) or defined(emscripten): - "poll" - elif defined(posix): - "poll" - else: - "" +when defined(chronosStrictException): + {.warning: "-d:chronosStrictException has been deprecated in favor of handleException".} + # In chronos v3, this setting was used as the opposite of + # `chronosHandleException` - the setting is deprecated to encourage + # migration to the new mode. when defined(debug) or defined(chronosConfig): import std/macros @@ -113,7 +90,7 @@ when defined(debug) or defined(chronosConfig): hint("Chronos configuration:") template printOption(name: string, value: untyped) = hint(name & ": " & $value) - printOption("chronosStrictException", chronosStrictException) + printOption("chronosHandleException", chronosHandleException) printOption("chronosStackTrace", chronosStackTrace) printOption("chronosFutureId", chronosFutureId) printOption("chronosFutureTracking", chronosFutureTracking) diff --git a/chronos/internal/asyncmacro.nim b/chronos/internal/asyncmacro.nim index c11084703..11daf3363 100644 --- a/chronos/internal/asyncmacro.nim +++ b/chronos/internal/asyncmacro.nim @@ -33,7 +33,9 @@ proc processBody(node, setResultSym, baseType: NimNode): NimNode {.compileTime.} node[i] = processBody(node[i], setResultSym, baseType) node -proc wrapInTryFinally(fut, baseType, body, raises: NimNode): NimNode {.compileTime.} = +proc wrapInTryFinally( + fut, baseType, body, raises: NimNode, + handleException: bool): NimNode {.compileTime.} = # creates: # try: `body` # [for raise in raises]: @@ -92,15 +94,15 @@ proc wrapInTryFinally(fut, baseType, body, raises: NimNode): NimNode {.compileTi newCall(ident "fail", fut, excName) )) - let raises = if raises == nil: - const defaultException = - when defined(chronosStrictException): "CatchableError" - else: "Exception" - nnkTupleConstr.newTree(ident(defaultException)) + var raises = if raises == nil: + nnkTupleConstr.newTree(ident"CatchableError") elif isNoRaises(raises): nnkTupleConstr.newTree() else: - raises + raises.copyNimTree() + + if handleException: + raises.add(ident"Exception") for exc in raises: if exc.eqIdent("Exception"): @@ -115,7 +117,9 @@ proc wrapInTryFinally(fut, baseType, body, raises: NimNode): NimNode {.compileTi newCall(ident "fail", fut, nnkStmtList.newTree( nnkAsgn.newTree(closureSucceeded, ident"false"), - quote do: (ref ValueError)(msg: `excName`.msg, parent: `excName`))) + quote do: + (ref AsyncExceptionError)( + msg: `excName`.msg, parent: `excName`))) ) elif 
exc.eqIdent("CancelledError"): addCancelledError @@ -132,6 +136,8 @@ proc wrapInTryFinally(fut, baseType, body, raises: NimNode): NimNode {.compileTi newCall(ident "fail", fut, excName) )) + addDefect # Must not complete future on defect + nTry.add nnkFinally.newTree( nnkIfStmt.newTree( nnkElifBranch.newTree( @@ -193,7 +199,13 @@ proc cleanupOpenSymChoice(node: NimNode): NimNode {.compileTime.} = for child in node: result.add(cleanupOpenSymChoice(child)) -proc decodeParams(params: NimNode): tuple[raw: bool, raises: NimNode] = +type + AsyncParams = tuple + raw: bool + raises: NimNode + handleException: bool + +proc decodeParams(params: NimNode): AsyncParams = # decodes the parameter tuple given in `async: (name: value, ...)` to its # recognised parts params.expectKind(nnkTupleConstr) @@ -201,6 +213,7 @@ proc decodeParams(params: NimNode): tuple[raw: bool, raises: NimNode] = var raw = false raises: NimNode = nil + handleException = chronosHandleException for param in params: param.expectKind(nnkExprColonExpr) @@ -216,10 +229,12 @@ proc decodeParams(params: NimNode): tuple[raw: bool, raises: NimNode] = elif param[0].eqIdent("raw"): # boolVal doesn't work in untyped macros it seems.. raw = param[1].eqIdent("true") + elif param[0].eqIdent("handleException"): + handleException = param[1].eqIdent("true") else: warning("Unrecognised async parameter: " & repr(param[0]), param) - (raw, raises) + (raw, raises, handleException) proc isEmpty(n: NimNode): bool {.compileTime.} = # true iff node recursively contains only comments or empties @@ -261,7 +276,7 @@ proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = let baseTypeIsVoid = baseType.eqIdent("void") - (raw, raises) = decodeParams(params) + (raw, raises, handleException) = decodeParams(params) internalFutureType = if baseTypeIsVoid: newNimNode(nnkBracketExpr, prc). @@ -406,7 +421,8 @@ proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = castFutureSym, baseType, if baseTypeIsVoid: procBody # shortcut for non-generic `void` else: newCall(setResultSym, procBody), - raises + raises, + handleException ) closureBody = newStmtList(resultDecl, setResultDecl, completeDecl) diff --git a/chronos/internal/errors.nim b/chronos/internal/errors.nim index 083f7a2c3..8e6443ebb 100644 --- a/chronos/internal/errors.nim +++ b/chronos/internal/errors.nim @@ -3,3 +3,7 @@ type ## Generic async exception AsyncTimeoutError* = object of AsyncError ## Timeout exception + + AsyncExceptionError* = object of AsyncError + ## Error raised in `handleException` mode - the original exception is + ## available from the `parent` field. diff --git a/tests/testmacro.nim b/tests/testmacro.nim index c9b45dd87..13611934a 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -533,3 +533,14 @@ suite "Exceptions tracking": expect(Defect): f.fail((ref CatchableError)(), warn = false) check: not f.finished() + + test "handleException behavior": + proc raiseException() {. 
+ async: (handleException: true, raises: [AsyncExceptionError]).} = + raise (ref Exception)(msg: "Raising Exception is UB and support for it may change in the future") + + proc callCatchAll() {.async: (raises: []).} = + expect(AsyncExceptionError): + await raiseException() + + waitFor(callCatchAll()) From 53690f4717369d6b5bf2aa3eea97e35b5fe52e0a Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 8 Nov 2023 16:14:33 +0100 Subject: [PATCH 078/146] run tests outside of nim compilation (#463) else we need memory for both compiler and test --- chronos.nimble | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/chronos.nimble b/chronos.nimble index f9e261700..e2fa99880 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -13,6 +13,8 @@ requires "nim >= 1.6.0", "httputils", "unittest2" +import os + let nimc = getEnv("NIMC", "nim") # Which nim compiler to use let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js) let flags = getEnv("NIMFLAGS", "") # Extra flags for the compiler @@ -44,7 +46,8 @@ proc build(args, path: string) = exec nimc & " " & lang & " " & cfg & " " & flags & " " & args & " " & path proc run(args, path: string) = - build args & " -r", path + build args, path + exec "build/" & path.splitPath[1] task test, "Run all tests": for args in testArguments: From c252ce68d8e36a705f1f72b21c195dbf4ebcb176 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 8 Nov 2023 16:15:11 +0100 Subject: [PATCH 079/146] verbose test output on actions rerun (#462) --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b7aa0fa14..2d2cace9b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -52,6 +52,12 @@ jobs: - name: Checkout uses: actions/checkout@v3 + - name: Enable debug verbosity + if: runner.debug == '1' + run: | + echo "V=1" >> $GITHUB_ENV + echo "UNITTEST2_OUTPUT_LVL=VERBOSE" >> $GITHUB_ENV + - name: Install build dependencies (Linux i386) if: runner.os == 'Linux' && matrix.target.cpu == 'i386' run: | From 9759f01016c5d6b511c6eae3bf8376cc456fc0de Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 8 Nov 2023 21:20:24 +0100 Subject: [PATCH 080/146] doc generation fixes (#464) * doc generation fixes * fix --- .github/workflows/doc.yml | 6 ++--- .gitignore | 1 + chronos/config.nim | 4 ++- chronos/internal/asyncengine.nim | 42 +++++++++++++++++++++++++------- chronos/osdefs.nim | 2 ++ chronos/selectors2.nim | 17 ++++++------- chronos/transports/common.nim | 2 +- 7 files changed, 51 insertions(+), 23 deletions(-) diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml index 1668eb037..6e1510aca 100644 --- a/.github/workflows/doc.yml +++ b/.github/workflows/doc.yml @@ -15,13 +15,13 @@ jobs: continue-on-error: true steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: true - uses: jiro4989/setup-nim-action@v1 with: - nim-version: '1.6.6' + nim-version: '1.6.16' - name: Generate doc run: | @@ -35,7 +35,7 @@ jobs: ls docs - name: Clone the gh-pages branch - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: repository: status-im/nim-chronos ref: gh-pages diff --git a/.gitignore b/.gitignore index c63155181..b59953684 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ nimble.develop nimble.paths /build/ nimbledeps +/docs diff --git a/chronos/config.nim b/chronos/config.nim index 6af3e31bb..4055361f3 100644 --- a/chronos/config.nim +++ b/chronos/config.nim @@ -63,7 +63,9 @@ const ## Initial size of 
Selector[T]'s array of file descriptors. chronosEventEngine* {.strdefine.}: string = - when defined(linux) and not(defined(android) or defined(emscripten)): + when defined(nimdoc): + "" + elif defined(linux) and not(defined(android) or defined(emscripten)): "epoll" elif defined(macosx) or defined(macos) or defined(ios) or defined(freebsd) or defined(netbsd) or defined(openbsd) or diff --git a/chronos/internal/asyncengine.nim b/chronos/internal/asyncengine.nim index 23d7c6add..0a15799fd 100644 --- a/chronos/internal/asyncengine.nim +++ b/chronos/internal/asyncengine.nim @@ -169,7 +169,36 @@ func toException*(v: OSErrorCode): ref OSError = newOSError(v) # This helper will allow to use `tryGet()` and raise OSError for # Result[T, OSErrorCode] values. -when defined(windows): +when defined(nimdoc): + type + PDispatcher* = ref object of PDispatcherBase + AsyncFD* = distinct cint + + var gDisp {.threadvar.}: PDispatcher + + proc newDispatcher*(): PDispatcher = discard + proc poll*() = discard + ## Perform single asynchronous step, processing timers and completing + ## tasks. Blocks until at least one event has completed. + ## + ## Exceptions raised during `async` task exection are stored as outcome + ## in the corresponding `Future` - `poll` itself does not raise. + + proc register2*(fd: AsyncFD): Result[void, OSErrorCode] = discard + proc unregister2*(fd: AsyncFD): Result[void, OSErrorCode] = discard + proc addReader2*(fd: AsyncFD, cb: CallbackFunc, + udata: pointer = nil): Result[void, OSErrorCode] = discard + proc removeReader2*(fd: AsyncFD): Result[void, OSErrorCode] = discard + proc addWriter2*(fd: AsyncFD, cb: CallbackFunc, + udata: pointer = nil): Result[void, OSErrorCode] = discard + proc removeWriter2*(fd: AsyncFD): Result[void, OSErrorCode] = discard + proc closeHandle*(fd: AsyncFD, aftercb: CallbackFunc = nil) = discard + proc closeSocket*(fd: AsyncFD, aftercb: CallbackFunc = nil) = discard + proc unregisterAndCloseFd*(fd: AsyncFD): Result[void, OSErrorCode] = discard + + proc `==`*(x: AsyncFD, y: AsyncFD): bool {.borrow, gcsafe.} + +elif defined(windows): {.pragma: stdcallbackFunc, stdcall, gcsafe, raises: [].} export SIGINT, SIGQUIT, SIGTERM @@ -551,12 +580,6 @@ when defined(windows): raise newException(ValueError, osErrorMsg(res.error())) proc poll*() = - ## Perform single asynchronous step, processing timers and completing - ## tasks. Blocks until at least one event has completed. - ## - ## Exceptions raised here indicate that waiting for tasks to be unblocked - ## failed - exceptions from within tasks are instead propagated through - ## their respective futures and not allowed to interrrupt the poll call. let loop = getThreadDispatcher() var curTime = Moment.now() @@ -1241,5 +1264,6 @@ when chronosFutureTracking: ## completed, cancelled or failed). futureList.count -# Perform global per-module initialization. -globalInit() +when not defined(nimdoc): + # Perform global per-module initialization. 
+ globalInit() diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index ab0772112..40a6365ad 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -1526,6 +1526,8 @@ when defined(posix): INVALID_HANDLE_VALUE* = cint(-1) proc `==`*(x: SocketHandle, y: int): bool = int(x) == y +when defined(nimdoc): + proc `==`*(x: SocketHandle, y: SocketHandle): bool {.borrow.} when defined(macosx) or defined(macos) or defined(bsd): const diff --git a/chronos/selectors2.nim b/chronos/selectors2.nim index c5918fdf5..5cb8a570f 100644 --- a/chronos/selectors2.nim +++ b/chronos/selectors2.nim @@ -36,7 +36,6 @@ import config, osdefs, osutils, oserrno export results, oserrno when defined(nimdoc): - type Selector*[T] = ref object ## An object which holds descriptors to be checked for read/write status @@ -306,11 +305,11 @@ else: doAssert((timeout >= min) and (timeout <= max), "Cannot select with incorrect timeout value, got " & $timeout) -when chronosEventEngine == "epoll": - include ./ioselects/ioselectors_epoll -elif chronosEventEngine == "kqueue": - include ./ioselects/ioselectors_kqueue -elif chronosEventEngine == "poll": - include ./ioselects/ioselectors_poll -else: - {.fatal: "Event engine `" & chronosEventEngine & "` is not supported!".} + when chronosEventEngine == "epoll": + include ./ioselects/ioselectors_epoll + elif chronosEventEngine == "kqueue": + include ./ioselects/ioselectors_kqueue + elif chronosEventEngine == "poll": + include ./ioselects/ioselectors_poll + else: + {.fatal: "Event engine `" & chronosEventEngine & "` is not supported!".} diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index b7776e535..d8263af2e 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -199,7 +199,7 @@ proc `$`*(address: TransportAddress): string = "None" proc toHex*(address: TransportAddress): string = - ## Returns hexadecimal representation of ``address`. + ## Returns hexadecimal representation of ``address``. case address.family of AddressFamily.IPv4: "0x" & address.address_v4.toHex() From 9896316599290a820289329f254c914303f7251b Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 9 Nov 2023 18:01:43 +0200 Subject: [PATCH 081/146] Remove deprecated AsyncEventBus. (#461) * Remove deprecated AsyncEventBus. Change number of tests for ThreadSignal. * Recover 1000 tests count. --- chronos/asyncsync.nim | 228 ------------------------------------------ 1 file changed, 228 deletions(-) diff --git a/chronos/asyncsync.nim b/chronos/asyncsync.nim index 0feb51e17..fa23471a5 100644 --- a/chronos/asyncsync.nim +++ b/chronos/asyncsync.nim @@ -62,50 +62,6 @@ type AsyncLockError* = object of AsyncError ## ``AsyncLock`` is either locked or unlocked. - EventBusSubscription*[T] = proc(bus: AsyncEventBus, - payload: EventPayload[T]): Future[void] {. - gcsafe, raises: [].} - ## EventBus subscription callback type. - - EventBusAllSubscription* = proc(bus: AsyncEventBus, - event: AwaitableEvent): Future[void] {. - gcsafe, raises: [].} - ## EventBus subscription callback type. - - EventBusCallback = proc(bus: AsyncEventBus, event: string, key: EventBusKey, - data: EventPayloadBase) {. - gcsafe, raises: [].} - - EventBusKey* = object - ## Unique subscription key. - eventName: string - typeName: string - unique: uint64 - cb: EventBusCallback - - EventItem = object - waiters: seq[FutureBase] - subscribers: seq[EventBusKey] - - AsyncEventBus* = ref object of RootObj - ## An eventbus object. 
- counter: uint64 - events: Table[string, EventItem] - subscribers: seq[EventBusKey] - waiters: seq[Future[AwaitableEvent]] - - EventPayloadBase* = ref object of RootObj - loc: ptr SrcLoc - - EventPayload*[T] = ref object of EventPayloadBase - ## Eventbus' event payload object - value: T - - AwaitableEvent* = object - ## Eventbus' event payload object - eventName: string - payload: EventPayloadBase - AsyncEventQueueFullError* = object of AsyncError EventQueueKey* = distinct uint64 @@ -471,190 +427,6 @@ proc `$`*[T](aq: AsyncQueue[T]): string = res.add("]") res -template generateKey(typeName, eventName: string): string = - "type[" & typeName & "]-key[" & eventName & "]" - -proc newAsyncEventBus*(): AsyncEventBus {. - deprecated: "Implementation has unfixable flaws, please use" & - "AsyncEventQueue[T] instead".} = - ## Creates new ``AsyncEventBus``. - AsyncEventBus(counter: 0'u64, events: initTable[string, EventItem]()) - -template get*[T](payload: EventPayload[T]): T = - ## Returns event payload data. - payload.value - -template location*(payload: EventPayloadBase): SrcLoc = - ## Returns source location address of event emitter. - payload.loc[] - -proc get*(event: AwaitableEvent, T: typedesc): T {. - deprecated: "Implementation has unfixable flaws, please use " & - "AsyncEventQueue[T] instead".} = - ## Returns event's payload of type ``T`` from event ``event``. - cast[EventPayload[T]](event.payload).value - -template event*(event: AwaitableEvent): string = - ## Returns event's name from event ``event``. - event.eventName - -template location*(event: AwaitableEvent): SrcLoc = - ## Returns source location address of event emitter. - event.payload.loc[] - -proc waitEvent*(bus: AsyncEventBus, T: typedesc, event: string): Future[T] {. - deprecated: "Implementation has unfixable flaws, please use " & - "AsyncEventQueue[T] instead".} = - ## Wait for the event from AsyncEventBus ``bus`` with name ``event``. - ## - ## Returned ``Future[T]`` will hold event's payload of type ``T``. - var default: EventItem - var retFuture = newFuture[T]("AsyncEventBus.waitEvent") - let eventKey = generateKey(T.name, event) - proc cancellation(udata: pointer) {.gcsafe, raises: [].} = - if not(retFuture.finished()): - bus.events.withValue(eventKey, item): - item.waiters.keepItIf(it != cast[FutureBase](retFuture)) - retFuture.cancelCallback = cancellation - let baseFuture = cast[FutureBase](retFuture) - bus.events.mgetOrPut(eventKey, default).waiters.add(baseFuture) - retFuture - -proc waitAllEvents*(bus: AsyncEventBus): Future[AwaitableEvent] {. - deprecated: "Implementation has unfixable flaws, please use " & - "AsyncEventQueue[T] instead".} = - ## Wait for any event from AsyncEventBus ``bus``. - ## - ## Returns ``Future`` which holds helper object. Using this object you can - ## retrieve event's name and payload. - var retFuture = newFuture[AwaitableEvent]("AsyncEventBus.waitAllEvents") - proc cancellation(udata: pointer) {.gcsafe, raises: [].} = - if not(retFuture.finished()): - bus.waiters.keepItIf(it != retFuture) - retFuture.cancelCallback = cancellation - bus.waiters.add(retFuture) - retFuture - -proc subscribe*[T](bus: AsyncEventBus, event: string, - callback: EventBusSubscription[T]): EventBusKey {. - deprecated: "Implementation has unfixable flaws, please use " & - "AsyncEventQueue[T] instead".} = - ## Subscribe to the event ``event`` passed through eventbus ``bus`` with - ## callback ``callback``. - ## - ## Returns key that can be used to unsubscribe. 
- proc trampoline(tbus: AsyncEventBus, event: string, key: EventBusKey, - data: EventPayloadBase) {.gcsafe, raises: [].} = - let payload = cast[EventPayload[T]](data) - asyncSpawn callback(bus, payload) - - let subkey = - block: - inc(bus.counter) - EventBusKey(eventName: event, typeName: T.name, unique: bus.counter, - cb: trampoline) - - var default: EventItem - let eventKey = generateKey(T.name, event) - bus.events.mgetOrPut(eventKey, default).subscribers.add(subkey) - subkey - -proc subscribeAll*(bus: AsyncEventBus, - callback: EventBusAllSubscription): EventBusKey {. - deprecated: "Implementation has unfixable flaws, please use " & - "AsyncEventQueue instead".} = - ## Subscribe to all events passed through eventbus ``bus`` with callback - ## ``callback``. - ## - ## Returns key that can be used to unsubscribe. - proc trampoline(tbus: AsyncEventBus, event: string, key: EventBusKey, - data: EventPayloadBase) {.gcsafe, raises: [].} = - let event = AwaitableEvent(eventName: event, payload: data) - asyncSpawn callback(bus, event) - - let subkey = - block: - inc(bus.counter) - EventBusKey(eventName: "", typeName: "", unique: bus.counter, - cb: trampoline) - bus.subscribers.add(subkey) - subkey - -proc unsubscribe*(bus: AsyncEventBus, key: EventBusKey) {. - deprecated: "Implementation has unfixable flaws, please use " & - "AsyncEventQueue instead".} = - ## Cancel subscription of subscriber with key ``key`` from eventbus ``bus``. - let eventKey = generateKey(key.typeName, key.eventName) - - # Clean event's subscribers. - bus.events.withValue(eventKey, item): - item.subscribers.keepItIf(it.unique != key.unique) - - # Clean subscribers subscribed to all events. - bus.subscribers.keepItIf(it.unique != key.unique) - -proc emit[T](bus: AsyncEventBus, event: string, data: T, loc: ptr SrcLoc) = - let - eventKey = generateKey(T.name, event) - payload = - block: - var data = EventPayload[T](value: data, loc: loc) - cast[EventPayloadBase](data) - - # Used to capture the "subscriber" variable in the loops - # sugar.capture doesn't work in Nim <1.6 - proc triggerSubscriberCallback(subscriber: EventBusKey) = - callSoon(proc(udata: pointer) = - subscriber.cb(bus, event, subscriber, payload) - ) - - bus.events.withValue(eventKey, item): - # Schedule waiters which are waiting for the event ``event``. - for waiter in item.waiters: - var fut = cast[Future[T]](waiter) - fut.complete(data) - # Clear all the waiters. - item.waiters.setLen(0) - - # Schedule subscriber's callbacks, which are subscribed to the event. - for subscriber in item.subscribers: - triggerSubscriberCallback(subscriber) - - # Schedule waiters which are waiting all events - for waiter in bus.waiters: - waiter.complete(AwaitableEvent(eventName: event, payload: payload)) - # Clear all the waiters. - bus.waiters.setLen(0) - - # Schedule subscriber's callbacks which are subscribed to all events. - for subscriber in bus.subscribers: - triggerSubscriberCallback(subscriber) - -template emit*[T](bus: AsyncEventBus, event: string, data: T) {. - deprecated: "Implementation has unfixable flaws, please use " & - "AsyncEventQueue instead".} = - ## Emit new event ``event`` to the eventbus ``bus`` with payload ``data``. 
- emit(bus, event, data, getSrcLocation()) - -proc emitWait[T](bus: AsyncEventBus, event: string, data: T, - loc: ptr SrcLoc): Future[void] = - var retFuture = newFuture[void]("AsyncEventBus.emitWait") - proc continuation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): - retFuture.complete() - emit(bus, event, data, loc) - callSoon(continuation) - return retFuture - -template emitWait*[T](bus: AsyncEventBus, event: string, - data: T): Future[void] {. - deprecated: "Implementation has unfixable flaws, please use " & - "AsyncEventQueue instead".} = - ## Emit new event ``event`` to the eventbus ``bus`` with payload ``data`` and - ## wait until all the subscribers/waiters will receive notification about - ## event. - emitWait(bus, event, data, getSrcLocation()) - proc `==`(a, b: EventQueueKey): bool {.borrow.} proc compact(ab: AsyncEventQueue) {.raises: [].} = From 8156e2997a12006b70e84a96f1c81b225eb04b93 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 10 Nov 2023 08:42:36 +0200 Subject: [PATCH 082/146] Fix not enough memory on i386. (#467) * Fix waitFor() should not exit earlier last callback will be scheduled. * Tune tests to use less memory. * Fix `testutils`. There is no more last poll() needed. * Update chronos/internal/asyncfutures.nim --------- Co-authored-by: Jacek Sieka --- chronos/internal/asyncfutures.nim | 10 +++++++--- tests/testfut.nim | 14 ++++++-------- tests/testhttpclient.nim | 4 ++-- tests/testproc.bat | 2 +- tests/testproc.nim | 4 ++-- tests/testproc.sh | 2 +- tests/testutils.nim | 5 ----- 7 files changed, 19 insertions(+), 22 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index b144cea78..c4a737477 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -560,9 +560,13 @@ proc waitFor*[T](fut: Future[T]): T {.raises: [CatchableError].} = ## **Blocks** the current thread until the specified future finishes and ## reads it, potentially raising an exception if the future failed or was ## cancelled. 
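   ##
   ## A minimal usage sketch (illustrative only, not from the original patch;
   ## `greet` is a made-up name):
   ##
   ## .. code-block:: nim
   ##   proc greet(): Future[string] {.async.} =
   ##     await sleepAsync(10.milliseconds)
   ##     return "hello"
   ##
   ##   # Blocks this thread, running the dispatcher until the future
   ##   # finishes, then returns its value (or raises its error)
   ##   assert waitFor(greet()) == "hello"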
- while not(fut.finished()): - poll() - + var finished = false + # Ensure that callbacks currently scheduled on the future run before returning + proc continuation(udata: pointer) {.gcsafe.} = finished = true + if not(fut.finished()): + fut.addCallback(continuation) + while not(finished): + poll() fut.read() proc asyncSpawn*(future: Future[void]) = diff --git a/tests/testfut.nim b/tests/testfut.nim index fc9d48288..1297dc454 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -54,7 +54,6 @@ suite "Future[T] behavior test suite": fut.addCallback proc(udata: pointer) = testResult &= "5" discard waitFor(fut) - poll() check: fut.finished @@ -80,7 +79,6 @@ suite "Future[T] behavior test suite": fut.addCallback cb5 fut.removeCallback cb3 discard waitFor(fut) - poll() check: fut.finished testResult == "1245" @@ -1260,12 +1258,12 @@ suite "Future[T] behavior test suite": (loc.procedure == procedure) check: - chk(loc10, "testfut.nim", 1227, "macroFuture") - chk(loc11, "testfut.nim", 1230, "") - chk(loc20, "testfut.nim", 1239, "template") - chk(loc21, "testfut.nim", 1242, "") - chk(loc30, "testfut.nim", 1236, "procedure") - chk(loc31, "testfut.nim", 1243, "") + chk(loc10, "testfut.nim", 1225, "macroFuture") + chk(loc11, "testfut.nim", 1228, "") + chk(loc20, "testfut.nim", 1237, "template") + chk(loc21, "testfut.nim", 1240, "") + chk(loc30, "testfut.nim", 1234, "procedure") + chk(loc31, "testfut.nim", 1241, "") asyncTest "withTimeout(fut) should wait cancellation test": proc futureNeverEnds(): Future[void] = diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index e10892ebc..f08b3c5b2 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -187,11 +187,11 @@ suite "HTTP client testing suite": let ResponseTests = [ (MethodGet, "/test/short_size_response", 65600, 1024, "SHORTSIZERESPONSE"), - (MethodGet, "/test/long_size_response", 262400, 1024, + (MethodGet, "/test/long_size_response", 131200, 1024, "LONGSIZERESPONSE"), (MethodGet, "/test/short_chunked_response", 65600, 1024, "SHORTCHUNKRESPONSE"), - (MethodGet, "/test/long_chunked_response", 262400, 1024, + (MethodGet, "/test/long_chunked_response", 131200, 1024, "LONGCHUNKRESPONSE") ] proc process(r: RequestFence): Future[HttpResponseRef] {. 
diff --git a/tests/testproc.bat b/tests/testproc.bat index 11b4047ee..058403954 100644 --- a/tests/testproc.bat +++ b/tests/testproc.bat @@ -34,7 +34,7 @@ ping -n 10 127.0.0.1 > NUL EXIT 0 :BIGDATA -FOR /L %%G IN (1, 1, 400000) DO ECHO ALICEWASBEGINNINGTOGETVERYTIREDOFSITTINGBYHERSISTERONTHEBANKANDO +FOR /L %%G IN (1, 1, 100000) DO ECHO ALICEWASBEGINNINGTOGETVERYTIREDOFSITTINGBYHERSISTERONTHEBANKANDO EXIT 0 :ENVTEST diff --git a/tests/testproc.nim b/tests/testproc.nim index 288ec181d..588e30877 100644 --- a/tests/testproc.nim +++ b/tests/testproc.nim @@ -214,9 +214,9 @@ suite "Asynchronous process management test suite": "tests/testproc.sh bigdata" let expect = when defined(windows): - 400_000 * (64 + 2) + 100_000 * (64 + 2) else: - 400_000 * (64 + 1) + 100_000 * (64 + 1) let process = await startProcess(command, options = options, stdoutHandle = AsyncProcess.Pipe, stderrHandle = AsyncProcess.Pipe) diff --git a/tests/testproc.sh b/tests/testproc.sh index c5e7e0ac2..e525da59b 100755 --- a/tests/testproc.sh +++ b/tests/testproc.sh @@ -12,7 +12,7 @@ elif [ "$1" == "timeout2" ]; then elif [ "$1" == "timeout10" ]; then sleep 10 elif [ "$1" == "bigdata" ]; then - for i in {1..400000} + for i in {1..100000} do echo "ALICEWASBEGINNINGTOGETVERYTIREDOFSITTINGBYHERSISTERONTHEBANKANDO" done diff --git a/tests/testutils.nim b/tests/testutils.nim index fb5de50e9..49072ebe4 100644 --- a/tests/testutils.nim +++ b/tests/testutils.nim @@ -75,11 +75,6 @@ suite "Asynchronous utilities test suite": pendingFuturesCount() == 2'u waitFor fut - check: - getCount() == 1'u - pendingFuturesCount() == 1'u - - poll() check: getCount() == 0'u pendingFuturesCount() == 0'u From f0eb7a0ae9ef02fa9afdbb80ff4f0f4f42fe4dcc Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 13 Nov 2023 10:54:37 +0100 Subject: [PATCH 083/146] simplify tests (#469) * simplify tests `chronosPreviewV4` is obsolete * oops --- chronos.nimble | 10 +++------- nim.cfg | 1 + 2 files changed, 4 insertions(+), 7 deletions(-) create mode 100644 nim.cfg diff --git a/chronos.nimble b/chronos.nimble index e2fa99880..667d1daa0 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -23,24 +23,20 @@ let testArguments = when defined(windows): [ "-d:debug -d:chronosDebug -d:useSysAssert -d:useGcAssert", - "-d:debug -d:chronosPreviewV4", "-d:release", - "-d:release -d:chronosPreviewV4" ] else: [ "-d:debug -d:chronosDebug -d:useSysAssert -d:useGcAssert", - "-d:debug -d:chronosPreviewV4", "-d:debug -d:chronosDebug -d:chronosEventEngine=poll -d:useSysAssert -d:useGcAssert", "-d:release", - "-d:release -d:chronosPreviewV4" ] -let styleCheckStyle = if (NimMajor, NimMinor) < (1, 6): "hint" else: "error" let cfg = - " --styleCheck:usages --styleCheck:" & styleCheckStyle & + " --styleCheck:usages --styleCheck:error" & (if verbose: "" else: " --verbosity:0 --hints:off") & - " --skipParentCfg --skipUserCfg --outdir:build --nimcache:build/nimcache -f" + " --skipParentCfg --skipUserCfg --outdir:build " & + quoteShell("--nimcache:build/nimcache/$projectName") proc build(args, path: string) = exec nimc & " " & lang & " " & cfg & " " & flags & " " & args & " " & path diff --git a/nim.cfg b/nim.cfg new file mode 100644 index 000000000..45d538b89 --- /dev/null +++ b/nim.cfg @@ -0,0 +1 @@ +nimcache = "build/nimcache/$projectName" From 0d55475c29f232b849c9d9456bcb21287b046cda Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 13 Nov 2023 10:56:19 +0100 Subject: [PATCH 084/146] `stew/results` -> `results` (#468) --- chronos.nimble | 1 + chronos/apps/http/httpclient.nim | 4 ++-- 
chronos/apps/http/httpcommon.nim | 2 +- chronos/apps/http/httpdebug.nim | 2 +- chronos/apps/http/httpserver.nim | 4 ++-- chronos/apps/http/multipart.nim | 4 ++-- chronos/asyncproc.nim | 2 +- chronos/handles.nim | 2 +- chronos/internal/asyncengine.nim | 2 +- chronos/osutils.nim | 4 ++-- chronos/selectors2.nim | 4 ++-- chronos/streams/boundstream.nim | 2 +- chronos/streams/chunkstream.nim | 2 +- chronos/threadsync.nim | 2 +- 14 files changed, 19 insertions(+), 18 deletions(-) diff --git a/chronos.nimble b/chronos.nimble index 667d1daa0..b2fdb3ae6 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -8,6 +8,7 @@ license = "MIT or Apache License 2.0" skipDirs = @["tests"] requires "nim >= 1.6.0", + "results", "stew", "bearssl", "httputils", diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 34089c70d..83d1ddfc7 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -7,13 +7,13 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import std/[uri, tables, sequtils] -import stew/[results, base10, base64, byteutils], httputils +import stew/[base10, base64, byteutils], httputils, results import ../../asyncloop, ../../asyncsync import ../../streams/[asyncstream, tlsstream, chunkstream, boundstream] import httptable, httpcommon, httpagent, httpbodyrw, multipart export results, asyncloop, asyncsync, asyncstream, tlsstream, chunkstream, boundstream, httptable, httpcommon, httpagent, httpbodyrw, multipart, - httputils, uri + httputils, uri, results export SocketFlags const diff --git a/chronos/apps/http/httpcommon.nim b/chronos/apps/http/httpcommon.nim index c01c1c3cf..da5e03f61 100644 --- a/chronos/apps/http/httpcommon.nim +++ b/chronos/apps/http/httpcommon.nim @@ -7,7 +7,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import std/[strutils, uri] -import stew/results, httputils +import results, httputils import ../../asyncloop, ../../asyncsync import ../../streams/[asyncstream, boundstream] export asyncloop, asyncsync, results, httputils, strutils diff --git a/chronos/apps/http/httpdebug.nim b/chronos/apps/http/httpdebug.nim index a1dc02287..d343265ba 100644 --- a/chronos/apps/http/httpdebug.nim +++ b/chronos/apps/http/httpdebug.nim @@ -7,7 +7,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import std/tables -import stew/results +import results import ../../timer import httpserver, shttpserver from httpclient import HttpClientScheme diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index 2ab53178d..1e307a07e 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -7,10 +7,10 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import std/[tables, uri, strutils] -import stew/[results, base10], httputils +import stew/[base10], httputils, results import ../../asyncloop, ../../asyncsync import ../../streams/[asyncstream, boundstream, chunkstream] -import httptable, httpcommon, multipart +import "."/[httptable, httpcommon, multipart] export asyncloop, asyncsync, httptable, httpcommon, httputils, multipart, asyncstream, boundstream, chunkstream, uri, tables, results diff --git a/chronos/apps/http/multipart.nim b/chronos/apps/http/multipart.nim index 45506a2a1..b936996bf 100644 --- a/chronos/apps/http/multipart.nim +++ b/chronos/apps/http/multipart.nim @@ -8,10 +8,10 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import 
std/[monotimes, strutils] -import stew/results, httputils +import results, httputils import ../../asyncloop import ../../streams/[asyncstream, boundstream, chunkstream] -import httptable, httpcommon, httpbodyrw +import "."/[httptable, httpcommon, httpbodyrw] export asyncloop, httptable, httpcommon, httpbodyrw, asyncstream, httputils const diff --git a/chronos/asyncproc.nim b/chronos/asyncproc.nim index 3e2df88c6..8615c570a 100644 --- a/chronos/asyncproc.nim +++ b/chronos/asyncproc.nim @@ -13,7 +13,7 @@ import std/strtabs import "."/[config, asyncloop, handles, osdefs, osutils, oserrno], streams/asyncstream -import stew/[results, byteutils] +import stew/[byteutils], results from std/os import quoteShell, quoteShellWindows, quoteShellPosix, envPairs export strtabs, results diff --git a/chronos/handles.nim b/chronos/handles.nim index afa57fb27..72b0751e8 100644 --- a/chronos/handles.nim +++ b/chronos/handles.nim @@ -10,7 +10,7 @@ {.push raises: [].} import "."/[asyncloop, osdefs, osutils] -import stew/results +import results from nativesockets import Domain, Protocol, SockType, toInt export Domain, Protocol, SockType, results diff --git a/chronos/internal/asyncengine.nim b/chronos/internal/asyncengine.nim index 0a15799fd..d4e803cf4 100644 --- a/chronos/internal/asyncengine.nim +++ b/chronos/internal/asyncengine.nim @@ -12,7 +12,7 @@ from nativesockets import Port import std/[tables, heapqueue, deques] -import stew/results +import results import ".."/[config, futures, osdefs, oserrno, osutils, timer] import ./[asyncmacro, errors] diff --git a/chronos/osutils.nim b/chronos/osutils.nim index f9c09f257..d93c2619c 100644 --- a/chronos/osutils.nim +++ b/chronos/osutils.nim @@ -6,8 +6,8 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) -import stew/results -import osdefs, oserrno +import results +import "."/[osdefs, oserrno] export results diff --git a/chronos/selectors2.nim b/chronos/selectors2.nim index 5cb8a570f..db8791a59 100644 --- a/chronos/selectors2.nim +++ b/chronos/selectors2.nim @@ -31,8 +31,8 @@ # support - changes could potentially be backported to nim but are not # backwards-compatible. -import stew/results -import config, osdefs, osutils, oserrno +import results +import "."/[config, osdefs, osutils, oserrno] export results, oserrno when defined(nimdoc): diff --git a/chronos/streams/boundstream.nim b/chronos/streams/boundstream.nim index 73321ebf7..dbb36ef09 100644 --- a/chronos/streams/boundstream.nim +++ b/chronos/streams/boundstream.nim @@ -14,7 +14,7 @@ ## ## For stream writing it means that you should write exactly bounded size ## of bytes. -import stew/results +import results import ../asyncloop, ../timer import asyncstream, ../transports/stream, ../transports/common export asyncloop, asyncstream, stream, timer, common diff --git a/chronos/streams/chunkstream.nim b/chronos/streams/chunkstream.nim index 729d8de08..c0269a2ae 100644 --- a/chronos/streams/chunkstream.nim +++ b/chronos/streams/chunkstream.nim @@ -10,7 +10,7 @@ ## This module implements HTTP/1.1 chunked-encoded stream reading and writing. 
import ../asyncloop, ../timer import asyncstream, ../transports/stream, ../transports/common -import stew/results +import results export asyncloop, asyncstream, stream, timer, common, results const diff --git a/chronos/threadsync.nim b/chronos/threadsync.nim index d41418121..bbff18bd1 100644 --- a/chronos/threadsync.nim +++ b/chronos/threadsync.nim @@ -8,7 +8,7 @@ # MIT license (LICENSE-MIT) ## This module implements some core async thread synchronization primitives. -import stew/results +import results import "."/[timer, asyncloop] export results From 9c93ab48deb0a0c5179b941cc036f373eb30ed6e Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Mon, 13 Nov 2023 13:14:21 +0200 Subject: [PATCH 085/146] Attempt to fix CI crash at Windows. (#465) * Attempt to fix CI crash at Windows. Remove all cast[string] and cast[seq[byte]] from the codebase. * Address review comments. --- chronos/internal/asyncengine.nim | 20 +++++++----- tests/testasyncstream.nim | 55 ++++++++++++++++---------------- tests/testdatagram.nim | 9 +++--- tests/testhttpclient.nim | 28 ++++++++-------- 4 files changed, 59 insertions(+), 53 deletions(-) diff --git a/chronos/internal/asyncengine.nim b/chronos/internal/asyncengine.nim index d4e803cf4..578bfdf96 100644 --- a/chronos/internal/asyncengine.nim +++ b/chronos/internal/asyncengine.nim @@ -1125,14 +1125,18 @@ proc addTimer*(at: uint64, cb: CallbackFunc, udata: pointer = nil) {. proc removeTimer*(at: Moment, cb: CallbackFunc, udata: pointer = nil) = ## Remove timer callback ``cb`` with absolute timestamp ``at`` from waiting ## queue. - let loop = getThreadDispatcher() - var list = cast[seq[TimerCallback]](loop.timers) - var index = -1 - for i in 0.. Date: Wed, 15 Nov 2023 09:06:37 +0100 Subject: [PATCH 086/146] move docs to docs (#466) * introduce user guide based on `mdbook` * set up structure for adding simple `chronos` usage examples * move most readme content to book * ci deploys book and api guide automatically * remove most of existing engine docs (obsolete) --- .appveyor.yml | 40 --- .github/workflows/ci.yml | 1 + .github/workflows/doc.yml | 56 ++-- .gitignore | 1 - .travis.yml | 27 -- README.md | 449 ++---------------------------- chronos.nim | 5 + chronos.nimble | 13 +- chronos/asyncloop.nim | 118 -------- chronos/internal/asyncengine.nim | 4 + docs/.gitignore | 1 + docs/book.toml | 20 ++ docs/examples/cancellation.nim | 21 ++ docs/examples/discards.nim | 28 ++ docs/examples/httpget.nim | 15 + docs/examples/nim.cfg | 1 + docs/examples/timeoutcomposed.nim | 25 ++ docs/examples/timeoutsimple.nim | 20 ++ docs/examples/twogets.nim | 24 ++ docs/open-in.css | 7 + docs/src/SUMMARY.md | 10 + docs/src/async_procs.md | 112 ++++++++ docs/src/concepts.md | 126 +++++++++ docs/src/error_handling.md | 134 +++++++++ docs/src/getting_started.md | 19 ++ docs/src/introduction.md | 32 +++ docs/src/porting.md | 54 ++++ docs/src/tips.md | 34 +++ docs/theme/highlight.js | 53 ++++ 29 files changed, 809 insertions(+), 641 deletions(-) delete mode 100644 .appveyor.yml delete mode 100644 .travis.yml create mode 100644 docs/.gitignore create mode 100644 docs/book.toml create mode 100644 docs/examples/cancellation.nim create mode 100644 docs/examples/discards.nim create mode 100644 docs/examples/httpget.nim create mode 100644 docs/examples/nim.cfg create mode 100644 docs/examples/timeoutcomposed.nim create mode 100644 docs/examples/timeoutsimple.nim create mode 100644 docs/examples/twogets.nim create mode 100644 docs/open-in.css create mode 100644 docs/src/SUMMARY.md create mode 100644 
docs/src/async_procs.md create mode 100644 docs/src/concepts.md create mode 100644 docs/src/error_handling.md create mode 100644 docs/src/getting_started.md create mode 100644 docs/src/introduction.md create mode 100644 docs/src/porting.md create mode 100644 docs/src/tips.md create mode 100644 docs/theme/highlight.js diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index 768c2619a..000000000 --- a/.appveyor.yml +++ /dev/null @@ -1,40 +0,0 @@ -version: '{build}' - -image: Visual Studio 2015 - -cache: -- NimBinaries - -matrix: - # We always want 32 and 64-bit compilation - fast_finish: false - -platform: - - x86 - - x64 - -# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X" -clone_depth: 10 - -install: - # use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin - - IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH% - - IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH% - - # build nim from our own branch - this to avoid the day-to-day churn and - # regressions of the fast-paced Nim development while maintaining the - # flexibility to apply patches - - curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh - - env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries - - SET PATH=%CD%\Nim\bin;%PATH% - -build_script: - - cd C:\projects\%APPVEYOR_PROJECT_SLUG% - - nimble install -y --depsOnly - - nimble install -y libbacktrace - -test_script: - - nimble test - -deploy: off - diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2d2cace9b..e64f75439 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -165,3 +165,4 @@ jobs: nimble install -y libbacktrace nimble test nimble test_libbacktrace + nimble examples diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml index 6e1510aca..dc718f8de 100644 --- a/.github/workflows/doc.yml +++ b/.github/workflows/doc.yml @@ -18,6 +18,26 @@ jobs: uses: actions/checkout@v3 with: submodules: true + - uses: actions-rs/install@v0.1 + with: + crate: mdbook + use-tool-cache: true + version: "0.4.35" + - uses: actions-rs/install@v0.1 + with: + crate: mdbook-toc + use-tool-cache: true + version: "0.14.1" + - uses: actions-rs/install@v0.1 + with: + crate: mdbook-open-on-gh + use-tool-cache: true + version: "2.4.1" + - uses: actions-rs/install@v0.1 + with: + crate: mdbook-admonish + use-tool-cache: true + version: "1.13.1" - uses: jiro4989/setup-nim-action@v1 with: @@ -28,35 +48,11 @@ jobs: nim --version nimble --version nimble install -dy - # nim doc can "fail", but the doc is still generated - nim doc --git.url:https://github.com/status-im/nim-chronos --git.commit:master --outdir:docs --project chronos || true - - # check that the folder exists - ls docs + nimble docs || true - - name: Clone the gh-pages branch - uses: actions/checkout@v3 + - name: Deploy + uses: peaceiris/actions-gh-pages@v3 with: - repository: status-im/nim-chronos - ref: gh-pages - path: subdoc - submodules: true - fetch-depth: 0 - - - name: Commit & push - run: | - cd subdoc - - # Update / create this branch doc - rm -rf docs - mv ../docs . - - # Remove .idx files - # NOTE: git also uses idx files in his - # internal folder, hence the `*` instead of `.` - find * -name "*.idx" -delete - git add . 
- git config --global user.email "${{ github.actor }}@users.noreply.github.com" - git config --global user.name = "${{ github.actor }}" - git commit -a -m "update docs" - git push origin gh-pages + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./docs/book + force_orphan: true diff --git a/.gitignore b/.gitignore index b59953684..c63155181 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,3 @@ nimble.develop nimble.paths /build/ nimbledeps -/docs diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 1a5bcd3d1..000000000 --- a/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -language: c - -# https://docs.travis-ci.com/user/caching/ -cache: - directories: - - NimBinaries - -git: - # when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X" - depth: 10 - -os: - - linux - - osx - -install: - # build nim from our own branch - this to avoid the day-to-day churn and - # regressions of the fast-paced Nim development while maintaining the - # flexibility to apply patches - - curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh - - env MAKE="make -j2" bash build_nim.sh Nim csources dist/nimble NimBinaries - - export PATH="$PWD/Nim/bin:$PATH" - -script: - - nimble install -y - - nimble test - diff --git a/README.md b/README.md index c80f8267c..b3a80fe8b 100644 --- a/README.md +++ b/README.md @@ -14,11 +14,11 @@ Chronos is an efficient [async/await](https://en.wikipedia.org/wiki/Async/await) * Synchronization primitivies like queues, events and locks * Cancellation * Efficient dispatch pipeline with excellent multi-platform support -* Exception effect support (see [exception effects](#exception-effects)) +* Exceptional error handling features, including `raises` tracking -## Installation +## Getting started -You can use Nim's official package manager Nimble to install Chronos: +Install `chronos` using `nimble`: ```text nimble install chronos @@ -30,438 +30,43 @@ or add a dependency to your `.nimble` file: requires "chronos" ``` -## Projects using `chronos` - -* [libp2p](https://github.com/status-im/nim-libp2p) - Peer-to-Peer networking stack implemented in many languages -* [presto](https://github.com/status-im/nim-presto) - REST API framework -* [Scorper](https://github.com/bung87/scorper) - Web framework -* [2DeFi](https://github.com/gogolxdong/2DeFi) - Decentralised file system -* [websock](https://github.com/status-im/nim-websock/) - WebSocket library with lots of features - -`chronos` is available in the [Nim Playground](https://play.nim-lang.org/#ix=2TpS) - -Submit a PR to add yours! - -## Documentation - -### Concepts - -Chronos implements the async/await paradigm in a self-contained library using -the macro and closure iterator transformation features provided by Nim. - -The event loop is called a "dispatcher" and a single instance per thread is -created, as soon as one is needed. - -To trigger a dispatcher's processing step, we need to call `poll()` - either -directly or through a wrapper like `runForever()` or `waitFor()`. Each step -handles any file descriptors, timers and callbacks that are ready to be -processed. - -`Future` objects encapsulate the result of an `async` procedure upon successful -completion, and a list of callbacks to be scheduled after any type of -completion - be that success, failure or cancellation. 
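For illustration (an editorial sketch, not part of the original README text being
removed here), the callback mechanism described above can be exercised with only
the public `addCallback`/`waitFor` API; `compute` is a made-up name:

```nim
import chronos

proc compute(): Future[int] {.async.} =
  await sleepAsync(10.milliseconds)
  return 7

let fut = compute()
fut.addCallback proc(udata: pointer) =
  # Scheduled by the dispatcher once `fut` completes, fails or is cancelled
  echo "callback fired"

echo "result = ", waitFor(fut) # drives the dispatcher until `fut` finishes
```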
- -(These explicit callbacks are rarely used outside Chronos, being replaced by -implicit ones generated by async procedure execution and `await` chaining.) - -Async procedures (those using the `{.async.}` pragma) return `Future` objects. - -Inside an async procedure, you can `await` the future returned by another async -procedure. At this point, control will be handled to the event loop until that -future is completed. - -Future completion is tested with `Future.finished()` and is defined as success, -failure or cancellation. This means that a future is either pending or completed. - -To differentiate between completion states, we have `Future.failed()` and -`Future.cancelled()`. - -### Dispatcher - -You can run the "dispatcher" event loop forever, with `runForever()` which is defined as: - -```nim -proc runForever*() = - while true: - poll() -``` - -You can also run it until a certain future is completed, with `waitFor()` which -will also call `Future.read()` on it: - -```nim -proc p(): Future[int] {.async.} = - await sleepAsync(100.milliseconds) - return 1 - -echo waitFor p() # prints "1" -``` - -`waitFor()` is defined like this: - -```nim -proc waitFor*[T](fut: Future[T]): T = - while not(fut.finished()): - poll() - return fut.read() -``` - -### Async procedures and methods - -The `{.async.}` pragma will transform a procedure (or a method) returning a -specialised `Future` type into a closure iterator. If there is no return type -specified, a `Future[void]` is returned. - -```nim -proc p() {.async.} = - await sleepAsync(100.milliseconds) - -echo p().type # prints "Future[system.void]" -``` - -Whenever `await` is encountered inside an async procedure, control is passed -back to the dispatcher for as many steps as it's necessary for the awaited -future to complete successfully, fail or be cancelled. `await` calls the -equivalent of `Future.read()` on the completed future and returns the -encapsulated value. - -```nim -proc p1() {.async.} = - await sleepAsync(1.seconds) - -proc p2() {.async.} = - await sleepAsync(1.seconds) - -proc p3() {.async.} = - let - fut1 = p1() - fut2 = p2() - # Just by executing the async procs, both resulting futures entered the - # dispatcher's queue and their "clocks" started ticking. - await fut1 - await fut2 - # Only one second passed while awaiting them both, not two. - -waitFor p3() -``` - -Don't let `await`'s behaviour of giving back control to the dispatcher surprise -you. If an async procedure modifies global state, and you can't predict when it -will start executing, the only way to avoid that state changing underneath your -feet, in a certain section, is to not use `await` in it. - -### Error handling - -Exceptions inheriting from [`CatchableError`](https://nim-lang.org/docs/system.html#CatchableError) -interrupt execution of the `async` procedure. The exception is placed in the -`Future.error` field while changing the status of the `Future` to `Failed` -and callbacks are scheduled. - -When a future is awaited, the exception is re-raised, traversing the `async` -execution chain until handled. - -```nim -proc p1() {.async.} = - await sleepAsync(1.seconds) - raise newException(ValueError, "ValueError inherits from CatchableError") - -proc p2() {.async.} = - await sleepAsync(1.seconds) - -proc p3() {.async.} = - let - fut1 = p1() - fut2 = p2() - await fut1 - echo "unreachable code here" - await fut2 - -# `waitFor()` would call `Future.read()` unconditionally, which would raise the -# exception in `Future.error`. 
-let fut3 = p3() -while not(fut3.finished()): - poll() - -echo "fut3.state = ", fut3.state # "Failed" -if fut3.failed(): - echo "p3() failed: ", fut3.error.name, ": ", fut3.error.msg - # prints "p3() failed: ValueError: ValueError inherits from CatchableError" -``` - -You can put the `await` in a `try` block, to deal with that exception sooner: - -```nim -proc p3() {.async.} = - let - fut1 = p1() - fut2 = p2() - try: - await fut1 - except CachableError: - echo "p1() failed: ", fut1.error.name, ": ", fut1.error.msg - echo "reachable code here" - await fut2 -``` - -Because `chronos` ensures that all exceptions are re-routed to the `Future`, -`poll` will not itself raise exceptions. - -`poll` may still panic / raise `Defect` if such are raised in user code due to -undefined behavior. - -#### Checked exceptions - -By specifying a `raises` list to an async procedure, you can check which -exceptions can be raised by it: - -```nim -proc p1(): Future[void] {.async: (raises: [IOError]).} = - assert not (compiles do: raise newException(ValueError, "uh-uh")) - raise newException(IOError, "works") # Or any child of IOError - -proc p2(): Future[void] {.async, (raises: [IOError]).} = - await p1() # Works, because await knows that p1 - # can only raise IOError -``` - -Under the hood, the return type of `p1` will be rewritten to an internal type -which will convey raises informations to `await`. - -#### The `Exception` type - -Exceptions deriving from `Exception` are not caught by default as these may -include `Defect` and other forms undefined or uncatchable behavior. - -Because exception effect tracking is turned on for `async` functions, this may -sometimes lead to compile errors around forward declarations, methods and -closures as Nim conservatively asssumes that any `Exception` might be raised -from those. - -Make sure to excplicitly annotate these with `{.raises.}`: - -```nim -# Forward declarations need to explicitly include a raises list: -proc myfunction() {.raises: [ValueError].} - -# ... as do `proc` types -type MyClosure = proc() {.raises: [ValueError].} - -proc myfunction() = - raise (ref ValueError)(msg: "Implementation here") - -let closure: MyClosure = myfunction -``` - -For compatibility, `async` functions can be instructed to handle `Exception` as -well, specifying `handleException: true`. `Exception` that is not a `Defect` and -not a `CatchableError` will then be caught and remapped to -`AsyncExceptionError`: - -```nim -proc raiseException() {.async: (handleException: true, raises: [AsyncExceptionError]).} = - raise (ref Exception)(msg: "Raising Exception is UB") - -proc callRaiseException() {.async: (raises: []).} = - try: - raiseException() - except AsyncExceptionError as exc: - # The original Exception is available from the `parent` field - echo exc.parent.msg -``` - -This mode can be enabled globally with `-d:chronosHandleException` as a help -when porting code to `chronos` but should generally be avoided as global -configuration settings may interfere with libraries that use `chronos` leading -to unexpected behavior. - -### Raw functions - -Raw functions are those that interact with `chronos` via the `Future` type but -whose body does not go through the async transformation. 
- -Such functions are created by adding `raw: true` to the `async` parameters: - -```nim -proc rawAsync(): Future[void] {.async: (raw: true).} = - let future = newFuture[void]("rawAsync") - future.complete() - return future -``` - -Raw functions must not raise exceptions directly - they are implicitly declared -as `raises: []` - instead they should store exceptions in the returned `Future`: - -```nim -proc rawFailure(): Future[void] {.async: (raw: true).} = - let future = newFuture[void]("rawAsync") - future.fail((ref ValueError)(msg: "Oh no!")) - return future -``` - -Raw functions can also use checked exceptions: - -```nim -proc rawAsyncRaises(): Future[void] {.async: (raw: true, raises: [IOError]).} = - let fut = newFuture[void]() - assert not (compiles do: fut.fail((ref ValueError)(msg: "uh-uh"))) - fut.fail((ref IOError)(msg: "IO")) - return fut -``` - -### Callbacks and closures - -Callback/closure types are declared using the `async` annotation as usual: - -```nim -type MyCallback = proc(): Future[void] {.async.} - -proc runCallback(cb: MyCallback) {.async: (raises: []).} = - try: - await cb() - except CatchableError: - discard # handle errors as usual -``` - -When calling a callback, it is important to remember that the given function -may raise and exceptions need to be handled. - -Checked exceptions can be used to limit the exceptions that a callback can -raise: - -```nim -type MyEasyCallback = proc: Future[void] {.async: (raises: []).} - -proc runCallback(cb: MyEasyCallback) {.async: (raises: [])} = - await cb() -``` - -### Platform independence - -Several functions in `chronos` are backed by the operating system, such as -waiting for network events, creating files and sockets etc. The specific -exceptions that are raised by the OS is platform-dependent, thus such functions -are declared as raising `CatchableError` but will in general raise something -more specific. In particular, it's possible that some functions that are -annotated as raising `CatchableError` only raise on _some_ platforms - in order -to work on all platforms, calling code must assume that they will raise even -when they don't seem to do so on one platform. - -### Cancellation support - -Any running `Future` can be cancelled. This can be used for timeouts, -to let a user cancel a running task, to start multiple futures in parallel -and cancel them as soon as one finishes, etc. +and start using it: ```nim import chronos/apps/http/httpclient -proc cancellationExample() {.async.} = - # Simple cancellation - let future = sleepAsync(10.minutes) - future.cancelSoon() - # `cancelSoon` will not wait for the cancellation - # to be finished, so the Future could still be - # pending at this point. - - # Wait for cancellation - let future2 = sleepAsync(10.minutes) - await future2.cancelAndWait() - # Using `cancelAndWait`, we know that future2 isn't - # pending anymore. 
However, it could have completed - # before cancellation happened (in which case, it - # will hold a value) - - # Race between futures - proc retrievePage(uri: string): Future[string] {.async.} = - let httpSession = HttpSessionRef.new() - try: - let resp = await httpSession.fetch(parseUri(uri)) - return bytesToString(resp.data) - finally: - # be sure to always close the session - # `finally` will run also during cancellation - - # `noCancel` ensures that `closeWait` doesn't get cancelled - await noCancel(httpSession.closeWait()) - - let - futs = - @[ - retrievePage("https://duckduckgo.com/?q=chronos"), - retrievePage("https://www.google.fr/search?q=chronos") - ] - - let finishedFut = await one(futs) - for fut in futs: - if not fut.finished: - fut.cancelSoon() - echo "Result: ", await finishedFut - -waitFor(cancellationExample()) -``` - -Even if cancellation is initiated, it is not guaranteed that -the operation gets cancelled - the future might still be completed -or fail depending on the ordering of events and the specifics of -the operation. - -If the future indeed gets cancelled, `await` will raise a -`CancelledError` as is likely to happen in the following example: -```nim -proc c1 {.async.} = - echo "Before sleep" +proc retrievePage(uri: string): Future[string] {.async.} = + # Create a new HTTP session + let httpSession = HttpSessionRef.new() try: - await sleepAsync(10.minutes) - echo "After sleep" # not reach due to cancellation - except CancelledError as exc: - echo "We got cancelled!" - raise exc - -proc c2 {.async.} = - await c1() - echo "Never reached, since the CancelledError got re-raised" + # Fetch page contents + let resp = await httpSession.fetch(parseUri(uri)) + # Convert response to a string, assuming its encoding matches the terminal! + bytesToString(resp.data) + finally: # Close the session + await noCancel(httpSession.closeWait()) -let work = c2() -waitFor(work.cancelAndWait()) +echo waitFor retrievePage( + "https://raw.githubusercontent.com/status-im/nim-chronos/master/README.md") ``` -The `CancelledError` will now travel up the stack like any other exception. -It can be caught and handled (for instance, freeing some resources) - -### Multiple async backend support - -Thanks to its powerful macro support, Nim allows `async`/`await` to be -implemented in libraries with only minimal support from the language - as such, -multiple `async` libraries exist, including `chronos` and `asyncdispatch`, and -more may come to be developed in the futures. - -Libraries built on top of `async`/`await` may wish to support multiple async -backends - the best way to do so is to create separate modules for each backend -that may be imported side-by-side - see [nim-metrics](https://github.com/status-im/nim-metrics/blob/master/metrics/) -for an example. - -An alternative way is to select backend using a global compile flag - this -method makes it diffucult to compose applications that use both backends as may -happen with transitive dependencies, but may be appropriate in some cases - -libraries choosing this path should call the flag `asyncBackend`, allowing -applications to choose the backend with `-d:asyncBackend=`. 
- -Known `async` backends include: +## Documentation -* `chronos` - this library (`-d:asyncBackend=chronos`) -* `asyncdispatch` the standard library `asyncdispatch` [module](https://nim-lang.org/docs/asyncdispatch.html) (`-d:asyncBackend=asyncdispatch`) -* `none` - ``-d:asyncBackend=none`` - disable ``async`` support completely +See the [user guide](https://status-im.github.io/nim-chronos/). -``none`` can be used when a library supports both a synchronous and -asynchronous API, to disable the latter. +## Projects using `chronos` -### Compile-time configuration +* [libp2p](https://github.com/status-im/nim-libp2p) - Peer-to-Peer networking stack implemented in many languages +* [presto](https://github.com/status-im/nim-presto) - REST API framework +* [Scorper](https://github.com/bung87/scorper) - Web framework +* [2DeFi](https://github.com/gogolxdong/2DeFi) - Decentralised file system +* [websock](https://github.com/status-im/nim-websock/) - WebSocket library with lots of features -`chronos` contains several compile-time [configuration options](./chronos/config.nim) enabling stricter compile-time checks and debugging helpers whose runtime cost may be significant. +`chronos` is available in the [Nim Playground](https://play.nim-lang.org/#ix=2TpS) -Strictness options generally will become default in future chronos releases and allow adapting existing code without changing the new version - see the [`config.nim`](./chronos/config.nim) module for more information. +Submit a PR to add yours! ## TODO - * Pipe/Subprocess Transports. * Multithreading Stream/Datagram servers ## Contributing @@ -470,10 +75,6 @@ When submitting pull requests, please add test cases for any new features or fix `chronos` follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/). 
-## Other resources - -* [Historical differences with asyncdispatch](https://github.com/status-im/nim-chronos/wiki/AsyncDispatch-comparison) - ## License Licensed and distributed under either of diff --git a/chronos.nim b/chronos.nim index 8295924dd..c044f425c 100644 --- a/chronos.nim +++ b/chronos.nim @@ -5,5 +5,10 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) + +## `async`/`await` framework for [Nim](https://nim-lang.org) +## +## See https://status-im.github.io/nim-chronos/ for documentation + import chronos/[asyncloop, asyncsync, handles, transport, timer, debugutils] export asyncloop, asyncsync, handles, transport, timer, debugutils diff --git a/chronos.nimble b/chronos.nimble index b2fdb3ae6..e43588329 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -14,7 +14,7 @@ requires "nim >= 1.6.0", "httputils", "unittest2" -import os +import os, strutils let nimc = getEnv("NIMC", "nim") # Which nim compiler to use let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js) @@ -46,12 +46,19 @@ proc run(args, path: string) = build args, path exec "build/" & path.splitPath[1] +task examples, "Build examples": + # Build book examples + for file in listFiles("docs/examples"): + if file.endsWith(".nim"): + build "", file + task test, "Run all tests": for args in testArguments: run args, "tests/testall" if (NimMajor, NimMinor) > (1, 6): run args & " --mm:refc", "tests/testall" + task test_libbacktrace, "test with libbacktrace": var allArgs = @[ "-d:release --debugger:native -d:chronosStackTrace -d:nimStackTraceOverride --import:libbacktrace", @@ -59,3 +66,7 @@ task test_libbacktrace, "test with libbacktrace": for args in allArgs: run args, "tests/testall" + +task docs, "Generate API documentation": + exec "mdbook build docs" + exec nimc & " doc " & "--git.url:https://github.com/status-im/nim-chronos --git.commit:master --outdir:docs/book/api --project chronos" diff --git a/chronos/asyncloop.nim b/chronos/asyncloop.nim index 428252c71..7204226ba 100644 --- a/chronos/asyncloop.nim +++ b/chronos/asyncloop.nim @@ -10,124 +10,6 @@ {.push raises: [].} -## Chronos -## ************* -## -## This module implements asynchronous IO. This includes a dispatcher, -## a ``Future`` type implementation, and an ``async`` macro which allows -## asynchronous code to be written in a synchronous style with the ``await`` -## keyword. -## -## The dispatcher acts as a kind of event loop. You must call ``poll`` on it -## (or a function which does so for you such as ``waitFor`` or ``runForever``) -## in order to poll for any outstanding events. The underlying implementation -## is based on epoll on Linux, IO Completion Ports on Windows and select on -## other operating systems. -## -## The ``poll`` function will not, on its own, return any events. Instead -## an appropriate ``Future`` object will be completed. A ``Future`` is a -## type which holds a value which is not yet available, but which *may* be -## available in the future. You can check whether a future is finished -## by using the ``finished`` function. When a future is finished it means that -## either the value that it holds is now available or it holds an error instead. -## The latter situation occurs when the operation to complete a future fails -## with an exception. You can distinguish between the two situations with the -## ``failed`` function. -## -## Future objects can also store a callback procedure which will be called -## automatically once the future completes. 
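As an editorial aside (not part of the removed module documentation), a minimal
sketch of the `poll`/`finished`/`failed` workflow described above; `mightFail`
is a made-up name:

```nim
import chronos

proc mightFail(): Future[int] {.async.} =
  await sleepAsync(10.milliseconds)
  raise newException(ValueError, "boom")

let fut = mightFail()
while not fut.finished():
  poll() # run the dispatcher one step at a time

if fut.failed():
  echo "failed: ", fut.error.msg # prints "failed: boom"
else:
  echo "value: ", fut.read()
```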
-## -## Futures therefore can be thought of as an implementation of the proactor -## pattern. In this -## pattern you make a request for an action, and once that action is fulfilled -## a future is completed with the result of that action. Requests can be -## made by calling the appropriate functions. For example: calling the ``recv`` -## function will create a request for some data to be read from a socket. The -## future which the ``recv`` function returns will then complete once the -## requested amount of data is read **or** an exception occurs. -## -## Code to read some data from a socket may look something like this: -## -## .. code-block::nim -## var future = socket.recv(100) -## future.addCallback( -## proc () = -## echo(future.read) -## ) -## -## All asynchronous functions returning a ``Future`` will not block. They -## will not however return immediately. An asynchronous function will have -## code which will be executed before an asynchronous request is made, in most -## cases this code sets up the request. -## -## In the above example, the ``recv`` function will return a brand new -## ``Future`` instance once the request for data to be read from the socket -## is made. This ``Future`` instance will complete once the requested amount -## of data is read, in this case it is 100 bytes. The second line sets a -## callback on this future which will be called once the future completes. -## All the callback does is write the data stored in the future to ``stdout``. -## The ``read`` function is used for this and it checks whether the future -## completes with an error for you (if it did it will simply raise the -## error), if there is no error however it returns the value of the future. -## -## Asynchronous procedures -## ----------------------- -## -## Asynchronous procedures remove the pain of working with callbacks. They do -## this by allowing you to write asynchronous code the same way as you would -## write synchronous code. -## -## An asynchronous procedure is marked using the ``{.async.}`` pragma. -## When marking a procedure with the ``{.async.}`` pragma it must have a -## ``Future[T]`` return type or no return type at all. If you do not specify -## a return type then ``Future[void]`` is assumed. -## -## Inside asynchronous procedures ``await`` can be used to call any -## procedures which return a -## ``Future``; this includes asynchronous procedures. When a procedure is -## "awaited", the asynchronous procedure it is awaited in will -## suspend its execution -## until the awaited procedure's Future completes. At which point the -## asynchronous procedure will resume its execution. During the period -## when an asynchronous procedure is suspended other asynchronous procedures -## will be run by the dispatcher. -## -## The ``await`` call may be used in many contexts. It can be used on the right -## hand side of a variable declaration: ``var data = await socket.recv(100)``, -## in which case the variable will be set to the value of the future -## automatically. It can be used to await a ``Future`` object, and it can -## be used to await a procedure returning a ``Future[void]``: -## ``await socket.send("foobar")``. -## -## If an awaited future completes with an error, then ``await`` will re-raise -## this error. -## -## Handling Exceptions -## ------------------- -## -## The ``async`` procedures also offer support for the try statement. -## -## .. 
code-block:: Nim -## try: -## let data = await sock.recv(100) -## echo("Received ", data) -## except CancelledError as exc: -## # Handle exc -## -## Discarding futures -## ------------------ -## -## Futures should **never** be discarded. This is because they may contain -## errors. If you do not care for the result of a Future then you should -## use the ``asyncSpawn`` procedure instead of the ``discard`` keyword. -## ``asyncSpawn`` will transform any exception thrown by the called procedure -## to a Defect -## -## Limitations/Bugs -## ---------------- -## -## * The effect system (``raises: []``) does not work with async procedures. - import ./internal/[asyncengine, asyncfutures, asyncmacro, errors] export asyncfutures, asyncengine, errors diff --git a/chronos/internal/asyncengine.nim b/chronos/internal/asyncengine.nim index 578bfdf96..d794f72b6 100644 --- a/chronos/internal/asyncengine.nim +++ b/chronos/internal/asyncengine.nim @@ -10,6 +10,10 @@ {.push raises: [].} +## This module implements the core asynchronous engine / dispatcher. +## +## For more information, see the `Concepts` chapter of the guide. + from nativesockets import Port import std/[tables, heapqueue, deques] import results diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 000000000..7585238ef --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1 @@ +book diff --git a/docs/book.toml b/docs/book.toml new file mode 100644 index 000000000..570b8f48a --- /dev/null +++ b/docs/book.toml @@ -0,0 +1,20 @@ +[book] +authors = ["Jacek Sieka"] +language = "en" +multilingual = false +src = "src" +title = "Chronos" + +[preprocessor.toc] +command = "mdbook-toc" +renderer = ["html"] +max-level = 2 + +[preprocessor.open-on-gh] +command = "mdbook-open-on-gh" +renderer = ["html"] + +[output.html] +git-repository-url = "https://github.com/status-im/nim-chronos/" +git-branch = "master" +additional-css = ["open-in.css"] diff --git a/docs/examples/cancellation.nim b/docs/examples/cancellation.nim new file mode 100644 index 000000000..5feec31fc --- /dev/null +++ b/docs/examples/cancellation.nim @@ -0,0 +1,21 @@ +## Simple cancellation example + +import chronos + +proc someTask() {.async.} = await sleepAsync(10.minutes) + +proc cancellationExample() {.async.} = + # Start a task but don't wait for it to finish + let future = someTask() + future.cancelSoon() + # `cancelSoon` schedules but does not wait for the future to get cancelled - + # it might still be pending here + + let future2 = someTask() # Start another task concurrently + await future2.cancelAndWait() + # Using `cancelAndWait`, we can be sure that `future2` is either + # complete, failed or cancelled at this point. `future` could still be + # pending! + assert future2.finished() + +waitFor(cancellationExample()) diff --git a/docs/examples/discards.nim b/docs/examples/discards.nim new file mode 100644 index 000000000..990acfc5a --- /dev/null +++ b/docs/examples/discards.nim @@ -0,0 +1,28 @@ +## The peculiarities of `discard` in `async` procedures +import chronos + +proc failingOperation() {.async.} = + echo "Raising!" + raise (ref ValueError)(msg: "My error") + +proc myApp() {.async.} = + # This style of discard causes the `ValueError` to be discarded, hiding the + # failure of the operation - avoid! + discard failingOperation() + + proc runAsTask(fut: Future[void]): Future[void] {.async: (raises: []).} = + # runAsTask uses `raises: []` to ensure at compile-time that no exceptions + # escape it! + try: + await fut + except CatchableError as exc: + echo "The task failed! 
", exc.msg + + # asyncSpawn ensures that errors don't leak unnoticed from tasks without + # blocking: + asyncSpawn runAsTask(failingOperation()) + + # If we didn't catch the exception with `runAsTask`, the program will crash: + asyncSpawn failingOperation() + +waitFor myApp() diff --git a/docs/examples/httpget.nim b/docs/examples/httpget.nim new file mode 100644 index 000000000..4ddf04a79 --- /dev/null +++ b/docs/examples/httpget.nim @@ -0,0 +1,15 @@ +import chronos/apps/http/httpclient + +proc retrievePage*(uri: string): Future[string] {.async.} = + # Create a new HTTP session + let httpSession = HttpSessionRef.new() + try: + # Fetch page contents + let resp = await httpSession.fetch(parseUri(uri)) + # Convert response to a string, assuming its encoding matches the terminal! + bytesToString(resp.data) + finally: # Close the session + await noCancel(httpSession.closeWait()) + +echo waitFor retrievePage( + "https://raw.githubusercontent.com/status-im/nim-chronos/master/README.md") diff --git a/docs/examples/nim.cfg b/docs/examples/nim.cfg new file mode 100644 index 000000000..80e5d9bc7 --- /dev/null +++ b/docs/examples/nim.cfg @@ -0,0 +1 @@ +path = "../.." \ No newline at end of file diff --git a/docs/examples/timeoutcomposed.nim b/docs/examples/timeoutcomposed.nim new file mode 100644 index 000000000..8533af57e --- /dev/null +++ b/docs/examples/timeoutcomposed.nim @@ -0,0 +1,25 @@ +## Single timeout for several operations +import chronos + +proc shortTask {.async.} = + try: + await sleepAsync(1.seconds) + except CancelledError as exc: + echo "Short task was cancelled!" + raise exc # Propagate cancellation to the next operation + +proc composedTimeout() {.async.} = + let + # Common timout for several sub-tasks + timeout = sleepAsync(10.seconds) + + while not timeout.finished(): + let task = shortTask() # Start a task but don't `await` it + if (await race(task, timeout)) == task: + echo "Ran one more task" + else: + # This cancellation may or may not happen as task might have finished + # right at the timeout! + task.cancelSoon() + +waitFor composedTimeout() diff --git a/docs/examples/timeoutsimple.nim b/docs/examples/timeoutsimple.nim new file mode 100644 index 000000000..ce6a12a92 --- /dev/null +++ b/docs/examples/timeoutsimple.nim @@ -0,0 +1,20 @@ +## Simple timeouts +import chronos + +proc longTask {.async.} = + try: + await sleepAsync(10.minutes) + except CancelledError as exc: + echo "Long task was cancelled!" + raise exc # Propagate cancellation to the next operation + +proc simpleTimeout() {.async.} = + let + task = longTask() # Start a task but don't `await` it + + if not await task.withTimeout(1.seconds): + echo "Timeout reached - withTimeout should have cancelled the task" + else: + echo "Task completed" + +waitFor simpleTimeout() diff --git a/docs/examples/twogets.nim b/docs/examples/twogets.nim new file mode 100644 index 000000000..00ebab4d3 --- /dev/null +++ b/docs/examples/twogets.nim @@ -0,0 +1,24 @@ +## Make two http requests concurrently and output the one that wins + +import chronos +import ./httpget + +proc twoGets() {.async.} = + let + futs = @[ + # Both pages will start downloading concurrently... + httpget.retrievePage("https://duckduckgo.com/?q=chronos"), + httpget.retrievePage("https://www.google.fr/search?q=chronos") + ] + + # Wait for at least one request to finish.. 
+  let winner = await one(futs)
+  # ..and cancel the others since we won't need them
+  for fut in futs:
+    # Trying to cancel an already-finished future is harmless
+    fut.cancelSoon()
+
+  # An exception could be raised here if the winning request failed!
+  echo "Result: ", winner.read()
+
+waitFor(twoGets())
diff --git a/docs/open-in.css b/docs/open-in.css
new file mode 100644
index 000000000..aeb951f09
--- /dev/null
+++ b/docs/open-in.css
@@ -0,0 +1,7 @@
+footer {
+  font-size: 0.8em;
+  text-align: center;
+  border-top: 1px solid black;
+  padding: 5px 0;
+}
+
diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md
new file mode 100644
index 000000000..186fadd08
--- /dev/null
+++ b/docs/src/SUMMARY.md
@@ -0,0 +1,10 @@
+- [Introduction](./introduction.md)
+- [Getting started](./getting_started.md)
+
+# User guide
+
+- [Core concepts](./concepts.md)
+- [`async` functions](./async_procs.md)
+- [Errors and exceptions](./error_handling.md)
+- [Tips, tricks and best practices](./tips.md)
+- [Porting code to `chronos`](./porting.md)
diff --git a/docs/src/async_procs.md b/docs/src/async_procs.md
new file mode 100644
index 000000000..ae8eb51bf
--- /dev/null
+++ b/docs/src/async_procs.md
@@ -0,0 +1,112 @@
+# Async procedures
+
+<!-- toc -->
+
+## The `async` pragma
+
+The `{.async.}` pragma will transform a procedure (or a method) returning a
+`Future` into a closure iterator. If there is no return type specified,
+`Future[void]` is returned.
+
+```nim
+proc p() {.async.} =
+  await sleepAsync(100.milliseconds)
+
+echo p().type # prints "Future[system.void]"
+```
+
+## `await` keyword
+
+Whenever `await` is encountered inside an async procedure, control is given
+back to the dispatcher for as many steps as necessary for the awaited
+future to complete, fail or be cancelled. `await` calls the
+equivalent of `Future.read()` on the completed future and returns the
+encapsulated value.
+
+```nim
+proc p1() {.async.} =
+  await sleepAsync(1.seconds)
+
+proc p2() {.async.} =
+  await sleepAsync(1.seconds)
+
+proc p3() {.async.} =
+  let
+    fut1 = p1()
+    fut2 = p2()
+  # Just by executing the async procs, both resulting futures entered the
+  # dispatcher queue and their "clocks" started ticking.
+  await fut1
+  await fut2
+  # Only one second passed while awaiting them both, not two.
+
+waitFor p3()
+```
+
+```admonish warning
+Because `async` procedures are executed concurrently, they are subject to many
+of the same risks that typically accompany multithreaded programming.
+
+In particular, if two `async` procedures have access to the same mutable state,
+the value before and after `await` might not be the same, as the order of
+execution is not guaranteed!
+```
+
+## Raw functions
+
+Raw functions are those that interact with `chronos` via the `Future` type but
+whose body does not go through the async transformation.
+
+Such functions are created by adding `raw: true` to the `async` parameters:
+
+```nim
+proc rawAsync(): Future[void] {.async: (raw: true).} =
+  let fut = newFuture[void]("rawAsync")
+  fut.complete()
+  fut
+```
+
+Raw functions must not raise exceptions directly - they are implicitly declared
+as `raises: []` - instead they should store exceptions in the returned `Future`:
+
+```nim
+proc rawFailure(): Future[void] {.async: (raw: true).} =
+  let fut = newFuture[void]("rawAsync")
+  fut.fail((ref ValueError)(msg: "Oh no!"))
+  fut
+```
+
+Raw functions can also use checked exceptions:
+
+```nim
+proc rawAsyncRaises(): Future[void] {.async: (raw: true, raises: [IOError]).} =
+  let fut = newFuture[void]()
+  assert not (compiles do: fut.fail((ref ValueError)(msg: "uh-uh")))
+  fut.fail((ref IOError)(msg: "IO"))
+  fut
+```
+
+## Callbacks and closures
+
+Callback/closure types are declared using the `async` annotation as usual:
+
+```nim
+type MyCallback = proc(): Future[void] {.async.}
+
+proc runCallback(cb: MyCallback) {.async: (raises: []).} =
+  try:
+    await cb()
+  except CatchableError:
+    discard # handle errors as usual
+```
+
+When calling a callback, it is important to remember that it may raise
+exceptions that need to be handled.
+
+Checked exceptions can be used to limit the exceptions that a callback can
+raise:
+
+```nim
+type MyEasyCallback = proc(): Future[void] {.async: (raises: []).}
+
+proc runCallback(cb: MyEasyCallback) {.async: (raises: []).} =
+  await cb()
+```
diff --git a/docs/src/concepts.md b/docs/src/concepts.md
new file mode 100644
index 000000000..fcc33afce
--- /dev/null
+++ b/docs/src/concepts.md
@@ -0,0 +1,126 @@
+# Concepts
+
+<!-- toc -->
+
+## The dispatcher
+
+Async/await programming relies on cooperative multitasking to coordinate the
+concurrent execution of procedures, using event notifications from the
+operating system to resume execution.
+
+The event handler loop is called a "dispatcher" and a single instance per
+thread is created as soon as one is needed.
+
+Scheduling is done by calling [async procedures](./async_procs.md) that return
+`Future` objects - each time a procedure is unable to make further
+progress, for example because it's waiting for some data to arrive, it hands
+control back to the dispatcher which ensures that the procedure is resumed when
+ready.
+
+## The `Future` type
+
+`Future` objects encapsulate the outcome of executing an `async` procedure. The
+`Future` may be `pending`, meaning that the outcome is not yet known, or
+`finished`, meaning that the return value is available, the operation failed
+with an exception or was cancelled.
+
+Inside an async procedure, you can `await` the outcome of another async
+procedure - if the `Future` representing that operation is still `pending`, a
+callback representing where to resume execution will be added to it and the
+dispatcher will be given back control to deal with other tasks.
+
+When a `Future` is `finished`, all its callbacks are scheduled to be run by
+the dispatcher, thus continuing any operations that were waiting for an outcome.
+
+## The `poll` call
+
+To trigger the processing step of the dispatcher, we need to call `poll()` -
+either directly or through a wrapper like `runForever()` or `waitFor()`.
+
+Each call to `poll` handles any file descriptors, timers and callbacks that are
+ready to be processed.
+
+Using `waitFor`, the result of a single asynchronous operation can be obtained:
+
+```nim
+proc myApp() {.async.} =
+  echo "Waiting for a second..."
+  await sleepAsync(1.seconds)
+  echo "done!"
+
+waitFor myApp()
+```
+
+It is also possible to keep running the event loop forever using `runForever`:
+
+```nim
+proc myApp() {.async.} =
+  while true:
+    await sleepAsync(1.seconds)
+    echo "A bit more than a second passed!"
+
+let future = myApp()
+runForever()
+```
+
+Such an application never terminates, thus it is rare that applications are
+structured this way.
+
+```admonish warning
+Both `waitFor` and `runForever` call `poll` which offers fine-grained
+control over the event loop steps.
+
+Nested calls to `poll`, `waitFor` and `runForever` are not allowed.
+```
+
+## Cancellation
+
+Any pending `Future` can be cancelled. This can be used for timeouts, to start
+multiple operations in parallel and cancel the rest as soon as one finishes,
+to initiate the orderly shutdown of an application, etc.
+
+```nim
+{{#include ../examples/cancellation.nim}}
+```
+
+Even if cancellation is initiated, it is not guaranteed that the operation gets
+cancelled - the future might still be completed or fail depending on the
+order of events in the dispatcher and the specifics of the operation.
+
+If the future indeed gets cancelled, `await` will raise a
+`CancelledError`, as is likely to happen in the following example:
+
+```nim
+proc c1 {.async.} =
+  echo "Before sleep"
+  try:
+    await sleepAsync(10.minutes)
+    echo "After sleep" # not reached due to cancellation
+  except CancelledError as exc:
+    echo "We got cancelled!"
+    # `CancelledError` is typically re-raised to notify the caller that the
+    # operation is being cancelled
+    raise exc
+
+proc c2 {.async.} =
+  await c1()
+  echo "Never reached, since the CancelledError got re-raised"
+
+let work = c2()
+waitFor(work.cancelAndWait())
+```
+
+The `CancelledError` will now travel up the stack like any other exception.
+It can be caught and handled (for instance, freeing some resources).
+
+Cancelling an already-finished `Future` has no effect, as the following example
+of downloading two web pages concurrently shows:
+
+```nim
+{{#include ../examples/twogets.nim}}
+```
+
+## Compile-time configuration
+
+`chronos` contains several compile-time [configuration options](./chronos/config.nim) enabling stricter compile-time checks and debugging helpers whose runtime cost may be significant.
+
+Strictness options will generally become the default in future chronos releases, allowing existing code to be adapted ahead of the change - see the [`config.nim`](./chronos/config.nim) module for more information.
diff --git a/docs/src/error_handling.md b/docs/src/error_handling.md
new file mode 100644
index 000000000..be06a3555
--- /dev/null
+++ b/docs/src/error_handling.md
@@ -0,0 +1,134 @@
+# Errors and exceptions
+
+<!-- toc -->
+
+## Exceptions
+
+Exceptions inheriting from [`CatchableError`](https://nim-lang.org/docs/system.html#CatchableError)
+interrupt execution of an `async` procedure. The exception is placed in the
+`Future.error` field, the status of the `Future` changes to `Failed`
+and its callbacks are scheduled.
+
+When a future is read or awaited the exception is re-raised, traversing the
+`async` execution chain until handled.
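+
+As a minimal sketch of this re-raise behaviour (the procedure names below are
+purely illustrative):
+
+```nim
+import chronos
+
+proc fails() {.async.} =
+  raise newException(ValueError, "boom")
+
+proc caller() {.async.} =
+  try:
+    # The `ValueError` stored in the failed future is re-raised by `await`
+    await fails()
+  except ValueError as exc:
+    echo "caught: ", exc.msg
+
+waitFor caller()
+```
+
+The longer example below shows the same mechanism when the future is driven to
+completion with manual `poll()` calls and inspected through the `Future` API: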
+
+```nim
+proc p1() {.async.} =
+  await sleepAsync(1.seconds)
+  raise newException(ValueError, "ValueError inherits from CatchableError")
+
+proc p2() {.async.} =
+  await sleepAsync(1.seconds)
+
+proc p3() {.async.} =
+  let
+    fut1 = p1()
+    fut2 = p2()
+  await fut1
+  echo "unreachable code here"
+  await fut2
+
+# `waitFor()` would call `Future.read()` unconditionally, which would raise the
+# exception in `Future.error`.
+let fut3 = p3()
+while not(fut3.finished()):
+  poll()
+
+echo "fut3.state = ", fut3.state # "Failed"
+if fut3.failed():
+  echo "p3() failed: ", fut3.error.name, ": ", fut3.error.msg
+  # prints "p3() failed: ValueError: ValueError inherits from CatchableError"
+```
+
+You can put the `await` in a `try` block to deal with that exception sooner:
+
+```nim
+proc p3() {.async.} =
+  let
+    fut1 = p1()
+    fut2 = p2()
+  try:
+    await fut1
+  except CatchableError:
+    echo "p1() failed: ", fut1.error.name, ": ", fut1.error.msg
+  echo "reachable code here"
+  await fut2
+```
+
+Because `chronos` ensures that all exceptions are re-routed to the `Future`,
+`poll` will not itself raise exceptions.
+
+`poll` may still panic / raise `Defect` if a `Defect` is raised in user code
+due to undefined behavior.
+
+## Checked exceptions
+
+By specifying a `raises` list to an async procedure, you can check which
+exceptions can be raised by it:
+
+```nim
+proc p1(): Future[void] {.async: (raises: [IOError]).} =
+  assert not (compiles do: raise newException(ValueError, "uh-uh"))
+  raise newException(IOError, "works") # Or any child of IOError
+
+proc p2(): Future[void] {.async: (raises: [IOError]).} =
+  await p1() # Works, because await knows that p1
+             # can only raise IOError
+```
+
+Under the hood, the return type of `p1` will be rewritten to an internal type
+which will convey raises information to `await`.
+
+```admonish note
+Most `async` procedures include `CancelledError` in the list of `raises`,
+indicating that the operation they implement might get cancelled, resulting in
+neither value nor error!
+```
+
+## The `Exception` type
+
+Exceptions deriving from `Exception` are not caught by default as these may
+include `Defect` and other forms of undefined or uncatchable behavior.
+
+Because exception effect tracking is turned on for `async` functions, this may
+sometimes lead to compile errors around forward declarations, methods and
+closures as Nim conservatively assumes that any `Exception` might be raised
+from those.
+
+Make sure to explicitly annotate these with `{.raises.}`:
+
+```nim
+# Forward declarations need to explicitly include a raises list:
+proc myfunction() {.raises: [ValueError].}
+
+# ... as do `proc` types
+type MyClosure = proc() {.raises: [ValueError].}
+
+proc myfunction() =
+  raise (ref ValueError)(msg: "Implementation here")
+
+let closure: MyClosure = myfunction
+```
+
+For compatibility, `async` functions can be instructed to handle `Exception` as
+well, specifying `handleException: true`. An `Exception` that is not a `Defect` and
+not a `CatchableError` will then be caught and remapped to
+`AsyncExceptionError`:
+
+```nim
+proc raiseException() {.async: (handleException: true, raises: [AsyncExceptionError]).} =
+  raise (ref Exception)(msg: "Raising Exception is UB")
+
+proc callRaiseException() {.async: (raises: []).} =
+  try:
+    await raiseException()
+  except AsyncExceptionError as exc:
+    # The original Exception is available from the `parent` field
+    echo exc.parent.msg
+```
+
+This mode can be enabled globally with `-d:chronosHandleException` as a help
+when porting code to `chronos` but should generally be avoided as global
+configuration settings may interfere with libraries that use `chronos`, leading
+to unexpected behavior.
+
diff --git a/docs/src/getting_started.md b/docs/src/getting_started.md
new file mode 100644
index 000000000..809dbca4e
--- /dev/null
+++ b/docs/src/getting_started.md
@@ -0,0 +1,19 @@
+## Getting started
+
+Install `chronos` using `nimble`:
+
+```text
+nimble install chronos
+```
+
+or add a dependency to your `.nimble` file:
+
+```text
+requires "chronos"
+```
+
+and start using it:
+
+```nim
+{{#include ../examples/httpget.nim}}
+```
diff --git a/docs/src/introduction.md b/docs/src/introduction.md
new file mode 100644
index 000000000..9c2a308aa
--- /dev/null
+++ b/docs/src/introduction.md
@@ -0,0 +1,32 @@
+# Introduction
+
+Chronos implements the [async/await](https://en.wikipedia.org/wiki/Async/await)
+paradigm in a self-contained library using macro and closure iterator
+transformation features provided by Nim.
+
+Features include:
+
+* Asynchronous socket and process I/O
+* HTTP server with SSL/TLS support out of the box (no OpenSSL needed)
+* Synchronization primitives like queues, events and locks
+* Cancellation
+* Efficient dispatch pipeline with excellent multi-platform support
+* Exception [effect support](./error_handling.md)
+
+## Platform support
+
+Several platforms are supported, with different backend [options](./concepts.md#compile-time-configuration):
+
+* Windows: [`IOCP`](https://learn.microsoft.com/en-us/windows/win32/fileio/i-o-completion-ports)
+* Linux: [`epoll`](https://en.wikipedia.org/wiki/Epoll) / `poll`
+* OSX / BSD: [`kqueue`](https://en.wikipedia.org/wiki/Kqueue) / `poll`
+* Android / Emscripten / posix: `poll`
+
+## Examples
+
+Examples are available in the [`docs/examples/`](https://github.com/status-im/nim-chronos/tree/master/docs/examples) folder.
+
+## API documentation
+
+This guide covers basic usage of chronos - for details, see the
+[API reference](./api/chronos.html).
diff --git a/docs/src/porting.md b/docs/src/porting.md
new file mode 100644
index 000000000..519de64be
--- /dev/null
+++ b/docs/src/porting.md
@@ -0,0 +1,54 @@
+# Porting code to `chronos` v4
+
+<!-- toc -->
+
+Thanks to its macro support, Nim allows `async`/`await` to be implemented in
+libraries with only minimal support from the language - as such, multiple
+`async` libraries exist, including `chronos` and `asyncdispatch`, and more may
+come to be developed in the future.
+
+## Chronos v3
+
+Chronos v4 introduces new features for IPv6, exception effects, a stand-alone
+`Future` type as well as several other changes - when upgrading from chronos v3,
+here are several things to consider:
+
+* Exception handling is now strict by default - see the [error handling](./error_handling.md)
+  chapter for how to deal with `raises` effects
+* `AsyncEventBus` was removed - use `AsyncEventQueue` instead
+
+## `asyncdispatch`
+
+Projects written for `asyncdispatch` and `chronos` look similar but there are
+several differences to be aware of:
+
+* `chronos` has its own dispatch loop - you can typically not mix `chronos` and
+  `asyncdispatch` in the same thread
+* `import chronos` instead of `import asyncdispatch`
+* cleanup is important - make sure to use `closeWait` to release any resources
+  you're using, or file descriptor leaks and other problems may follow
+* cancellation support means that `CancelledError` may be raised from most
+  `{.async.}` functions
+* Calling `yield` directly in tasks is not supported - instead, use `awaitne`.
+
+## Supporting multiple backends
+
+Libraries built on top of `async`/`await` may wish to support multiple async
+backends - the best way to do so is to create separate modules for each backend
+that may be imported side-by-side - see [nim-metrics](https://github.com/status-im/nim-metrics/blob/master/metrics/)
+for an example.
+
+An alternative way is to select backend using a global compile flag - this
+method makes it difficult to compose applications that use both backends as may
+happen with transitive dependencies, but may be appropriate in some cases -
+libraries choosing this path should call the flag `asyncBackend`, allowing
+applications to choose the backend with `-d:asyncBackend=`.
+
+Known `async` backends include:
+
+* `chronos` - this library (`-d:asyncBackend=chronos`)
+* `asyncdispatch` - the standard library `asyncdispatch` [module](https://nim-lang.org/docs/asyncdispatch.html) (`-d:asyncBackend=asyncdispatch`)
+* `none` - ``-d:asyncBackend=none`` - disable ``async`` support completely
+
+``none`` can be used when a library supports both a synchronous and
+asynchronous API, to disable the latter.
diff --git a/docs/src/tips.md b/docs/src/tips.md
new file mode 100644
index 000000000..627e4641e
--- /dev/null
+++ b/docs/src/tips.md
@@ -0,0 +1,34 @@
+# Tips, tricks and best practices
+
+## Timeouts
+
+To prevent a single task from taking too long, `withTimeout` can be used:
+
+```nim
+{{#include ../examples/timeoutsimple.nim}}
+```
+
+When several tasks should share a single timeout, a common timer can be created
+with `sleepAsync`:
+
+```nim
+{{#include ../examples/timeoutcomposed.nim}}
+```
+
+## `discard`
+
+When calling an asynchronous procedure without `await`, the operation is started
+but its result is not processed until the corresponding `Future` is `read`.
+
+It is therefore important to never `discard` futures directly - instead, one
+can discard the result of awaiting the future or use `asyncSpawn` to monitor
+the outcome of the future as if it were running in a separate thread.
+
+Similar to threads, tasks managed by `asyncSpawn` may cause the application to
+crash if any exceptions leak out of them - use
+[checked exceptions](./error_handling.md#checked-exceptions) to avoid this
+problem.
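+
+When only the effect of an operation matters, discarding the *awaited value*
+(rather than the future itself) is safe, since any exception stored in the
+future is still re-raised - a minimal sketch, with illustrative names:
+
+```nim
+import chronos
+
+proc compute(): Future[int] {.async.} =
+  return 42
+
+proc worker() {.async.} =
+  # The value is dropped, but a failure of `compute` would still raise here
+  discard await compute()
+
+waitFor worker()
+```
+
+The following example shows what can go wrong when the future itself is
+discarded, and how `asyncSpawn` handles task-style usage instead: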
+ +```nim +{{#include ../examples/discards.nim}} +``` diff --git a/docs/theme/highlight.js b/docs/theme/highlight.js new file mode 100644 index 000000000..3256c00ed --- /dev/null +++ b/docs/theme/highlight.js @@ -0,0 +1,53 @@ +/* + Highlight.js 10.1.1 (93fd0d73) + License: BSD-3-Clause + Copyright (c) 2006-2020, Ivan Sagalaev +*/ +var hljs=function(){"use strict";function e(n){Object.freeze(n);var t="function"==typeof n;return Object.getOwnPropertyNames(n).forEach((function(r){!Object.hasOwnProperty.call(n,r)||null===n[r]||"object"!=typeof n[r]&&"function"!=typeof n[r]||t&&("caller"===r||"callee"===r||"arguments"===r)||Object.isFrozen(n[r])||e(n[r])})),n}class n{constructor(e){void 0===e.data&&(e.data={}),this.data=e.data}ignoreMatch(){this.ignore=!0}}function t(e){return e.replace(/&/g,"&").replace(//g,">").replace(/"/g,""").replace(/'/g,"'")}function r(e,...n){var t={};for(const n in e)t[n]=e[n];return n.forEach((function(e){for(const n in e)t[n]=e[n]})),t}function a(e){return e.nodeName.toLowerCase()}var i=Object.freeze({__proto__:null,escapeHTML:t,inherit:r,nodeStream:function(e){var n=[];return function e(t,r){for(var i=t.firstChild;i;i=i.nextSibling)3===i.nodeType?r+=i.nodeValue.length:1===i.nodeType&&(n.push({event:"start",offset:r,node:i}),r=e(i,r),a(i).match(/br|hr|img|input/)||n.push({event:"stop",offset:r,node:i}));return r}(e,0),n},mergeStreams:function(e,n,r){var i=0,s="",o=[];function l(){return e.length&&n.length?e[0].offset!==n[0].offset?e[0].offset"}function u(e){s+=""}function d(e){("start"===e.event?c:u)(e.node)}for(;e.length||n.length;){var g=l();if(s+=t(r.substring(i,g[0].offset)),i=g[0].offset,g===e){o.reverse().forEach(u);do{d(g.splice(0,1)[0]),g=l()}while(g===e&&g.length&&g[0].offset===i);o.reverse().forEach(c)}else"start"===g[0].event?o.push(g[0].node):o.pop(),d(g.splice(0,1)[0])}return s+t(r.substr(i))}});const s="",o=e=>!!e.kind;class l{constructor(e,n){this.buffer="",this.classPrefix=n.classPrefix,e.walk(this)}addText(e){this.buffer+=t(e)}openNode(e){if(!o(e))return;let n=e.kind;e.sublanguage||(n=`${this.classPrefix}${n}`),this.span(n)}closeNode(e){o(e)&&(this.buffer+=s)}value(){return this.buffer}span(e){this.buffer+=``}}class c{constructor(){this.rootNode={children:[]},this.stack=[this.rootNode]}get top(){return this.stack[this.stack.length-1]}get root(){return this.rootNode}add(e){this.top.children.push(e)}openNode(e){const n={kind:e,children:[]};this.add(n),this.stack.push(n)}closeNode(){if(this.stack.length>1)return this.stack.pop()}closeAllNodes(){for(;this.closeNode(););}toJSON(){return JSON.stringify(this.rootNode,null,4)}walk(e){return this.constructor._walk(e,this.rootNode)}static _walk(e,n){return"string"==typeof n?e.addText(n):n.children&&(e.openNode(n),n.children.forEach(n=>this._walk(e,n)),e.closeNode(n)),e}static _collapse(e){"string"!=typeof e&&e.children&&(e.children.every(e=>"string"==typeof e)?e.children=[e.children.join("")]:e.children.forEach(e=>{c._collapse(e)}))}}class u extends c{constructor(e){super(),this.options=e}addKeyword(e,n){""!==e&&(this.openNode(n),this.addText(e),this.closeNode())}addText(e){""!==e&&this.add(e)}addSublanguage(e,n){const t=e.root;t.kind=n,t.sublanguage=!0,this.add(t)}toHTML(){return new l(this,this.options).value()}finalize(){return!0}}function d(e){return e?"string"==typeof e?e:e.source:null}const 
g="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",h={begin:"\\\\[\\s\\S]",relevance:0},f={className:"string",begin:"'",end:"'",illegal:"\\n",contains:[h]},p={className:"string",begin:'"',end:'"',illegal:"\\n",contains:[h]},b={begin:/\b(a|an|the|are|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|they|like|more)\b/},m=function(e,n,t={}){var a=r({className:"comment",begin:e,end:n,contains:[]},t);return a.contains.push(b),a.contains.push({className:"doctag",begin:"(?:TODO|FIXME|NOTE|BUG|OPTIMIZE|HACK|XXX):",relevance:0}),a},v=m("//","$"),x=m("/\\*","\\*/"),E=m("#","$");var _=Object.freeze({__proto__:null,IDENT_RE:"[a-zA-Z]\\w*",UNDERSCORE_IDENT_RE:"[a-zA-Z_]\\w*",NUMBER_RE:"\\b\\d+(\\.\\d+)?",C_NUMBER_RE:g,BINARY_NUMBER_RE:"\\b(0b[01]+)",RE_STARTERS_RE:"!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~",SHEBANG:(e={})=>{const n=/^#![ ]*\//;return e.binary&&(e.begin=function(...e){return e.map(e=>d(e)).join("")}(n,/.*\b/,e.binary,/\b.*/)),r({className:"meta",begin:n,end:/$/,relevance:0,"on:begin":(e,n)=>{0!==e.index&&n.ignoreMatch()}},e)},BACKSLASH_ESCAPE:h,APOS_STRING_MODE:f,QUOTE_STRING_MODE:p,PHRASAL_WORDS_MODE:b,COMMENT:m,C_LINE_COMMENT_MODE:v,C_BLOCK_COMMENT_MODE:x,HASH_COMMENT_MODE:E,NUMBER_MODE:{className:"number",begin:"\\b\\d+(\\.\\d+)?",relevance:0},C_NUMBER_MODE:{className:"number",begin:g,relevance:0},BINARY_NUMBER_MODE:{className:"number",begin:"\\b(0b[01]+)",relevance:0},CSS_NUMBER_MODE:{className:"number",begin:"\\b\\d+(\\.\\d+)?(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?",relevance:0},REGEXP_MODE:{begin:/(?=\/[^/\n]*\/)/,contains:[{className:"regexp",begin:/\//,end:/\/[gimuy]*/,illegal:/\n/,contains:[h,{begin:/\[/,end:/\]/,relevance:0,contains:[h]}]}]},TITLE_MODE:{className:"title",begin:"[a-zA-Z]\\w*",relevance:0},UNDERSCORE_TITLE_MODE:{className:"title",begin:"[a-zA-Z_]\\w*",relevance:0},METHOD_GUARD:{begin:"\\.\\s*[a-zA-Z_]\\w*",relevance:0},END_SAME_AS_BEGIN:function(e){return Object.assign(e,{"on:begin":(e,n)=>{n.data._beginMatch=e[1]},"on:end":(e,n)=>{n.data._beginMatch!==e[1]&&n.ignoreMatch()}})}}),N="of and for in not or if then".split(" ");function w(e,n){return n?+n:function(e){return N.includes(e.toLowerCase())}(e)?0:1}const R=t,y=r,{nodeStream:k,mergeStreams:O}=i,M=Symbol("nomatch");return function(t){var a=[],i={},s={},o=[],l=!0,c=/(^(<[^>]+>|\t|)+|\n)/gm,g="Could not find the language '{}', did you forget to load/include a language module?";const h={disableAutodetect:!0,name:"Plain text",contains:[]};var f={noHighlightRe:/^(no-?highlight)$/i,languageDetectRe:/\blang(?:uage)?-([\w-]+)\b/i,classPrefix:"hljs-",tabReplace:null,useBR:!1,languages:null,__emitter:u};function p(e){return f.noHighlightRe.test(e)}function b(e,n,t,r){var a={code:n,language:e};S("before:highlight",a);var i=a.result?a.result:m(a.language,a.code,t,r);return i.code=a.code,S("after:highlight",i),i}function m(e,t,a,s){var o=t;function c(e,n){var t=E.case_insensitive?n[0].toLowerCase():n[0];return Object.prototype.hasOwnProperty.call(e.keywords,t)&&e.keywords[t]}function u(){null!=y.subLanguage?function(){if(""!==A){var e=null;if("string"==typeof y.subLanguage){if(!i[y.subLanguage])return void O.addText(A);e=m(y.subLanguage,A,!0,k[y.subLanguage]),k[y.subLanguage]=e.top}else 
e=v(A,y.subLanguage.length?y.subLanguage:null);y.relevance>0&&(I+=e.relevance),O.addSublanguage(e.emitter,e.language)}}():function(){if(!y.keywords)return void O.addText(A);let e=0;y.keywordPatternRe.lastIndex=0;let n=y.keywordPatternRe.exec(A),t="";for(;n;){t+=A.substring(e,n.index);const r=c(y,n);if(r){const[e,a]=r;O.addText(t),t="",I+=a,O.addKeyword(n[0],e)}else t+=n[0];e=y.keywordPatternRe.lastIndex,n=y.keywordPatternRe.exec(A)}t+=A.substr(e),O.addText(t)}(),A=""}function h(e){return e.className&&O.openNode(e.className),y=Object.create(e,{parent:{value:y}})}function p(e){return 0===y.matcher.regexIndex?(A+=e[0],1):(L=!0,0)}var b={};function x(t,r){var i=r&&r[0];if(A+=t,null==i)return u(),0;if("begin"===b.type&&"end"===r.type&&b.index===r.index&&""===i){if(A+=o.slice(r.index,r.index+1),!l){const n=Error("0 width match regex");throw n.languageName=e,n.badRule=b.rule,n}return 1}if(b=r,"begin"===r.type)return function(e){var t=e[0],r=e.rule;const a=new n(r),i=[r.__beforeBegin,r["on:begin"]];for(const n of i)if(n&&(n(e,a),a.ignore))return p(t);return r&&r.endSameAsBegin&&(r.endRe=RegExp(t.replace(/[-/\\^$*+?.()|[\]{}]/g,"\\$&"),"m")),r.skip?A+=t:(r.excludeBegin&&(A+=t),u(),r.returnBegin||r.excludeBegin||(A=t)),h(r),r.returnBegin?0:t.length}(r);if("illegal"===r.type&&!a){const e=Error('Illegal lexeme "'+i+'" for mode "'+(y.className||"")+'"');throw e.mode=y,e}if("end"===r.type){var s=function(e){var t=e[0],r=o.substr(e.index),a=function e(t,r,a){let i=function(e,n){var t=e&&e.exec(n);return t&&0===t.index}(t.endRe,a);if(i){if(t["on:end"]){const e=new n(t);t["on:end"](r,e),e.ignore&&(i=!1)}if(i){for(;t.endsParent&&t.parent;)t=t.parent;return t}}if(t.endsWithParent)return e(t.parent,r,a)}(y,e,r);if(!a)return M;var i=y;i.skip?A+=t:(i.returnEnd||i.excludeEnd||(A+=t),u(),i.excludeEnd&&(A=t));do{y.className&&O.closeNode(),y.skip||y.subLanguage||(I+=y.relevance),y=y.parent}while(y!==a.parent);return a.starts&&(a.endSameAsBegin&&(a.starts.endRe=a.endRe),h(a.starts)),i.returnEnd?0:t.length}(r);if(s!==M)return s}if("illegal"===r.type&&""===i)return 1;if(B>1e5&&B>3*r.index)throw Error("potential infinite loop, way more iterations than matches");return A+=i,i.length}var E=T(e);if(!E)throw console.error(g.replace("{}",e)),Error('Unknown language: "'+e+'"');var _=function(e){function n(n,t){return RegExp(d(n),"m"+(e.case_insensitive?"i":"")+(t?"g":""))}class t{constructor(){this.matchIndexes={},this.regexes=[],this.matchAt=1,this.position=0}addRule(e,n){n.position=this.position++,this.matchIndexes[this.matchAt]=n,this.regexes.push([n,e]),this.matchAt+=function(e){return RegExp(e.toString()+"|").exec("").length-1}(e)+1}compile(){0===this.regexes.length&&(this.exec=()=>null);const e=this.regexes.map(e=>e[1]);this.matcherRe=n(function(e,n="|"){for(var t=/\[(?:[^\\\]]|\\.)*\]|\(\??|\\([1-9][0-9]*)|\\./,r=0,a="",i=0;i0&&(a+=n),a+="(";o.length>0;){var l=t.exec(o);if(null==l){a+=o;break}a+=o.substring(0,l.index),o=o.substring(l.index+l[0].length),"\\"===l[0][0]&&l[1]?a+="\\"+(+l[1]+s):(a+=l[0],"("===l[0]&&r++)}a+=")"}return a}(e),!0),this.lastIndex=0}exec(e){this.matcherRe.lastIndex=this.lastIndex;const n=this.matcherRe.exec(e);if(!n)return null;const t=n.findIndex((e,n)=>n>0&&void 0!==e),r=this.matchIndexes[t];return n.splice(0,t),Object.assign(n,r)}}class a{constructor(){this.rules=[],this.multiRegexes=[],this.count=0,this.lastIndex=0,this.regexIndex=0}getMatcher(e){if(this.multiRegexes[e])return this.multiRegexes[e];const n=new t;return 
this.rules.slice(e).forEach(([e,t])=>n.addRule(e,t)),n.compile(),this.multiRegexes[e]=n,n}considerAll(){this.regexIndex=0}addRule(e,n){this.rules.push([e,n]),"begin"===n.type&&this.count++}exec(e){const n=this.getMatcher(this.regexIndex);n.lastIndex=this.lastIndex;const t=n.exec(e);return t&&(this.regexIndex+=t.position+1,this.regexIndex===this.count&&(this.regexIndex=0)),t}}function i(e,n){const t=e.input[e.index-1],r=e.input[e.index+e[0].length];"."!==t&&"."!==r||n.ignoreMatch()}if(e.contains&&e.contains.includes("self"))throw Error("ERR: contains `self` is not supported at the top-level of a language. See documentation.");return function t(s,o){const l=s;if(s.compiled)return l;s.compiled=!0,s.__beforeBegin=null,s.keywords=s.keywords||s.beginKeywords;let c=null;if("object"==typeof s.keywords&&(c=s.keywords.$pattern,delete s.keywords.$pattern),s.keywords&&(s.keywords=function(e,n){var t={};return"string"==typeof e?r("keyword",e):Object.keys(e).forEach((function(n){r(n,e[n])})),t;function r(e,r){n&&(r=r.toLowerCase()),r.split(" ").forEach((function(n){var r=n.split("|");t[r[0]]=[e,w(r[0],r[1])]}))}}(s.keywords,e.case_insensitive)),s.lexemes&&c)throw Error("ERR: Prefer `keywords.$pattern` to `mode.lexemes`, BOTH are not allowed. (see mode reference) ");return l.keywordPatternRe=n(s.lexemes||c||/\w+/,!0),o&&(s.beginKeywords&&(s.begin="\\b("+s.beginKeywords.split(" ").join("|")+")(?=\\b|\\s)",s.__beforeBegin=i),s.begin||(s.begin=/\B|\b/),l.beginRe=n(s.begin),s.endSameAsBegin&&(s.end=s.begin),s.end||s.endsWithParent||(s.end=/\B|\b/),s.end&&(l.endRe=n(s.end)),l.terminator_end=d(s.end)||"",s.endsWithParent&&o.terminator_end&&(l.terminator_end+=(s.end?"|":"")+o.terminator_end)),s.illegal&&(l.illegalRe=n(s.illegal)),void 0===s.relevance&&(s.relevance=1),s.contains||(s.contains=[]),s.contains=[].concat(...s.contains.map((function(e){return function(e){return e.variants&&!e.cached_variants&&(e.cached_variants=e.variants.map((function(n){return r(e,{variants:null},n)}))),e.cached_variants?e.cached_variants:function e(n){return!!n&&(n.endsWithParent||e(n.starts))}(e)?r(e,{starts:e.starts?r(e.starts):null}):Object.isFrozen(e)?r(e):e}("self"===e?s:e)}))),s.contains.forEach((function(e){t(e,l)})),s.starts&&t(s.starts,o),l.matcher=function(e){const n=new a;return e.contains.forEach(e=>n.addRule(e.begin,{rule:e,type:"begin"})),e.terminator_end&&n.addRule(e.terminator_end,{type:"end"}),e.illegal&&n.addRule(e.illegal,{type:"illegal"}),n}(l),l}(e)}(E),N="",y=s||_,k={},O=new f.__emitter(f);!function(){for(var e=[],n=y;n!==E;n=n.parent)n.className&&e.unshift(n.className);e.forEach(e=>O.openNode(e))}();var A="",I=0,S=0,B=0,L=!1;try{for(y.matcher.considerAll();;){B++,L?L=!1:(y.matcher.lastIndex=S,y.matcher.considerAll());const e=y.matcher.exec(o);if(!e)break;const n=x(o.substring(S,e.index),e);S=e.index+n}return x(o.substr(S)),O.closeAllNodes(),O.finalize(),N=O.toHTML(),{relevance:I,value:N,language:e,illegal:!1,emitter:O,top:y}}catch(n){if(n.message&&n.message.includes("Illegal"))return{illegal:!0,illegalBy:{msg:n.message,context:o.slice(S-100,S+100),mode:n.mode},sofar:N,relevance:0,value:R(o),emitter:O};if(l)return{illegal:!1,relevance:0,value:R(o),emitter:O,language:e,top:y,errorRaised:n};throw n}}function v(e,n){n=n||f.languages||Object.keys(i);var t=function(e){const n={relevance:0,emitter:new f.__emitter(f),value:R(e),illegal:!1,top:h};return n.emitter.addText(e),n}(e),r=t;return n.filter(T).filter(I).forEach((function(n){var 
a=m(n,e,!1);a.language=n,a.relevance>r.relevance&&(r=a),a.relevance>t.relevance&&(r=t,t=a)})),r.language&&(t.second_best=r),t}function x(e){return f.tabReplace||f.useBR?e.replace(c,e=>"\n"===e?f.useBR?"
":e:f.tabReplace?e.replace(/\t/g,f.tabReplace):e):e}function E(e){let n=null;const t=function(e){var n=e.className+" ";n+=e.parentNode?e.parentNode.className:"";const t=f.languageDetectRe.exec(n);if(t){var r=T(t[1]);return r||(console.warn(g.replace("{}",t[1])),console.warn("Falling back to no-highlight mode for this block.",e)),r?t[1]:"no-highlight"}return n.split(/\s+/).find(e=>p(e)||T(e))}(e);if(p(t))return;S("before:highlightBlock",{block:e,language:t}),f.useBR?(n=document.createElement("div")).innerHTML=e.innerHTML.replace(/\n/g,"").replace(//g,"\n"):n=e;const r=n.textContent,a=t?b(t,r,!0):v(r),i=k(n);if(i.length){const e=document.createElement("div");e.innerHTML=a.value,a.value=O(i,k(e),r)}a.value=x(a.value),S("after:highlightBlock",{block:e,result:a}),e.innerHTML=a.value,e.className=function(e,n,t){var r=n?s[n]:t,a=[e.trim()];return e.match(/\bhljs\b/)||a.push("hljs"),e.includes(r)||a.push(r),a.join(" ").trim()}(e.className,t,a.language),e.result={language:a.language,re:a.relevance,relavance:a.relevance},a.second_best&&(e.second_best={language:a.second_best.language,re:a.second_best.relevance,relavance:a.second_best.relevance})}const N=()=>{if(!N.called){N.called=!0;var e=document.querySelectorAll("pre code");a.forEach.call(e,E)}};function T(e){return e=(e||"").toLowerCase(),i[e]||i[s[e]]}function A(e,{languageName:n}){"string"==typeof e&&(e=[e]),e.forEach(e=>{s[e]=n})}function I(e){var n=T(e);return n&&!n.disableAutodetect}function S(e,n){var t=e;o.forEach((function(e){e[t]&&e[t](n)}))}Object.assign(t,{highlight:b,highlightAuto:v,fixMarkup:x,highlightBlock:E,configure:function(e){f=y(f,e)},initHighlighting:N,initHighlightingOnLoad:function(){window.addEventListener("DOMContentLoaded",N,!1)},registerLanguage:function(e,n){var r=null;try{r=n(t)}catch(n){if(console.error("Language definition for '{}' could not be registered.".replace("{}",e)),!l)throw n;console.error(n),r=h}r.name||(r.name=e),i[e]=r,r.rawDefinition=n.bind(null,t),r.aliases&&A(r.aliases,{languageName:e})},listLanguages:function(){return Object.keys(i)},getLanguage:T,registerAliases:A,requireLanguage:function(e){var n=T(e);if(n)return n;throw Error("The '{}' language is required, but not loaded.".replace("{}",e))},autoDetection:I,inherit:y,addPlugin:function(e){o.push(e)}}),t.debugMode=function(){l=!1},t.safeMode=function(){l=!0},t.versionString="10.1.1";for(const n in _)"object"==typeof _[n]&&e(_[n]);return Object.assign(t,_),t}({})}();"object"==typeof exports&&"undefined"!=typeof module&&(module.exports=hljs); +hljs.registerLanguage("apache",function(){"use strict";return function(e){var n={className:"number",begin:"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d{1,5})?"};return{name:"Apache config",aliases:["apacheconf"],case_insensitive:!0,contains:[e.HASH_COMMENT_MODE,{className:"section",begin:"",contains:[n,{className:"number",begin:":\\d{1,5}"},e.inherit(e.QUOTE_STRING_MODE,{relevance:0})]},{className:"attribute",begin:/\w+/,relevance:0,keywords:{nomarkup:"order deny allow setenv rewriterule rewriteengine rewritecond documentroot sethandler errordocument loadmodule options header listen serverroot servername"},starts:{end:/$/,relevance:0,keywords:{literal:"on off all deny allow"},contains:[{className:"meta",begin:"\\s\\[",end:"\\]$"},{className:"variable",begin:"[\\$%]\\{",end:"\\}",contains:["self",{className:"number",begin:"[\\$%]\\d+"}]},n,{className:"number",begin:"\\d+"},e.QUOTE_STRING_MODE]}}],illegal:/\S/}}}()); +hljs.registerLanguage("bash",function(){"use strict";return function(e){const 
s={};Object.assign(s,{className:"variable",variants:[{begin:/\$[\w\d#@][\w\d_]*/},{begin:/\$\{/,end:/\}/,contains:[{begin:/:-/,contains:[s]}]}]});const t={className:"subst",begin:/\$\(/,end:/\)/,contains:[e.BACKSLASH_ESCAPE]},n={className:"string",begin:/"/,end:/"/,contains:[e.BACKSLASH_ESCAPE,s,t]};t.contains.push(n);const a={begin:/\$\(\(/,end:/\)\)/,contains:[{begin:/\d+#[0-9a-f]+/,className:"number"},e.NUMBER_MODE,s]},i=e.SHEBANG({binary:"(fish|bash|zsh|sh|csh|ksh|tcsh|dash|scsh)",relevance:10}),c={className:"function",begin:/\w[\w\d_]*\s*\(\s*\)\s*\{/,returnBegin:!0,contains:[e.inherit(e.TITLE_MODE,{begin:/\w[\w\d_]*/})],relevance:0};return{name:"Bash",aliases:["sh","zsh"],keywords:{$pattern:/\b-?[a-z\._]+\b/,keyword:"if then else elif fi for while in do done case esac function",literal:"true false",built_in:"break cd continue eval exec exit export getopts hash pwd readonly return shift test times trap umask unset alias bind builtin caller command declare echo enable help let local logout mapfile printf read readarray source type typeset ulimit unalias set shopt autoload bg bindkey bye cap chdir clone comparguments compcall compctl compdescribe compfiles compgroups compquote comptags comptry compvalues dirs disable disown echotc echoti emulate fc fg float functions getcap getln history integer jobs kill limit log noglob popd print pushd pushln rehash sched setcap setopt stat suspend ttyctl unfunction unhash unlimit unsetopt vared wait whence where which zcompile zformat zftp zle zmodload zparseopts zprof zpty zregexparse zsocket zstyle ztcp",_:"-ne -eq -lt -gt -f -d -e -s -l -a"},contains:[i,e.SHEBANG(),c,a,e.HASH_COMMENT_MODE,n,{className:"",begin:/\\"/},{className:"string",begin:/'/,end:/'/},s]}}}()); +hljs.registerLanguage("c-like",function(){"use strict";return function(e){function t(e){return"(?:"+e+")?"}var n="(decltype\\(auto\\)|"+t("[a-zA-Z_]\\w*::")+"[a-zA-Z_]\\w*"+t("<.*?>")+")",r={className:"keyword",begin:"\\b[a-z\\d_]*_t\\b"},a={className:"string",variants:[{begin:'(u8?|U|L)?"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{begin:"(u8?|U|L)?'(\\\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4,8}|[0-7]{3}|\\S)|.)",end:"'",illegal:"."},e.END_SAME_AS_BEGIN({begin:/(?:u8?|U|L)?R"([^()\\ ]{0,16})\(/,end:/\)([^()\\ ]{0,16})"/})]},i={className:"number",variants:[{begin:"\\b(0b[01']+)"},{begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)(u|U|l|L|ul|UL|f|F|b|B)"},{begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"}],relevance:0},s={className:"meta",begin:/#\s*[a-z]+\b/,end:/$/,keywords:{"meta-keyword":"if else elif endif define undef warning error line pragma _Pragma ifdef ifndef include"},contains:[{begin:/\\\n/,relevance:0},e.inherit(a,{className:"meta-string"}),{className:"meta-string",begin:/<.*?>/,end:/$/,illegal:"\\n"},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},o={className:"title",begin:t("[a-zA-Z_]\\w*::")+e.IDENT_RE,relevance:0},c=t("[a-zA-Z_]\\w*::")+e.IDENT_RE+"\\s*\\(",l={keyword:"int float while private char char8_t char16_t char32_t catch import module export virtual operator sizeof dynamic_cast|10 typedef const_cast|10 const for static_cast|10 union namespace unsigned long volatile static protected bool template mutable if public friend do goto auto void enum else break extern using asm case typeid wchar_t short reinterpret_cast|10 default double register explicit signed typename try this switch continue inline delete alignas alignof constexpr consteval constinit decltype concept co_await co_return co_yield requires noexcept 
static_assert thread_local restrict final override atomic_bool atomic_char atomic_schar atomic_uchar atomic_short atomic_ushort atomic_int atomic_uint atomic_long atomic_ulong atomic_llong atomic_ullong new throw return and and_eq bitand bitor compl not not_eq or or_eq xor xor_eq",built_in:"std string wstring cin cout cerr clog stdin stdout stderr stringstream istringstream ostringstream auto_ptr deque list queue stack vector map set pair bitset multiset multimap unordered_set unordered_map unordered_multiset unordered_multimap priority_queue make_pair array shared_ptr abort terminate abs acos asin atan2 atan calloc ceil cosh cos exit exp fabs floor fmod fprintf fputs free frexp fscanf future isalnum isalpha iscntrl isdigit isgraph islower isprint ispunct isspace isupper isxdigit tolower toupper labs ldexp log10 log malloc realloc memchr memcmp memcpy memset modf pow printf putchar puts scanf sinh sin snprintf sprintf sqrt sscanf strcat strchr strcmp strcpy strcspn strlen strncat strncmp strncpy strpbrk strrchr strspn strstr tanh tan vfprintf vprintf vsprintf endl initializer_list unique_ptr _Bool complex _Complex imaginary _Imaginary",literal:"true false nullptr NULL"},d=[r,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,i,a],_={variants:[{begin:/=/,end:/;/},{begin:/\(/,end:/\)/},{beginKeywords:"new throw return else",end:/;/}],keywords:l,contains:d.concat([{begin:/\(/,end:/\)/,keywords:l,contains:d.concat(["self"]),relevance:0}]),relevance:0},u={className:"function",begin:"("+n+"[\\*&\\s]+)+"+c,returnBegin:!0,end:/[{;=]/,excludeEnd:!0,keywords:l,illegal:/[^\w\s\*&:<>]/,contains:[{begin:"decltype\\(auto\\)",keywords:l,relevance:0},{begin:c,returnBegin:!0,contains:[o],relevance:0},{className:"params",begin:/\(/,end:/\)/,keywords:l,relevance:0,contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,a,i,r,{begin:/\(/,end:/\)/,keywords:l,relevance:0,contains:["self",e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,a,i,r]}]},r,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,s]};return{aliases:["c","cc","h","c++","h++","hpp","hh","hxx","cxx"],keywords:l,disableAutodetect:!0,illegal:"",keywords:l,contains:["self",r]},{begin:e.IDENT_RE+"::",keywords:l},{className:"class",beginKeywords:"class struct",end:/[{;:]/,contains:[{begin://,contains:["self"]},e.TITLE_MODE]}]),exports:{preprocessor:s,strings:a,keywords:l}}}}()); +hljs.registerLanguage("c",function(){"use strict";return function(e){var n=e.getLanguage("c-like").rawDefinition();return n.name="C",n.aliases=["c","h"],n}}()); +hljs.registerLanguage("coffeescript",function(){"use strict";const 
e=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends"],n=["true","false","null","undefined","NaN","Infinity"],a=[].concat(["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape"],["arguments","this","super","console","window","document","localStorage","module","global"],["Intl","DataView","Number","Math","Date","String","RegExp","Object","Function","Boolean","Error","Symbol","Set","Map","WeakSet","WeakMap","Proxy","Reflect","JSON","Promise","Float64Array","Int16Array","Int32Array","Int8Array","Uint16Array","Uint32Array","Float32Array","Array","Uint8Array","Uint8ClampedArray","ArrayBuffer"],["EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError"]);return function(r){var t={keyword:e.concat(["then","unless","until","loop","by","when","and","or","is","isnt","not"]).filter((e=>n=>!e.includes(n))(["var","const","let","function","static"])).join(" "),literal:n.concat(["yes","no","on","off"]).join(" "),built_in:a.concat(["npm","print"]).join(" ")},i="[A-Za-z$_][0-9A-Za-z$_]*",s={className:"subst",begin:/#\{/,end:/}/,keywords:t},o=[r.BINARY_NUMBER_MODE,r.inherit(r.C_NUMBER_MODE,{starts:{end:"(\\s*/)?",relevance:0}}),{className:"string",variants:[{begin:/'''/,end:/'''/,contains:[r.BACKSLASH_ESCAPE]},{begin:/'/,end:/'/,contains:[r.BACKSLASH_ESCAPE]},{begin:/"""/,end:/"""/,contains:[r.BACKSLASH_ESCAPE,s]},{begin:/"/,end:/"/,contains:[r.BACKSLASH_ESCAPE,s]}]},{className:"regexp",variants:[{begin:"///",end:"///",contains:[s,r.HASH_COMMENT_MODE]},{begin:"//[gim]{0,3}(?=\\W)",relevance:0},{begin:/\/(?![ *]).*?(?![\\]).\/[gim]{0,3}(?=\W)/}]},{begin:"@"+i},{subLanguage:"javascript",excludeBegin:!0,excludeEnd:!0,variants:[{begin:"```",end:"```"},{begin:"`",end:"`"}]}];s.contains=o;var c=r.inherit(r.TITLE_MODE,{begin:i}),l={className:"params",begin:"\\([^\\(]",returnBegin:!0,contains:[{begin:/\(/,end:/\)/,keywords:t,contains:["self"].concat(o)}]};return{name:"CoffeeScript",aliases:["coffee","cson","iced"],keywords:t,illegal:/\/\*/,contains:o.concat([r.COMMENT("###","###"),r.HASH_COMMENT_MODE,{className:"function",begin:"^\\s*"+i+"\\s*=\\s*(\\(.*\\))?\\s*\\B[-=]>",end:"[-=]>",returnBegin:!0,contains:[c,l]},{begin:/[:\(,=]\s*/,relevance:0,contains:[{className:"function",begin:"(\\(.*\\))?\\s*\\B[-=]>",end:"[-=]>",returnBegin:!0,contains:[l]}]},{className:"class",beginKeywords:"class",end:"$",illegal:/[:="\[\]]/,contains:[{beginKeywords:"extends",endsWithParent:!0,illegal:/[:="\[\]]/,contains:[c]},c]},{begin:i+":",end:":",returnBegin:!0,returnEnd:!0,relevance:0}])}}}()); +hljs.registerLanguage("cpp",function(){"use strict";return function(e){var t=e.getLanguage("c-like").rawDefinition();return t.disableAutodetect=!1,t.name="C++",t.aliases=["cc","c++","h++","hpp","hh","hxx","cxx"],t}}()); +hljs.registerLanguage("csharp",function(){"use strict";return function(e){var n={keyword:"abstract as base bool break byte case catch char checked const continue decimal default delegate do double enum event explicit extern finally fixed float for foreach goto if implicit in int interface internal is lock long object operator out override params private protected public readonly ref 
sbyte sealed short sizeof stackalloc static string struct switch this try typeof uint ulong unchecked unsafe ushort using virtual void volatile while add alias ascending async await by descending dynamic equals from get global group into join let nameof on orderby partial remove select set value var when where yield",literal:"null false true"},i=e.inherit(e.TITLE_MODE,{begin:"[a-zA-Z](\\.?\\w)*"}),a={className:"number",variants:[{begin:"\\b(0b[01']+)"},{begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)(u|U|l|L|ul|UL|f|F|b|B)"},{begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"}],relevance:0},s={className:"string",begin:'@"',end:'"',contains:[{begin:'""'}]},t=e.inherit(s,{illegal:/\n/}),l={className:"subst",begin:"{",end:"}",keywords:n},r=e.inherit(l,{illegal:/\n/}),c={className:"string",begin:/\$"/,end:'"',illegal:/\n/,contains:[{begin:"{{"},{begin:"}}"},e.BACKSLASH_ESCAPE,r]},o={className:"string",begin:/\$@"/,end:'"',contains:[{begin:"{{"},{begin:"}}"},{begin:'""'},l]},g=e.inherit(o,{illegal:/\n/,contains:[{begin:"{{"},{begin:"}}"},{begin:'""'},r]});l.contains=[o,c,s,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,a,e.C_BLOCK_COMMENT_MODE],r.contains=[g,c,t,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,a,e.inherit(e.C_BLOCK_COMMENT_MODE,{illegal:/\n/})];var d={variants:[o,c,s,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]},E={begin:"<",end:">",contains:[{beginKeywords:"in out"},i]},_=e.IDENT_RE+"(<"+e.IDENT_RE+"(\\s*,\\s*"+e.IDENT_RE+")*>)?(\\[\\])?",b={begin:"@"+e.IDENT_RE,relevance:0};return{name:"C#",aliases:["cs","c#"],keywords:n,illegal:/::/,contains:[e.COMMENT("///","$",{returnBegin:!0,contains:[{className:"doctag",variants:[{begin:"///",relevance:0},{begin:"\x3c!--|--\x3e"},{begin:""}]}]}),e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{className:"meta",begin:"#",end:"$",keywords:{"meta-keyword":"if else elif endif define undef warning error line region endregion pragma checksum"}},d,a,{beginKeywords:"class interface",end:/[{;=]/,illegal:/[^\s:,]/,contains:[{beginKeywords:"where class"},i,E,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{beginKeywords:"namespace",end:/[{;=]/,illegal:/[^\s:]/,contains:[i,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{className:"meta",begin:"^\\s*\\[",excludeBegin:!0,end:"\\]",excludeEnd:!0,contains:[{className:"meta-string",begin:/"/,end:/"/}]},{beginKeywords:"new return throw await else",relevance:0},{className:"function",begin:"("+_+"\\s+)+"+e.IDENT_RE+"\\s*(\\<.+\\>)?\\s*\\(",returnBegin:!0,end:/\s*[{;=]/,excludeEnd:!0,keywords:n,contains:[{begin:e.IDENT_RE+"\\s*(\\<.+\\>)?\\s*\\(",returnBegin:!0,contains:[e.TITLE_MODE,E],relevance:0},{className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:n,relevance:0,contains:[d,a,e.C_BLOCK_COMMENT_MODE]},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},b]}}}()); +hljs.registerLanguage("css",function(){"use strict";return function(e){var 
[Fragment of a vendored, minified highlight.js syntax-highlighting bundle added by this patch series; the extracted text here is garbled (wrapped lines, stripped regex spans) and is summarized rather than reproduced. The visible portion continues the css definition and registers hljs languages for: diff, go, TOML/INI, java, javascript, json, kotlin, less, lua, makefile, HTML/XML, markdown, nginx, objectivec, perl, php, php-template, plaintext, properties, python, python-repl, ruby, rust, scss, shell, sql, swift, typescript, yaml, armasm, d, and (truncated) handlebars.]
u=n.inherit(l,{keywords:a,className:"name",starts:n.inherit(d,{end:/}}/})}),b=n.inherit(l,{keywords:a,className:"name"}),h=n.inherit(l,{className:"name",keywords:a,starts:n.inherit(d,{end:/}}/})});return{name:"Handlebars",aliases:["hbs","html.hbs","html.handlebars","htmlbars"],case_insensitive:!0,subLanguage:"xml",contains:[{begin:/\\\{\{/,skip:!0},{begin:/\\\\(?=\{\{)/,skip:!0},n.COMMENT(/\{\{!--/,/--\}\}/),n.COMMENT(/\{\{!/,/\}\}/),{className:"template-tag",begin:/\{\{\{\{(?!\/)/,end:/\}\}\}\}/,contains:[u],starts:{end:/\{\{\{\{\//,returnEnd:!0,subLanguage:"xml"}},{className:"template-tag",begin:/\{\{\{\{\//,end:/\}\}\}\}/,contains:[b]},{className:"template-tag",begin:/\{\{#/,end:/\}\}/,contains:[u]},{className:"template-tag",begin:/\{\{(?=else\}\})/,end:/\}\}/,keywords:"else"},{className:"template-tag",begin:/\{\{\//,end:/\}\}/,contains:[b]},{className:"template-variable",begin:/\{\{\{/,end:/\}\}\}/,contains:[h]},{className:"template-variable",begin:/\{\{/,end:/\}\}/,contains:[h]}]}}}()); +hljs.registerLanguage("haskell",function(){"use strict";return function(e){var n={variants:[e.COMMENT("--","$"),e.COMMENT("{-","-}",{contains:["self"]})]},i={className:"meta",begin:"{-#",end:"#-}"},a={className:"meta",begin:"^#",end:"$"},s={className:"type",begin:"\\b[A-Z][\\w']*",relevance:0},l={begin:"\\(",end:"\\)",illegal:'"',contains:[i,a,{className:"type",begin:"\\b[A-Z][\\w]*(\\((\\.\\.|,|\\w+)\\))?"},e.inherit(e.TITLE_MODE,{begin:"[_a-z][\\w']*"}),n]};return{name:"Haskell",aliases:["hs"],keywords:"let in if then else case of where do module import hiding qualified type data newtype deriving class instance as default infix infixl infixr foreign export ccall stdcall cplusplus jvm dotnet safe unsafe family forall mdo proc rec",contains:[{beginKeywords:"module",end:"where",keywords:"module where",contains:[l,n],illegal:"\\W\\.|;"},{begin:"\\bimport\\b",end:"$",keywords:"import qualified as hiding",contains:[l,n],illegal:"\\W\\.|;"},{className:"class",begin:"^(\\s*)?(class|instance)\\b",end:"where",keywords:"class family instance where",contains:[s,l,n]},{className:"class",begin:"\\b(data|(new)?type)\\b",end:"$",keywords:"data family type newtype deriving",contains:[i,s,l,{begin:"{",end:"}",contains:l.contains},n]},{beginKeywords:"default",end:"$",contains:[s,l,n]},{beginKeywords:"infix infixl infixr",end:"$",contains:[e.C_NUMBER_MODE,n]},{begin:"\\bforeign\\b",end:"$",keywords:"foreign import export ccall stdcall cplusplus jvm dotnet safe unsafe",contains:[s,e.QUOTE_STRING_MODE,n]},{className:"meta",begin:"#!\\/usr\\/bin\\/env runhaskell",end:"$"},i,a,e.QUOTE_STRING_MODE,e.C_NUMBER_MODE,s,e.inherit(e.TITLE_MODE,{begin:"^[_a-z][\\w']*"}),n,{begin:"->|<-"}]}}}()); +hljs.registerLanguage("julia",function(){"use strict";return function(e){var r="[A-Za-z_\\u00A1-\\uFFFF][A-Za-z_0-9\\u00A1-\\uFFFF]*",t={$pattern:r,keyword:"in isa where baremodule begin break catch ccall const continue do else elseif end export false finally for function global if import importall let local macro module quote return true try using while type immutable abstract bitstype typealias ",literal:"true false ARGS C_NULL DevNull ENDIAN_BOM ENV I Inf Inf16 Inf32 Inf64 InsertionSort JULIA_HOME LOAD_PATH MergeSort NaN NaN16 NaN32 NaN64 PROGRAM_FILE QuickSort RoundDown RoundFromZero RoundNearest RoundNearestTiesAway RoundNearestTiesUp RoundToZero RoundUp STDERR STDIN STDOUT VERSION catalan e|0 eu|0 eulergamma golden im nothing pi γ π φ ",built_in:"ANY AbstractArray AbstractChannel AbstractFloat AbstractMatrix AbstractRNG 
AbstractSerializer AbstractSet AbstractSparseArray AbstractSparseMatrix AbstractSparseVector AbstractString AbstractUnitRange AbstractVecOrMat AbstractVector Any ArgumentError Array AssertionError Associative Base64DecodePipe Base64EncodePipe Bidiagonal BigFloat BigInt BitArray BitMatrix BitVector Bool BoundsError BufferStream CachingPool CapturedException CartesianIndex CartesianRange Cchar Cdouble Cfloat Channel Char Cint Cintmax_t Clong Clonglong ClusterManager Cmd CodeInfo Colon Complex Complex128 Complex32 Complex64 CompositeException Condition ConjArray ConjMatrix ConjVector Cptrdiff_t Cshort Csize_t Cssize_t Cstring Cuchar Cuint Cuintmax_t Culong Culonglong Cushort Cwchar_t Cwstring DataType Date DateFormat DateTime DenseArray DenseMatrix DenseVecOrMat DenseVector Diagonal Dict DimensionMismatch Dims DirectIndexString Display DivideError DomainError EOFError EachLine Enum Enumerate ErrorException Exception ExponentialBackOff Expr Factorization FileMonitor Float16 Float32 Float64 Function Future GlobalRef GotoNode HTML Hermitian IO IOBuffer IOContext IOStream IPAddr IPv4 IPv6 IndexCartesian IndexLinear IndexStyle InexactError InitError Int Int128 Int16 Int32 Int64 Int8 IntSet Integer InterruptException InvalidStateException Irrational KeyError LabelNode LinSpace LineNumberNode LoadError LowerTriangular MIME Matrix MersenneTwister Method MethodError MethodTable Module NTuple NewvarNode NullException Nullable Number ObjectIdDict OrdinalRange OutOfMemoryError OverflowError Pair ParseError PartialQuickSort PermutedDimsArray Pipe PollingFileWatcher ProcessExitedException Ptr QuoteNode RandomDevice Range RangeIndex Rational RawFD ReadOnlyMemoryError Real ReentrantLock Ref Regex RegexMatch RemoteChannel RemoteException RevString RoundingMode RowVector SSAValue SegmentationFault SerializationState Set SharedArray SharedMatrix SharedVector Signed SimpleVector Slot SlotNumber SparseMatrixCSC SparseVector StackFrame StackOverflowError StackTrace StepRange StepRangeLen StridedArray StridedMatrix StridedVecOrMat StridedVector String SubArray SubString SymTridiagonal Symbol Symmetric SystemError TCPSocket Task Text TextDisplay Timer Tridiagonal Tuple Type TypeError TypeMapEntry TypeMapLevel TypeName TypeVar TypedSlot UDPSocket UInt UInt128 UInt16 UInt32 UInt64 UInt8 UndefRefError UndefVarError UnicodeError UniformScaling Union UnionAll UnitRange Unsigned UpperTriangular Val Vararg VecElement VecOrMat Vector VersionNumber Void WeakKeyDict WeakRef WorkerConfig WorkerPool "},a={keywords:t,illegal:/<\//},n={className:"subst",begin:/\$\(/,end:/\)/,keywords:t},o={className:"variable",begin:"\\$"+r},i={className:"string",contains:[e.BACKSLASH_ESCAPE,n,o],variants:[{begin:/\w*"""/,end:/"""\w*/,relevance:10},{begin:/\w*"/,end:/"\w*/}]},l={className:"string",contains:[e.BACKSLASH_ESCAPE,n,o],begin:"`",end:"`"},s={className:"meta",begin:"@"+r};return a.name="Julia",a.contains=[{className:"number",begin:/(\b0x[\d_]*(\.[\d_]*)?|0x\.\d[\d_]*)p[-+]?\d+|\b0[box][a-fA-F0-9][a-fA-F0-9_]*|(\b\d[\d_]*(\.[\d_]*)?|\.\d[\d_]*)([eEfF][-+]?\d+)?/,relevance:0},{className:"string",begin:/'(.|\\[xXuU][a-zA-Z0-9]+)'/},i,l,s,{className:"comment",variants:[{begin:"#=",end:"=#",relevance:10},{begin:"#",end:"$"}]},e.HASH_COMMENT_MODE,{className:"keyword",begin:"\\b(((abstract|primitive)\\s+)type|(mutable\\s+)?struct)\\b"},{begin:/<:/}],n.contains=a.contains,a}}()); +hljs.registerLanguage("nim",function(){"use strict";return function(e){return{name:"Nim",aliases:["nim"],keywords:{keyword:"addr and as asm bind block break case cast 
const continue converter discard distinct div do elif else end enum except export finally for from func generic if import in include interface is isnot iterator let macro method mixin mod nil not notin object of or out proc ptr raise ref return shl shr static template try tuple type using var when while with without xor yield",literal:"shared guarded stdin stdout stderr result true false",built_in:"int int8 int16 int32 int64 uint uint8 uint16 uint32 uint64 float float32 float64 bool char string cstring pointer expr stmt void auto any range array openarray varargs seq set clong culong cchar cschar cshort cint csize clonglong cfloat cdouble clongdouble cuchar cushort cuint culonglong cstringarray semistatic"},contains:[{className:"meta",begin:/{\./,end:/\.}/,relevance:10},{className:"string",begin:/[a-zA-Z]\w*"/,end:/"/,contains:[{begin:/""/}]},{className:"string",begin:/([a-zA-Z]\w*)?"""/,end:/"""/},e.QUOTE_STRING_MODE,{className:"type",begin:/\b[A-Z]\w+\b/,relevance:0},{className:"number",relevance:0,variants:[{begin:/\b(0[xX][0-9a-fA-F][_0-9a-fA-F]*)('?[iIuU](8|16|32|64))?/},{begin:/\b(0o[0-7][_0-7]*)('?[iIuUfF](8|16|32|64))?/},{begin:/\b(0(b|B)[01][_01]*)('?[iIuUfF](8|16|32|64))?/},{begin:/\b(\d[_\d]*)('?[iIuUfF](8|16|32|64))?/}]},e.HASH_COMMENT_MODE]}}}()); +hljs.registerLanguage("r",function(){"use strict";return function(e){var n="([a-zA-Z]|\\.[a-zA-Z.])[a-zA-Z0-9._]*";return{name:"R",contains:[e.HASH_COMMENT_MODE,{begin:n,keywords:{$pattern:n,keyword:"function if in break next repeat else for return switch while try tryCatch stop warning require library attach detach source setMethod setGeneric setGroupGeneric setClass ...",literal:"NULL NA TRUE FALSE T F Inf NaN NA_integer_|10 NA_real_|10 NA_character_|10 NA_complex_|10"},relevance:0},{className:"number",begin:"0[xX][0-9a-fA-F]+[Li]?\\b",relevance:0},{className:"number",begin:"\\d+(?:[eE][+\\-]?\\d*)?L\\b",relevance:0},{className:"number",begin:"\\d+\\.(?!\\d)(?:i\\b)?",relevance:0},{className:"number",begin:"\\d+(?:\\.\\d*)?(?:[eE][+\\-]?\\d*)?i?\\b",relevance:0},{className:"number",begin:"\\.\\d+(?:[eE][+\\-]?\\d*)?i?\\b",relevance:0},{begin:"`",end:"`",relevance:0},{className:"string",contains:[e.BACKSLASH_ESCAPE],variants:[{begin:'"',end:'"'},{begin:"'",end:"'"}]}]}}}()); +hljs.registerLanguage("scala",function(){"use strict";return function(e){var n={className:"subst",variants:[{begin:"\\$[A-Za-z0-9_]+"},{begin:"\\${",end:"}"}]},a={className:"string",variants:[{begin:'"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{begin:'"""',end:'"""',relevance:10},{begin:'[a-z]+"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE,n]},{className:"string",begin:'[a-z]+"""',end:'"""',contains:[n],relevance:10}]},s={className:"type",begin:"\\b[A-Z][A-Za-z0-9_]*",relevance:0},t={className:"title",begin:/[^0-9\n\t "'(),.`{}\[\]:;][^\n\t "'(),.`{}\[\]:;]+|[^0-9\n\t "'(),.`{}\[\]:;=]/,relevance:0},i={className:"class",beginKeywords:"class object trait type",end:/[:={\[\n;]/,excludeEnd:!0,contains:[{beginKeywords:"extends with",relevance:10},{begin:/\[/,end:/\]/,excludeBegin:!0,excludeEnd:!0,relevance:0,contains:[s]},{className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,relevance:0,contains:[s]},t]},l={className:"function",beginKeywords:"def",end:/[:={\[(\n;]/,excludeEnd:!0,contains:[t]};return{name:"Scala",keywords:{literal:"true false null",keyword:"type yield lazy override def with val var sealed abstract private trait object if forSome for while throw finally protected extends import final return else break new catch 
super class case package default try this match continue throws implicit"},contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,a,{className:"symbol",begin:"'\\w[\\w\\d_]*(?!')"},s,l,i,e.C_NUMBER_MODE,{className:"meta",begin:"@[A-Za-z]+"}]}}}()); +hljs.registerLanguage("x86asm",function(){"use strict";return function(s){return{name:"Intel x86 Assembly",case_insensitive:!0,keywords:{$pattern:"[.%]?"+s.IDENT_RE,keyword:"lock rep repe repz repne repnz xaquire xrelease bnd nobnd aaa aad aam aas adc add and arpl bb0_reset bb1_reset bound bsf bsr bswap bt btc btr bts call cbw cdq cdqe clc cld cli clts cmc cmp cmpsb cmpsd cmpsq cmpsw cmpxchg cmpxchg486 cmpxchg8b cmpxchg16b cpuid cpu_read cpu_write cqo cwd cwde daa das dec div dmint emms enter equ f2xm1 fabs fadd faddp fbld fbstp fchs fclex fcmovb fcmovbe fcmove fcmovnb fcmovnbe fcmovne fcmovnu fcmovu fcom fcomi fcomip fcomp fcompp fcos fdecstp fdisi fdiv fdivp fdivr fdivrp femms feni ffree ffreep fiadd ficom ficomp fidiv fidivr fild fimul fincstp finit fist fistp fisttp fisub fisubr fld fld1 fldcw fldenv fldl2e fldl2t fldlg2 fldln2 fldpi fldz fmul fmulp fnclex fndisi fneni fninit fnop fnsave fnstcw fnstenv fnstsw fpatan fprem fprem1 fptan frndint frstor fsave fscale fsetpm fsin fsincos fsqrt fst fstcw fstenv fstp fstsw fsub fsubp fsubr fsubrp ftst fucom fucomi fucomip fucomp fucompp fxam fxch fxtract fyl2x fyl2xp1 hlt ibts icebp idiv imul in inc incbin insb insd insw int int01 int1 int03 int3 into invd invpcid invlpg invlpga iret iretd iretq iretw jcxz jecxz jrcxz jmp jmpe lahf lar lds lea leave les lfence lfs lgdt lgs lidt lldt lmsw loadall loadall286 lodsb lodsd lodsq lodsw loop loope loopne loopnz loopz lsl lss ltr mfence monitor mov movd movq movsb movsd movsq movsw movsx movsxd movzx mul mwait neg nop not or out outsb outsd outsw packssdw packsswb packuswb paddb paddd paddsb paddsiw paddsw paddusb paddusw paddw pand pandn pause paveb pavgusb pcmpeqb pcmpeqd pcmpeqw pcmpgtb pcmpgtd pcmpgtw pdistib pf2id pfacc pfadd pfcmpeq pfcmpge pfcmpgt pfmax pfmin pfmul pfrcp pfrcpit1 pfrcpit2 pfrsqit1 pfrsqrt pfsub pfsubr pi2fd pmachriw pmaddwd pmagw pmulhriw pmulhrwa pmulhrwc pmulhw pmullw pmvgezb pmvlzb pmvnzb pmvzb pop popa popad popaw popf popfd popfq popfw por prefetch prefetchw pslld psllq psllw psrad psraw psrld psrlq psrlw psubb psubd psubsb psubsiw psubsw psubusb psubusw psubw punpckhbw punpckhdq punpckhwd punpcklbw punpckldq punpcklwd push pusha pushad pushaw pushf pushfd pushfq pushfw pxor rcl rcr rdshr rdmsr rdpmc rdtsc rdtscp ret retf retn rol ror rdm rsdc rsldt rsm rsts sahf sal salc sar sbb scasb scasd scasq scasw sfence sgdt shl shld shr shrd sidt sldt skinit smi smint smintold smsw stc std sti stosb stosd stosq stosw str sub svdc svldt svts swapgs syscall sysenter sysexit sysret test ud0 ud1 ud2b ud2 ud2a umov verr verw fwait wbinvd wrshr wrmsr xadd xbts xchg xlatb xlat xor cmove cmovz cmovne cmovnz cmova cmovnbe cmovae cmovnb cmovb cmovnae cmovbe cmovna cmovg cmovnle cmovge cmovnl cmovl cmovnge cmovle cmovng cmovc cmovnc cmovo cmovno cmovs cmovns cmovp cmovpe cmovnp cmovpo je jz jne jnz ja jnbe jae jnb jb jnae jbe jna jg jnle jge jnl jl jnge jle jng jc jnc jo jno js jns jpo jnp jpe jp sete setz setne setnz seta setnbe setae setnb setnc setb setnae setcset setbe setna setg setnle setge setnl setl setnge setle setng sets setns seto setno setpe setp setpo setnp addps addss andnps andps cmpeqps cmpeqss cmpleps cmpless cmpltps cmpltss cmpneqps cmpneqss cmpnleps cmpnless cmpnltps cmpnltss cmpordps cmpordss cmpunordps cmpunordss cmpps cmpss 
comiss cvtpi2ps cvtps2pi cvtsi2ss cvtss2si cvttps2pi cvttss2si divps divss ldmxcsr maxps maxss minps minss movaps movhps movlhps movlps movhlps movmskps movntps movss movups mulps mulss orps rcpps rcpss rsqrtps rsqrtss shufps sqrtps sqrtss stmxcsr subps subss ucomiss unpckhps unpcklps xorps fxrstor fxrstor64 fxsave fxsave64 xgetbv xsetbv xsave xsave64 xsaveopt xsaveopt64 xrstor xrstor64 prefetchnta prefetcht0 prefetcht1 prefetcht2 maskmovq movntq pavgb pavgw pextrw pinsrw pmaxsw pmaxub pminsw pminub pmovmskb pmulhuw psadbw pshufw pf2iw pfnacc pfpnacc pi2fw pswapd maskmovdqu clflush movntdq movnti movntpd movdqa movdqu movdq2q movq2dq paddq pmuludq pshufd pshufhw pshuflw pslldq psrldq psubq punpckhqdq punpcklqdq addpd addsd andnpd andpd cmpeqpd cmpeqsd cmplepd cmplesd cmpltpd cmpltsd cmpneqpd cmpneqsd cmpnlepd cmpnlesd cmpnltpd cmpnltsd cmpordpd cmpordsd cmpunordpd cmpunordsd cmppd comisd cvtdq2pd cvtdq2ps cvtpd2dq cvtpd2pi cvtpd2ps cvtpi2pd cvtps2dq cvtps2pd cvtsd2si cvtsd2ss cvtsi2sd cvtss2sd cvttpd2pi cvttpd2dq cvttps2dq cvttsd2si divpd divsd maxpd maxsd minpd minsd movapd movhpd movlpd movmskpd movupd mulpd mulsd orpd shufpd sqrtpd sqrtsd subpd subsd ucomisd unpckhpd unpcklpd xorpd addsubpd addsubps haddpd haddps hsubpd hsubps lddqu movddup movshdup movsldup clgi stgi vmcall vmclear vmfunc vmlaunch vmload vmmcall vmptrld vmptrst vmread vmresume vmrun vmsave vmwrite vmxoff vmxon invept invvpid pabsb pabsw pabsd palignr phaddw phaddd phaddsw phsubw phsubd phsubsw pmaddubsw pmulhrsw pshufb psignb psignw psignd extrq insertq movntsd movntss lzcnt blendpd blendps blendvpd blendvps dppd dpps extractps insertps movntdqa mpsadbw packusdw pblendvb pblendw pcmpeqq pextrb pextrd pextrq phminposuw pinsrb pinsrd pinsrq pmaxsb pmaxsd pmaxud pmaxuw pminsb pminsd pminud pminuw pmovsxbw pmovsxbd pmovsxbq pmovsxwd pmovsxwq pmovsxdq pmovzxbw pmovzxbd pmovzxbq pmovzxwd pmovzxwq pmovzxdq pmuldq pmulld ptest roundpd roundps roundsd roundss crc32 pcmpestri pcmpestrm pcmpistri pcmpistrm pcmpgtq popcnt getsec pfrcpv pfrsqrtv movbe aesenc aesenclast aesdec aesdeclast aesimc aeskeygenassist vaesenc vaesenclast vaesdec vaesdeclast vaesimc vaeskeygenassist vaddpd vaddps vaddsd vaddss vaddsubpd vaddsubps vandpd vandps vandnpd vandnps vblendpd vblendps vblendvpd vblendvps vbroadcastss vbroadcastsd vbroadcastf128 vcmpeq_ospd vcmpeqpd vcmplt_ospd vcmpltpd vcmple_ospd vcmplepd vcmpunord_qpd vcmpunordpd vcmpneq_uqpd vcmpneqpd vcmpnlt_uspd vcmpnltpd vcmpnle_uspd vcmpnlepd vcmpord_qpd vcmpordpd vcmpeq_uqpd vcmpnge_uspd vcmpngepd vcmpngt_uspd vcmpngtpd vcmpfalse_oqpd vcmpfalsepd vcmpneq_oqpd vcmpge_ospd vcmpgepd vcmpgt_ospd vcmpgtpd vcmptrue_uqpd vcmptruepd vcmplt_oqpd vcmple_oqpd vcmpunord_spd vcmpneq_uspd vcmpnlt_uqpd vcmpnle_uqpd vcmpord_spd vcmpeq_uspd vcmpnge_uqpd vcmpngt_uqpd vcmpfalse_ospd vcmpneq_ospd vcmpge_oqpd vcmpgt_oqpd vcmptrue_uspd vcmppd vcmpeq_osps vcmpeqps vcmplt_osps vcmpltps vcmple_osps vcmpleps vcmpunord_qps vcmpunordps vcmpneq_uqps vcmpneqps vcmpnlt_usps vcmpnltps vcmpnle_usps vcmpnleps vcmpord_qps vcmpordps vcmpeq_uqps vcmpnge_usps vcmpngeps vcmpngt_usps vcmpngtps vcmpfalse_oqps vcmpfalseps vcmpneq_oqps vcmpge_osps vcmpgeps vcmpgt_osps vcmpgtps vcmptrue_uqps vcmptrueps vcmplt_oqps vcmple_oqps vcmpunord_sps vcmpneq_usps vcmpnlt_uqps vcmpnle_uqps vcmpord_sps vcmpeq_usps vcmpnge_uqps vcmpngt_uqps vcmpfalse_osps vcmpneq_osps vcmpge_oqps vcmpgt_oqps vcmptrue_usps vcmpps vcmpeq_ossd vcmpeqsd vcmplt_ossd vcmpltsd vcmple_ossd vcmplesd vcmpunord_qsd vcmpunordsd vcmpneq_uqsd vcmpneqsd vcmpnlt_ussd vcmpnltsd 
vcmpnle_ussd vcmpnlesd vcmpord_qsd vcmpordsd vcmpeq_uqsd vcmpnge_ussd vcmpngesd vcmpngt_ussd vcmpngtsd vcmpfalse_oqsd vcmpfalsesd vcmpneq_oqsd vcmpge_ossd vcmpgesd vcmpgt_ossd vcmpgtsd vcmptrue_uqsd vcmptruesd vcmplt_oqsd vcmple_oqsd vcmpunord_ssd vcmpneq_ussd vcmpnlt_uqsd vcmpnle_uqsd vcmpord_ssd vcmpeq_ussd vcmpnge_uqsd vcmpngt_uqsd vcmpfalse_ossd vcmpneq_ossd vcmpge_oqsd vcmpgt_oqsd vcmptrue_ussd vcmpsd vcmpeq_osss vcmpeqss vcmplt_osss vcmpltss vcmple_osss vcmpless vcmpunord_qss vcmpunordss vcmpneq_uqss vcmpneqss vcmpnlt_usss vcmpnltss vcmpnle_usss vcmpnless vcmpord_qss vcmpordss vcmpeq_uqss vcmpnge_usss vcmpngess vcmpngt_usss vcmpngtss vcmpfalse_oqss vcmpfalsess vcmpneq_oqss vcmpge_osss vcmpgess vcmpgt_osss vcmpgtss vcmptrue_uqss vcmptruess vcmplt_oqss vcmple_oqss vcmpunord_sss vcmpneq_usss vcmpnlt_uqss vcmpnle_uqss vcmpord_sss vcmpeq_usss vcmpnge_uqss vcmpngt_uqss vcmpfalse_osss vcmpneq_osss vcmpge_oqss vcmpgt_oqss vcmptrue_usss vcmpss vcomisd vcomiss vcvtdq2pd vcvtdq2ps vcvtpd2dq vcvtpd2ps vcvtps2dq vcvtps2pd vcvtsd2si vcvtsd2ss vcvtsi2sd vcvtsi2ss vcvtss2sd vcvtss2si vcvttpd2dq vcvttps2dq vcvttsd2si vcvttss2si vdivpd vdivps vdivsd vdivss vdppd vdpps vextractf128 vextractps vhaddpd vhaddps vhsubpd vhsubps vinsertf128 vinsertps vlddqu vldqqu vldmxcsr vmaskmovdqu vmaskmovps vmaskmovpd vmaxpd vmaxps vmaxsd vmaxss vminpd vminps vminsd vminss vmovapd vmovaps vmovd vmovq vmovddup vmovdqa vmovqqa vmovdqu vmovqqu vmovhlps vmovhpd vmovhps vmovlhps vmovlpd vmovlps vmovmskpd vmovmskps vmovntdq vmovntqq vmovntdqa vmovntpd vmovntps vmovsd vmovshdup vmovsldup vmovss vmovupd vmovups vmpsadbw vmulpd vmulps vmulsd vmulss vorpd vorps vpabsb vpabsw vpabsd vpacksswb vpackssdw vpackuswb vpackusdw vpaddb vpaddw vpaddd vpaddq vpaddsb vpaddsw vpaddusb vpaddusw vpalignr vpand vpandn vpavgb vpavgw vpblendvb vpblendw vpcmpestri vpcmpestrm vpcmpistri vpcmpistrm vpcmpeqb vpcmpeqw vpcmpeqd vpcmpeqq vpcmpgtb vpcmpgtw vpcmpgtd vpcmpgtq vpermilpd vpermilps vperm2f128 vpextrb vpextrw vpextrd vpextrq vphaddw vphaddd vphaddsw vphminposuw vphsubw vphsubd vphsubsw vpinsrb vpinsrw vpinsrd vpinsrq vpmaddwd vpmaddubsw vpmaxsb vpmaxsw vpmaxsd vpmaxub vpmaxuw vpmaxud vpminsb vpminsw vpminsd vpminub vpminuw vpminud vpmovmskb vpmovsxbw vpmovsxbd vpmovsxbq vpmovsxwd vpmovsxwq vpmovsxdq vpmovzxbw vpmovzxbd vpmovzxbq vpmovzxwd vpmovzxwq vpmovzxdq vpmulhuw vpmulhrsw vpmulhw vpmullw vpmulld vpmuludq vpmuldq vpor vpsadbw vpshufb vpshufd vpshufhw vpshuflw vpsignb vpsignw vpsignd vpslldq vpsrldq vpsllw vpslld vpsllq vpsraw vpsrad vpsrlw vpsrld vpsrlq vptest vpsubb vpsubw vpsubd vpsubq vpsubsb vpsubsw vpsubusb vpsubusw vpunpckhbw vpunpckhwd vpunpckhdq vpunpckhqdq vpunpcklbw vpunpcklwd vpunpckldq vpunpcklqdq vpxor vrcpps vrcpss vrsqrtps vrsqrtss vroundpd vroundps vroundsd vroundss vshufpd vshufps vsqrtpd vsqrtps vsqrtsd vsqrtss vstmxcsr vsubpd vsubps vsubsd vsubss vtestps vtestpd vucomisd vucomiss vunpckhpd vunpckhps vunpcklpd vunpcklps vxorpd vxorps vzeroall vzeroupper pclmullqlqdq pclmulhqlqdq pclmullqhqdq pclmulhqhqdq pclmulqdq vpclmullqlqdq vpclmulhqlqdq vpclmullqhqdq vpclmulhqhqdq vpclmulqdq vfmadd132ps vfmadd132pd vfmadd312ps vfmadd312pd vfmadd213ps vfmadd213pd vfmadd123ps vfmadd123pd vfmadd231ps vfmadd231pd vfmadd321ps vfmadd321pd vfmaddsub132ps vfmaddsub132pd vfmaddsub312ps vfmaddsub312pd vfmaddsub213ps vfmaddsub213pd vfmaddsub123ps vfmaddsub123pd vfmaddsub231ps vfmaddsub231pd vfmaddsub321ps vfmaddsub321pd vfmsub132ps vfmsub132pd vfmsub312ps vfmsub312pd vfmsub213ps vfmsub213pd vfmsub123ps vfmsub123pd vfmsub231ps vfmsub231pd 
vfmsub321ps vfmsub321pd vfmsubadd132ps vfmsubadd132pd vfmsubadd312ps vfmsubadd312pd vfmsubadd213ps vfmsubadd213pd vfmsubadd123ps vfmsubadd123pd vfmsubadd231ps vfmsubadd231pd vfmsubadd321ps vfmsubadd321pd vfnmadd132ps vfnmadd132pd vfnmadd312ps vfnmadd312pd vfnmadd213ps vfnmadd213pd vfnmadd123ps vfnmadd123pd vfnmadd231ps vfnmadd231pd vfnmadd321ps vfnmadd321pd vfnmsub132ps vfnmsub132pd vfnmsub312ps vfnmsub312pd vfnmsub213ps vfnmsub213pd vfnmsub123ps vfnmsub123pd vfnmsub231ps vfnmsub231pd vfnmsub321ps vfnmsub321pd vfmadd132ss vfmadd132sd vfmadd312ss vfmadd312sd vfmadd213ss vfmadd213sd vfmadd123ss vfmadd123sd vfmadd231ss vfmadd231sd vfmadd321ss vfmadd321sd vfmsub132ss vfmsub132sd vfmsub312ss vfmsub312sd vfmsub213ss vfmsub213sd vfmsub123ss vfmsub123sd vfmsub231ss vfmsub231sd vfmsub321ss vfmsub321sd vfnmadd132ss vfnmadd132sd vfnmadd312ss vfnmadd312sd vfnmadd213ss vfnmadd213sd vfnmadd123ss vfnmadd123sd vfnmadd231ss vfnmadd231sd vfnmadd321ss vfnmadd321sd vfnmsub132ss vfnmsub132sd vfnmsub312ss vfnmsub312sd vfnmsub213ss vfnmsub213sd vfnmsub123ss vfnmsub123sd vfnmsub231ss vfnmsub231sd vfnmsub321ss vfnmsub321sd rdfsbase rdgsbase rdrand wrfsbase wrgsbase vcvtph2ps vcvtps2ph adcx adox rdseed clac stac xstore xcryptecb xcryptcbc xcryptctr xcryptcfb xcryptofb montmul xsha1 xsha256 llwpcb slwpcb lwpval lwpins vfmaddpd vfmaddps vfmaddsd vfmaddss vfmaddsubpd vfmaddsubps vfmsubaddpd vfmsubaddps vfmsubpd vfmsubps vfmsubsd vfmsubss vfnmaddpd vfnmaddps vfnmaddsd vfnmaddss vfnmsubpd vfnmsubps vfnmsubsd vfnmsubss vfrczpd vfrczps vfrczsd vfrczss vpcmov vpcomb vpcomd vpcomq vpcomub vpcomud vpcomuq vpcomuw vpcomw vphaddbd vphaddbq vphaddbw vphadddq vphaddubd vphaddubq vphaddubw vphaddudq vphadduwd vphadduwq vphaddwd vphaddwq vphsubbw vphsubdq vphsubwd vpmacsdd vpmacsdqh vpmacsdql vpmacssdd vpmacssdqh vpmacssdql vpmacsswd vpmacssww vpmacswd vpmacsww vpmadcsswd vpmadcswd vpperm vprotb vprotd vprotq vprotw vpshab vpshad vpshaq vpshaw vpshlb vpshld vpshlq vpshlw vbroadcasti128 vpblendd vpbroadcastb vpbroadcastw vpbroadcastd vpbroadcastq vpermd vpermpd vpermps vpermq vperm2i128 vextracti128 vinserti128 vpmaskmovd vpmaskmovq vpsllvd vpsllvq vpsravd vpsrlvd vpsrlvq vgatherdpd vgatherqpd vgatherdps vgatherqps vpgatherdd vpgatherqd vpgatherdq vpgatherqq xabort xbegin xend xtest andn bextr blci blcic blsi blsic blcfill blsfill blcmsk blsmsk blsr blcs bzhi mulx pdep pext rorx sarx shlx shrx tzcnt tzmsk t1mskc valignd valignq vblendmpd vblendmps vbroadcastf32x4 vbroadcastf64x4 vbroadcasti32x4 vbroadcasti64x4 vcompresspd vcompressps vcvtpd2udq vcvtps2udq vcvtsd2usi vcvtss2usi vcvttpd2udq vcvttps2udq vcvttsd2usi vcvttss2usi vcvtudq2pd vcvtudq2ps vcvtusi2sd vcvtusi2ss vexpandpd vexpandps vextractf32x4 vextractf64x4 vextracti32x4 vextracti64x4 vfixupimmpd vfixupimmps vfixupimmsd vfixupimmss vgetexppd vgetexpps vgetexpsd vgetexpss vgetmantpd vgetmantps vgetmantsd vgetmantss vinsertf32x4 vinsertf64x4 vinserti32x4 vinserti64x4 vmovdqa32 vmovdqa64 vmovdqu32 vmovdqu64 vpabsq vpandd vpandnd vpandnq vpandq vpblendmd vpblendmq vpcmpltd vpcmpled vpcmpneqd vpcmpnltd vpcmpnled vpcmpd vpcmpltq vpcmpleq vpcmpneqq vpcmpnltq vpcmpnleq vpcmpq vpcmpequd vpcmpltud vpcmpleud vpcmpnequd vpcmpnltud vpcmpnleud vpcmpud vpcmpequq vpcmpltuq vpcmpleuq vpcmpnequq vpcmpnltuq vpcmpnleuq vpcmpuq vpcompressd vpcompressq vpermi2d vpermi2pd vpermi2ps vpermi2q vpermt2d vpermt2pd vpermt2ps vpermt2q vpexpandd vpexpandq vpmaxsq vpmaxuq vpminsq vpminuq vpmovdb vpmovdw vpmovqb vpmovqd vpmovqw vpmovsdb vpmovsdw vpmovsqb vpmovsqd vpmovsqw vpmovusdb vpmovusdw vpmovusqb 
vpmovusqd vpmovusqw vpord vporq vprold vprolq vprolvd vprolvq vprord vprorq vprorvd vprorvq vpscatterdd vpscatterdq vpscatterqd vpscatterqq vpsraq vpsravq vpternlogd vpternlogq vptestmd vptestmq vptestnmd vptestnmq vpxord vpxorq vrcp14pd vrcp14ps vrcp14sd vrcp14ss vrndscalepd vrndscaleps vrndscalesd vrndscaless vrsqrt14pd vrsqrt14ps vrsqrt14sd vrsqrt14ss vscalefpd vscalefps vscalefsd vscalefss vscatterdpd vscatterdps vscatterqpd vscatterqps vshuff32x4 vshuff64x2 vshufi32x4 vshufi64x2 kandnw kandw kmovw knotw kortestw korw kshiftlw kshiftrw kunpckbw kxnorw kxorw vpbroadcastmb2q vpbroadcastmw2d vpconflictd vpconflictq vplzcntd vplzcntq vexp2pd vexp2ps vrcp28pd vrcp28ps vrcp28sd vrcp28ss vrsqrt28pd vrsqrt28ps vrsqrt28sd vrsqrt28ss vgatherpf0dpd vgatherpf0dps vgatherpf0qpd vgatherpf0qps vgatherpf1dpd vgatherpf1dps vgatherpf1qpd vgatherpf1qps vscatterpf0dpd vscatterpf0dps vscatterpf0qpd vscatterpf0qps vscatterpf1dpd vscatterpf1dps vscatterpf1qpd vscatterpf1qps prefetchwt1 bndmk bndcl bndcu bndcn bndmov bndldx bndstx sha1rnds4 sha1nexte sha1msg1 sha1msg2 sha256rnds2 sha256msg1 sha256msg2 hint_nop0 hint_nop1 hint_nop2 hint_nop3 hint_nop4 hint_nop5 hint_nop6 hint_nop7 hint_nop8 hint_nop9 hint_nop10 hint_nop11 hint_nop12 hint_nop13 hint_nop14 hint_nop15 hint_nop16 hint_nop17 hint_nop18 hint_nop19 hint_nop20 hint_nop21 hint_nop22 hint_nop23 hint_nop24 hint_nop25 hint_nop26 hint_nop27 hint_nop28 hint_nop29 hint_nop30 hint_nop31 hint_nop32 hint_nop33 hint_nop34 hint_nop35 hint_nop36 hint_nop37 hint_nop38 hint_nop39 hint_nop40 hint_nop41 hint_nop42 hint_nop43 hint_nop44 hint_nop45 hint_nop46 hint_nop47 hint_nop48 hint_nop49 hint_nop50 hint_nop51 hint_nop52 hint_nop53 hint_nop54 hint_nop55 hint_nop56 hint_nop57 hint_nop58 hint_nop59 hint_nop60 hint_nop61 hint_nop62 hint_nop63",built_in:"ip eip rip al ah bl bh cl ch dl dh sil dil bpl spl r8b r9b r10b r11b r12b r13b r14b r15b ax bx cx dx si di bp sp r8w r9w r10w r11w r12w r13w r14w r15w eax ebx ecx edx esi edi ebp esp eip r8d r9d r10d r11d r12d r13d r14d r15d rax rbx rcx rdx rsi rdi rbp rsp r8 r9 r10 r11 r12 r13 r14 r15 cs ds es fs gs ss st st0 st1 st2 st3 st4 st5 st6 st7 mm0 mm1 mm2 mm3 mm4 mm5 mm6 mm7 xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13 xmm14 xmm15 xmm16 xmm17 xmm18 xmm19 xmm20 xmm21 xmm22 xmm23 xmm24 xmm25 xmm26 xmm27 xmm28 xmm29 xmm30 xmm31 ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm8 ymm9 ymm10 ymm11 ymm12 ymm13 ymm14 ymm15 ymm16 ymm17 ymm18 ymm19 ymm20 ymm21 ymm22 ymm23 ymm24 ymm25 ymm26 ymm27 ymm28 ymm29 ymm30 ymm31 zmm0 zmm1 zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm9 zmm10 zmm11 zmm12 zmm13 zmm14 zmm15 zmm16 zmm17 zmm18 zmm19 zmm20 zmm21 zmm22 zmm23 zmm24 zmm25 zmm26 zmm27 zmm28 zmm29 zmm30 zmm31 k0 k1 k2 k3 k4 k5 k6 k7 bnd0 bnd1 bnd2 bnd3 cr0 cr1 cr2 cr3 cr4 cr8 dr0 dr1 dr2 dr3 dr8 tr3 tr4 tr5 tr6 tr7 r0 r1 r2 r3 r4 r5 r6 r7 r0b r1b r2b r3b r4b r5b r6b r7b r0w r1w r2w r3w r4w r5w r6w r7w r0d r1d r2d r3d r4d r5d r6d r7d r0h r1h r2h r3h r0l r1l r2l r3l r4l r5l r6l r7l r8l r9l r10l r11l r12l r13l r14l r15l db dw dd dq dt ddq do dy dz resb resw resd resq rest resdq reso resy resz incbin equ times byte word dword qword nosplit rel abs seg wrt strict near far a32 ptr",meta:"%define %xdefine %+ %undef %defstr %deftok %assign %strcat %strlen %substr %rotate %elif %else %endif %if %ifmacro %ifctx %ifidn %ifidni %ifid %ifnum %ifstr %iftoken %ifempty %ifenv %error %warning %fatal %rep %endrep %include %push %pop %repl %pathsearch %depend %use %arg %stacksize %local %line %comment %endcomment .nolist __FILE__ __LINE__ 
__SECT__ __BITS__ __OUTPUT_FORMAT__ __DATE__ __TIME__ __DATE_NUM__ __TIME_NUM__ __UTC_DATE__ __UTC_TIME__ __UTC_DATE_NUM__ __UTC_TIME_NUM__ __PASS__ struc endstruc istruc at iend align alignb sectalign daz nodaz up down zero default option assume public bits use16 use32 use64 default section segment absolute extern global common cpu float __utf16__ __utf16le__ __utf16be__ __utf32__ __utf32le__ __utf32be__ __float8__ __float16__ __float32__ __float64__ __float80m__ __float80e__ __float128l__ __float128h__ __Infinity__ __QNaN__ __SNaN__ Inf NaN QNaN SNaN float8 float16 float32 float64 float80m float80e float128l float128h __FLOAT_DAZ__ __FLOAT_ROUND__ __FLOAT__"},contains:[s.COMMENT(";","$",{relevance:0}),{className:"number",variants:[{begin:"\\b(?:([0-9][0-9_]*)?\\.[0-9_]*(?:[eE][+-]?[0-9_]+)?|(0[Xx])?[0-9][0-9_]*\\.?[0-9_]*(?:[pP](?:[+-]?[0-9_]+)?)?)\\b",relevance:0},{begin:"\\$[0-9][0-9A-Fa-f]*",relevance:0},{begin:"\\b(?:[0-9A-Fa-f][0-9A-Fa-f_]*[Hh]|[0-9][0-9_]*[DdTt]?|[0-7][0-7_]*[QqOo]|[0-1][0-1_]*[BbYy])\\b"},{begin:"\\b(?:0[Xx][0-9A-Fa-f_]+|0[DdTt][0-9_]+|0[QqOo][0-7_]+|0[BbYy][0-1_]+)\\b"}]},s.QUOTE_STRING_MODE,{className:"string",variants:[{begin:"'",end:"[^\\\\]'"},{begin:"`",end:"[^\\\\]`"}],relevance:0},{className:"symbol",variants:[{begin:"^\\s*[A-Za-z._?][A-Za-z0-9_$#@~.?]*(:|\\s+label)"},{begin:"^\\s*%%[A-Za-z0-9_$#@~.?]*:"}],relevance:0},{className:"subst",begin:"%[0-9]+",relevance:0},{className:"subst",begin:"%!S+",relevance:0},{className:"meta",begin:/^\s*\.[\w_-]+/}]}}}()); \ No newline at end of file From f5ff9e32ca4bd45781426246e4fb6b9fcceff0c2 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 15 Nov 2023 09:38:48 +0100 Subject: [PATCH 087/146] introduce asyncraises in transports/asyncsync (#470) With these fixes, `transports`/`asyncsync` correctly propagate and document their raises information - generally, most transport functions (send etc) raise `TransportError` and `CancelledError` - `closeWait` is special in that it generally doesn't fail. This PR introduces the syntax `Future[void].Raises([types])` to create the `InternalRaisesFuture` type with the correct encoding for the types - this allows it to be used in user code while retaining the possibility to change the internal representation down the line. * introduce raising constraints on stream callbacks - these constraints now give a warning when called with a callback that can raise exceptions (raising callbacks would crash * fix fail and its tests, which wasn't always given a good generic match * work around nim bugs related to macro expansion of generic types * make sure transports raise only `TransportError`-derived exceptions (and `CancelledError`) --- chronos/asyncsync.nim | 126 ++--- chronos/internal/asyncfutures.nim | 56 +- chronos/internal/asyncmacro.nim | 17 +- chronos/internal/raisesfutures.nim | 82 ++- chronos/transports/common.nim | 6 +- chronos/transports/datagram.nim | 238 ++++++-- chronos/transports/stream.nim | 192 ++++--- tests/testasyncstream.nim | 835 ++++++++++++++++------------- tests/testbugs.nim | 23 +- tests/testdatagram.nim | 521 +++++++++--------- tests/testmacro.nim | 19 +- tests/testserver.nim | 43 +- tests/teststream.nim | 527 ++++++++++-------- 13 files changed, 1578 insertions(+), 1107 deletions(-) diff --git a/chronos/asyncsync.nim b/chronos/asyncsync.nim index fa23471a5..9bab1fd68 100644 --- a/chronos/asyncsync.nim +++ b/chronos/asyncsync.nim @@ -28,7 +28,7 @@ type ## is blocked in ``acquire()`` is being processed. 
locked: bool acquired: bool - waiters: seq[Future[void]] + waiters: seq[Future[void].Raising([CancelledError])] AsyncEvent* = ref object of RootRef ## A primitive event object. @@ -41,7 +41,7 @@ type ## state to be signaled, when event get fired, then all coroutines ## continue proceeds in order, they have entered waiting state. flag: bool - waiters: seq[Future[void]] + waiters: seq[Future[void].Raising([CancelledError])] AsyncQueue*[T] = ref object of RootRef ## A queue, useful for coordinating producer and consumer coroutines. @@ -50,8 +50,8 @@ type ## infinite. If it is an integer greater than ``0``, then "await put()" ## will block when the queue reaches ``maxsize``, until an item is ## removed by "await get()". - getters: seq[Future[void]] - putters: seq[Future[void]] + getters: seq[Future[void].Raising([CancelledError])] + putters: seq[Future[void].Raising([CancelledError])] queue: Deque[T] maxsize: int @@ -69,7 +69,7 @@ type EventQueueReader* = object key: EventQueueKey offset: int - waiter: Future[void] + waiter: Future[void].Raising([CancelledError]) overflow: bool AsyncEventQueue*[T] = ref object of RootObj @@ -90,17 +90,14 @@ proc newAsyncLock*(): AsyncLock = ## The ``release()`` procedure changes the state to unlocked and returns ## immediately. - # Workaround for callSoon() not worked correctly before - # getThreadDispatcher() call. - discard getThreadDispatcher() - AsyncLock(waiters: newSeq[Future[void]](), locked: false, acquired: false) + AsyncLock() proc wakeUpFirst(lock: AsyncLock): bool {.inline.} = ## Wake up the first waiter if it isn't done. var i = 0 var res = false while i < len(lock.waiters): - var waiter = lock.waiters[i] + let waiter = lock.waiters[i] inc(i) if not(waiter.finished()): waiter.complete() @@ -120,7 +117,7 @@ proc checkAll(lock: AsyncLock): bool {.inline.} = return false return true -proc acquire*(lock: AsyncLock) {.async.} = +proc acquire*(lock: AsyncLock) {.async: (raises: [CancelledError]).} = ## Acquire a lock ``lock``. ## ## This procedure blocks until the lock ``lock`` is unlocked, then sets it @@ -129,7 +126,7 @@ proc acquire*(lock: AsyncLock) {.async.} = lock.acquired = true lock.locked = true else: - var w = newFuture[void]("AsyncLock.acquire") + let w = Future[void].Raising([CancelledError]).init("AsyncLock.acquire") lock.waiters.add(w) await w lock.acquired = true @@ -165,13 +162,10 @@ proc newAsyncEvent*(): AsyncEvent = ## procedure and reset to `false` with the `clear()` procedure. ## The `wait()` procedure blocks until the flag is `true`. The flag is ## initially `false`. + AsyncEvent() - # Workaround for callSoon() not worked correctly before - # getThreadDispatcher() call. - discard getThreadDispatcher() - AsyncEvent(waiters: newSeq[Future[void]](), flag: false) - -proc wait*(event: AsyncEvent): Future[void] = +proc wait*(event: AsyncEvent): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = ## Block until the internal flag of ``event`` is `true`. ## If the internal flag is `true` on entry, return immediately. Otherwise, ## block until another task calls `fire()` to set the flag to `true`, @@ -210,20 +204,15 @@ proc isSet*(event: AsyncEvent): bool = proc newAsyncQueue*[T](maxsize: int = 0): AsyncQueue[T] = ## Creates a new asynchronous queue ``AsyncQueue``. - # Workaround for callSoon() not worked correctly before - # getThreadDispatcher() call. 
- discard getThreadDispatcher() AsyncQueue[T]( - getters: newSeq[Future[void]](), - putters: newSeq[Future[void]](), queue: initDeque[T](), maxsize: maxsize ) -proc wakeupNext(waiters: var seq[Future[void]]) {.inline.} = +proc wakeupNext(waiters: var seq) {.inline.} = var i = 0 while i < len(waiters): - var waiter = waiters[i] + let waiter = waiters[i] inc(i) if not(waiter.finished()): @@ -250,6 +239,24 @@ proc empty*[T](aq: AsyncQueue[T]): bool {.inline.} = ## Return ``true`` if the queue is empty, ``false`` otherwise. (len(aq.queue) == 0) +proc addFirstImpl[T](aq: AsyncQueue[T], item: T) = + aq.queue.addFirst(item) + aq.getters.wakeupNext() + +proc addLastImpl[T](aq: AsyncQueue[T], item: T) = + aq.queue.addLast(item) + aq.getters.wakeupNext() + +proc popFirstImpl[T](aq: AsyncQueue[T]): T = + let res = aq.queue.popFirst() + aq.putters.wakeupNext() + res + +proc popLastImpl[T](aq: AsyncQueue[T]): T = + let res = aq.queue.popLast() + aq.putters.wakeupNext() + res + proc addFirstNoWait*[T](aq: AsyncQueue[T], item: T) {. raises: [AsyncQueueFullError].}= ## Put an item ``item`` to the beginning of the queue ``aq`` immediately. @@ -257,8 +264,7 @@ proc addFirstNoWait*[T](aq: AsyncQueue[T], item: T) {. ## If queue ``aq`` is full, then ``AsyncQueueFullError`` exception raised. if aq.full(): raise newException(AsyncQueueFullError, "AsyncQueue is full!") - aq.queue.addFirst(item) - aq.getters.wakeupNext() + aq.addFirstImpl(item) proc addLastNoWait*[T](aq: AsyncQueue[T], item: T) {. raises: [AsyncQueueFullError].}= @@ -267,8 +273,7 @@ proc addLastNoWait*[T](aq: AsyncQueue[T], item: T) {. ## If queue ``aq`` is full, then ``AsyncQueueFullError`` exception raised. if aq.full(): raise newException(AsyncQueueFullError, "AsyncQueue is full!") - aq.queue.addLast(item) - aq.getters.wakeupNext() + aq.addLastImpl(item) proc popFirstNoWait*[T](aq: AsyncQueue[T]): T {. raises: [AsyncQueueEmptyError].} = @@ -277,9 +282,7 @@ proc popFirstNoWait*[T](aq: AsyncQueue[T]): T {. ## If queue ``aq`` is empty, then ``AsyncQueueEmptyError`` exception raised. if aq.empty(): raise newException(AsyncQueueEmptyError, "AsyncQueue is empty!") - let res = aq.queue.popFirst() - aq.putters.wakeupNext() - res + aq.popFirstImpl() proc popLastNoWait*[T](aq: AsyncQueue[T]): T {. raises: [AsyncQueueEmptyError].} = @@ -288,65 +291,63 @@ proc popLastNoWait*[T](aq: AsyncQueue[T]): T {. ## If queue ``aq`` is empty, then ``AsyncQueueEmptyError`` exception raised. if aq.empty(): raise newException(AsyncQueueEmptyError, "AsyncQueue is empty!") - let res = aq.queue.popLast() - aq.putters.wakeupNext() - res + aq.popLastImpl() -proc addFirst*[T](aq: AsyncQueue[T], item: T) {.async.} = +proc addFirst*[T](aq: AsyncQueue[T], item: T) {.async: (raises: [CancelledError]).} = ## Put an ``item`` to the beginning of the queue ``aq``. If the queue is full, ## wait until a free slot is available before adding item. while aq.full(): - var putter = newFuture[void]("AsyncQueue.addFirst") + let putter = Future[void].Raising([CancelledError]).init("AsyncQueue.addFirst") aq.putters.add(putter) try: await putter - except CatchableError as exc: + except CancelledError as exc: if not(aq.full()) and not(putter.cancelled()): aq.putters.wakeupNext() raise exc - aq.addFirstNoWait(item) + aq.addFirstImpl(item) -proc addLast*[T](aq: AsyncQueue[T], item: T) {.async.} = +proc addLast*[T](aq: AsyncQueue[T], item: T) {.async: (raises: [CancelledError]).} = ## Put an ``item`` to the end of the queue ``aq``. 
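# Illustrative sketch (not part of the patch): the queue API after this
# change. `addLast`/`popFirst` are now typed as raising only CancelledError,
# while the *NoWait variants keep their synchronous queue-full/empty
# exceptions. `producer` and `consumer` are hypothetical names.
import chronos

proc producer(q: AsyncQueue[int]) {.async: (raises: [CancelledError]).} =
  for i in 0 ..< 4:
    await q.addLast(i)

proc consumer(q: AsyncQueue[int]) {.async: (raises: [CancelledError]).} =
  for _ in 0 ..< 4:
    discard await q.popFirst()

when isMainModule:
  let q = newAsyncQueue[int]()   # unbounded, so the producer never blocks here
  waitFor producer(q)
  waitFor consumer(q)
  doAssert q.empty()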
If the queue is full, ## wait until a free slot is available before adding item. while aq.full(): - var putter = newFuture[void]("AsyncQueue.addLast") + let putter = Future[void].Raising([CancelledError]).init("AsyncQueue.addLast") aq.putters.add(putter) try: await putter - except CatchableError as exc: + except CancelledError as exc: if not(aq.full()) and not(putter.cancelled()): aq.putters.wakeupNext() raise exc - aq.addLastNoWait(item) + aq.addLastImpl(item) -proc popFirst*[T](aq: AsyncQueue[T]): Future[T] {.async.} = +proc popFirst*[T](aq: AsyncQueue[T]): Future[T] {.async: (raises: [CancelledError]).} = ## Remove and return an ``item`` from the beginning of the queue ``aq``. ## If the queue is empty, wait until an item is available. while aq.empty(): - var getter = newFuture[void]("AsyncQueue.popFirst") + let getter = Future[void].Raising([CancelledError]).init("AsyncQueue.popFirst") aq.getters.add(getter) try: await getter - except CatchableError as exc: + except CancelledError as exc: if not(aq.empty()) and not(getter.cancelled()): aq.getters.wakeupNext() raise exc - return aq.popFirstNoWait() + aq.popFirstImpl() -proc popLast*[T](aq: AsyncQueue[T]): Future[T] {.async.} = +proc popLast*[T](aq: AsyncQueue[T]): Future[T] {.async: (raises: [CancelledError]).} = ## Remove and return an ``item`` from the end of the queue ``aq``. ## If the queue is empty, wait until an item is available. while aq.empty(): - var getter = newFuture[void]("AsyncQueue.popLast") + let getter = Future[void].Raising([CancelledError]).init("AsyncQueue.popLast") aq.getters.add(getter) try: await getter - except CatchableError as exc: + except CancelledError as exc: if not(aq.empty()) and not(getter.cancelled()): aq.getters.wakeupNext() raise exc - return aq.popLastNoWait() + aq.popLastImpl() proc putNoWait*[T](aq: AsyncQueue[T], item: T) {. raises: [AsyncQueueFullError].} = @@ -358,11 +359,13 @@ proc getNoWait*[T](aq: AsyncQueue[T]): T {. ## Alias of ``popFirstNoWait()``. aq.popFirstNoWait() -proc put*[T](aq: AsyncQueue[T], item: T): Future[void] {.inline.} = +proc put*[T](aq: AsyncQueue[T], item: T): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = ## Alias of ``addLast()``. aq.addLast(item) -proc get*[T](aq: AsyncQueue[T]): Future[T] {.inline.} = +proc get*[T](aq: AsyncQueue[T]): Future[T] {. + async: (raw: true, raises: [CancelledError]).} = ## Alias of ``popFirst()``. aq.popFirst() @@ -416,7 +419,7 @@ proc contains*[T](aq: AsyncQueue[T], item: T): bool {.inline.} = ## via the ``in`` operator. for e in aq.queue.items(): if e == item: return true - return false + false proc `$`*[T](aq: AsyncQueue[T]): string = ## Turn an async queue ``aq`` into its string representation. @@ -452,8 +455,7 @@ proc compact(ab: AsyncEventQueue) {.raises: [].} = else: ab.queue.clear() -proc getReaderIndex(ab: AsyncEventQueue, key: EventQueueKey): int {. 
- raises: [].} = +proc getReaderIndex(ab: AsyncEventQueue, key: EventQueueKey): int = for index, value in ab.readers.pairs(): if value.key == key: return index @@ -507,7 +509,7 @@ proc close*(ab: AsyncEventQueue) {.raises: [].} = ab.readers.reset() ab.queue.clear() -proc closeWait*(ab: AsyncEventQueue): Future[void] {.raises: [].} = +proc closeWait*(ab: AsyncEventQueue): Future[void] {.async: (raw: true, raises: []).} = let retFuture = newFuture[void]("AsyncEventQueue.closeWait()", {FutureFlag.OwnCancelSchedule}) proc continuation(udata: pointer) {.gcsafe.} = @@ -528,7 +530,7 @@ template readerOverflow*(ab: AsyncEventQueue, reader: EventQueueReader): bool = ab.limit + (reader.offset - ab.offset) <= len(ab.queue) -proc emit*[T](ab: AsyncEventQueue[T], data: T) {.raises: [].} = +proc emit*[T](ab: AsyncEventQueue[T], data: T) = if len(ab.readers) > 0: # We enqueue `data` only if there active reader present. var changesPresent = false @@ -565,7 +567,8 @@ proc emit*[T](ab: AsyncEventQueue[T], data: T) {.raises: [].} = proc waitEvents*[T](ab: AsyncEventQueue[T], key: EventQueueKey, - eventsCount = -1): Future[seq[T]] {.async.} = + eventsCount = -1): Future[seq[T]] {. + async: (raises: [AsyncEventQueueFullError, CancelledError]).} = ## Wait for events var events: seq[T] @@ -595,7 +598,8 @@ proc waitEvents*[T](ab: AsyncEventQueue[T], doAssert(length >= ab.readers[index].offset) if length == ab.readers[index].offset: # We are at the end of queue, it means that we should wait for new events. - let waitFuture = newFuture[void]("AsyncEventQueue.waitEvents") + let waitFuture = Future[void].Raising([CancelledError]).init( + "AsyncEventQueue.waitEvents") ab.readers[index].waiter = waitFuture resetFuture = true await waitFuture @@ -626,4 +630,4 @@ proc waitEvents*[T](ab: AsyncEventQueue[T], if (eventsCount <= 0) or (len(events) == eventsCount): break - return events + events diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index c4a737477..a36ff4a91 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -16,7 +16,9 @@ import stew/base10 import ./[asyncengine, raisesfutures] import ../[config, futures] -export raisesfutures.InternalRaisesFuture +export + raisesfutures.Raising, raisesfutures.InternalRaisesFuture, + raisesfutures.init, raisesfutures.error, raisesfutures.readError when chronosStackTrace: import std/strutils @@ -109,7 +111,7 @@ template newInternalRaisesFuture*[T, E](fromProc: static[string] = ""): auto = ## that this future belongs to, is a good habit as it helps with debugging. newInternalRaisesFutureImpl[T, E](getSrcLocation(fromProc)) -template newFutureSeq*[A, B](fromProc: static[string] = ""): FutureSeq[A, B] = +template newFutureSeq*[A, B](fromProc: static[string] = ""): FutureSeq[A, B] {.deprecated.} = ## Create a new future which can hold/preserve GC sequence until future will ## not be completed. ## @@ -117,7 +119,7 @@ template newFutureSeq*[A, B](fromProc: static[string] = ""): FutureSeq[A, B] = ## that this future belongs to, is a good habit as it helps with debugging. newFutureSeqImpl[A, B](getSrcLocation(fromProc)) -template newFutureStr*[T](fromProc: static[string] = ""): FutureStr[T] = +template newFutureStr*[T](fromProc: static[string] = ""): FutureStr[T] {.deprecated.} = ## Create a new future which can hold/preserve GC string until future will ## not be completed. ## @@ -205,7 +207,8 @@ template complete*(future: Future[void]) = ## Completes a void ``future``. 
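# Illustrative sketch (not part of the patch): AsyncEventQueue with the
# annotations above; `waitEvents` may raise AsyncEventQueueFullError or
# CancelledError while `emit` and `closeWait` raise nothing. `register`,
# `unregister` and `newAsyncEventQueue` come from the existing asyncsync
# API and are not shown in these hunks; `drain` is a hypothetical name.
import chronos

proc drain(queue: AsyncEventQueue[int]): Future[seq[int]] {.
    async: (raises: [AsyncEventQueueFullError, CancelledError]).} =
  let key = queue.register()
  queue.emit(1)
  queue.emit(2)
  let events = await queue.waitEvents(key, 2)
  queue.unregister(key)
  events

when isMainModule:
  let queue = newAsyncEventQueue[int]()
  doAssert waitFor(drain(queue)) == @[1, 2]
  waitFor queue.closeWait()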
complete(future, getSrcLocation()) -proc fail(future: FutureBase, error: ref CatchableError, loc: ptr SrcLoc) = +proc failImpl( + future: FutureBase, error: ref CatchableError, loc: ptr SrcLoc) = if not(future.cancelled()): checkFinished(future, loc) future.internalError = error @@ -216,10 +219,16 @@ proc fail(future: FutureBase, error: ref CatchableError, loc: ptr SrcLoc) = getStackTrace(error) future.finish(FutureState.Failed) -template fail*( - future: FutureBase, error: ref CatchableError, warn: static bool = false) = +template fail*[T]( + future: Future[T], error: ref CatchableError, warn: static bool = false) = ## Completes ``future`` with ``error``. - fail(future, error, getSrcLocation()) + failImpl(future, error, getSrcLocation()) + +template fail*[T, E]( + future: InternalRaisesFuture[T, E], error: ref CatchableError, + warn: static bool = true) = + checkRaises(future, E, error, warn) + failImpl(future, error, getSrcLocation()) template newCancelledError(): ref CancelledError = (ref CancelledError)(msg: "Future operation cancelled!") @@ -377,8 +386,6 @@ proc futureContinue*(fut: FutureBase) {.raises: [], gcsafe.} = {.pop.} when chronosStackTrace: - import std/strutils - template getFilenameProcname(entry: StackTraceEntry): (string, string) = when compiles(entry.filenameStr) and compiles(entry.procnameStr): # We can't rely on "entry.filename" and "entry.procname" still being valid @@ -462,31 +469,36 @@ proc internalCheckComplete*(fut: FutureBase) {.raises: [CatchableError].} = injectStacktrace(fut.internalError) raise fut.internalError -macro internalCheckComplete*(f: InternalRaisesFuture): untyped = +macro internalCheckComplete*(fut: InternalRaisesFuture, raises: typed) = # For InternalRaisesFuture[void, (ValueError, OSError), will do: # {.cast(raises: [ValueError, OSError]).}: # if isNil(f.error): discard # else: raise f.error - let e = getTypeInst(f)[2] - let types = getType(e) + # TODO https://github.com/nim-lang/Nim/issues/22937 + # we cannot `getTypeInst` on the `fut` - when aliases are involved, the + # generics are lost - so instead, we pass the raises list explicitly + let types = getRaisesTypes(raises) if isNoRaises(types): return quote do: - if not(isNil(`f`.internalError)): - raiseAssert("Unhandled future exception: " & `f`.error.msg) + if not(isNil(`fut`.internalError)): + # This would indicate a bug in which `error` was set via the non-raising + # base type + raiseAssert("Error set on a non-raising future: " & `fut`.internalError.msg) expectKind(types, nnkBracketExpr) expectKind(types[0], nnkSym) + assert types[0].strVal == "tuple" let ifRaise = nnkIfExpr.newTree( nnkElifExpr.newTree( - quote do: isNil(`f`.internalError), + quote do: isNil(`fut`.internalError), quote do: discard ), nnkElseExpr.newTree( - nnkRaiseStmt.newNimNode(lineInfoFrom=f).add( - quote do: (`f`.internalError) + nnkRaiseStmt.newNimNode(lineInfoFrom=fut).add( + quote do: (`fut`.internalError) ) ) ) @@ -1118,7 +1130,7 @@ proc one*[F: SomeFuture](futs: varargs[F]): Future[F] {. return retFuture proc race*(futs: varargs[FutureBase]): Future[FutureBase] {. - async: (raw: true, raises: [CancelledError]).} = + async: (raw: true, raises: [ValueError, CancelledError]).} = ## Returns a future which will complete and return completed FutureBase, ## when one of the futures in ``futs`` will be completed, failed or canceled. 
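# Illustrative sketch (not part of the patch): `race` is now typed as
# raising ValueError and CancelledError (the ValueError case presumably
# covering an empty input list), so a caller can enumerate exactly those
# outcomes. `firstDone` is a hypothetical name.
import chronos

proc firstDone(a, b: FutureBase): Future[FutureBase] {.
    async: (raises: [ValueError, CancelledError]).} =
  # Completes with whichever of `a`/`b` finishes first, whether it
  # succeeded, failed or was cancelled.
  await race(a, b)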
## @@ -1488,12 +1500,6 @@ when defined(windows): {.pop.} # Automatically deduced raises from here onwards -template fail*[T, E]( - future: InternalRaisesFuture[T, E], error: ref CatchableError, - warn: static bool = true) = - checkRaises(future, error, warn) - fail(future, error, getSrcLocation()) - proc waitFor*[T, E](fut: InternalRaisesFuture[T, E]): T = # {.raises: [E]} ## **Blocks** the current thread until the specified future finishes and ## reads it, potentially raising an exception if the future failed or was @@ -1512,7 +1518,7 @@ proc read*[T: not void, E](future: InternalRaisesFuture[T, E]): lent T = # {.rai # TODO: Make a custom exception type for this? raise newException(ValueError, "Future still in progress.") - internalCheckComplete(future) + internalCheckComplete(future, E) future.internalValue proc read*[E](future: InternalRaisesFuture[void, E]) = # {.raises: [E, CancelledError].} diff --git a/chronos/internal/asyncmacro.nim b/chronos/internal/asyncmacro.nim index 11daf3363..88e11e395 100644 --- a/chronos/internal/asyncmacro.nim +++ b/chronos/internal/asyncmacro.nim @@ -497,7 +497,7 @@ proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = prc -template await*[T](f: Future[T]): untyped = +template await*[T](f: Future[T]): T = when declared(chronosInternalRetFuture): chronosInternalRetFuture.internalChild = f # `futureContinue` calls the iterator generated by the `async` @@ -512,6 +512,21 @@ template await*[T](f: Future[T]): untyped = else: unsupported "await is only available within {.async.}" +template await*[T, E](f: InternalRaisesFuture[T, E]): T = + when declared(chronosInternalRetFuture): + chronosInternalRetFuture.internalChild = f + # `futureContinue` calls the iterator generated by the `async` + # transformation - `yield` gives control back to `futureContinue` which is + # responsible for resuming execution once the yielded future is finished + yield chronosInternalRetFuture.internalChild + # `child` released by `futureContinue` + cast[type(f)](chronosInternalRetFuture.internalChild).internalCheckComplete(E) + + when T isnot void: + cast[type(f)](chronosInternalRetFuture.internalChild).value() + else: + unsupported "await is only available within {.async.}" + template awaitne*[T](f: Future[T]): Future[T] = when declared(chronosInternalRetFuture): chronosInternalRetFuture.internalChild = f diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim index ad811f72b..79384d2ee 100644 --- a/chronos/internal/raisesfutures.nim +++ b/chronos/internal/raisesfutures.nim @@ -1,5 +1,5 @@ import - std/macros, + std/[macros, sequtils], ../futures type @@ -18,6 +18,45 @@ proc makeNoRaises*(): NimNode {.compileTime.} = ident"void" +macro Raising*[T](F: typedesc[Future[T]], E: varargs[typedesc]): untyped = + ## Given a Future type instance, return a type storing `{.raises.}` + ## information + ## + ## Note; this type may change in the future + E.expectKind(nnkBracket) + + let raises = if E.len == 0: + makeNoRaises() + else: + nnkTupleConstr.newTree(E.mapIt(it)) + nnkBracketExpr.newTree( + ident "InternalRaisesFuture", + nnkDotExpr.newTree(F, ident"T"), + raises + ) + +template init*[T, E]( + F: type InternalRaisesFuture[T, E], fromProc: static[string] = ""): F = + ## Creates a new pending future. + ## + ## Specifying ``fromProc``, which is a string specifying the name of the proc + ## that this future belongs to, is a good habit as it helps with debugging. 
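# Illustrative sketch (not part of the patch): building a raises-aware
# future by hand, as the asyncsync code above does, and failing it. `fail`
# now goes through `checkRaises`, so the exception type is checked against
# the future's declared raises list.
import chronos

let fut = Future[int].Raising([ValueError]).init("example.manual")
fut.fail(newException(ValueError, "bad input"))  # ValueError is in the list
# fut.fail(newException(IOError, "oops"))        # not in the list: flagged by checkRaises
doAssert fut.failed()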
+ let res = F() + internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending, {}) + res + +template init*[T, E]( + F: type InternalRaisesFuture[T, E], fromProc: static[string] = "", + flags: static[FutureFlags]): F = + ## Creates a new pending future. + ## + ## Specifying ``fromProc``, which is a string specifying the name of the proc + ## that this future belongs to, is a good habit as it helps with debugging. + let res = F() + internalInitFutureBase( + res, getSrcLocation(fromProc), FutureState.Pending, flags) + res + proc isNoRaises*(n: NimNode): bool {.compileTime.} = n.eqIdent("void") @@ -78,21 +117,15 @@ macro union*(tup0: typedesc[tuple], tup1: typedesc[tuple]): typedesc = if result.len == 0: result = makeNoRaises() -proc getRaises*(future: NimNode): NimNode {.compileTime.} = - # Given InternalRaisesFuture[T, (A, B, C)], returns (A, B, C) - let types = getType(getTypeInst(future)[2]) - if isNoRaises(types): - nnkBracketExpr.newTree(newEmptyNode()) - else: - expectKind(types, nnkBracketExpr) - expectKind(types[0], nnkSym) - assert types[0].strVal == "tuple" - assert types.len >= 1 - - types +proc getRaisesTypes*(raises: NimNode): NimNode = + let typ = getType(raises) + case typ.typeKind + of ntyTypeDesc: typ[1] + else: typ macro checkRaises*[T: CatchableError]( - future: InternalRaisesFuture, error: ref T, warn: static bool = true): untyped = + future: InternalRaisesFuture, raises: typed, error: ref T, + warn: static bool = true): untyped = ## Generate code that checks that the given error is compatible with the ## raises restrictions of `future`. ## @@ -100,11 +133,18 @@ macro checkRaises*[T: CatchableError]( ## information available at compile time - in particular, if the raises ## inherit from `error`, we end up with the equivalent of a downcast which ## raises a Defect if it fails. - let raises = getRaises(future) + let + raises = getRaisesTypes(raises) expectKind(getTypeInst(error), nnkRefTy) let toMatch = getTypeInst(error)[0] + + if isNoRaises(raises): + error( + "`fail`: `" & repr(toMatch) & "` incompatible with `raises: []`", future) + return + var typeChecker = ident"false" maybeChecker = ident"false" @@ -134,3 +174,15 @@ macro checkRaises*[T: CatchableError]( else: `warning` assert(`runtimeChecker`, `errorMsg`) + +proc error*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {. + raises: [].} = + static: + warning("No exceptions possible with this operation, `error` always returns nil") + nil + +proc readError*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {. 
+ raises: [ValueError].} = + static: + warning("No exceptions possible with this operation, `readError` always raises") + raise newException(ValueError, "No error in future.") diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index d8263af2e..24f9852b8 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -113,6 +113,8 @@ type ## Transport's capability not supported exception TransportUseClosedError* = object of TransportError ## Usage after transport close exception + TransportUseEofError* = object of TransportError + ## Usage after transport half-close exception TransportTooManyError* = object of TransportError ## Too many open file descriptors exception TransportAbortedError* = object of TransportError @@ -567,11 +569,11 @@ template checkClosed*(t: untyped, future: untyped) = template checkWriteEof*(t: untyped, future: untyped) = if (WriteEof in (t).state): - future.fail(newException(TransportError, + future.fail(newException(TransportUseEofError, "Transport connection is already dropped!")) return future -template getError*(t: untyped): ref CatchableError = +template getError*(t: untyped): ref TransportError = var err = (t).error (t).error = nil err diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index aec18ae32..30f872d5a 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -27,7 +27,10 @@ type DatagramCallback* = proc(transp: DatagramTransport, remote: TransportAddress): Future[void] {. - gcsafe, raises: [].} + async: (raises: []).} + + UnsafeDatagramCallback* = proc(transp: DatagramTransport, + remote: TransportAddress): Future[void] {.async.} DatagramTransport* = ref object of RootRef fd*: AsyncFD # File descriptor @@ -35,7 +38,7 @@ type flags: set[ServerFlags] # Flags buffer: seq[byte] # Reading buffer buflen: int # Reading buffer effective size - error: ref CatchableError # Current error + error: ref TransportError # Current error queue: Deque[GramVector] # Writer queue local: TransportAddress # Local address remote: TransportAddress # Remote address @@ -599,6 +602,41 @@ proc close*(transp: DatagramTransport) = transp.state.incl({WriteClosed, ReadClosed}) closeSocket(transp.fd, continuation) +proc newDatagramTransportCommon(cbproc: UnsafeDatagramCallback, + remote: TransportAddress, + local: TransportAddress, + sock: AsyncFD, + flags: set[ServerFlags], + udata: pointer, + child: DatagramTransport, + bufferSize: int, + ttl: int, + dualstack = DualStackType.Auto + ): DatagramTransport {. + raises: [TransportOsError].} = + ## Create new UDP datagram transport (IPv4). + ## + ## ``cbproc`` - callback which will be called, when new datagram received. + ## ``remote`` - bind transport to remote address (optional). + ## ``local`` - bind transport to local address (to serving incoming + ## datagrams, optional) + ## ``sock`` - application-driven socket to use. + ## ``flags`` - flags that will be applied to socket. + ## ``udata`` - custom argument which will be passed to ``cbproc``. + ## ``bufSize`` - size of internal buffer. + ## ``ttl`` - TTL for UDP datagram packet (only usable when flags has + ## ``Broadcast`` option). 
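# A minimal sketch (assuming `import chronos`) of the exception-checked
# callback shape that the wrapper below adapts unsafe callbacks to: annotated
# with `{.async: (raises: []).}`, it must handle every exception itself.
# `onDatagram` is an illustrative name only.
proc onDatagram(transp: DatagramTransport,
                remote: TransportAddress) {.async: (raises: []).} =
  try:
    let msg = transp.getMessage()   # raises TransportError on read errors
    discard msg
  except TransportError as exc:
    raiseAssert "unexpected datagram read error: " & exc.msg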
+ + proc wrap(transp: DatagramTransport, + remote: TransportAddress) {.async: (raises: []).} = + try: + cbproc(transp, remote) + except CatchableError as exc: + raiseAssert "Unexpected exception from stream server cbproc: " & exc.msg + + newDatagramTransportCommon(wrap, remote, local, sock, flags, udata, child, + bufferSize, ttl, dualstack) + proc newDatagramTransport*(cbproc: DatagramCallback, remote: TransportAddress = AnyAddress, local: TransportAddress = AnyAddress, @@ -689,7 +727,102 @@ proc newDatagramTransport6*[T](cbproc: DatagramCallback, cast[pointer](udata), child, bufSize, ttl, dualstack) -proc join*(transp: DatagramTransport): Future[void] = +proc newDatagramTransport*(cbproc: UnsafeDatagramCallback, + remote: TransportAddress = AnyAddress, + local: TransportAddress = AnyAddress, + sock: AsyncFD = asyncInvalidSocket, + flags: set[ServerFlags] = {}, + udata: pointer = nil, + child: DatagramTransport = nil, + bufSize: int = DefaultDatagramBufferSize, + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. + raises: [TransportOsError], + deprecated: "Callback must not raise exceptions, annotate with {.async: (raises: []).}".} = + ## Create new UDP datagram transport (IPv4). + ## + ## ``cbproc`` - callback which will be called, when new datagram received. + ## ``remote`` - bind transport to remote address (optional). + ## ``local`` - bind transport to local address (to serving incoming + ## datagrams, optional) + ## ``sock`` - application-driven socket to use. + ## ``flags`` - flags that will be applied to socket. + ## ``udata`` - custom argument which will be passed to ``cbproc``. + ## ``bufSize`` - size of internal buffer. + ## ``ttl`` - TTL for UDP datagram packet (only usable when flags has + ## ``Broadcast`` option). + newDatagramTransportCommon(cbproc, remote, local, sock, flags, udata, child, + bufSize, ttl, dualstack) + +proc newDatagramTransport*[T](cbproc: UnsafeDatagramCallback, + udata: ref T, + remote: TransportAddress = AnyAddress, + local: TransportAddress = AnyAddress, + sock: AsyncFD = asyncInvalidSocket, + flags: set[ServerFlags] = {}, + child: DatagramTransport = nil, + bufSize: int = DefaultDatagramBufferSize, + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. + raises: [TransportOsError], + deprecated: "Callback must not raise exceptions, annotate with {.async: (raises: []).}".} = + var fflags = flags + {GCUserData} + GC_ref(udata) + newDatagramTransportCommon(cbproc, remote, local, sock, fflags, + cast[pointer](udata), child, bufSize, ttl, + dualstack) + +proc newDatagramTransport6*(cbproc: UnsafeDatagramCallback, + remote: TransportAddress = AnyAddress6, + local: TransportAddress = AnyAddress6, + sock: AsyncFD = asyncInvalidSocket, + flags: set[ServerFlags] = {}, + udata: pointer = nil, + child: DatagramTransport = nil, + bufSize: int = DefaultDatagramBufferSize, + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. + raises: [TransportOsError], + deprecated: "Callback must not raise exceptions, annotate with {.async: (raises: []).}".} = + ## Create new UDP datagram transport (IPv6). + ## + ## ``cbproc`` - callback which will be called, when new datagram received. + ## ``remote`` - bind transport to remote address (optional). + ## ``local`` - bind transport to local address (to serving incoming + ## datagrams, optional) + ## ``sock`` - application-driven socket to use. + ## ``flags`` - flags that will be applied to socket. + ## ``udata`` - custom argument which will be passed to ``cbproc``. 
+ ## ``bufSize`` - size of internal buffer. + ## ``ttl`` - TTL for UDP datagram packet (only usable when flags has + ## ``Broadcast`` option). + newDatagramTransportCommon(cbproc, remote, local, sock, flags, udata, child, + bufSize, ttl, dualstack) + +proc newDatagramTransport6*[T](cbproc: UnsafeDatagramCallback, + udata: ref T, + remote: TransportAddress = AnyAddress6, + local: TransportAddress = AnyAddress6, + sock: AsyncFD = asyncInvalidSocket, + flags: set[ServerFlags] = {}, + child: DatagramTransport = nil, + bufSize: int = DefaultDatagramBufferSize, + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. + raises: [TransportOsError], + deprecated: "Callback must not raise exceptions, annotate with {.async: (raises: []).}".} = + var fflags = flags + {GCUserData} + GC_ref(udata) + newDatagramTransportCommon(cbproc, remote, local, sock, fflags, + cast[pointer](udata), child, bufSize, ttl, + dualstack) + +proc join*(transp: DatagramTransport): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = ## Wait until the transport ``transp`` will be closed. var retFuture = newFuture[void]("datagram.transport.join") @@ -707,14 +840,15 @@ proc join*(transp: DatagramTransport): Future[void] = return retFuture -proc closeWait*(transp: DatagramTransport): Future[void] = +proc closeWait*(transp: DatagramTransport): Future[void] {. + async: (raw: true, raises: []).} = ## Close transport ``transp`` and release all resources. - const FutureName = "datagram.transport.closeWait" + let retFuture = newFuture[void]( + "datagram.transport.closeWait", {FutureFlag.OwnCancelSchedule}) if {ReadClosed, WriteClosed} * transp.state != {}: - return Future.completed(FutureName) - - let retFuture = newFuture[void](FutureName, {FutureFlag.OwnCancelSchedule}) + retFuture.complete() + return retFuture proc continuation(udata: pointer) {.gcsafe.} = retFuture.complete() @@ -733,7 +867,8 @@ proc closeWait*(transp: DatagramTransport): Future[void] = retFuture proc send*(transp: DatagramTransport, pbytes: pointer, - nbytes: int): Future[void] = + nbytes: int): Future[void] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send buffer with pointer ``pbytes`` and size ``nbytes`` using transport ## ``transp`` to remote destination address which was bounded on transport. var retFuture = newFuture[void]("datagram.transport.send(pointer)") @@ -751,22 +886,21 @@ proc send*(transp: DatagramTransport, pbytes: pointer, return retFuture proc send*(transp: DatagramTransport, msg: sink string, - msglen = -1): Future[void] = + msglen = -1): Future[void] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination ## address which was bounded on transport. 
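# A minimal usage sketch (illustrative `sendPing` helper): with the explicit
# raises list above, callers can handle exactly `TransportError` and let
# cancellation propagate.
proc sendPing(transp: DatagramTransport) {.
    async: (raises: [CancelledError]).} =
  try:
    await transp.send("ping")
  except TransportError as exc:
    echo "send failed: ", exc.msg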
- var retFuture = newFutureStr[void]("datagram.transport.send(string)") + var retFuture = newFuture[void]("datagram.transport.send(string)") transp.checkClosed(retFuture) - when declared(shallowCopy): - if not(isLiteral(msg)): - shallowCopy(retFuture.gcholder, msg) - else: - retFuture.gcholder = msg - else: - retFuture.gcholder = msg + let length = if msglen <= 0: len(msg) else: msglen - let vector = GramVector(kind: WithoutAddress, buf: addr retFuture.gcholder[0], + var localCopy = msg + retFuture.addCallback(proc(_: pointer) = reset(localCopy)) + + let vector = GramVector(kind: WithoutAddress, buf: addr localCopy[0], buflen: length, - writer: cast[Future[void]](retFuture)) + writer: retFuture) + transp.queue.addLast(vector) if WritePaused in transp.state: let wres = transp.resumeWrite() @@ -775,22 +909,20 @@ proc send*(transp: DatagramTransport, msg: sink string, return retFuture proc send*[T](transp: DatagramTransport, msg: sink seq[T], - msglen = -1): Future[void] = + msglen = -1): Future[void] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination ## address which was bounded on transport. - var retFuture = newFutureSeq[void, T]("datagram.transport.send(seq)") + var retFuture = newFuture[void]("datagram.transport.send(seq)") transp.checkClosed(retFuture) - when declared(shallowCopy): - if not(isLiteral(msg)): - shallowCopy(retFuture.gcholder, msg) - else: - retFuture.gcholder = msg - else: - retFuture.gcholder = msg + let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) - let vector = GramVector(kind: WithoutAddress, buf: addr retFuture.gcholder[0], + var localCopy = msg + retFuture.addCallback(proc(_: pointer) = reset(localCopy)) + + let vector = GramVector(kind: WithoutAddress, buf: addr localCopy[0], buflen: length, - writer: cast[Future[void]](retFuture)) + writer: retFuture) transp.queue.addLast(vector) if WritePaused in transp.state: let wres = transp.resumeWrite() @@ -799,7 +931,8 @@ proc send*[T](transp: DatagramTransport, msg: sink seq[T], return retFuture proc sendTo*(transp: DatagramTransport, remote: TransportAddress, - pbytes: pointer, nbytes: int): Future[void] = + pbytes: pointer, nbytes: int): Future[void] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send buffer with pointer ``pbytes`` and size ``nbytes`` using transport ## ``transp`` to remote destination address ``remote``. var retFuture = newFuture[void]("datagram.transport.sendTo(pointer)") @@ -814,22 +947,20 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, return retFuture proc sendTo*(transp: DatagramTransport, remote: TransportAddress, - msg: sink string, msglen = -1): Future[void] = + msg: sink string, msglen = -1): Future[void] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination ## address ``remote``. 
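# A minimal usage sketch for the address-taking variant (illustrative
# `notifyPeer` helper): `sendTo` now advertises the same
# `TransportError`/`CancelledError` pair, so the helper's raises list can be
# stated precisely.
proc notifyPeer(transp: DatagramTransport,
                peer: TransportAddress) {.
    async: (raises: [TransportError, CancelledError]).} =
  await transp.sendTo(peer, "hello")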
- var retFuture = newFutureStr[void]("datagram.transport.sendTo(string)") + var retFuture = newFuture[void]("datagram.transport.sendTo(string)") transp.checkClosed(retFuture) - when declared(shallowCopy): - if not(isLiteral(msg)): - shallowCopy(retFuture.gcholder, msg) - else: - retFuture.gcholder = msg - else: - retFuture.gcholder = msg + let length = if msglen <= 0: len(msg) else: msglen - let vector = GramVector(kind: WithAddress, buf: addr retFuture.gcholder[0], + var localCopy = msg + retFuture.addCallback(proc(_: pointer) = reset(localCopy)) + + let vector = GramVector(kind: WithAddress, buf: addr localCopy[0], buflen: length, - writer: cast[Future[void]](retFuture), + writer: retFuture, address: remote) transp.queue.addLast(vector) if WritePaused in transp.state: @@ -839,20 +970,17 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, return retFuture proc sendTo*[T](transp: DatagramTransport, remote: TransportAddress, - msg: sink seq[T], msglen = -1): Future[void] = + msg: sink seq[T], msglen = -1): Future[void] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send sequence ``msg`` using transport ``transp`` to remote destination ## address ``remote``. - var retFuture = newFutureSeq[void, T]("datagram.transport.sendTo(seq)") + var retFuture = newFuture[void]("datagram.transport.sendTo(seq)") transp.checkClosed(retFuture) - when declared(shallowCopy): - if not(isLiteral(msg)): - shallowCopy(retFuture.gcholder, msg) - else: - retFuture.gcholder = msg - else: - retFuture.gcholder = msg let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) - let vector = GramVector(kind: WithAddress, buf: addr retFuture.gcholder[0], + var localCopy = msg + retFuture.addCallback(proc(_: pointer) = reset(localCopy)) + + let vector = GramVector(kind: WithAddress, buf: addr localCopy[0], buflen: length, writer: cast[Future[void]](retFuture), address: remote) @@ -864,7 +992,7 @@ proc sendTo*[T](transp: DatagramTransport, remote: TransportAddress, return retFuture proc peekMessage*(transp: DatagramTransport, msg: var seq[byte], - msglen: var int) {.raises: [CatchableError].} = + msglen: var int) {.raises: [TransportError].} = ## Get access to internal message buffer and length of incoming datagram. if ReadError in transp.state: transp.state.excl(ReadError) @@ -876,7 +1004,7 @@ proc peekMessage*(transp: DatagramTransport, msg: var seq[byte], msglen = transp.buflen proc getMessage*(transp: DatagramTransport): seq[byte] {. - raises: [CatchableError].} = + raises: [TransportError].} = ## Copy data from internal message buffer and return result. var default: seq[byte] if ReadError in transp.state: diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 7471a4468..bdcb8d7b7 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -58,6 +58,8 @@ type done: bool] {. 
gcsafe, raises: [].} + ReaderFuture = Future[void].Raising([TransportError, CancelledError]) + const StreamTransportTrackerName* = "stream.transport" StreamServerTrackerName* = "stream.server" @@ -68,10 +70,10 @@ when defined(windows): StreamTransport* = ref object of RootRef fd*: AsyncFD # File descriptor state: set[TransportState] # Current Transport state - reader: Future[void] # Current reader Future + reader: ReaderFuture # Current reader Future buffer: seq[byte] # Reading buffer offset: int # Reading buffer offset - error: ref CatchableError # Current error + error: ref TransportError # Current error queue: Deque[StreamVector] # Writer queue future: Future[void] # Stream life future # Windows specific part @@ -87,18 +89,18 @@ when defined(windows): local: TransportAddress # Local address remote: TransportAddress # Remote address of TransportKind.Pipe: - todo1: int + discard of TransportKind.File: - todo2: int + discard else: type StreamTransport* = ref object of RootRef fd*: AsyncFD # File descriptor state: set[TransportState] # Current Transport state - reader: Future[void] # Current reader Future + reader: ReaderFuture # Current reader Future buffer: seq[byte] # Reading buffer offset: int # Reading buffer offset - error: ref CatchableError # Current error + error: ref TransportError # Current error queue: Deque[StreamVector] # Writer queue future: Future[void] # Stream life future case kind*: TransportKind @@ -107,18 +109,23 @@ else: local: TransportAddress # Local address remote: TransportAddress # Remote address of TransportKind.Pipe: - todo1: int + discard of TransportKind.File: - todo2: int + discard type StreamCallback* = proc(server: StreamServer, - client: StreamTransport): Future[void] {. - gcsafe, raises: [].} + client: StreamTransport) {.async: (raises: []).} ## New remote client connection callback ## ``server`` - StreamServer object. ## ``client`` - accepted client transport. + UnsafeStreamCallback* = proc(server: StreamServer, + client: StreamTransport) {.async.} + ## Connection callback that doesn't check for exceptions at compile time + ## ``server`` - StreamServer object. + ## ``client`` - accepted client transport. + TransportInitCallback* = proc(server: StreamServer, fd: AsyncFD): StreamTransport {. gcsafe, raises: [].} @@ -199,7 +206,7 @@ proc completePendingWriteQueue(queue: var Deque[StreamVector], vector.writer.complete(v) proc failPendingWriteQueue(queue: var Deque[StreamVector], - error: ref CatchableError) {.inline.} = + error: ref TransportError) {.inline.} = while len(queue) > 0: var vector = queue.popFirst() if not(vector.writer.finished()): @@ -640,7 +647,8 @@ when defined(windows): localAddress = TransportAddress(), flags: set[SocketFlags] = {}, dualstack = DualStackType.Auto - ): Future[StreamTransport] = + ): Future[StreamTransport] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Open new connection to remote peer with address ``address`` and create ## new transport object ``StreamTransport`` for established connection. ## ``bufferSize`` is size of internal buffer for transport. @@ -1031,7 +1039,8 @@ when defined(windows): server.aovl.data.cb(addr server.aovl) ok() - proc accept*(server: StreamServer): Future[StreamTransport] = + proc accept*(server: StreamServer): Future[StreamTransport] {. 
+ async: (raw: true, raises: [TransportError, CancelledError]).} = var retFuture = newFuture[StreamTransport]("stream.server.accept") doAssert(server.status != ServerStatus.Running, @@ -1472,7 +1481,8 @@ else: localAddress = TransportAddress(), flags: set[SocketFlags] = {}, dualstack = DualStackType.Auto, - ): Future[StreamTransport] = + ): Future[StreamTransport] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Open new connection to remote peer with address ``address`` and create ## new transport object ``StreamTransport`` for established connection. ## ``bufferSize`` - size of internal buffer for transport. @@ -1658,7 +1668,8 @@ else: transp.state.excl(WritePaused) ok() - proc accept*(server: StreamServer): Future[StreamTransport] = + proc accept*(server: StreamServer): Future[StreamTransport] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = var retFuture = newFuture[StreamTransport]("stream.server.accept") doAssert(server.status != ServerStatus.Running, @@ -1762,7 +1773,8 @@ proc stop*(server: StreamServer) {.raises: [TransportOsError].} = let res = stop2(server) if res.isErr(): raiseTransportOsError(res.error()) -proc join*(server: StreamServer): Future[void] = +proc join*(server: StreamServer): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = ## Waits until ``server`` is not closed. var retFuture = newFuture[void]("stream.transport.server.join") @@ -1785,7 +1797,8 @@ proc connect*(address: TransportAddress, flags: set[TransportFlags], localAddress = TransportAddress(), dualstack = DualStackType.Auto - ): Future[StreamTransport] = + ): Future[StreamTransport] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = # Retro compatibility with TransportFlags var mappedFlags: set[SocketFlags] if TcpNoDelay in flags: mappedFlags.incl(SocketFlags.TcpNoDelay) @@ -1817,7 +1830,8 @@ proc close*(server: StreamServer) = else: server.sock.closeSocket(continuation) -proc closeWait*(server: StreamServer): Future[void] = +proc closeWait*(server: StreamServer): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = ## Close server ``server`` and release all resources. server.close() server.join() @@ -2065,6 +2079,29 @@ proc createStreamServer*(host: TransportAddress, GC_ref(sres) sres +proc createStreamServer*(host: TransportAddress, + cbproc: UnsafeStreamCallback, + flags: set[ServerFlags] = {}, + sock: AsyncFD = asyncInvalidSocket, + backlog: int = DefaultBacklogSize, + bufferSize: int = DefaultStreamBufferSize, + child: StreamServer = nil, + init: TransportInitCallback = nil, + udata: pointer = nil, + dualstack = DualStackType.Auto): StreamServer {. + raises: [TransportOsError], + deprecated: "Callback must not raise exceptions, annotate with {.async: (raises: []).}".} = + proc wrap(server: StreamServer, + client: StreamTransport) {.async: (raises: []).} = + try: + cbproc(server, client) + except CatchableError as exc: + raiseAssert "Unexpected exception from stream server cbproc: " & exc.msg + + createStreamServer( + host, wrap, flags, sock, backlog, bufferSize, child, init, udata, + dualstack) + proc createStreamServer*(host: TransportAddress, flags: set[ServerFlags] = {}, sock: AsyncFD = asyncInvalidSocket, @@ -2074,8 +2111,8 @@ proc createStreamServer*(host: TransportAddress, init: TransportInitCallback = nil, udata: pointer = nil, dualstack = DualStackType.Auto): StreamServer {. 
- raises: [CatchableError].} = - createStreamServer(host, nil, flags, sock, backlog, bufferSize, + raises: [TransportOsError].} = + createStreamServer(host, StreamCallback(nil), flags, sock, backlog, bufferSize, child, init, cast[pointer](udata), dualstack) proc createStreamServer*[T](host: TransportAddress, @@ -2088,7 +2125,24 @@ proc createStreamServer*[T](host: TransportAddress, child: StreamServer = nil, init: TransportInitCallback = nil, dualstack = DualStackType.Auto): StreamServer {. - raises: [CatchableError].} = + raises: [TransportOsError].} = + var fflags = flags + {GCUserData} + GC_ref(udata) + createStreamServer(host, cbproc, fflags, sock, backlog, bufferSize, + child, init, cast[pointer](udata), dualstack) + +proc createStreamServer*[T](host: TransportAddress, + cbproc: UnsafeStreamCallback, + flags: set[ServerFlags] = {}, + udata: ref T, + sock: AsyncFD = asyncInvalidSocket, + backlog: int = DefaultBacklogSize, + bufferSize: int = DefaultStreamBufferSize, + child: StreamServer = nil, + init: TransportInitCallback = nil, + dualstack = DualStackType.Auto): StreamServer {. + raises: [TransportOsError], + deprecated: "Callback must not raise exceptions, annotate with {.async: (raises: []).}".} = var fflags = flags + {GCUserData} GC_ref(udata) createStreamServer(host, cbproc, fflags, sock, backlog, bufferSize, @@ -2103,10 +2157,10 @@ proc createStreamServer*[T](host: TransportAddress, child: StreamServer = nil, init: TransportInitCallback = nil, dualstack = DualStackType.Auto): StreamServer {. - raises: [CatchableError].} = + raises: [TransportOsError].} = var fflags = flags + {GCUserData} GC_ref(udata) - createStreamServer(host, nil, fflags, sock, backlog, bufferSize, + createStreamServer(host, StreamCallback(nil), fflags, sock, backlog, bufferSize, child, init, cast[pointer](udata), dualstack) proc getUserData*[T](server: StreamServer): T {.inline.} = @@ -2157,7 +2211,8 @@ template fastWrite(transp: auto, pbytes: var ptr byte, rbytes: var int, return retFuture proc write*(transp: StreamTransport, pbytes: pointer, - nbytes: int): Future[int] = + nbytes: int): Future[int] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Write data from buffer ``pbytes`` with size ``nbytes`` using transport ## ``transp``. var retFuture = newFuture[int]("stream.transport.write(pointer)") @@ -2179,9 +2234,10 @@ proc write*(transp: StreamTransport, pbytes: pointer, return retFuture proc write*(transp: StreamTransport, msg: sink string, - msglen = -1): Future[int] = + msglen = -1): Future[int] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Write data from string ``msg`` using transport ``transp``. 
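# A minimal sketch of a handler written against the typed stream API above,
# following the `{.async: (raises: []).}` shape the updated tests use;
# `handleClient` is an illustrative name only.
proc handleClient(server: StreamServer,
                  client: StreamTransport) {.async: (raises: []).} =
  try:
    let data = await client.read()
    discard await client.write(data)
    await client.closeWait()
  except CatchableError as exc:
    raiseAssert exc.msg
# A handler of this shape can be passed to the non-deprecated
# `createStreamServer` overload, for example:
#   createStreamServer(initTAddress("127.0.0.1:0"), handleClient, {ReuseAddr})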
- var retFuture = newFutureStr[int]("stream.transport.write(string)") + var retFuture = newFuture[int]("stream.transport.write(string)") transp.checkClosed(retFuture) transp.checkWriteEof(retFuture) @@ -2197,17 +2253,10 @@ proc write*(transp: StreamTransport, msg: sink string, let written = nbytes - rbytes # In case fastWrite wrote some - pbytes = - when declared(shallowCopy): - if not(isLiteral(msg)): - shallowCopy(retFuture.gcholder, msg) - cast[ptr byte](addr retFuture.gcholder[written]) - else: - retFuture.gcholder = msg[written ..< nbytes] - cast[ptr byte](addr retFuture.gcholder[0]) - else: - retFuture.gcholder = msg[written ..< nbytes] - cast[ptr byte](addr retFuture.gcholder[0]) + var localCopy = msg + retFuture.addCallback(proc(_: pointer) = reset(localCopy)) + + pbytes = cast[ptr byte](addr localCopy[written]) var vector = StreamVector(kind: DataBuffer, writer: retFuture, buf: pbytes, buflen: rbytes, size: nbytes) @@ -2218,9 +2267,10 @@ proc write*(transp: StreamTransport, msg: sink string, return retFuture proc write*[T](transp: StreamTransport, msg: sink seq[T], - msglen = -1): Future[int] = + msglen = -1): Future[int] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Write sequence ``msg`` using transport ``transp``. - var retFuture = newFutureSeq[int, T]("stream.transport.write(seq)") + var retFuture = newFuture[int]("stream.transport.write(seq)") transp.checkClosed(retFuture) transp.checkWriteEof(retFuture) @@ -2236,17 +2286,10 @@ proc write*[T](transp: StreamTransport, msg: sink seq[T], let written = nbytes - rbytes # In case fastWrite wrote some - pbytes = - when declared(shallowCopy): - if not(isLiteral(msg)): - shallowCopy(retFuture.gcholder, msg) - cast[ptr byte](addr retFuture.gcholder[written]) - else: - retFuture.gcholder = msg[written ..< nbytes] - cast[ptr byte](addr retFuture.gcholder[0]) - else: - retFuture.gcholder = msg[written ..< nbytes] - cast[ptr byte](addr retFuture.gcholder[0]) + var localCopy = msg + retFuture.addCallback(proc(_: pointer) = reset(localCopy)) + + pbytes = cast[ptr byte](addr localCopy[written]) var vector = StreamVector(kind: DataBuffer, writer: retFuture, buf: pbytes, buflen: rbytes, size: nbytes) @@ -2257,7 +2300,8 @@ proc write*[T](transp: StreamTransport, msg: sink seq[T], return retFuture proc writeFile*(transp: StreamTransport, handle: int, - offset: uint = 0, size: int = 0): Future[int] = + offset: uint = 0, size: int = 0): Future[int] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Write data from file descriptor ``handle`` to transport ``transp``. ## ## You can specify starting ``offset`` in opened file and number of bytes @@ -2304,7 +2348,7 @@ template readLoop(name, body: untyped): untyped = break else: checkPending(transp) - var fut = newFuture[void](name) + let fut = ReaderFuture.init(name) transp.reader = fut let res = resumeRead(transp) if res.isErr(): @@ -2328,7 +2372,8 @@ template readLoop(name, body: untyped): untyped = await fut proc readExactly*(transp: StreamTransport, pbytes: pointer, - nbytes: int) {.async.} = + nbytes: int) {. + async: (raises: [TransportError, CancelledError]).} = ## Read exactly ``nbytes`` bytes from transport ``transp`` and store it to ## ``pbytes``. ``pbytes`` must not be ``nil`` pointer and ``nbytes`` should ## be Natural. 
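# A minimal usage sketch (illustrative `readHeader` helper, fixed 4-byte size):
# with the annotation above, `readExactly` callers only have to account for
# `TransportError` and `CancelledError`.
proc readHeader(transp: StreamTransport): Future[array[4, byte]] {.
    async: (raises: [TransportError, CancelledError]).} =
  var hdr: array[4, byte]
  await transp.readExactly(addr hdr[0], len(hdr))
  return hdr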
@@ -2357,7 +2402,8 @@ proc readExactly*(transp: StreamTransport, pbytes: pointer, (consumed: count, done: index == nbytes) proc readOnce*(transp: StreamTransport, pbytes: pointer, - nbytes: int): Future[int] {.async.} = + nbytes: int): Future[int] {. + async: (raises: [TransportError, CancelledError]).} = ## Perform one read operation on transport ``transp``. ## ## If internal buffer is not empty, ``nbytes`` bytes will be transferred from @@ -2376,7 +2422,8 @@ proc readOnce*(transp: StreamTransport, pbytes: pointer, return count proc readUntil*(transp: StreamTransport, pbytes: pointer, nbytes: int, - sep: seq[byte]): Future[int] {.async.} = + sep: seq[byte]): Future[int] {. + async: (raises: [TransportError, CancelledError]).} = ## Read data from the transport ``transp`` until separator ``sep`` is found. ## ## On success, the data and separator will be removed from the internal @@ -2428,7 +2475,8 @@ proc readUntil*(transp: StreamTransport, pbytes: pointer, nbytes: int, return k proc readLine*(transp: StreamTransport, limit = 0, - sep = "\r\n"): Future[string] {.async.} = + sep = "\r\n"): Future[string] {. + async: (raises: [TransportError, CancelledError]).} = ## Read one line from transport ``transp``, where "line" is a sequence of ## bytes ending with ``sep`` (default is "\r\n"). ## @@ -2470,7 +2518,8 @@ proc readLine*(transp: StreamTransport, limit = 0, (index, (state == len(sep)) or (lim == len(result))) -proc read*(transp: StreamTransport): Future[seq[byte]] {.async.} = +proc read*(transp: StreamTransport): Future[seq[byte]] {. + async: (raises: [TransportError, CancelledError]).} = ## Read all bytes from transport ``transp``. ## ## This procedure allocates buffer seq[byte] and return it as result. @@ -2481,7 +2530,8 @@ proc read*(transp: StreamTransport): Future[seq[byte]] {.async.} = result.add(transp.buffer.toOpenArray(0, transp.offset - 1)) (transp.offset, false) -proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {.async.} = +proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {. + async: (raises: [TransportError, CancelledError]).} = ## Read all bytes (n <= 0) or exactly `n` bytes from transport ``transp``. ## ## This procedure allocates buffer seq[byte] and return it as result. @@ -2496,7 +2546,8 @@ proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {.async.} = result.add(transp.buffer.toOpenArray(0, count - 1)) (count, len(result) == n) -proc consume*(transp: StreamTransport): Future[int] {.async.} = +proc consume*(transp: StreamTransport): Future[int] {. + async: (raises: [TransportError, CancelledError]).} = ## Consume all bytes from transport ``transp`` and discard it. ## ## Return number of bytes actually consumed and discarded. @@ -2507,7 +2558,8 @@ proc consume*(transp: StreamTransport): Future[int] {.async.} = result += transp.offset (transp.offset, false) -proc consume*(transp: StreamTransport, n: int): Future[int] {.async.} = +proc consume*(transp: StreamTransport, n: int): Future[int] {. + async: (raises: [TransportError, CancelledError]).} = ## Consume all bytes (n <= 0) or ``n`` bytes from transport ``transp`` and ## discard it. ## @@ -2524,7 +2576,8 @@ proc consume*(transp: StreamTransport, n: int): Future[int] {.async.} = (count, result == n) proc readMessage*(transp: StreamTransport, - predicate: ReadMessagePredicate) {.async.} = + predicate: ReadMessagePredicate) {. + async: (raises: [TransportError, CancelledError]).} = ## Read all bytes from transport ``transp`` until ``predicate`` callback ## will not be satisfied. 
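# A minimal end-to-end sketch (illustrative `fetchGreeting` helper, CRLF
# framing assumed) combining the typed `connect`, `readLine` and `closeWait`
# from this patch.
proc fetchGreeting(address: TransportAddress): Future[string] {.
    async: (raises: [TransportError, CancelledError]).} =
  let transp = await connect(address)
  try:
    return await transp.readLine(sep = "\r\n")
  finally:
    await transp.closeWait()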
## @@ -2547,7 +2600,8 @@ proc readMessage*(transp: StreamTransport, else: predicate(transp.buffer.toOpenArray(0, transp.offset - 1)) -proc join*(transp: StreamTransport): Future[void] = +proc join*(transp: StreamTransport): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = ## Wait until ``transp`` will not be closed. var retFuture = newFuture[void]("stream.transport.join") @@ -2606,14 +2660,15 @@ proc close*(transp: StreamTransport) = elif transp.kind == TransportKind.Socket: closeSocket(transp.fd, continuation) -proc closeWait*(transp: StreamTransport): Future[void] = +proc closeWait*(transp: StreamTransport): Future[void] {. + async: (raw: true, raises: []).} = ## Close and frees resources of transport ``transp``. - const FutureName = "stream.transport.closeWait" + let retFuture = newFuture[void]( + "stream.transport.closeWait", {FutureFlag.OwnCancelSchedule}) if {ReadClosed, WriteClosed} * transp.state != {}: - return Future.completed(FutureName) - - let retFuture = newFuture[void](FutureName, {FutureFlag.OwnCancelSchedule}) + retFuture.complete() + return retFuture proc continuation(udata: pointer) {.gcsafe.} = retFuture.complete() @@ -2631,7 +2686,8 @@ proc closeWait*(transp: StreamTransport): Future[void] = retFuture.cancelCallback = cancellation retFuture -proc shutdownWait*(transp: StreamTransport): Future[void] = +proc shutdownWait*(transp: StreamTransport): Future[void] {. + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Perform graceful shutdown of TCP connection backed by transport ``transp``. doAssert(transp.kind == TransportKind.Socket) let retFuture = newFuture[void]("stream.transport.shutdown") diff --git a/tests/testasyncstream.nim b/tests/testasyncstream.nim index 86b735759..bd0207f8d 100644 --- a/tests/testasyncstream.nim +++ b/tests/testasyncstream.nim @@ -87,14 +87,17 @@ suite "AsyncStream test suite": test "AsyncStream(StreamTransport) readExactly() test": proc testReadExactly(): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - await wstream.write("000000000011111111112222222222") - await wstream.finish() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + await wstream.write("000000000011111111112222222222") + await wstream.finish() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var buffer = newSeq[byte](10) var server = createStreamServer(initTAddress("127.0.0.1:0"), @@ -117,14 +120,17 @@ suite "AsyncStream test suite": test "AsyncStream(StreamTransport) readUntil() test": proc testReadUntil(): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - await wstream.write("0000000000NNz1111111111NNz2222222222NNz") - await wstream.finish() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + await wstream.write("0000000000NNz1111111111NNz2222222222NNz") + await wstream.finish() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var buffer = newSeq[byte](13) var sep = @[byte('N'), 
byte('N'), byte('z')] @@ -155,14 +161,17 @@ suite "AsyncStream test suite": test "AsyncStream(StreamTransport) readLine() test": proc testReadLine(): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - await wstream.write("0000000000\r\n1111111111\r\n2222222222\r\n") - await wstream.finish() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + await wstream.write("0000000000\r\n1111111111\r\n2222222222\r\n") + await wstream.finish() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -184,14 +193,17 @@ suite "AsyncStream test suite": test "AsyncStream(StreamTransport) read() test": proc testRead(): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - await wstream.write("000000000011111111112222222222") - await wstream.finish() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + await wstream.write("000000000011111111112222222222") + await wstream.finish() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -211,14 +223,17 @@ suite "AsyncStream test suite": test "AsyncStream(StreamTransport) consume() test": proc testConsume(): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - await wstream.write("0000000000111111111122222222223333333333") - await wstream.finish() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + await wstream.write("0000000000111111111122222222223333333333") + await wstream.finish() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -247,26 +262,29 @@ suite "AsyncStream test suite": test "AsyncStream(AsyncStream) readExactly() test": proc testReadExactly2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var wstream2 = newChunkedStreamWriter(wstream) - var s1 = "00000" - var s2 = "11111" - var s3 = "22222" - await wstream2.write("00000") - await wstream2.write(addr s1[0], len(s1)) - await wstream2.write("11111") - await wstream2.write(s2.toBytes()) - await wstream2.write("22222") - await wstream2.write(addr s3[0], len(s3)) - - await wstream2.finish() - await wstream.finish() - await wstream2.closeWait() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var wstream2 = 
newChunkedStreamWriter(wstream) + var s1 = "00000" + var s2 = "11111" + var s3 = "22222" + await wstream2.write("00000") + await wstream2.write(addr s1[0], len(s1)) + await wstream2.write("11111") + await wstream2.write(s2.toBytes()) + await wstream2.write("22222") + await wstream2.write(addr s3[0], len(s3)) + + await wstream2.finish() + await wstream.finish() + await wstream2.closeWait() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var buffer = newSeq[byte](10) var server = createStreamServer(initTAddress("127.0.0.1:0"), @@ -299,25 +317,28 @@ suite "AsyncStream test suite": test "AsyncStream(AsyncStream) readUntil() test": proc testReadUntil2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var wstream2 = newChunkedStreamWriter(wstream) - var s1 = "00000NNz" - var s2 = "11111NNz" - var s3 = "22222NNz" - await wstream2.write("00000") - await wstream2.write(addr s1[0], len(s1)) - await wstream2.write("11111") - await wstream2.write(s2) - await wstream2.write("22222") - await wstream2.write(s3.toBytes()) - await wstream2.finish() - await wstream.finish() - await wstream2.closeWait() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var wstream2 = newChunkedStreamWriter(wstream) + var s1 = "00000NNz" + var s2 = "11111NNz" + var s3 = "22222NNz" + await wstream2.write("00000") + await wstream2.write(addr s1[0], len(s1)) + await wstream2.write("11111") + await wstream2.write(s2) + await wstream2.write("22222") + await wstream2.write(s3.toBytes()) + await wstream2.finish() + await wstream.finish() + await wstream2.closeWait() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var buffer = newSeq[byte](13) var sep = @[byte('N'), byte('N'), byte('z')] @@ -358,22 +379,25 @@ suite "AsyncStream test suite": test "AsyncStream(AsyncStream) readLine() test": proc testReadLine2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var wstream2 = newChunkedStreamWriter(wstream) - await wstream2.write("00000") - await wstream2.write("00000\r\n") - await wstream2.write("11111") - await wstream2.write("11111\r\n") - await wstream2.write("22222") - await wstream2.write("22222\r\n") - await wstream2.finish() - await wstream.finish() - await wstream2.closeWait() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var wstream2 = newChunkedStreamWriter(wstream) + await wstream2.write("00000") + await wstream2.write("00000\r\n") + await wstream2.write("11111") + await wstream2.write("11111\r\n") + await wstream2.write("22222") + await wstream2.write("22222\r\n") + await wstream2.finish() + await wstream.finish() + await wstream2.closeWait() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -405,21 +429,24 @@ suite "AsyncStream test suite": test "AsyncStream(AsyncStream) 
read() test": proc testRead2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var wstream2 = newChunkedStreamWriter(wstream) - var s2 = "1111111111" - var s3 = "2222222222" - await wstream2.write("0000000000") - await wstream2.write(s2) - await wstream2.write(s3.toBytes()) - await wstream2.finish() - await wstream.finish() - await wstream2.closeWait() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var wstream2 = newChunkedStreamWriter(wstream) + var s2 = "1111111111" + var s3 = "2222222222" + await wstream2.write("0000000000") + await wstream2.write(s2) + await wstream2.write(s3.toBytes()) + await wstream2.finish() + await wstream.finish() + await wstream2.closeWait() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -446,31 +473,34 @@ suite "AsyncStream test suite": test "AsyncStream(AsyncStream) consume() test": proc testConsume2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - const - S4 = @[byte('3'), byte('3'), byte('3'), byte('3'), byte('3')] - var wstream = newAsyncStreamWriter(transp) - var wstream2 = newChunkedStreamWriter(wstream) - - var s1 = "00000" - var s2 = "11111".toBytes() - var s3 = "22222" - - await wstream2.write("00000") - await wstream2.write(s1) - await wstream2.write("11111") - await wstream2.write(s2) - await wstream2.write("22222") - await wstream2.write(addr s3[0], len(s3)) - await wstream2.write("33333") - await wstream2.write(S4) - await wstream2.finish() - await wstream.finish() - await wstream2.closeWait() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + const + S4 = @[byte('3'), byte('3'), byte('3'), byte('3'), byte('3')] + var wstream = newAsyncStreamWriter(transp) + var wstream2 = newChunkedStreamWriter(wstream) + + var s1 = "00000" + var s2 = "11111".toBytes() + var s3 = "22222" + + await wstream2.write("00000") + await wstream2.write(s1) + await wstream2.write("11111") + await wstream2.write(s2) + await wstream2.write("22222") + await wstream2.write(addr s3[0], len(s3)) + await wstream2.write("33333") + await wstream2.write(S4) + await wstream2.finish() + await wstream.finish() + await wstream2.closeWait() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -511,27 +541,30 @@ suite "AsyncStream test suite": message = createBigMessage("ABCDEFGHIJKLMNOP", size) proc processClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var wbstream = newBoundedStreamWriter(wstream, uint64(size)) + transp: StreamTransport) {.async: (raises: []).} = try: - check wbstream.atEof() == false - await wbstream.write(message) - check wbstream.atEof() == false - await wbstream.finish() - check wbstream.atEof() == true - expect AsyncStreamWriteEOFError: - await wbstream.write(message) - expect AsyncStreamWriteEOFError: - await wbstream.write(message) - 
expect AsyncStreamWriteEOFError: + var wstream = newAsyncStreamWriter(transp) + var wbstream = newBoundedStreamWriter(wstream, uint64(size)) + try: + check wbstream.atEof() == false await wbstream.write(message) - check wbstream.atEof() == true - await wbstream.closeWait() - check wbstream.atEof() == true - finally: - await wstream.closeWait() - await transp.closeWait() + check wbstream.atEof() == false + await wbstream.finish() + check wbstream.atEof() == true + expect AsyncStreamWriteEOFError: + await wbstream.write(message) + expect AsyncStreamWriteEOFError: + await wbstream.write(message) + expect AsyncStreamWriteEOFError: + await wbstream.write(message) + check wbstream.atEof() == true + await wbstream.closeWait() + check wbstream.atEof() == true + finally: + await wstream.closeWait() + await transp.closeWait() + except CatchableError as exc: + raiseAssert exc.msg let flags = {ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay} var server = createStreamServer(initTAddress("127.0.0.1:0"), @@ -580,15 +613,18 @@ suite "ChunkedStream test suite": ] proc checkVector(inputstr: string): Future[string] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var data = inputstr - await wstream.write(data) - await wstream.finish() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var data = inputstr + await wstream.write(data) + await wstream.finish() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -630,15 +666,18 @@ suite "ChunkedStream test suite": ] proc checkVector(inputstr: string): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var data = inputstr - await wstream.write(data) - await wstream.finish() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var data = inputstr + await wstream.write(data) + await wstream.finish() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var res = false var server = createStreamServer(initTAddress("127.0.0.1:0"), @@ -713,14 +752,17 @@ suite "ChunkedStream test suite": test "ChunkedStream too big chunk header test": proc checkTooBigChunkHeader(inputstr: seq[byte]): Future[bool] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - await wstream.write(inputstr) - await wstream.finish() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + await wstream.write(inputstr) + await wstream.finish() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -751,23 +793,26 @@ suite "ChunkedStream test suite": proc checkVector(inputstr: seq[byte], 
chunkSize: int): Future[seq[byte]] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var wstream2 = newChunkedStreamWriter(wstream) - var data = inputstr - var offset = 0 - while true: - if len(data) == offset: - break - let toWrite = min(chunkSize, len(data) - offset) - await wstream2.write(addr data[offset], toWrite) - offset = offset + toWrite - await wstream2.finish() - await wstream2.closeWait() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var wstream2 = newChunkedStreamWriter(wstream) + var data = inputstr + var offset = 0 + while true: + if len(data) == offset: + break + let toWrite = min(chunkSize, len(data) - offset) + await wstream2.write(addr data[offset], toWrite) + offset = offset + toWrite + await wstream2.finish() + await wstream2.closeWait() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -796,23 +841,26 @@ suite "ChunkedStream test suite": writeChunkSize: int, readChunkSize: int): Future[seq[byte]] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var wstream2 = newChunkedStreamWriter(wstream) - var data = inputstr - var offset = 0 - while true: - if len(data) == offset: - break - let toWrite = min(writeChunkSize, len(data) - offset) - await wstream2.write(addr data[offset], toWrite) - offset = offset + toWrite - await wstream2.finish() - await wstream2.closeWait() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var wstream2 = newChunkedStreamWriter(wstream) + var data = inputstr + var offset = 0 + while true: + if len(data) == offset: + break + let toWrite = min(writeChunkSize, len(data) - offset) + await wstream2.write(addr data[offset], toWrite) + offset = offset + toWrite + await wstream2.finish() + await wstream2.closeWait() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -849,30 +897,33 @@ suite "TLSStream test suite": const HttpHeadersMark = @[byte(0x0D), byte(0x0A), byte(0x0D), byte(0x0A)] test "Simple HTTPS connection": proc headerClient(address: TransportAddress, - name: string): Future[bool] {.async.} = - var mark = "HTTP/1.1 " - var buffer = newSeq[byte](8192) - var transp = await connect(address) - var reader = newAsyncStreamReader(transp) - var writer = newAsyncStreamWriter(transp) - var tlsstream = newTLSClientAsyncStream(reader, writer, name) - await tlsstream.writer.write("GET / HTTP/1.1\r\nHost: " & name & - "\r\nConnection: close\r\n\r\n") - var readFut = tlsstream.reader.readUntil(addr buffer[0], len(buffer), - HttpHeadersMark) - let res = await withTimeout(readFut, 5.seconds) - if res: - var length = readFut.read() - buffer.setLen(length) - if len(buffer) > len(mark): - if equalMem(addr buffer[0], addr mark[0], len(mark)): - result = true - - await tlsstream.reader.closeWait() - await tlsstream.writer.closeWait() - await 
reader.closeWait() - await writer.closeWait() - await transp.closeWait() + name: string): Future[bool] {.async: (raises: []).} = + try: + var mark = "HTTP/1.1 " + var buffer = newSeq[byte](8192) + var transp = await connect(address) + var reader = newAsyncStreamReader(transp) + var writer = newAsyncStreamWriter(transp) + var tlsstream = newTLSClientAsyncStream(reader, writer, name) + await tlsstream.writer.write("GET / HTTP/1.1\r\nHost: " & name & + "\r\nConnection: close\r\n\r\n") + var readFut = tlsstream.reader.readUntil(addr buffer[0], len(buffer), + HttpHeadersMark) + let res = await withTimeout(readFut, 5.seconds) + if res: + var length = readFut.read() + buffer.setLen(length) + if len(buffer) > len(mark): + if equalMem(addr buffer[0], addr mark[0], len(mark)): + result = true + + await tlsstream.reader.closeWait() + await tlsstream.writer.closeWait() + await reader.closeWait() + await writer.closeWait() + await transp.closeWait() + except CatchableError as exc: + raiseAssert exc.msg let res = waitFor(headerClient(resolveTAddress("www.google.com:443")[0], "www.google.com")) @@ -884,20 +935,23 @@ suite "TLSStream test suite": let testMessage = "TEST MESSAGE" proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var reader = newAsyncStreamReader(transp) - var writer = newAsyncStreamWriter(transp) - var sstream = newTLSServerAsyncStream(reader, writer, key, cert) - await handshake(sstream) - await sstream.writer.write(testMessage & "\r\n") - await sstream.writer.finish() - await sstream.writer.closeWait() - await sstream.reader.closeWait() - await reader.closeWait() - await writer.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var reader = newAsyncStreamReader(transp) + var writer = newAsyncStreamWriter(transp) + var sstream = newTLSServerAsyncStream(reader, writer, key, cert) + await handshake(sstream) + await sstream.writer.write(testMessage & "\r\n") + await sstream.writer.finish() + await sstream.writer.closeWait() + await sstream.reader.closeWait() + await reader.closeWait() + await writer.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg key = TLSPrivateKey.init(pemkey) cert = TLSCertificate.init(pemcert) @@ -931,20 +985,23 @@ suite "TLSStream test suite": let trustAnchors = TrustAnchorStore.new(SelfSignedTrustAnchors) proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var reader = newAsyncStreamReader(transp) - var writer = newAsyncStreamWriter(transp) - var sstream = newTLSServerAsyncStream(reader, writer, key, cert) - await handshake(sstream) - await sstream.writer.write(testMessage & "\r\n") - await sstream.writer.finish() - await sstream.writer.closeWait() - await sstream.reader.closeWait() - await reader.closeWait() - await writer.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var reader = newAsyncStreamReader(transp) + var writer = newAsyncStreamWriter(transp) + var sstream = newTLSServerAsyncStream(reader, writer, key, cert) + await handshake(sstream) + await sstream.writer.write(testMessage & "\r\n") + await sstream.writer.finish() + await sstream.writer.closeWait() + await sstream.reader.closeWait() + await reader.closeWait() + await writer.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var 
server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -988,46 +1045,49 @@ suite "BoundedStream test suite": var clientRes = false proc processClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - case btest - of BoundaryRead: - await wstream.write(message) - await wstream.write(boundary) - await wstream.finish() - await wstream.closeWait() - clientRes = true - of BoundaryDouble: - await wstream.write(message) - await wstream.write(boundary) - await wstream.write(message) - await wstream.finish() - await wstream.closeWait() - clientRes = true - of BoundarySize: - var ncmessage = message - ncmessage.setLen(len(message) - 2) - await wstream.write(ncmessage) - await wstream.write(@[0x2D'u8, 0x2D'u8]) - await wstream.finish() - await wstream.closeWait() - clientRes = true - of BoundaryIncomplete: - var ncmessage = message - ncmessage.setLen(len(message) - 2) - await wstream.write(ncmessage) - await wstream.finish() - await wstream.closeWait() - clientRes = true - of BoundaryEmpty: - await wstream.write(boundary) - await wstream.finish() - await wstream.closeWait() - clientRes = true + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + case btest + of BoundaryRead: + await wstream.write(message) + await wstream.write(boundary) + await wstream.finish() + await wstream.closeWait() + clientRes = true + of BoundaryDouble: + await wstream.write(message) + await wstream.write(boundary) + await wstream.write(message) + await wstream.finish() + await wstream.closeWait() + clientRes = true + of BoundarySize: + var ncmessage = message + ncmessage.setLen(len(message) - 2) + await wstream.write(ncmessage) + await wstream.write(@[0x2D'u8, 0x2D'u8]) + await wstream.finish() + await wstream.closeWait() + clientRes = true + of BoundaryIncomplete: + var ncmessage = message + ncmessage.setLen(len(message) - 2) + await wstream.write(ncmessage) + await wstream.finish() + await wstream.closeWait() + clientRes = true + of BoundaryEmpty: + await wstream.write(boundary) + await wstream.finish() + await wstream.closeWait() + clientRes = true - await transp.closeWait() - server.stop() - server.close() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var res = false let flags = {ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay} @@ -1090,60 +1150,63 @@ suite "BoundedStream test suite": message.add(messagePart) proc processClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var wbstream = newBoundedStreamWriter(wstream, uint64(size), - comparison = cmp) - case stest - of SizeReadWrite: - for i in 0 ..< 10: - await wbstream.write(messagePart) - await wbstream.finish() - await wbstream.closeWait() - clientRes = true - of SizeOverflow: - for i in 0 ..< 10: - await wbstream.write(messagePart) - try: - await wbstream.write(messagePart) - except BoundedStreamOverflowError: + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var wbstream = newBoundedStreamWriter(wstream, uint64(size), + comparison = cmp) + case stest + of SizeReadWrite: + for i in 0 ..< 10: + await wbstream.write(messagePart) + await wbstream.finish() + await wbstream.closeWait() clientRes = true - await wbstream.closeWait() - of SizeIncomplete: - for i in 0 ..< 9: - await wbstream.write(messagePart) - case cmp - of BoundCmp.Equal: - try: - 
await wbstream.finish() - except BoundedStreamIncompleteError: - clientRes = true - of BoundCmp.LessOrEqual: + of SizeOverflow: + for i in 0 ..< 10: + await wbstream.write(messagePart) try: - await wbstream.finish() + await wbstream.write(messagePart) + except BoundedStreamOverflowError: clientRes = true - except BoundedStreamIncompleteError: - discard - await wbstream.closeWait() - of SizeEmpty: - case cmp - of BoundCmp.Equal: - try: - await wbstream.finish() - except BoundedStreamIncompleteError: - clientRes = true - of BoundCmp.LessOrEqual: - try: - await wbstream.finish() - clientRes = true - except BoundedStreamIncompleteError: - discard - await wbstream.closeWait() + await wbstream.closeWait() + of SizeIncomplete: + for i in 0 ..< 9: + await wbstream.write(messagePart) + case cmp + of BoundCmp.Equal: + try: + await wbstream.finish() + except BoundedStreamIncompleteError: + clientRes = true + of BoundCmp.LessOrEqual: + try: + await wbstream.finish() + clientRes = true + except BoundedStreamIncompleteError: + discard + await wbstream.closeWait() + of SizeEmpty: + case cmp + of BoundCmp.Equal: + try: + await wbstream.finish() + except BoundedStreamIncompleteError: + clientRes = true + of BoundCmp.LessOrEqual: + try: + await wbstream.finish() + clientRes = true + except BoundedStreamIncompleteError: + discard + await wbstream.closeWait() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg let flags = {ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay} var server = createStreamServer(initTAddress("127.0.0.1:0"), @@ -1243,23 +1306,26 @@ suite "BoundedStream test suite": writeChunkSize: int, readChunkSize: int): Future[seq[byte]] {.async.} = proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var wstream2 = newBoundedStreamWriter(wstream, uint64(len(inputstr))) - var data = inputstr - var offset = 0 - while true: - if len(data) == offset: - break - let toWrite = min(writeChunkSize, len(data) - offset) - await wstream2.write(addr data[offset], toWrite) - offset = offset + toWrite - await wstream2.finish() - await wstream2.closeWait() - await wstream.closeWait() - await transp.closeWait() - server.stop() - server.close() + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var wstream2 = newBoundedStreamWriter(wstream, uint64(len(inputstr))) + var data = inputstr + var offset = 0 + while true: + if len(data) == offset: + break + let toWrite = min(writeChunkSize, len(data) - offset) + await wstream2.write(addr data[offset], toWrite) + offset = offset + toWrite + await wstream2.finish() + await wstream2.closeWait() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) @@ -1293,17 +1359,20 @@ suite "BoundedStream test suite": proc checkEmptyStreams(): Future[bool] {.async.} = var writer1Res = false proc serveClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var wstream = newAsyncStreamWriter(transp) - var wstream2 = newBoundedStreamWriter(wstream, 0'u64) - await wstream2.finish() - let res = wstream2.atEof() - await wstream2.closeWait() - await wstream.closeWait() - await transp.closeWait() - server.stop() - 
server.close() - writer1Res = res + transp: StreamTransport) {.async: (raises: []).} = + try: + var wstream = newAsyncStreamWriter(transp) + var wstream2 = newBoundedStreamWriter(wstream, 0'u64) + await wstream2.finish() + let res = wstream2.atEof() + await wstream2.closeWait() + await wstream.closeWait() + await transp.closeWait() + server.stop() + server.close() + writer1Res = res + except CatchableError as exc: + raiseAssert exc.msg var server = createStreamServer(initTAddress("127.0.0.1:0"), serveClient, {ReuseAddr}) diff --git a/tests/testbugs.nim b/tests/testbugs.nim index 1f2a932d0..fc4af3a45 100644 --- a/tests/testbugs.nim +++ b/tests/testbugs.nim @@ -21,16 +21,19 @@ suite "Asynchronous issues test suite": test: string proc udp4DataAvailable(transp: DatagramTransport, - remote: TransportAddress) {.async, gcsafe.} = - var udata = getUserData[CustomData](transp) - var expect = TEST_MSG - var data: seq[byte] - var datalen: int - transp.peekMessage(data, datalen) - if udata.test == "CHECK" and datalen == MSG_LEN and - equalMem(addr data[0], addr expect[0], datalen): - udata.test = "OK" - transp.close() + remote: TransportAddress) {.async: (raises: []).} = + try: + var udata = getUserData[CustomData](transp) + var expect = TEST_MSG + var data: seq[byte] + var datalen: int + transp.peekMessage(data, datalen) + if udata.test == "CHECK" and datalen == MSG_LEN and + equalMem(addr data[0], addr expect[0], datalen): + udata.test = "OK" + transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc issue6(): Future[bool] {.async.} = var myself = initTAddress("127.0.0.1:" & $HELLO_PORT) diff --git a/tests/testdatagram.nim b/tests/testdatagram.nim index c941761af..bd33ef365 100644 --- a/tests/testdatagram.nim +++ b/tests/testdatagram.nim @@ -30,286 +30,319 @@ suite "Datagram Transport test suite": " clients x " & $MessagesCount & " messages)" proc client1(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("REQUEST"): - var numstr = data[7..^1] - var num = parseInt(numstr) - var ans = "ANSWER" & $num - await transp.sendTo(raddr, addr ans[0], len(ans)) + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("REQUEST"): + var numstr = data[7..^1] + var num = parseInt(numstr) + var ans = "ANSWER" & $num + await transp.sendTo(raddr, addr ans[0], len(ans)) + else: + var err = "ERROR" + await transp.sendTo(raddr, addr err[0], len(err)) else: - var err = "ERROR" - await transp.sendTo(raddr, addr err[0], len(err)) - else: - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 - transp.close() + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc client2(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("ANSWER"): - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = counterPtr[] + 1 - if counterPtr[] == 
TestsCount: - transp.close() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("ANSWER"): + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = counterPtr[] + 1 + if counterPtr[] == TestsCount: + transp.close() + else: + var ta = initTAddress("127.0.0.1:33336") + var req = "REQUEST" & $counterPtr[] + await transp.sendTo(ta, addr req[0], len(req)) else: - var ta = initTAddress("127.0.0.1:33336") - var req = "REQUEST" & $counterPtr[] - await transp.sendTo(ta, addr req[0], len(req)) + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() else: + ## Read operation failed with error var counterPtr = cast[ptr int](transp.udata) counterPtr[] = -1 transp.close() - else: - ## Read operation failed with error - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 - transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc client3(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("ANSWER"): - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = counterPtr[] + 1 - if counterPtr[] == TestsCount: - transp.close() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("ANSWER"): + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = counterPtr[] + 1 + if counterPtr[] == TestsCount: + transp.close() + else: + var req = "REQUEST" & $counterPtr[] + await transp.send(addr req[0], len(req)) else: - var req = "REQUEST" & $counterPtr[] - await transp.send(addr req[0], len(req)) + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() else: + ## Read operation failed with error var counterPtr = cast[ptr int](transp.udata) counterPtr[] = -1 transp.close() - else: - ## Read operation failed with error - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 - transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc client4(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("ANSWER"): - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = counterPtr[] + 1 - if counterPtr[] == MessagesCount: - transp.close() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("ANSWER"): + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = counterPtr[] + 1 + if counterPtr[] == MessagesCount: + transp.close() + else: + var req = "REQUEST" & $counterPtr[] + await transp.send(addr req[0], len(req)) else: - var req = "REQUEST" & $counterPtr[] - await 
transp.send(addr req[0], len(req)) + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() else: + ## Read operation failed with error var counterPtr = cast[ptr int](transp.udata) counterPtr[] = -1 transp.close() - else: - ## Read operation failed with error - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 - transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc client5(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("ANSWER"): - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = counterPtr[] + 1 - if counterPtr[] == MessagesCount: - transp.close() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("ANSWER"): + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = counterPtr[] + 1 + if counterPtr[] == MessagesCount: + transp.close() + else: + var req = "REQUEST" & $counterPtr[] + await transp.sendTo(raddr, addr req[0], len(req)) else: - var req = "REQUEST" & $counterPtr[] - await transp.sendTo(raddr, addr req[0], len(req)) + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() else: + ## Read operation failed with error var counterPtr = cast[ptr int](transp.udata) counterPtr[] = -1 transp.close() - else: - ## Read operation failed with error - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 - transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc client6(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("REQUEST"): - var numstr = data[7..^1] - var num = parseInt(numstr) - var ans = "ANSWER" & $num - await transp.sendTo(raddr, ans) + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("REQUEST"): + var numstr = data[7..^1] + var num = parseInt(numstr) + var ans = "ANSWER" & $num + await transp.sendTo(raddr, ans) + else: + var err = "ERROR" + await transp.sendTo(raddr, err) else: - var err = "ERROR" - await transp.sendTo(raddr, err) - else: - ## Read operation failed with error - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 - transp.close() + ## Read operation failed with error + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc client7(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("ANSWER"): - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = counterPtr[] + 1 - if counterPtr[] == 
TestsCount: - transp.close() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("ANSWER"): + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = counterPtr[] + 1 + if counterPtr[] == TestsCount: + transp.close() + else: + var req = "REQUEST" & $counterPtr[] + await transp.sendTo(raddr, req) else: - var req = "REQUEST" & $counterPtr[] - await transp.sendTo(raddr, req) + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() else: + ## Read operation failed with error var counterPtr = cast[ptr int](transp.udata) counterPtr[] = -1 transp.close() - else: - ## Read operation failed with error - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 - transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc client8(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("ANSWER"): - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = counterPtr[] + 1 - if counterPtr[] == TestsCount: - transp.close() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("ANSWER"): + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = counterPtr[] + 1 + if counterPtr[] == TestsCount: + transp.close() + else: + var req = "REQUEST" & $counterPtr[] + await transp.send(req) else: - var req = "REQUEST" & $counterPtr[] - await transp.send(req) + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() else: + ## Read operation failed with error var counterPtr = cast[ptr int](transp.udata) counterPtr[] = -1 transp.close() - else: - ## Read operation failed with error - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 - transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc client9(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("REQUEST"): - var numstr = data[7..^1] - var num = parseInt(numstr) - var ans = "ANSWER" & $num - var ansseq = newSeq[byte](len(ans)) - copyMem(addr ansseq[0], addr ans[0], len(ans)) - await transp.sendTo(raddr, ansseq) + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("REQUEST"): + var numstr = data[7..^1] + var num = parseInt(numstr) + var ans = "ANSWER" & $num + var ansseq = newSeq[byte](len(ans)) + copyMem(addr ansseq[0], addr ans[0], len(ans)) + await transp.sendTo(raddr, ansseq) + else: + var err = "ERROR" + var errseq = newSeq[byte](len(err)) + copyMem(addr errseq[0], addr err[0], len(err)) + await transp.sendTo(raddr, 
errseq) else: - var err = "ERROR" - var errseq = newSeq[byte](len(err)) - copyMem(addr errseq[0], addr err[0], len(err)) - await transp.sendTo(raddr, errseq) - else: - ## Read operation failed with error - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 - transp.close() + ## Read operation failed with error + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc client10(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("ANSWER"): - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = counterPtr[] + 1 - if counterPtr[] == TestsCount: - transp.close() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("ANSWER"): + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = counterPtr[] + 1 + if counterPtr[] == TestsCount: + transp.close() + else: + var req = "REQUEST" & $counterPtr[] + var reqseq = newSeq[byte](len(req)) + copyMem(addr reqseq[0], addr req[0], len(req)) + await transp.sendTo(raddr, reqseq) else: - var req = "REQUEST" & $counterPtr[] - var reqseq = newSeq[byte](len(req)) - copyMem(addr reqseq[0], addr req[0], len(req)) - await transp.sendTo(raddr, reqseq) + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() else: + ## Read operation failed with error var counterPtr = cast[ptr int](transp.udata) counterPtr[] = -1 transp.close() - else: - ## Read operation failed with error - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 - transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc client11(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var pbytes = transp.getMessage() - var nbytes = len(pbytes) - if nbytes > 0: - var data = newString(nbytes + 1) - copyMem(addr data[0], addr pbytes[0], nbytes) - data.setLen(nbytes) - if data.startsWith("ANSWER"): - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = counterPtr[] + 1 - if counterPtr[] == TestsCount: - transp.close() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var pbytes = transp.getMessage() + var nbytes = len(pbytes) + if nbytes > 0: + var data = newString(nbytes + 1) + copyMem(addr data[0], addr pbytes[0], nbytes) + data.setLen(nbytes) + if data.startsWith("ANSWER"): + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = counterPtr[] + 1 + if counterPtr[] == TestsCount: + transp.close() + else: + var req = "REQUEST" & $counterPtr[] + var reqseq = newSeq[byte](len(req)) + copyMem(addr reqseq[0], addr req[0], len(req)) + await transp.send(reqseq) else: - var req = "REQUEST" & $counterPtr[] - var reqseq = newSeq[byte](len(req)) - copyMem(addr reqseq[0], addr req[0], len(req)) - await transp.send(reqseq) + var counterPtr = cast[ptr int](transp.udata) + counterPtr[] = -1 + transp.close() else: + ## Read operation failed with error var counterPtr = cast[ptr int](transp.udata) counterPtr[] = -1 transp.close() - else: - ## Read operation failed with error - var counterPtr = cast[ptr int](transp.udata) - counterPtr[] = -1 
- transp.close() + except CatchableError as exc: + raiseAssert exc.msg proc testPointerSendTo(): Future[int] {.async.} = ## sendTo(pointer) test @@ -439,7 +472,7 @@ suite "Datagram Transport test suite": var ta = initTAddress("127.0.0.1:0") var counter = 0 proc clientMark(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = + raddr: TransportAddress): Future[void] {.async: (raises: []).} = counter = 1 transp.close() var dgram1 = newDatagramTransport(client1, local = ta) @@ -457,7 +490,7 @@ suite "Datagram Transport test suite": proc testTransportClose(): Future[bool] {.async.} = var ta = initTAddress("127.0.0.1:45000") proc clientMark(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = + raddr: TransportAddress): Future[void] {.async: (raises: []).} = discard var dgram = newDatagramTransport(clientMark, local = ta) dgram.close() @@ -473,12 +506,15 @@ suite "Datagram Transport test suite": var bta = initTAddress("255.255.255.255:45010") var res = 0 proc clientMark(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var bmsg = transp.getMessage() - var smsg = string.fromBytes(bmsg) - if smsg == expectMessage: - inc(res) - transp.close() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var bmsg = transp.getMessage() + var smsg = string.fromBytes(bmsg) + if smsg == expectMessage: + inc(res) + transp.close() + except CatchableError as exc: + raiseAssert exc.msg var dgram1 = newDatagramTransport(clientMark, local = ta1, flags = {Broadcast}, ttl = 2) await dgram1.sendTo(bta, expectMessage) @@ -493,15 +529,19 @@ suite "Datagram Transport test suite": var event = newAsyncEvent() proc clientMark1(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var bmsg = transp.getMessage() - var smsg = string.fromBytes(bmsg) - if smsg == expectStr: - inc(res) - event.fire() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var bmsg = transp.getMessage() + var smsg = string.fromBytes(bmsg) + if smsg == expectStr: + inc(res) + event.fire() + except CatchableError as exc: + raiseAssert exc.msg + proc clientMark2(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = + raddr: TransportAddress): Future[void] {.async: (raises: []).} = discard var dgram1 = newDatagramTransport(clientMark1, local = ta) @@ -544,15 +584,18 @@ suite "Datagram Transport test suite": res = 0 proc process1(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = - var bmsg = transp.getMessage() - var smsg = string.fromBytes(bmsg) - if smsg == expectStr: - inc(res) - event.fire() + raddr: TransportAddress): Future[void] {.async: (raises: []).} = + try: + var bmsg = transp.getMessage() + var smsg = string.fromBytes(bmsg) + if smsg == expectStr: + inc(res) + event.fire() + except CatchableError as exc: + raiseAssert exc.msg proc process2(transp: DatagramTransport, - raddr: TransportAddress): Future[void] {.async.} = + raddr: TransportAddress): Future[void] {.async: (raises: []).} = discard let diff --git a/tests/testmacro.nim b/tests/testmacro.nim index 13611934a..013379379 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -459,20 +459,31 @@ suite "Exceptions tracking": check waitFor(test1()) == 12 proc test2: Future[int] {.async: (raw: true, raises: [IOError, OSError]).} = - result = newFuture[int]() - result.fail(newException(IOError, "fail")) - result.fail(newException(OSError, "fail")) checkNotCompiles: 
result.fail(newException(ValueError, "fail")) + result = newFuture[int]() + result.fail(newException(IOError, "fail")) + proc test3: Future[void] {.async: (raw: true, raises: []).} = + result = newFuture[void]() checkNotCompiles: result.fail(newException(ValueError, "fail")) - + result.complete() # Inheritance proc test4: Future[void] {.async: (raw: true, raises: [CatchableError]).} = + result = newFuture[void]() result.fail(newException(IOError, "fail")) + check: + waitFor(test1()) == 12 + expect(IOError): + discard waitFor(test2()) + + waitFor(test3()) + expect(IOError): + waitFor(test4()) + test "or errors": proc testit {.async: (raises: [ValueError]).} = raise (ref ValueError)() diff --git a/tests/testserver.nim b/tests/testserver.nim index a63c9df70..280148cc4 100644 --- a/tests/testserver.nim +++ b/tests/testserver.nim @@ -27,29 +27,36 @@ suite "Server's test suite": checkLeaks() proc serveStreamClient(server: StreamServer, - transp: StreamTransport) {.async.} = + transp: StreamTransport) {.async: (raises: []).} = discard proc serveCustomStreamClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var cserver = cast[CustomServer](server) - var ctransp = cast[CustomTransport](transp) - cserver.test1 = "CONNECTION" - cserver.test2 = ctransp.test - cserver.test3 = await transp.readLine() - var answer = "ANSWER\r\n" - discard await transp.write(answer) - transp.close() - await transp.join() + transp: StreamTransport) {.async: (raises: []).} = + try: + var cserver = cast[CustomServer](server) + var ctransp = cast[CustomTransport](transp) + cserver.test1 = "CONNECTION" + cserver.test2 = ctransp.test + cserver.test3 = await transp.readLine() + var answer = "ANSWER\r\n" + discard await transp.write(answer) + transp.close() + await transp.join() + except CatchableError as exc: + raiseAssert exc.msg + proc serveUdataStreamClient(server: StreamServer, - transp: StreamTransport) {.async.} = - var udata = getUserData[CustomData](server) - var line = await transp.readLine() - var msg = line & udata.test & "\r\n" - discard await transp.write(msg) - transp.close() - await transp.join() + transp: StreamTransport) {.async: (raises: []).} = + try: + var udata = getUserData[CustomData](server) + var line = await transp.readLine() + var msg = line & udata.test & "\r\n" + discard await transp.write(msg) + transp.close() + await transp.join() + except CatchableError as exc: + raiseAssert exc.msg proc customServerTransport(server: StreamServer, fd: AsyncFD): StreamTransport = diff --git a/tests/teststream.nim b/tests/teststream.nim index b0427928c..fb5534b5c 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -55,124 +55,148 @@ suite "Stream Transport test suite": for i in 0 ..< len(result): result[i] = byte(message[i mod len(message)]) - proc serveClient1(server: StreamServer, transp: StreamTransport) {.async.} = - while not transp.atEof(): - var data = await transp.readLine() - if len(data) == 0: - doAssert(transp.atEof()) - break - doAssert(data.startsWith("REQUEST")) - var numstr = data[7..^1] - var num = parseInt(numstr) - var ans = "ANSWER" & $num & "\r\n" - var res = await transp.write(cast[pointer](addr ans[0]), len(ans)) - doAssert(res == len(ans)) - transp.close() - await transp.join() + proc serveClient1(server: StreamServer, transp: StreamTransport) {. 
+ async: (raises: []).} = + try: + while not transp.atEof(): + var data = await transp.readLine() + if len(data) == 0: + doAssert(transp.atEof()) + break + doAssert(data.startsWith("REQUEST")) + var numstr = data[7..^1] + var num = parseInt(numstr) + var ans = "ANSWER" & $num & "\r\n" + var res = await transp.write(cast[pointer](addr ans[0]), len(ans)) + doAssert(res == len(ans)) + transp.close() + await transp.join() + except CatchableError as exc: + raiseAssert exc.msg - proc serveClient2(server: StreamServer, transp: StreamTransport) {.async.} = - var buffer: array[20, char] - var check = "REQUEST" - while not transp.atEof(): - zeroMem(addr buffer[0], MessageSize) - try: - await transp.readExactly(addr buffer[0], MessageSize) - except TransportIncompleteError: - break - doAssert(equalMem(addr buffer[0], addr check[0], len(check))) - var numstr = "" - var i = 7 - while i < MessageSize and (buffer[i] in {'0'..'9'}): - numstr.add(buffer[i]) - inc(i) - var num = parseInt(numstr) - var ans = "ANSWER" & $num - zeroMem(addr buffer[0], MessageSize) - copyMem(addr buffer[0], addr ans[0], len(ans)) - var res = await transp.write(cast[pointer](addr buffer[0]), MessageSize) - doAssert(res == MessageSize) - transp.close() - await transp.join() + proc serveClient2(server: StreamServer, transp: StreamTransport) {. + async: (raises: []).} = + try: + var buffer: array[20, char] + var check = "REQUEST" + while not transp.atEof(): + zeroMem(addr buffer[0], MessageSize) + try: + await transp.readExactly(addr buffer[0], MessageSize) + except TransportIncompleteError: + break + doAssert(equalMem(addr buffer[0], addr check[0], len(check))) + var numstr = "" + var i = 7 + while i < MessageSize and (buffer[i] in {'0'..'9'}): + numstr.add(buffer[i]) + inc(i) + var num = parseInt(numstr) + var ans = "ANSWER" & $num + zeroMem(addr buffer[0], MessageSize) + copyMem(addr buffer[0], addr ans[0], len(ans)) + var res = await transp.write(cast[pointer](addr buffer[0]), MessageSize) + doAssert(res == MessageSize) + transp.close() + await transp.join() + except CatchableError as exc: + raiseAssert exc.msg - proc serveClient3(server: StreamServer, transp: StreamTransport) {.async.} = - var buffer: array[20, char] - var check = "REQUEST" - var suffixStr = "SUFFIX" - var suffix = newSeq[byte](6) - copyMem(addr suffix[0], addr suffixStr[0], len(suffixStr)) - var counter = MessagesCount - while counter > 0: - zeroMem(addr buffer[0], MessageSize) - var res = await transp.readUntil(addr buffer[0], MessageSize, suffix) - doAssert(equalMem(addr buffer[0], addr check[0], len(check))) - var numstr = "" - var i = 7 - while i < MessageSize and (buffer[i] in {'0'..'9'}): - numstr.add(buffer[i]) - inc(i) - var num = parseInt(numstr) - doAssert(len(numstr) < 8) - var ans = "ANSWER" & $num & "SUFFIX" - zeroMem(addr buffer[0], MessageSize) - copyMem(addr buffer[0], addr ans[0], len(ans)) - res = await transp.write(cast[pointer](addr buffer[0]), len(ans)) - doAssert(res == len(ans)) - dec(counter) - transp.close() - await transp.join() + proc serveClient3(server: StreamServer, transp: StreamTransport) {. 
+ async: (raises: []).} = + try: + var buffer: array[20, char] + var check = "REQUEST" + var suffixStr = "SUFFIX" + var suffix = newSeq[byte](6) + copyMem(addr suffix[0], addr suffixStr[0], len(suffixStr)) + var counter = MessagesCount + while counter > 0: + zeroMem(addr buffer[0], MessageSize) + var res = await transp.readUntil(addr buffer[0], MessageSize, suffix) + doAssert(equalMem(addr buffer[0], addr check[0], len(check))) + var numstr = "" + var i = 7 + while i < MessageSize and (buffer[i] in {'0'..'9'}): + numstr.add(buffer[i]) + inc(i) + var num = parseInt(numstr) + doAssert(len(numstr) < 8) + var ans = "ANSWER" & $num & "SUFFIX" + zeroMem(addr buffer[0], MessageSize) + copyMem(addr buffer[0], addr ans[0], len(ans)) + res = await transp.write(cast[pointer](addr buffer[0]), len(ans)) + doAssert(res == len(ans)) + dec(counter) + transp.close() + await transp.join() + except CatchableError as exc: + raiseAssert exc.msg - proc serveClient4(server: StreamServer, transp: StreamTransport) {.async.} = - var pathname = await transp.readLine() - var size = await transp.readLine() - var sizeNum = parseInt(size) - doAssert(sizeNum >= 0) - var rbuffer = newSeq[byte](sizeNum) - await transp.readExactly(addr rbuffer[0], sizeNum) - var lbuffer = readFile(pathname) - doAssert(len(lbuffer) == sizeNum) - doAssert(equalMem(addr rbuffer[0], addr lbuffer[0], sizeNum)) - var answer = "OK\r\n" - var res = await transp.write(cast[pointer](addr answer[0]), len(answer)) - doAssert(res == len(answer)) - transp.close() - await transp.join() + proc serveClient4(server: StreamServer, transp: StreamTransport) {. + async: (raises: []).} = + try: + var pathname = await transp.readLine() + var size = await transp.readLine() + var sizeNum = parseInt(size) + doAssert(sizeNum >= 0) + var rbuffer = newSeq[byte](sizeNum) + await transp.readExactly(addr rbuffer[0], sizeNum) + var lbuffer = readFile(pathname) + doAssert(len(lbuffer) == sizeNum) + doAssert(equalMem(addr rbuffer[0], addr lbuffer[0], sizeNum)) + var answer = "OK\r\n" + var res = await transp.write(cast[pointer](addr answer[0]), len(answer)) + doAssert(res == len(answer)) + transp.close() + await transp.join() + except CatchableError as exc: + raiseAssert exc.msg - proc serveClient7(server: StreamServer, transp: StreamTransport) {.async.} = - var answer = "DONE\r\n" - var expect = "" - var line = await transp.readLine() - doAssert(len(line) == BigMessageCount * len(BigMessagePattern)) - for i in 0.. Date: Fri, 17 Nov 2023 13:45:17 +0100 Subject: [PATCH 088/146] dedicated exceptions for `Future.read` failures (#474) Dedicated exceptions for `read` failures reduce the risk of mixing up "user" exceptions with those of Future itself. The risk still exists, if the user allows a chronos exception to bubble up explicitly. Because `await` structurally guarantees that the Future is not `pending` at the time of `read`, it does not raise this new exception. 
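
[Editorial aside, not part of the patch: a minimal sketch of how the dedicated `read` exceptions described above surface to callers. It assumes the post-change public `chronos` API re-exports `FuturePendingError`, and the `answer()` procedure is a hypothetical example, not code from this repository.]

```nim
# Sketch: reading a still-pending Future now raises FuturePendingError
# rather than a generic ValueError.
import chronos

proc answer(): Future[int] {.async.} =
  await sleepAsync(10.milliseconds)
  return 42

when isMainModule:
  let fut = answer()        # suspended at `await`, so still pending
  try:
    discard fut.read()      # pending -> FuturePendingError
  except FuturePendingError:
    echo "future is still pending"
  echo waitFor(fut)         # blocks until completion, prints 42
```
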
* introduce `FuturePendingError` and `FutureCompletedError` when `read`:ing a future of uncertain state * fix `waitFor` / `read` to return `lent` values * simplify code generation for `void`-returning async procs * document `Raising` type helper --- chronos/futures.nim | 22 ++-- chronos/internal/asyncfutures.nim | 209 ++++++++++++++++++++---------- chronos/internal/asyncmacro.nim | 177 +++++++++++++++---------- docs/src/async_procs.md | 18 ++- docs/src/concepts.md | 20 ++- docs/src/error_handling.md | 15 +++ docs/src/porting.md | 9 +- 7 files changed, 309 insertions(+), 161 deletions(-) diff --git a/chronos/futures.nim b/chronos/futures.nim index 0af635f5b..6fb9592a9 100644 --- a/chronos/futures.nim +++ b/chronos/futures.nim @@ -73,10 +73,15 @@ type cause*: FutureBase FutureError* = object of CatchableError + future*: FutureBase CancelledError* = object of FutureError ## Exception raised when accessing the value of a cancelled future +func raiseFutureDefect(msg: static string, fut: FutureBase) {. + noinline, noreturn.} = + raise (ref FutureDefect)(msg: msg, cause: fut) + when chronosFutureId: var currentID* {.threadvar.}: uint template id*(fut: FutureBase): uint = fut.internalId @@ -202,13 +207,11 @@ func value*[T: not void](future: Future[T]): lent T = ## Return the value in a completed future - raises Defect when ## `fut.completed()` is `false`. ## - ## See `read` for a version that raises an catchable error when future + ## See `read` for a version that raises a catchable error when future ## has not completed. when chronosStrictFutureAccess: if not future.completed(): - raise (ref FutureDefect)( - msg: "Future not completed while accessing value", - cause: future) + raiseFutureDefect("Future not completed while accessing value", future) future.internalValue @@ -216,13 +219,11 @@ func value*(future: Future[void]) = ## Return the value in a completed future - raises Defect when ## `fut.completed()` is `false`. ## - ## See `read` for a version that raises an catchable error when future + ## See `read` for a version that raises a catchable error when future ## has not completed. when chronosStrictFutureAccess: if not future.completed(): - raise (ref FutureDefect)( - msg: "Future not completed while accessing value", - cause: future) + raiseFutureDefect("Future not completed while accessing value", future) func error*(future: FutureBase): ref CatchableError = ## Return the error of `future`, or `nil` if future did not fail. @@ -231,9 +232,8 @@ func error*(future: FutureBase): ref CatchableError = ## future has not failed. when chronosStrictFutureAccess: if not future.failed() and not future.cancelled(): - raise (ref FutureDefect)( - msg: "Future not failed/cancelled while accessing error", - cause: future) + raiseFutureDefect( + "Future not failed/cancelled while accessing error", future) future.internalError diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index a36ff4a91..f60b2d916 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -8,6 +8,9 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) +## Features and utilities for `Future` that integrate it with the dispatcher +## and the rest of the async machinery + {.push raises: [].} import std/[sequtils, macros] @@ -45,15 +48,28 @@ func `[]`*(loc: array[LocationKind, ptr SrcLoc], v: int): ptr SrcLoc {. 
type FutureStr*[T] = ref object of Future[T] - ## Future to hold GC strings + ## Deprecated gcholder*: string FutureSeq*[A, B] = ref object of Future[A] - ## Future to hold GC seqs + ## Deprecated gcholder*: seq[B] + FuturePendingError* = object of FutureError + ## Error raised when trying to `read` a Future that is still pending + FutureCompletedError* = object of FutureError + ## Error raised when trying access the error of a completed Future + SomeFuture = Future|InternalRaisesFuture +func raiseFuturePendingError(fut: FutureBase) {. + noinline, noreturn, raises: FuturePendingError.} = + raise (ref FuturePendingError)(msg: "Future is still pending", future: fut) +func raiseFutureCompletedError(fut: FutureBase) {. + noinline, noreturn, raises: FutureCompletedError.} = + raise (ref FutureCompletedError)( + msg: "Future is completed, cannot read error", future: fut) + # Backwards compatibility for old FutureState name template Finished* {.deprecated: "Use Completed instead".} = Completed template Finished*(T: type FutureState): FutureState {. @@ -479,6 +495,10 @@ macro internalCheckComplete*(fut: InternalRaisesFuture, raises: typed) = # generics are lost - so instead, we pass the raises list explicitly let types = getRaisesTypes(raises) + types.copyLineInfo(raises) + for t in types: + t.copyLineInfo(raises) + if isNoRaises(types): return quote do: if not(isNil(`fut`.internalError)): @@ -497,8 +517,8 @@ macro internalCheckComplete*(fut: InternalRaisesFuture, raises: typed) = quote do: discard ), nnkElseExpr.newTree( - nnkRaiseStmt.newNimNode(lineInfoFrom=fut).add( - quote do: (`fut`.internalError) + nnkRaiseStmt.newTree( + nnkDotExpr.newTree(fut, ident "internalError") ) ) ) @@ -520,39 +540,51 @@ macro internalCheckComplete*(fut: InternalRaisesFuture, raises: typed) = ifRaise ) -proc read*[T: not void](future: Future[T] ): lent T {.raises: [CatchableError].} = - ## Retrieves the value of ``future``. Future must be finished otherwise - ## this function will fail with a ``ValueError`` exception. +proc readFinished[T: not void](fut: Future[T]): lent T {. + raises: [CatchableError].} = + # Read a future that is known to be finished, avoiding the extra exception + # effect. + internalCheckComplete(fut) + fut.internalValue + +proc read*[T: not void](fut: Future[T] ): lent T {.raises: [CatchableError].} = + ## Retrieves the value of `fut`. ## - ## If the result of the future is an error then that error will be raised. - if not future.finished(): - # TODO: Make a custom exception type for this? - raise newException(ValueError, "Future still in progress.") + ## If the future failed or was cancelled, the corresponding exception will be + ## raised. + ## + ## If the future is still pending, `FuturePendingError` will be raised. + if not fut.finished(): + raiseFuturePendingError(fut) - internalCheckComplete(future) - future.internalValue + fut.readFinished() -proc read*(future: Future[void] ) {.raises: [CatchableError].} = - ## Retrieves the value of ``future``. Future must be finished otherwise - ## this function will fail with a ``ValueError`` exception. +proc read*(fut: Future[void]) {.raises: [CatchableError].} = + ## Checks that `fut` completed. ## - ## If the result of the future is an error then that error will be raised. - if future.finished(): - internalCheckComplete(future) - else: - # TODO: Make a custom exception type for this? - raise newException(ValueError, "Future still in progress.") + ## If the future failed or was cancelled, the corresponding exception will be + ## raised. 
+ ## + ## If the future is still pending, `FuturePendingError` will be raised. + if not fut.finished(): + raiseFuturePendingError(fut) + + internalCheckComplete(fut) -proc readError*(future: FutureBase): ref CatchableError {.raises: [ValueError].} = - ## Retrieves the exception stored in ``future``. +proc readError*(fut: FutureBase): ref CatchableError {.raises: [FutureError].} = + ## Retrieves the exception of the failed or cancelled `fut`. ## - ## An ``ValueError`` exception will be thrown if no exception exists - ## in the specified Future. - if not(isNil(future.error)): - return future.error - else: - # TODO: Make a custom exception type for this? - raise newException(ValueError, "No error in future.") + ## If the future was completed with a value, `FutureCompletedError` will be + ## raised. + ## + ## If the future is still pending, `FuturePendingError` will be raised. + if not fut.finished(): + raiseFuturePendingError(fut) + + if isNil(fut.error): + raiseFutureCompletedError(fut) + + fut.error template taskFutureLocation(future: FutureBase): string = let loc = future.location[LocationKind.Create] @@ -568,18 +600,46 @@ template taskErrorMessage(future: FutureBase): string = template taskCancelMessage(future: FutureBase): string = "Asynchronous task " & taskFutureLocation(future) & " was cancelled!" -proc waitFor*[T](fut: Future[T]): T {.raises: [CatchableError].} = - ## **Blocks** the current thread until the specified future finishes and - ## reads it, potentially raising an exception if the future failed or was - ## cancelled. - var finished = false - # Ensure that callbacks currently scheduled on the future run before returning - proc continuation(udata: pointer) {.gcsafe.} = finished = true +proc pollFor[F: Future | InternalRaisesFuture](fut: F): F {.raises: [].} = + # Blocks the current thread of execution until `fut` has finished, returning + # the given future. + # + # Must not be called recursively (from inside `async` procedures). + # + # See alse `awaitne`. if not(fut.finished()): + var finished = false + # Ensure that callbacks currently scheduled on the future run before returning + proc continuation(udata: pointer) {.gcsafe.} = finished = true fut.addCallback(continuation) + while not(finished): poll() - fut.read() + + fut + +proc waitFor*[T: not void](fut: Future[T]): lent T {.raises: [CatchableError].} = + ## Blocks the current thread of execution until `fut` has finished, returning + ## its value. + ## + ## If the future failed or was cancelled, the corresponding exception will be + ## raised. + ## + ## Must not be called recursively (from inside `async` procedures). + ## + ## See also `await`, `Future.read` + pollFor(fut).readFinished() + +proc waitFor*(fut: Future[void]) {.raises: [CatchableError].} = + ## Blocks the current thread of execution until `fut` has finished. + ## + ## If the future failed or was cancelled, the corresponding exception will be + ## raised. + ## + ## Must not be called recursively (from inside `async` procedures). + ## + ## See also `await`, `Future.read` + pollFor(fut).internalCheckComplete() proc asyncSpawn*(future: Future[void]) = ## Spawns a new concurrent async task. @@ -943,7 +1003,7 @@ proc cancelAndWait*(future: FutureBase, loc: ptr SrcLoc): Future[void] {. retFuture -template cancelAndWait*(future: FutureBase): Future[void] = +template cancelAndWait*(future: FutureBase): Future[void].Raising([CancelledError]) = ## Cancel ``future``. 
cancelAndWait(future, getSrcLocation()) @@ -1500,37 +1560,56 @@ when defined(windows): {.pop.} # Automatically deduced raises from here onwards -proc waitFor*[T, E](fut: InternalRaisesFuture[T, E]): T = # {.raises: [E]} - ## **Blocks** the current thread until the specified future finishes and - ## reads it, potentially raising an exception if the future failed or was - ## cancelled. - while not(fut.finished()): - poll() +proc readFinished[T: not void; E](fut: InternalRaisesFuture[T, E]): lent T = + internalCheckComplete(fut, E) + fut.internalValue + +proc read*[T: not void, E](fut: InternalRaisesFuture[T, E]): lent T = # {.raises: [E, FuturePendingError].} + ## Retrieves the value of `fut`. + ## + ## If the future failed or was cancelled, the corresponding exception will be + ## raised. + ## + ## If the future is still pending, `FuturePendingError` will be raised. + if not fut.finished(): + raiseFuturePendingError(fut) - fut.read() + fut.readFinished() -proc read*[T: not void, E](future: InternalRaisesFuture[T, E]): lent T = # {.raises: [E, ValueError].} - ## Retrieves the value of ``future``. Future must be finished otherwise - ## this function will fail with a ``ValueError`` exception. +proc read*[E](fut: InternalRaisesFuture[void, E]) = # {.raises: [E].} + ## Checks that `fut` completed. ## - ## If the result of the future is an error then that error will be raised. - if not future.finished(): - # TODO: Make a custom exception type for this? - raise newException(ValueError, "Future still in progress.") + ## If the future failed or was cancelled, the corresponding exception will be + ## raised. + ## + ## If the future is still pending, `FuturePendingError` will be raised. + if not fut.finished(): + raiseFuturePendingError(fut) - internalCheckComplete(future, E) - future.internalValue + internalCheckComplete(fut, E) -proc read*[E](future: InternalRaisesFuture[void, E]) = # {.raises: [E, CancelledError].} - ## Retrieves the value of ``future``. Future must be finished otherwise - ## this function will fail with a ``ValueError`` exception. +proc waitFor*[T: not void; E](fut: InternalRaisesFuture[T, E]): lent T = # {.raises: [E]} + ## Blocks the current thread of execution until `fut` has finished, returning + ## its value. ## - ## If the result of the future is an error then that error will be raised. - if future.finished(): - internalCheckComplete(future) - else: - # TODO: Make a custom exception type for this? - raise newException(ValueError, "Future still in progress.") + ## If the future failed or was cancelled, the corresponding exception will be + ## raised. + ## + ## Must not be called recursively (from inside `async` procedures). + ## + ## See also `await`, `Future.read` + pollFor(fut).readFinished() + +proc waitFor*[E](fut: InternalRaisesFuture[void, E]) = # {.raises: [E]} + ## Blocks the current thread of execution until `fut` has finished. + ## + ## If the future failed or was cancelled, the corresponding exception will be + ## raised. + ## + ## Must not be called recursively (from inside `async` procedures). 
+ ## + ## See also `await`, `Future.read` + pollFor(fut).internalCheckComplete(E) proc `or`*[T, Y, E1, E2]( fut1: InternalRaisesFuture[T, E1], diff --git a/chronos/internal/asyncmacro.nim b/chronos/internal/asyncmacro.nim index 88e11e395..079e3bb4c 100644 --- a/chronos/internal/asyncmacro.nim +++ b/chronos/internal/asyncmacro.nim @@ -13,14 +13,14 @@ import ../[futures, config], ./raisesfutures -proc processBody(node, setResultSym, baseType: NimNode): NimNode {.compileTime.} = +proc processBody(node, setResultSym: NimNode): NimNode {.compileTime.} = case node.kind of nnkReturnStmt: # `return ...` -> `setResult(...); return` let res = newNimNode(nnkStmtList, node) if node[0].kind != nnkEmpty: - res.add newCall(setResultSym, processBody(node[0], setResultSym, baseType)) + res.add newCall(setResultSym, processBody(node[0], setResultSym)) res.add newNimNode(nnkReturnStmt, node).add(newEmptyNode()) res @@ -29,8 +29,14 @@ proc processBody(node, setResultSym, baseType: NimNode): NimNode {.compileTime.} # the Future we inject node else: + if node.kind == nnkYieldStmt: + # asyncdispatch allows `yield` but this breaks cancellation + warning( + "`yield` in async procedures not supported - use `awaitne` instead", + node) + for i in 0 ..< node.len: - node[i] = processBody(node[i], setResultSym, baseType) + node[i] = processBody(node[i], setResultSym) node proc wrapInTryFinally( @@ -179,7 +185,7 @@ proc getName(node: NimNode): string {.compileTime.} = macro unsupported(s: static[string]): untyped = error s -proc params2(someProc: NimNode): NimNode = +proc params2(someProc: NimNode): NimNode {.compileTime.} = # until https://github.com/nim-lang/Nim/pull/19563 is available if someProc.kind == nnkProcTy: someProc[0] @@ -275,6 +281,10 @@ proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = returnType[1] let + # When the base type is known to be void (and not generic), we can simplify + # code generation - however, in the case of generic async procedures it + # could still end up being void, meaning void detection needs to happen + # post-macro-expansion. baseTypeIsVoid = baseType.eqIdent("void") (raw, raises, handleException) = decodeParams(params) internalFutureType = @@ -295,7 +305,7 @@ proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = prc.params2[0] = internalReturnType - if prc.kind notin {nnkProcTy, nnkLambda}: # TODO: Nim bug? 
+ if prc.kind notin {nnkProcTy, nnkLambda}: prc.addPragma(newColonExpr(ident "stackTrace", ident "off")) # The proc itself doesn't raise @@ -326,63 +336,57 @@ proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = prc.body ) - when chronosDumpAsync: - echo repr prc - - return prc - - if prc.kind in {nnkProcDef, nnkLambda, nnkMethodDef, nnkDo} and + elif prc.kind in {nnkProcDef, nnkLambda, nnkMethodDef, nnkDo} and not isEmpty(prc.body): - # don't do anything with forward bodies (empty) let - prcName = prc.name.getName setResultSym = ident "setResult" - procBody = prc.body.processBody(setResultSym, baseType) - internalFutureSym = ident "chronosInternalRetFuture" - castFutureSym = nnkCast.newTree(internalFutureType, internalFutureSym) + procBody = prc.body.processBody(setResultSym) resultIdent = ident "result" - - resultDecl = nnkWhenStmt.newTree( - # when `baseType` is void: - nnkElifExpr.newTree( - nnkInfix.newTree(ident "is", baseType, ident "void"), - quote do: - template result: auto {.used.} = - {.fatal: "You should not reference the `result` variable inside" & - " a void async proc".} - ), - # else: - nnkElseExpr.newTree( - newStmtList( - quote do: {.push warning[resultshadowed]: off.}, - # var result {.used.}: `baseType` - # In the proc body, result may or may not end up being used - # depending on how the body is written - with implicit returns / - # expressions in particular, it is likely but not guaranteed that - # it is not used. Ideally, we would avoid emitting it in this - # case to avoid the default initializaiton. {.used.} typically - # works better than {.push.} which has a tendency to leak out of - # scope. - # TODO figure out if there's a way to detect `result` usage in - # the proc body _after_ template exapnsion, and therefore - # avoid creating this variable - one option is to create an - # addtional when branch witha fake `result` and check - # `compiles(procBody)` - this is not without cost though - nnkVarSection.newTree(nnkIdentDefs.newTree( - nnkPragmaExpr.newTree( - resultIdent, - nnkPragma.newTree(ident "used")), - baseType, newEmptyNode()) - ), - quote do: {.pop.}, + fakeResult = quote do: + template result: auto {.used.} = + {.fatal: "You should not reference the `result` variable inside" & + " a void async proc".} + resultDecl = + if baseTypeIsVoid: fakeResult + else: nnkWhenStmt.newTree( + # when `baseType` is void: + nnkElifExpr.newTree( + nnkInfix.newTree(ident "is", baseType, ident "void"), + fakeResult + ), + # else: + nnkElseExpr.newTree( + newStmtList( + quote do: {.push warning[resultshadowed]: off.}, + # var result {.used.}: `baseType` + # In the proc body, result may or may not end up being used + # depending on how the body is written - with implicit returns / + # expressions in particular, it is likely but not guaranteed that + # it is not used. Ideally, we would avoid emitting it in this + # case to avoid the default initializaiton. {.used.} typically + # works better than {.push.} which has a tendency to leak out of + # scope. 
+ # TODO figure out if there's a way to detect `result` usage in + # the proc body _after_ template exapnsion, and therefore + # avoid creating this variable - one option is to create an + # addtional when branch witha fake `result` and check + # `compiles(procBody)` - this is not without cost though + nnkVarSection.newTree(nnkIdentDefs.newTree( + nnkPragmaExpr.newTree( + resultIdent, + nnkPragma.newTree(ident "used")), + baseType, newEmptyNode()) + ), + quote do: {.pop.}, + ) ) ) - ) - # generates: + # ```nim # template `setResultSym`(code: untyped) {.used.} = # when typeof(code) is void: code # else: `resultIdent` = code + # ``` # # this is useful to handle implicit returns, but also # to bind the `result` to the one we declare here @@ -415,6 +419,8 @@ proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = ) ) + internalFutureSym = ident "chronosInternalRetFuture" + castFutureSym = nnkCast.newTree(internalFutureType, internalFutureSym) # Wrapping in try/finally ensures that early returns are handled properly # and that `defer` is processed in the right scope completeDecl = wrapInTryFinally( @@ -429,18 +435,13 @@ proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = internalFutureParameter = nnkIdentDefs.newTree( internalFutureSym, newIdentNode("FutureBase"), newEmptyNode()) + prcName = prc.name.getName iteratorNameSym = genSym(nskIterator, $prcName) closureIterator = newProc( iteratorNameSym, [newIdentNode("FutureBase"), internalFutureParameter], closureBody, nnkIteratorDef) - outerProcBody = newNimNode(nnkStmtList, prc.body) - - # Copy comment for nimdoc - if prc.body.len > 0 and prc.body[0].kind == nnkCommentStmt: - outerProcBody.add(prc.body[0]) - iteratorNameSym.copyLineInfo(prc) closureIterator.pragma = newNimNode(nnkPragma, lineInfoFrom=prc.body) @@ -455,39 +456,56 @@ proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = nnkBracket.newTree() )) + # The body of the original procedure (now moved to the iterator) is replaced + # with: + # + # ```nim + # let resultFuture = newFuture[T]() + # resultFuture.internalClosure = `iteratorNameSym` + # futureContinue(resultFuture) + # return resultFuture + # ``` + # + # Declared at the end to be sure that the closure doesn't reference it, + # avoid cyclic ref (#203) + # + # Do not change this code to `quote do` version because `instantiationInfo` + # will be broken for `newFuture()` call. + + let + outerProcBody = newNimNode(nnkStmtList, prc.body) + + # Copy comment for nimdoc + if prc.body.len > 0 and prc.body[0].kind == nnkCommentStmt: + outerProcBody.add(prc.body[0]) + outerProcBody.add(closureIterator) - # -> let resultFuture = newInternalRaisesFuture[T, E]() - # declared at the end to be sure that the closure - # doesn't reference it, avoid cyclic ref (#203) let retFutureSym = ident "resultFuture" newFutProc = if raises == nil: - newTree(nnkBracketExpr, ident "newFuture", baseType) + nnkBracketExpr.newTree(ident "newFuture", baseType) else: - newTree(nnkBracketExpr, ident "newInternalRaisesFuture", baseType, raises) + nnkBracketExpr.newTree(ident "newInternalRaisesFuture", baseType, raises) + retFutureSym.copyLineInfo(prc) - # Do not change this code to `quote do` version because `instantiationInfo` - # will be broken for `newFuture()` call. 
outerProcBody.add( newLetStmt( retFutureSym, newCall(newFutProc, newLit(prcName)) ) ) - # -> resultFuture.internalClosure = iterator + outerProcBody.add( newAssignment( newDotExpr(retFutureSym, newIdentNode("internalClosure")), iteratorNameSym) ) - # -> futureContinue(resultFuture)) outerProcBody.add( newCall(newIdentNode("futureContinue"), retFutureSym) ) - # -> return resultFuture outerProcBody.add newNimNode(nnkReturnStmt, prc.body[^1]).add(retFutureSym) prc.body = outerProcBody @@ -498,6 +516,13 @@ proc asyncSingleProc(prc, params: NimNode): NimNode {.compileTime.} = prc template await*[T](f: Future[T]): T = + ## Ensure that the given `Future` is finished, then return its value. + ## + ## If the `Future` failed or was cancelled, the corresponding exception will + ## be raised instead. + ## + ## If the `Future` is pending, execution of the current `async` procedure + ## will be suspended until the `Future` is finished. when declared(chronosInternalRetFuture): chronosInternalRetFuture.internalChild = f # `futureContinue` calls the iterator generated by the `async` @@ -512,18 +537,26 @@ template await*[T](f: Future[T]): T = else: unsupported "await is only available within {.async.}" -template await*[T, E](f: InternalRaisesFuture[T, E]): T = +template await*[T, E](fut: InternalRaisesFuture[T, E]): T = + ## Ensure that the given `Future` is finished, then return its value. + ## + ## If the `Future` failed or was cancelled, the corresponding exception will + ## be raised instead. + ## + ## If the `Future` is pending, execution of the current `async` procedure + ## will be suspended until the `Future` is finished. when declared(chronosInternalRetFuture): - chronosInternalRetFuture.internalChild = f + chronosInternalRetFuture.internalChild = fut # `futureContinue` calls the iterator generated by the `async` # transformation - `yield` gives control back to `futureContinue` which is # responsible for resuming execution once the yielded future is finished yield chronosInternalRetFuture.internalChild # `child` released by `futureContinue` - cast[type(f)](chronosInternalRetFuture.internalChild).internalCheckComplete(E) + cast[type(fut)]( + chronosInternalRetFuture.internalChild).internalCheckComplete(E) when T isnot void: - cast[type(f)](chronosInternalRetFuture.internalChild).value() + cast[type(fut)](chronosInternalRetFuture.internalChild).value() else: unsupported "await is only available within {.async.}" diff --git a/docs/src/async_procs.md b/docs/src/async_procs.md index ae8eb51bf..648f19be1 100644 --- a/docs/src/async_procs.md +++ b/docs/src/async_procs.md @@ -1,5 +1,13 @@ # Async procedures +Async procedures are those that interact with `chronos` to cooperatively +suspend and resume their execution depending on the completion of other +async procedures which themselves may be waiting for I/O to complete, timers to +expire or tasks running on other threads to complete. + +Async procedures are marked with the `{.async.}` pragma and return a `Future` +indicating the state of the operation. + ## The `async` pragma @@ -20,8 +28,8 @@ echo p().type # prints "Future[system.void]" Whenever `await` is encountered inside an async procedure, control is given back to the dispatcher for as many steps as it's necessary for the awaited future to complete, fail or be cancelled. `await` calls the -equivalent of `Future.read()` on the completed future and returns the -encapsulated value. +equivalent of `Future.read()` on the completed future to return the +encapsulated value when the operation finishes. 
```nim proc p1() {.async.} = @@ -51,10 +59,10 @@ In particular, if two `async` procedures have access to the same mutable state, the value before and after `await` might not be the same as the order of execution is not guaranteed! ``` -## Raw functions +## Raw procedures -Raw functions are those that interact with `chronos` via the `Future` type but -whose body does not go through the async transformation. +Raw async procedures are those that interact with `chronos` via the `Future` +type but whose body does not go through the async transformation. Such functions are created by adding `raw: true` to the `async` parameters: diff --git a/docs/src/concepts.md b/docs/src/concepts.md index fcc33afce..0469b8be4 100644 --- a/docs/src/concepts.md +++ b/docs/src/concepts.md @@ -1,12 +1,13 @@ # Concepts +Async/await is a programming model that relies on cooperative multitasking to +coordinate the concurrent execution of procedures, using event notifications +from the operating system or other treads to resume execution. + ## The dispatcher -Async/await programming relies on cooperative multitasking to coordinate the -concurrent execution of procedures, using event notifications from the operating system to resume execution. - The event handler loop is called a "dispatcher" and a single instance per thread is created, as soon as one is needed. @@ -16,6 +17,9 @@ progress, for example because it's waiting for some data to arrive, it hands control back to the dispatcher which ensures that the procedure is resumed when ready. +A single thread, and thus a single dispatcher, is typically able to handle +thousands of concurrent in-progress requests. + ## The `Future` type `Future` objects encapsulate the outcome of executing an `async` procedure. The @@ -69,13 +73,14 @@ structured this way. Both `waitFor` and `runForever` call `poll` which offers fine-grained control over the event loop steps. -Nested calls to `poll`, `waitFor` and `runForever` are not allowed. +Nested calls to `poll` - directly or indirectly via `waitFor` and `runForever` +are not allowed. ``` ## Cancellation Any pending `Future` can be cancelled. This can be used for timeouts, to start -multiple operations in parallel and cancel the rest as soon as one finishes, +multiple parallel operations and cancel the rest as soon as one finishes, to initiate the orderely shutdown of an application etc. ```nim @@ -110,7 +115,10 @@ waitFor(work.cancelAndWait()) ``` The `CancelledError` will now travel up the stack like any other exception. -It can be caught and handled (for instance, freeing some resources) +It can be caught for instance to free some resources and is then typically +re-raised for the whole chain operations to get cancelled. + +Alternatively, the cancellation request can be translated to a regular outcome of the operation - for example, a `read` operation might return an empty result. Cancelling an already-finished `Future` has no effect, as the following example of downloading two web pages concurrently shows: diff --git a/docs/src/error_handling.md b/docs/src/error_handling.md index be06a3555..54c1236f3 100644 --- a/docs/src/error_handling.md +++ b/docs/src/error_handling.md @@ -85,6 +85,21 @@ the operation they implement might get cancelled resulting in neither value nor error! 
``` +When using checked exceptions, the `Future` type is modified to include +`raises` information - it can be constructed with the `Raising` helper: + +```nim +# Create a variable of the type that will be returned by a an async function +# raising `[CancelledError]`: +var fut: Future[int].Raising([CancelledError]) +``` + +```admonition note +`Raising` creates a specialization of `InternalRaisesFuture` type - as the name +suggests, this is an internal type whose implementation details are likely to +change in future `chronos` versions. +``` + ## The `Exception` type Exceptions deriving from `Exception` are not caught by default as these may diff --git a/docs/src/porting.md b/docs/src/porting.md index 519de64be..1bdffe2d1 100644 --- a/docs/src/porting.md +++ b/docs/src/porting.md @@ -16,20 +16,25 @@ here are several things to consider: * Exception handling is now strict by default - see the [error handling](./error_handling.md) chapter for how to deal with `raises` effects * `AsyncEventBus` was removed - use `AsyncEventQueue` instead +* `Future.value` and `Future.error` panic when accessed in the wrong state +* `Future.read` and `Future.readError` raise `FutureError` instead of + `ValueError` when accessed in the wrong state ## `asyncdispatch` -Projects written for `asyncdispatch` and `chronos` look similar but there are +Code written for `asyncdispatch` and `chronos` looks similar but there are several differences to be aware of: * `chronos` has its own dispatch loop - you can typically not mix `chronos` and `asyncdispatch` in the same thread * `import chronos` instead of `import asyncdispatch` * cleanup is important - make sure to use `closeWait` to release any resources - you're using or file descript leaks and other + you're using or file descriptor and other leaks will ensue * cancellation support means that `CancelledError` may be raised from most `{.async.}` functions * Calling `yield` directly in tasks is not supported - instead, use `awaitne`. +* `asyncSpawn` is used instead of `asyncCheck` - note that exceptions raised + in tasks that are `asyncSpawn`:ed cause panic ## Supporting multiple backends From 0b136b33c8b8d8ee09777f31cf6fb53362a741f4 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sat, 18 Nov 2023 00:18:09 +0200 Subject: [PATCH 089/146] Asyncstreams asyncraises. (#472) * Fix transports addresses functions should not return so many exceptions. * Add raising `Defect` functions to AsyncQueue. * Add raises/asyncraises into async streams. * Remove `Safe` primitives. Make AsyncStreamError to be ancestor of AsyncError. Make AsyncStreamReader/Writer loops requirement to not raise any exceptions * Remove `par` fields. * Remove `par` fields from TLSStream. * Attempt to lower memory usage. --- chronos/asyncsync.nim | 47 ++++--- chronos/streams/asyncstream.nim | 211 ++++++++++++++------------------ chronos/streams/boundstream.nim | 22 +++- chronos/streams/chunkstream.nim | 17 ++- chronos/streams/tlsstream.nim | 114 +++++++++-------- chronos/transports/stream.nim | 8 +- 6 files changed, 219 insertions(+), 200 deletions(-) diff --git a/chronos/asyncsync.nim b/chronos/asyncsync.nim index 9bab1fd68..f77d5fe59 100644 --- a/chronos/asyncsync.nim +++ b/chronos/asyncsync.nim @@ -165,7 +165,7 @@ proc newAsyncEvent*(): AsyncEvent = AsyncEvent() proc wait*(event: AsyncEvent): Future[void] {. - async: (raw: true, raises: [CancelledError]).} = + async: (raw: true, raises: [CancelledError]).} = ## Block until the internal flag of ``event`` is `true`. 
## If the internal flag is `true` on entry, return immediately. Otherwise, ## block until another task calls `fire()` to set the flag to `true`, @@ -258,7 +258,7 @@ proc popLastImpl[T](aq: AsyncQueue[T]): T = res proc addFirstNoWait*[T](aq: AsyncQueue[T], item: T) {. - raises: [AsyncQueueFullError].}= + raises: [AsyncQueueFullError].} = ## Put an item ``item`` to the beginning of the queue ``aq`` immediately. ## ## If queue ``aq`` is full, then ``AsyncQueueFullError`` exception raised. @@ -267,7 +267,7 @@ proc addFirstNoWait*[T](aq: AsyncQueue[T], item: T) {. aq.addFirstImpl(item) proc addLastNoWait*[T](aq: AsyncQueue[T], item: T) {. - raises: [AsyncQueueFullError].}= + raises: [AsyncQueueFullError].} = ## Put an item ``item`` at the end of the queue ``aq`` immediately. ## ## If queue ``aq`` is full, then ``AsyncQueueFullError`` exception raised. @@ -276,7 +276,7 @@ proc addLastNoWait*[T](aq: AsyncQueue[T], item: T) {. aq.addLastImpl(item) proc popFirstNoWait*[T](aq: AsyncQueue[T]): T {. - raises: [AsyncQueueEmptyError].} = + raises: [AsyncQueueEmptyError].} = ## Get an item from the beginning of the queue ``aq`` immediately. ## ## If queue ``aq`` is empty, then ``AsyncQueueEmptyError`` exception raised. @@ -285,7 +285,7 @@ proc popFirstNoWait*[T](aq: AsyncQueue[T]): T {. aq.popFirstImpl() proc popLastNoWait*[T](aq: AsyncQueue[T]): T {. - raises: [AsyncQueueEmptyError].} = + raises: [AsyncQueueEmptyError].} = ## Get an item from the end of the queue ``aq`` immediately. ## ## If queue ``aq`` is empty, then ``AsyncQueueEmptyError`` exception raised. @@ -293,11 +293,13 @@ proc popLastNoWait*[T](aq: AsyncQueue[T]): T {. raise newException(AsyncQueueEmptyError, "AsyncQueue is empty!") aq.popLastImpl() -proc addFirst*[T](aq: AsyncQueue[T], item: T) {.async: (raises: [CancelledError]).} = +proc addFirst*[T](aq: AsyncQueue[T], item: T) {. + async: (raises: [CancelledError]).} = ## Put an ``item`` to the beginning of the queue ``aq``. If the queue is full, ## wait until a free slot is available before adding item. while aq.full(): - let putter = Future[void].Raising([CancelledError]).init("AsyncQueue.addFirst") + let putter = + Future[void].Raising([CancelledError]).init("AsyncQueue.addFirst") aq.putters.add(putter) try: await putter @@ -307,11 +309,13 @@ proc addFirst*[T](aq: AsyncQueue[T], item: T) {.async: (raises: [CancelledError] raise exc aq.addFirstImpl(item) -proc addLast*[T](aq: AsyncQueue[T], item: T) {.async: (raises: [CancelledError]).} = +proc addLast*[T](aq: AsyncQueue[T], item: T) {. + async: (raises: [CancelledError]).} = ## Put an ``item`` to the end of the queue ``aq``. If the queue is full, ## wait until a free slot is available before adding item. while aq.full(): - let putter = Future[void].Raising([CancelledError]).init("AsyncQueue.addLast") + let putter = + Future[void].Raising([CancelledError]).init("AsyncQueue.addLast") aq.putters.add(putter) try: await putter @@ -321,11 +325,13 @@ proc addLast*[T](aq: AsyncQueue[T], item: T) {.async: (raises: [CancelledError]) raise exc aq.addLastImpl(item) -proc popFirst*[T](aq: AsyncQueue[T]): Future[T] {.async: (raises: [CancelledError]).} = +proc popFirst*[T](aq: AsyncQueue[T]): Future[T] {. + async: (raises: [CancelledError]).} = ## Remove and return an ``item`` from the beginning of the queue ``aq``. ## If the queue is empty, wait until an item is available. 
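For illustration, a bounded producer/consumer built on the annotated queue API above might look as follows; `newAsyncQueue` is assumed to be the regular constructor exported by this module.

```nim
import chronos

proc producer(q: AsyncQueue[int]) {.async.} =
  for i in 1 .. 3:
    await q.addLast(i)             # suspends while the queue is full
  await q.addLast(0)               # sentinel value signalling "done"

proc consumer(q: AsyncQueue[int]) {.async.} =
  while true:
    let item = await q.popFirst()  # suspends while the queue is empty
    if item == 0:
      break
    echo "got ", item

proc main() {.async.} =
  let q = newAsyncQueue[int](2)    # at most two outstanding items
  await allFutures(producer(q), consumer(q))

waitFor main()
```

The blocking variants only raise `CancelledError`, while the `NoWait` variants raise the queue-specific errors, so callers that never cancel can use the former without any `try` blocks.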
while aq.empty(): - let getter = Future[void].Raising([CancelledError]).init("AsyncQueue.popFirst") + let getter = + Future[void].Raising([CancelledError]).init("AsyncQueue.popFirst") aq.getters.add(getter) try: await getter @@ -335,11 +341,13 @@ proc popFirst*[T](aq: AsyncQueue[T]): Future[T] {.async: (raises: [CancelledErro raise exc aq.popFirstImpl() -proc popLast*[T](aq: AsyncQueue[T]): Future[T] {.async: (raises: [CancelledError]).} = +proc popLast*[T](aq: AsyncQueue[T]): Future[T] {. + async: (raises: [CancelledError]).} = ## Remove and return an ``item`` from the end of the queue ``aq``. ## If the queue is empty, wait until an item is available. while aq.empty(): - let getter = Future[void].Raising([CancelledError]).init("AsyncQueue.popLast") + let getter = + Future[void].Raising([CancelledError]).init("AsyncQueue.popLast") aq.getters.add(getter) try: await getter @@ -350,22 +358,22 @@ proc popLast*[T](aq: AsyncQueue[T]): Future[T] {.async: (raises: [CancelledError aq.popLastImpl() proc putNoWait*[T](aq: AsyncQueue[T], item: T) {. - raises: [AsyncQueueFullError].} = + raises: [AsyncQueueFullError].} = ## Alias of ``addLastNoWait()``. aq.addLastNoWait(item) proc getNoWait*[T](aq: AsyncQueue[T]): T {. - raises: [AsyncQueueEmptyError].} = + raises: [AsyncQueueEmptyError].} = ## Alias of ``popFirstNoWait()``. aq.popFirstNoWait() proc put*[T](aq: AsyncQueue[T], item: T): Future[void] {. - async: (raw: true, raises: [CancelledError]).} = + async: (raw: true, raises: [CancelledError]).} = ## Alias of ``addLast()``. aq.addLast(item) proc get*[T](aq: AsyncQueue[T]): Future[T] {. - async: (raw: true, raises: [CancelledError]).} = + async: (raw: true, raises: [CancelledError]).} = ## Alias of ``popFirst()``. aq.popFirst() @@ -509,7 +517,8 @@ proc close*(ab: AsyncEventQueue) {.raises: [].} = ab.readers.reset() ab.queue.clear() -proc closeWait*(ab: AsyncEventQueue): Future[void] {.async: (raw: true, raises: []).} = +proc closeWait*(ab: AsyncEventQueue): Future[void] {. + async: (raw: true, raises: []).} = let retFuture = newFuture[void]("AsyncEventQueue.closeWait()", {FutureFlag.OwnCancelSchedule}) proc continuation(udata: pointer) {.gcsafe.} = @@ -568,7 +577,7 @@ proc emit*[T](ab: AsyncEventQueue[T], data: T) = proc waitEvents*[T](ab: AsyncEventQueue[T], key: EventQueueKey, eventsCount = -1): Future[seq[T]] {. 
- async: (raises: [AsyncEventQueueFullError, CancelledError]).} = + async: (raises: [AsyncEventQueueFullError, CancelledError]).} = ## Wait for events var events: seq[T] diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 4698e8358..a52108476 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -24,15 +24,13 @@ const ## AsyncStreamWriter leaks tracker name type - AsyncStreamError* = object of CatchableError + AsyncStreamError* = object of AsyncError AsyncStreamIncorrectDefect* = object of Defect AsyncStreamIncompleteError* = object of AsyncStreamError AsyncStreamLimitError* = object of AsyncStreamError AsyncStreamUseClosedError* = object of AsyncStreamError AsyncStreamReadError* = object of AsyncStreamError - par*: ref CatchableError AsyncStreamWriteError* = object of AsyncStreamError - par*: ref CatchableError AsyncStreamWriteEOFError* = object of AsyncStreamWriteError AsyncBuffer* = object @@ -53,7 +51,7 @@ type dataStr*: string size*: int offset*: int - future*: Future[void] + future*: Future[void].Raising([CancelledError, AsyncStreamError]) AsyncStreamState* = enum Running, ## Stream is online and working @@ -64,10 +62,10 @@ type Closed ## Stream was closed StreamReaderLoop* = proc (stream: AsyncStreamReader): Future[void] {. - gcsafe, raises: [].} + async: (raises: []).} ## Main read loop for read streams. StreamWriterLoop* = proc (stream: AsyncStreamWriter): Future[void] {. - gcsafe, raises: [].} + async: (raises: []).} ## Main write loop for write streams. AsyncStreamReader* = ref object of RootRef @@ -124,12 +122,12 @@ proc `[]`*(sb: AsyncBuffer, index: int): byte {.inline.} = proc update*(sb: var AsyncBuffer, size: int) {.inline.} = sb.offset += size -proc wait*(sb: var AsyncBuffer): Future[void] = +template wait*(sb: var AsyncBuffer): untyped = sb.events[0].clear() sb.events[1].fire() sb.events[0].wait() -proc transfer*(sb: var AsyncBuffer): Future[void] = +template transfer*(sb: var AsyncBuffer): untyped = sb.events[1].clear() sb.events[0].fire() sb.events[1].wait() @@ -150,7 +148,8 @@ proc copyData*(sb: AsyncBuffer, dest: pointer, offset, length: int) {.inline.} = unsafeAddr sb.buffer[0], length) proc upload*(sb: ptr AsyncBuffer, pbytes: ptr byte, - nbytes: int): Future[void] {.async.} = + nbytes: int): Future[void] {. + async: (raises: [CancelledError]).} = ## You can upload any amount of bytes to the buffer. If size of internal ## buffer is not enough to fit all the data at once, data will be uploaded ## via chunks of size up to internal buffer size. @@ -186,18 +185,20 @@ template copyOut*(dest: pointer, item: WriteItem, length: int) = elif item.kind == String: copyMem(dest, unsafeAddr item.dataStr[item.offset], length) -proc newAsyncStreamReadError(p: ref CatchableError): ref AsyncStreamReadError {. - noinline.} = +proc newAsyncStreamReadError( + p: ref TransportError + ): ref AsyncStreamReadError {.noinline.} = var w = newException(AsyncStreamReadError, "Read stream failed") w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg - w.par = p + w.parent = p w -proc newAsyncStreamWriteError(p: ref CatchableError): ref AsyncStreamWriteError {. - noinline.} = +proc newAsyncStreamWriteError( + p: ref TransportError + ): ref AsyncStreamWriteError {.noinline.} = var w = newException(AsyncStreamWriteError, "Write stream failed") w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg - w.par = p + w.parent = p w proc newAsyncStreamIncompleteError*(): ref AsyncStreamIncompleteError {. 
@@ -344,7 +345,8 @@ template readLoop(body: untyped): untyped = await rstream.buffer.wait() proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer, - nbytes: int) {.async.} = + nbytes: int) {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Read exactly ``nbytes`` bytes from read-only stream ``rstream`` and store ## it to ``pbytes``. ## @@ -365,7 +367,7 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer, raise exc except TransportIncompleteError: raise newAsyncStreamIncompleteError() - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamReadError(exc) else: if isNil(rstream.readerLoop): @@ -384,7 +386,8 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer, (consumed: count, done: index == nbytes) proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer, - nbytes: int): Future[int] {.async.} = + nbytes: int): Future[int] {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Perform one read operation on read-only stream ``rstream``. ## ## If internal buffer is not empty, ``nbytes`` bytes will be transferred from @@ -398,7 +401,7 @@ proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer, return await readOnce(rstream.tsource, pbytes, nbytes) except CancelledError as exc: raise exc - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamReadError(exc) else: if isNil(rstream.readerLoop): @@ -415,7 +418,8 @@ proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer, return count proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int, - sep: seq[byte]): Future[int] {.async.} = + sep: seq[byte]): Future[int] {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Read data from the read-only stream ``rstream`` until separator ``sep`` is ## found. ## @@ -446,7 +450,7 @@ proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int, raise newAsyncStreamIncompleteError() except TransportLimitError: raise newAsyncStreamLimitError() - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamReadError(exc) else: if isNil(rstream.readerLoop): @@ -476,7 +480,8 @@ proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int, return k proc readLine*(rstream: AsyncStreamReader, limit = 0, - sep = "\r\n"): Future[string] {.async.} = + sep = "\r\n"): Future[string] {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Read one line from read-only stream ``rstream``, where ``"line"`` is a ## sequence of bytes ending with ``sep`` (default is ``"\r\n"``). ## @@ -495,7 +500,7 @@ proc readLine*(rstream: AsyncStreamReader, limit = 0, return await readLine(rstream.tsource, limit, sep) except CancelledError as exc: raise exc - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamReadError(exc) else: if isNil(rstream.readerLoop): @@ -530,7 +535,8 @@ proc readLine*(rstream: AsyncStreamReader, limit = 0, (index, (state == len(sep)) or (lim == len(res))) return res -proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {.async.} = +proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Read all bytes from read-only stream ``rstream``. ## ## This procedure allocates buffer seq[byte] and return it as result. 
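From the caller's side, the tightened signatures above mean a read loop only has to consider `CancelledError` and `AsyncStreamError` (or one of its subtypes). A hypothetical line dumper, assuming the usual `atEof` accessor:

```nim
import chronos, chronos/streams/asyncstream

proc dumpLines(rstream: AsyncStreamReader) {.async.} =
  try:
    while not rstream.atEof():
      let line = await rstream.readLine()
      echo line
  except AsyncStreamError as exc:
    # covers incomplete reads, limit violations and transport failures
    echo "stream failed: ", exc.msg
```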
@@ -543,7 +549,7 @@ proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {.async.} = raise exc except TransportLimitError: raise newAsyncStreamLimitError() - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamReadError(exc) else: if isNil(rstream.readerLoop): @@ -559,7 +565,8 @@ proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {.async.} = (count, false) return res -proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {.async.} = +proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Read all bytes (n <= 0) or exactly `n` bytes from read-only stream ## ``rstream``. ## @@ -571,7 +578,7 @@ proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {.async.} = return await read(rstream.tsource, n) except CancelledError as exc: raise exc - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamReadError(exc) else: if isNil(rstream.readerLoop): @@ -590,7 +597,8 @@ proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {.async.} = (count, len(res) == n) return res -proc consume*(rstream: AsyncStreamReader): Future[int] {.async.} = +proc consume*(rstream: AsyncStreamReader): Future[int] {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Consume (discard) all bytes from read-only stream ``rstream``. ## ## Return number of bytes actually consumed (discarded). @@ -603,7 +611,7 @@ proc consume*(rstream: AsyncStreamReader): Future[int] {.async.} = raise exc except TransportLimitError: raise newAsyncStreamLimitError() - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamReadError(exc) else: if isNil(rstream.readerLoop): @@ -618,7 +626,8 @@ proc consume*(rstream: AsyncStreamReader): Future[int] {.async.} = (rstream.buffer.dataLen(), false) return res -proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {.async.} = +proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Consume (discard) all bytes (n <= 0) or ``n`` bytes from read-only stream ## ``rstream``. ## @@ -632,7 +641,7 @@ proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {.async.} = raise exc except TransportLimitError: raise newAsyncStreamLimitError() - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamReadError(exc) else: if isNil(rstream.readerLoop): @@ -652,7 +661,7 @@ proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {.async.} = return res proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {. - async.} = + async: (raises: [CancelledError, AsyncStreamError]).} = ## Read all bytes from stream ``rstream`` until ``predicate`` callback ## will not be satisfied. ## @@ -673,7 +682,7 @@ proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {. await readMessage(rstream.tsource, pred) except CancelledError as exc: raise exc - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamReadError(exc) else: if isNil(rstream.readerLoop): @@ -691,7 +700,8 @@ proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {. pred(rstream.buffer.buffer.toOpenArray(0, count - 1)) proc write*(wstream: AsyncStreamWriter, pbytes: pointer, - nbytes: int) {.async.} = + nbytes: int) {. 
+ async: (raises: [CancelledError, AsyncStreamError]).} = ## Write sequence of bytes pointed by ``pbytes`` of length ``nbytes`` to ## writer stream ``wstream``. ## @@ -708,9 +718,7 @@ proc write*(wstream: AsyncStreamWriter, pbytes: pointer, res = await write(wstream.tsource, pbytes, nbytes) except CancelledError as exc: raise exc - except AsyncStreamError as exc: - raise exc - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamWriteError(exc) if res != nbytes: raise newAsyncStreamIncompleteError() @@ -720,23 +728,17 @@ proc write*(wstream: AsyncStreamWriter, pbytes: pointer, await write(wstream.wsource, pbytes, nbytes) wstream.bytesCount = wstream.bytesCount + uint64(nbytes) else: - var item = WriteItem(kind: Pointer) - item.dataPtr = pbytes - item.size = nbytes - item.future = newFuture[void]("async.stream.write(pointer)") - try: - await wstream.queue.put(item) - await item.future - wstream.bytesCount = wstream.bytesCount + uint64(item.size) - except CancelledError as exc: - raise exc - except AsyncStreamError as exc: - raise exc - except CatchableError as exc: - raise newAsyncStreamWriteError(exc) + let item = WriteItem( + kind: Pointer, dataPtr: pbytes, size: nbytes, + future: Future[void].Raising([CancelledError, AsyncStreamError]) + .init("async.stream.write(pointer)")) + await wstream.queue.put(item) + await item.future + wstream.bytesCount = wstream.bytesCount + uint64(item.size) proc write*(wstream: AsyncStreamWriter, sbytes: sink seq[byte], - msglen = -1) {.async.} = + msglen = -1) {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Write sequence of bytes ``sbytes`` of length ``msglen`` to writer ## stream ``wstream``. ## @@ -758,7 +760,7 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink seq[byte], res = await write(wstream.tsource, sbytes, length) except CancelledError as exc: raise exc - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamWriteError(exc) if res != length: raise newAsyncStreamIncompleteError() @@ -768,29 +770,17 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink seq[byte], await write(wstream.wsource, sbytes, length) wstream.bytesCount = wstream.bytesCount + uint64(length) else: - var item = WriteItem(kind: Sequence) - when declared(shallowCopy): - if not(isLiteral(sbytes)): - shallowCopy(item.dataSeq, sbytes) - else: - item.dataSeq = sbytes - else: - item.dataSeq = sbytes - item.size = length - item.future = newFuture[void]("async.stream.write(seq)") - try: - await wstream.queue.put(item) - await item.future - wstream.bytesCount = wstream.bytesCount + uint64(item.size) - except CancelledError as exc: - raise exc - except AsyncStreamError as exc: - raise exc - except CatchableError as exc: - raise newAsyncStreamWriteError(exc) + let item = WriteItem( + kind: Sequence, dataSeq: move(sbytes), size: length, + future: Future[void].Raising([CancelledError, AsyncStreamError]) + .init("async.stream.write(seq)")) + await wstream.queue.put(item) + await item.future + wstream.bytesCount = wstream.bytesCount + uint64(item.size) proc write*(wstream: AsyncStreamWriter, sbytes: sink string, - msglen = -1) {.async.} = + msglen = -1) {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Write string ``sbytes`` of length ``msglen`` to writer stream ``wstream``. ## ## String ``sbytes`` must not be zero-length. 
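The writer side mirrors this: queueing data and finishing the stream can now fail only with `CancelledError` or `AsyncStreamError`. A minimal sketch:

```nim
import chronos, chronos/streams/asyncstream

proc sendGreeting(wstream: AsyncStreamWriter) {.async.} =
  try:
    await wstream.write("hello\r\n")  # hands the data to the writer loop
    await wstream.finish()            # signals that no more data follows
  except AsyncStreamError as exc:
    echo "write failed: ", exc.msg
```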
@@ -811,7 +801,7 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink string, res = await write(wstream.tsource, sbytes, length) except CancelledError as exc: raise exc - except CatchableError as exc: + except TransportError as exc: raise newAsyncStreamWriteError(exc) if res != length: raise newAsyncStreamIncompleteError() @@ -821,28 +811,16 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink string, await write(wstream.wsource, sbytes, length) wstream.bytesCount = wstream.bytesCount + uint64(length) else: - var item = WriteItem(kind: String) - when declared(shallowCopy): - if not(isLiteral(sbytes)): - shallowCopy(item.dataStr, sbytes) - else: - item.dataStr = sbytes - else: - item.dataStr = sbytes - item.size = length - item.future = newFuture[void]("async.stream.write(string)") - try: - await wstream.queue.put(item) - await item.future - wstream.bytesCount = wstream.bytesCount + uint64(item.size) - except CancelledError as exc: - raise exc - except AsyncStreamError as exc: - raise exc - except CatchableError as exc: - raise newAsyncStreamWriteError(exc) - -proc finish*(wstream: AsyncStreamWriter) {.async.} = + let item = WriteItem( + kind: String, dataStr: move(sbytes), size: length, + future: Future[void].Raising([CancelledError, AsyncStreamError]) + .init("async.stream.write(string)")) + await wstream.queue.put(item) + await item.future + wstream.bytesCount = wstream.bytesCount + uint64(item.size) + +proc finish*(wstream: AsyncStreamWriter) {. + async: (raises: [CancelledError, AsyncStreamError]).} = ## Finish write stream ``wstream``. checkStreamClosed(wstream) # For AsyncStreamWriter Finished state could be set manually or by stream's @@ -852,20 +830,15 @@ proc finish*(wstream: AsyncStreamWriter) {.async.} = if isNil(wstream.writerLoop): await wstream.wsource.finish() else: - var item = WriteItem(kind: Pointer) - item.size = 0 - item.future = newFuture[void]("async.stream.finish") - try: - await wstream.queue.put(item) - await item.future - except CancelledError as exc: - raise exc - except AsyncStreamError as exc: - raise exc - except CatchableError as exc: - raise newAsyncStreamWriteError(exc) - -proc join*(rw: AsyncStreamRW): Future[void] = + let item = WriteItem( + kind: Pointer, size: 0, + future: Future[void].Raising([CancelledError, AsyncStreamError]) + .init("async.stream.finish")) + await wstream.queue.put(item) + await item.future + +proc join*(rw: AsyncStreamRW): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = ## Get Future[void] which will be completed when stream become finished or ## closed. when rw is AsyncStreamReader: @@ -924,7 +897,8 @@ proc close*(rw: AsyncStreamRW) = rw.future.addCallback(continuation) rw.future.cancelSoon() -proc closeWait*(rw: AsyncStreamRW): Future[void] = +proc closeWait*(rw: AsyncStreamRW): Future[void] {. + async: (raw: true, raises: []).} = ## Close and frees resources of stream ``rw``. 
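Because `closeWait` is declared with `raises: []`, teardown code needs no exception handling, which makes it convenient in `finally` blocks:

```nim
import chronos, chronos/streams/asyncstream

proc withStream(rstream: AsyncStreamReader) {.async.} =
  try:
    discard await rstream.read(64)  # use the stream for something
  finally:
    await rstream.closeWait()       # always release it; never raises
```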
const FutureName = when rw is AsyncStreamReader: @@ -932,25 +906,20 @@ proc closeWait*(rw: AsyncStreamRW): Future[void] = else: "async.stream.writer.closeWait" - if rw.closed(): - return Future.completed(FutureName) + let retFuture = Future[void].Raising([]).init(FutureName) - let retFuture = newFuture[void](FutureName, {FutureFlag.OwnCancelSchedule}) + if rw.closed(): + retFuture.complete() + return retFuture proc continuation(udata: pointer) {.gcsafe, raises:[].} = retFuture.complete() - proc cancellation(udata: pointer) {.gcsafe, raises:[].} = - # We are not going to change the state of `retFuture` to cancelled, so we - # will prevent the entire sequence of Futures from being cancelled. - discard - rw.close() if rw.future.finished(): retFuture.complete() else: rw.future.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancellation retFuture proc startReader(rstream: AsyncStreamReader) = diff --git a/chronos/streams/boundstream.nim b/chronos/streams/boundstream.nim index dbb36ef09..ce6957198 100644 --- a/chronos/streams/boundstream.nim +++ b/chronos/streams/boundstream.nim @@ -14,6 +14,9 @@ ## ## For stream writing it means that you should write exactly bounded size ## of bytes. + +{.push raises: [].} + import results import ../asyncloop, ../timer import asyncstream, ../transports/stream, ../transports/common @@ -52,7 +55,8 @@ template newBoundedStreamOverflowError(): ref BoundedStreamOverflowError = newException(BoundedStreamOverflowError, "Stream boundary exceeded") proc readUntilBoundary(rstream: AsyncStreamReader, pbytes: pointer, - nbytes: int, sep: seq[byte]): Future[int] {.async.} = + nbytes: int, sep: seq[byte]): Future[int] {. + async: (raises: [CancelledError, AsyncStreamError]).} = doAssert(not(isNil(pbytes)), "pbytes must not be nil") doAssert(nbytes >= 0, "nbytes must be non-negative value") checkStreamClosed(rstream) @@ -96,7 +100,7 @@ func endsWith(s, suffix: openArray[byte]): bool = inc(i) if i >= len(suffix): return true -proc boundedReadLoop(stream: AsyncStreamReader) {.async.} = +proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = var rstream = BoundedStreamReader(stream) rstream.state = AsyncStreamState.Running var buffer = newSeq[byte](rstream.buffer.bufferLen()) @@ -186,12 +190,16 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async.} = break of AsyncStreamState.Finished: # Send `EOF` state to the consumer and wait until it will be received. 
- await rstream.buffer.transfer() + try: + await rstream.buffer.transfer() + except CancelledError: + rstream.state = AsyncStreamState.Error + rstream.error = newBoundedStreamIncompleteError() break of AsyncStreamState.Closing, AsyncStreamState.Closed: break -proc boundedWriteLoop(stream: AsyncStreamWriter) {.async.} = +proc boundedWriteLoop(stream: AsyncStreamWriter) {.async: (raises: []).} = var error: ref AsyncStreamError var wstream = BoundedStreamWriter(stream) @@ -255,7 +263,11 @@ proc boundedWriteLoop(stream: AsyncStreamWriter) {.async.} = doAssert(not(isNil(error))) while not(wstream.queue.empty()): - let item = wstream.queue.popFirstNoWait() + let item = + try: + wstream.queue.popFirstNoWait() + except AsyncQueueEmptyError: + raiseAssert "AsyncQueue should not be empty at this moment" if not(item.future.finished()): item.future.fail(error) diff --git a/chronos/streams/chunkstream.nim b/chronos/streams/chunkstream.nim index c0269a2ae..773920769 100644 --- a/chronos/streams/chunkstream.nim +++ b/chronos/streams/chunkstream.nim @@ -8,6 +8,9 @@ # MIT license (LICENSE-MIT) ## This module implements HTTP/1.1 chunked-encoded stream reading and writing. + +{.push raises: [].} + import ../asyncloop, ../timer import asyncstream, ../transports/stream, ../transports/common import results @@ -95,7 +98,7 @@ proc setChunkSize(buffer: var openArray[byte], length: int64): int = buffer[c + 1] = byte(0x0A) (c + 2) -proc chunkedReadLoop(stream: AsyncStreamReader) {.async.} = +proc chunkedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = var rstream = ChunkedStreamReader(stream) var buffer = newSeq[byte](MaxChunkHeaderSize) rstream.state = AsyncStreamState.Running @@ -156,6 +159,10 @@ proc chunkedReadLoop(stream: AsyncStreamReader) {.async.} = if rstream.state == AsyncStreamState.Running: rstream.state = AsyncStreamState.Error rstream.error = exc + except AsyncStreamError as exc: + if rstream.state == AsyncStreamState.Running: + rstream.state = AsyncStreamState.Error + rstream.error = exc if rstream.state != AsyncStreamState.Running: # We need to notify consumer about error/close, but we do not care about @@ -163,7 +170,7 @@ proc chunkedReadLoop(stream: AsyncStreamReader) {.async.} = rstream.buffer.forget() break -proc chunkedWriteLoop(stream: AsyncStreamWriter) {.async.} = +proc chunkedWriteLoop(stream: AsyncStreamWriter) {.async: (raises: []).} = var wstream = ChunkedStreamWriter(stream) var buffer: array[16, byte] var error: ref AsyncStreamError @@ -220,7 +227,11 @@ proc chunkedWriteLoop(stream: AsyncStreamWriter) {.async.} = if not(item.future.finished()): item.future.fail(error) while not(wstream.queue.empty()): - let pitem = wstream.queue.popFirstNoWait() + let pitem = + try: + wstream.queue.popFirstNoWait() + except AsyncQueueEmptyError: + raiseAssert "AsyncQueue should not be empty at this moment" if not(pitem.future.finished()): pitem.future.fail(error) break diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 0c8efb945..26f2babe3 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -9,6 +9,9 @@ ## This module implements Transport Layer Security (TLS) stream. This module ## uses sources of BearSSL by Thomas Pornin. 
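For orientation, a rough end-to-end sketch of typical client-side usage follows. The parameter order of `newTLSClientAsyncStream` (reader, writer, server name) is assumed here, the helper name `fetchBanner` is made up, and error handling is kept minimal.

```nim
import chronos, chronos/streams/tlsstream

proc fetchBanner(address: TransportAddress, hostname: string) {.async.} =
  let
    transp = await connect(address)
    reader = newAsyncStreamReader(transp)
    writer = newAsyncStreamWriter(transp)
    # third argument (assumed): server name used for SNI and certificate checks
    tls = newTLSClientAsyncStream(reader, writer, hostname)
  try:
    await tls.handshake()   # explicit handshake; writes would trigger it too
    await tls.writer.write("HEAD / HTTP/1.0\r\nHost: " & hostname & "\r\n\r\n")
    let status = await tls.reader.readLine()
    echo status             # e.g. "HTTP/1.0 200 OK"
  finally:
    await tls.writer.closeWait()
    await tls.reader.closeWait()
    await writer.closeWait()
    await reader.closeWait()
    await transp.closeWait()
```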
+ +{.push raises: [].} + import bearssl/[brssl, ec, errors, pem, rsa, ssl, x509], bearssl/certs/cacert @@ -71,7 +74,7 @@ type scontext: ptr SslServerContext stream*: TLSAsyncStream handshaked*: bool - handshakeFut*: Future[void] + handshakeFut*: Future[void].Raising([CancelledError, AsyncStreamError]) TLSStreamReader* = ref object of AsyncStreamReader case kind: TLSStreamKind @@ -81,7 +84,7 @@ type scontext: ptr SslServerContext stream*: TLSAsyncStream handshaked*: bool - handshakeFut*: Future[void] + handshakeFut*: Future[void].Raising([CancelledError, AsyncStreamError]) TLSAsyncStream* = ref object of RootRef xwc*: X509NoanchorContext @@ -91,7 +94,7 @@ type x509*: X509MinimalContext reader*: TLSStreamReader writer*: TLSStreamWriter - mainLoop*: Future[void] + mainLoop*: Future[void].Raising([]) trustAnchors: TrustAnchorStore SomeTLSStreamType* = TLSStreamReader|TLSStreamWriter|TLSAsyncStream @@ -101,9 +104,7 @@ type TLSStreamHandshakeError* = object of TLSStreamError TLSStreamInitError* = object of TLSStreamError TLSStreamReadError* = object of TLSStreamError - par*: ref AsyncStreamError TLSStreamWriteError* = object of TLSStreamError - par*: ref AsyncStreamError TLSStreamProtocolError* = object of TLSStreamError errCode*: int @@ -111,7 +112,7 @@ proc newTLSStreamWriteError(p: ref AsyncStreamError): ref TLSStreamWriteError {. noinline.} = var w = newException(TLSStreamWriteError, "Write stream failed") w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg - w.par = p + w.parent = p w template newTLSStreamProtocolImpl[T](message: T): ref TLSStreamProtocolError = @@ -137,7 +138,8 @@ template newTLSUnexpectedProtocolError(): ref TLSStreamProtocolError = proc newTLSStreamProtocolError[T](message: T): ref TLSStreamProtocolError = newTLSStreamProtocolImpl(message) -proc raiseTLSStreamProtocolError[T](message: T) {.noreturn, noinline.} = +proc raiseTLSStreamProtocolError[T](message: T) {. + noreturn, noinline, raises: [TLSStreamProtocolError].} = raise newTLSStreamProtocolImpl(message) proc new*(T: typedesc[TrustAnchorStore], @@ -150,7 +152,8 @@ proc new*(T: typedesc[TrustAnchorStore], TrustAnchorStore(anchors: res) proc tlsWriteRec(engine: ptr SslEngineContext, - writer: TLSStreamWriter): Future[TLSResult] {.async.} = + writer: TLSStreamWriter): Future[TLSResult] {. + async: (raises: []).} = try: var length = 0'u var buf = sslEngineSendrecBuf(engine[], length) @@ -168,7 +171,8 @@ proc tlsWriteRec(engine: ptr SslEngineContext, TLSResult.Stopped proc tlsWriteApp(engine: ptr SslEngineContext, - writer: TLSStreamWriter): Future[TLSResult] {.async.} = + writer: TLSStreamWriter): Future[TLSResult] {. + async: (raises: []).} = try: var item = await writer.queue.get() if item.size > 0: @@ -192,7 +196,10 @@ proc tlsWriteApp(engine: ptr SslEngineContext, # only part of item and adjust offset. item.offset = item.offset + int(length) item.size = item.size - int(length) - writer.queue.addFirstNoWait(item) + try: + writer.queue.addFirstNoWait(item) + except AsyncQueueFullError: + raiseAssert "AsyncQueue should not be full at this moment" sslEngineSendappAck(engine[], length) TLSResult.Success else: @@ -205,7 +212,8 @@ proc tlsWriteApp(engine: ptr SslEngineContext, TLSResult.Stopped proc tlsReadRec(engine: ptr SslEngineContext, - reader: TLSStreamReader): Future[TLSResult] {.async.} = + reader: TLSStreamReader): Future[TLSResult] {. 
+ async: (raises: []).} = try: var length = 0'u var buf = sslEngineRecvrecBuf(engine[], length) @@ -226,7 +234,8 @@ proc tlsReadRec(engine: ptr SslEngineContext, TLSResult.Stopped proc tlsReadApp(engine: ptr SslEngineContext, - reader: TLSStreamReader): Future[TLSResult] {.async.} = + reader: TLSStreamReader): Future[TLSResult] {. + async: (raises: []).} = try: var length = 0'u var buf = sslEngineRecvappBuf(engine[], length) @@ -240,7 +249,7 @@ proc tlsReadApp(engine: ptr SslEngineContext, template readAndReset(fut: untyped) = if fut.finished(): - let res = fut.read() + let res = fut.value() case res of TLSResult.Success, TLSResult.WriteEof, TLSResult.Stopped: fut = nil @@ -256,18 +265,6 @@ template readAndReset(fut: untyped) = loopState = AsyncStreamState.Finished break -proc cancelAndWait*(a, b, c, d: Future[TLSResult]): Future[void] = - var waiting: seq[FutureBase] - if not(isNil(a)) and not(a.finished()): - waiting.add(a.cancelAndWait()) - if not(isNil(b)) and not(b.finished()): - waiting.add(b.cancelAndWait()) - if not(isNil(c)) and not(c.finished()): - waiting.add(c.cancelAndWait()) - if not(isNil(d)) and not(d.finished()): - waiting.add(d.cancelAndWait()) - allFutures(waiting) - proc dumpState*(state: cuint): string = var res = "" if (state and SSL_CLOSED) == SSL_CLOSED: @@ -287,10 +284,10 @@ proc dumpState*(state: cuint): string = res.add("SSL_RECVAPP") "{" & res & "}" -proc tlsLoop*(stream: TLSAsyncStream) {.async.} = +proc tlsLoop*(stream: TLSAsyncStream) {.async: (raises: []).} = var - sendRecFut, sendAppFut: Future[TLSResult] - recvRecFut, recvAppFut: Future[TLSResult] + sendRecFut, sendAppFut: Future[TLSResult].Raising([]) + recvRecFut, recvAppFut: Future[TLSResult].Raising([]) let engine = case stream.reader.kind @@ -302,7 +299,7 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} = var loopState = AsyncStreamState.Running while true: - var waiting: seq[Future[TLSResult]] + var waiting: seq[Future[TLSResult].Raising([])] var state = sslEngineCurrentState(engine[]) if (state and SSL_CLOSED) == SSL_CLOSED: @@ -353,6 +350,8 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} = if len(waiting) > 0: try: discard await one(waiting) + except ValueError: + raiseAssert "array should not be empty at this moment" except CancelledError: if loopState == AsyncStreamState.Running: loopState = AsyncStreamState.Stopped @@ -360,8 +359,18 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} = if loopState != AsyncStreamState.Running: break - # Cancelling and waiting all the pending operations - await cancelAndWait(sendRecFut, sendAppFut, recvRecFut, recvAppFut) + # Cancelling and waiting and all the pending operations + var pending: seq[FutureBase] + if not(isNil(sendRecFut)) and not(sendRecFut.finished()): + pending.add(sendRecFut.cancelAndWait()) + if not(isNil(sendAppFut)) and not(sendAppFut.finished()): + pending.add(sendAppFut.cancelAndWait()) + if not(isNil(recvRecFut)) and not(recvRecFut.finished()): + pending.add(recvRecFut.cancelAndWait()) + if not(isNil(recvAppFut)) and not(recvAppFut.finished()): + pending.add(recvAppFut.cancelAndWait()) + await noCancel(allFutures(pending)) + # Calculating error let error = case loopState @@ -395,7 +404,11 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} = if not(isNil(error)): # Completing all pending writes while(not(stream.writer.queue.empty())): - let item = stream.writer.queue.popFirstNoWait() + let item = + try: + stream.writer.queue.popFirstNoWait() + except AsyncQueueEmptyError: + raiseAssert "AsyncQueue should not be empty at this moment" if 
not(item.future.finished()): item.future.fail(error) # Completing handshake @@ -415,18 +428,18 @@ proc tlsLoop*(stream: TLSAsyncStream) {.async.} = # Completing readers stream.reader.buffer.forget() -proc tlsWriteLoop(stream: AsyncStreamWriter) {.async.} = +proc tlsWriteLoop(stream: AsyncStreamWriter) {.async: (raises: []).} = var wstream = TLSStreamWriter(stream) wstream.state = AsyncStreamState.Running - await sleepAsync(0.milliseconds) + await noCancel(sleepAsync(0.milliseconds)) if isNil(wstream.stream.mainLoop): wstream.stream.mainLoop = tlsLoop(wstream.stream) await wstream.stream.mainLoop -proc tlsReadLoop(stream: AsyncStreamReader) {.async.} = +proc tlsReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = var rstream = TLSStreamReader(stream) rstream.state = AsyncStreamState.Running - await sleepAsync(0.milliseconds) + await noCancel(sleepAsync(0.milliseconds)) if isNil(rstream.stream.mainLoop): rstream.stream.mainLoop = tlsLoop(rstream.stream) await rstream.stream.mainLoop @@ -451,7 +464,7 @@ proc newTLSClientAsyncStream*( maxVersion = TLSVersion.TLS12, flags: set[TLSFlags] = {}, trustAnchors: SomeTrustAnchorType = MozillaTrustAnchors - ): TLSAsyncStream = + ): TLSAsyncStream {.raises: [TLSStreamInitError].} = ## Create new TLS asynchronous stream for outbound (client) connections ## using reading stream ``rsource`` and writing stream ``wsource``. ## @@ -541,7 +554,8 @@ proc newTLSServerAsyncStream*(rsource: AsyncStreamReader, minVersion = TLSVersion.TLS11, maxVersion = TLSVersion.TLS12, cache: TLSSessionCache = nil, - flags: set[TLSFlags] = {}): TLSAsyncStream = + flags: set[TLSFlags] = {}): TLSAsyncStream {. + raises: [TLSStreamInitError, TLSStreamProtocolError].} = ## Create new TLS asynchronous stream for inbound (server) connections ## using reading stream ``rsource`` and writing stream ``wsource``. ## @@ -609,10 +623,8 @@ proc newTLSServerAsyncStream*(rsource: AsyncStreamReader, if err == 0: raise newException(TLSStreamInitError, "Could not initialize TLS layer") - init(AsyncStreamWriter(res.writer), wsource, tlsWriteLoop, - bufferSize) - init(AsyncStreamReader(res.reader), rsource, tlsReadLoop, - bufferSize) + init(AsyncStreamWriter(res.writer), wsource, tlsWriteLoop, bufferSize) + init(AsyncStreamReader(res.reader), rsource, tlsReadLoop, bufferSize) res proc copyKey(src: RsaPrivateKey): TLSPrivateKey = @@ -653,7 +665,8 @@ proc copyKey(src: EcPrivateKey): TLSPrivateKey = res.eckey.curve = src.curve res -proc init*(tt: typedesc[TLSPrivateKey], data: openArray[byte]): TLSPrivateKey = +proc init*(tt: typedesc[TLSPrivateKey], data: openArray[byte]): TLSPrivateKey {. + raises: [TLSStreamProtocolError].} = ## Initialize TLS private key from array of bytes ``data``. ## ## This procedure initializes private key using raw, DER-encoded format, @@ -676,7 +689,8 @@ proc init*(tt: typedesc[TLSPrivateKey], data: openArray[byte]): TLSPrivateKey = raiseTLSStreamProtocolError("Unknown key type (" & $keyType & ")") res -proc pemDecode*(data: openArray[char]): seq[PEMElement] = +proc pemDecode*(data: openArray[char]): seq[PEMElement] {. + raises: [TLSStreamProtocolError].} = ## Decode PEM encoded string and get array of binary blobs. 
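With these annotations, malformed key or certificate material surfaces as `TLSStreamProtocolError` at load time rather than as a generic exception. A small hypothetical loader (the PEM strings stand in for file contents):

```nim
import chronos/streams/tlsstream

proc loadServerIdentity(keyPem, certPem: string): tuple[key: TLSPrivateKey,
                                                        cert: TLSCertificate] =
  try:
    result.key = TLSPrivateKey.init(keyPem)
    result.cert = TLSCertificate.init(certPem)
  except TLSStreamProtocolError as exc:
    raiseAssert "invalid PEM input: " & exc.msg
```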
if len(data) == 0: raiseTLSStreamProtocolError("Empty PEM message") @@ -717,7 +731,8 @@ proc pemDecode*(data: openArray[char]): seq[PEMElement] = raiseTLSStreamProtocolError("Invalid PEM encoding") res -proc init*(tt: typedesc[TLSPrivateKey], data: openArray[char]): TLSPrivateKey = +proc init*(tt: typedesc[TLSPrivateKey], data: openArray[char]): TLSPrivateKey {. + raises: [TLSStreamProtocolError].} = ## Initialize TLS private key from string ``data``. ## ## This procedure initializes private key using unencrypted PKCS#8 PEM @@ -735,7 +750,8 @@ proc init*(tt: typedesc[TLSPrivateKey], data: openArray[char]): TLSPrivateKey = res proc init*(tt: typedesc[TLSCertificate], - data: openArray[char]): TLSCertificate = + data: openArray[char]): TLSCertificate {. + raises: [TLSStreamProtocolError].} = ## Initialize TLS certificates from string ``data``. ## ## This procedure initializes array of certificates from PEM encoded string. @@ -770,9 +786,11 @@ proc init*(tt: typedesc[TLSSessionCache], size: int = 4096): TLSSessionCache = sslSessionCacheLruInit(addr res.context, addr res.storage[0], rsize) res -proc handshake*(rws: SomeTLSStreamType): Future[void] = +proc handshake*(rws: SomeTLSStreamType): Future[void] {. + async: (raw: true, raises: [CancelledError, AsyncStreamError]).} = ## Wait until initial TLS handshake will be successfully performed. - var retFuture = newFuture[void]("tlsstream.handshake") + let retFuture = Future[void].Raising([CancelledError, AsyncStreamError]) + .init("tlsstream.handshake") when rws is TLSStreamReader: if rws.handshaked: retFuture.complete() diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index bdcb8d7b7..f2e7a586e 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -140,7 +140,7 @@ type # transport for new client proc remoteAddress*(transp: StreamTransport): TransportAddress {. - raises: [TransportAbortedError, TransportTooManyError, TransportOsError].} = + raises: [TransportOsError].} = ## Returns ``transp`` remote socket address. doAssert(transp.kind == TransportKind.Socket, "Socket transport required!") if transp.remote.family == AddressFamily.None: @@ -148,12 +148,12 @@ proc remoteAddress*(transp: StreamTransport): TransportAddress {. var slen = SockLen(sizeof(saddr)) if getpeername(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr), addr slen) != 0: - raiseTransportError(osLastError()) + raiseTransportOsError(osLastError()) fromSAddr(addr saddr, slen, transp.remote) transp.remote proc localAddress*(transp: StreamTransport): TransportAddress {. - raises: [TransportAbortedError, TransportTooManyError, TransportOsError].} = + raises: [TransportOsError].} = ## Returns ``transp`` local socket address. doAssert(transp.kind == TransportKind.Socket, "Socket transport required!") if transp.local.family == AddressFamily.None: @@ -161,7 +161,7 @@ proc localAddress*(transp: StreamTransport): TransportAddress {. 
var slen = SockLen(sizeof(saddr)) if getsockname(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr), addr slen) != 0: - raiseTransportError(osLastError()) + raiseTransportOsError(osLastError()) fromSAddr(addr saddr, slen, transp.local) transp.local From f03cdfcc409efdb66801ddd230bba9c4614b62f3 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Sun, 19 Nov 2023 18:29:09 +0100 Subject: [PATCH 090/146] futures: sinkify (#475) This avoids copies here and there throughout the pipeline - ie `copyString` and friends can often be avoided when moving things into and out of futures Annoyingly, one has to sprinkle the codebase liberally with `sink` and `move` for the pipeline to work well - sink stuff _generally_ works better in orc/arc Looking at nim 1.6/refc, sink + local variable + move generates the best code: msg directly: ```nim T1_ = (*colonenv_).msg1; (*colonenv_).msg1 = copyStringRC1(msg); ``` local copy without move: ```nim T60_ = (*colonenv_).localCopy1; (*colonenv_).localCopy1 = copyStringRC1(msg); ``` local copy with move: ```nim asgnRef((void**) (&(*colonenv_).localCopy1), msg); ``` Annoyingly, sink is also broken for refc+literals as it tries to changes the refcount of the literal as part of the move (which shouldn't be happening, but here we are), so we have to use a hack to find literals and avoid moving them. --- chronos/config.nim | 37 +++++++++++++++++++++++++++++++ chronos/internal/asyncfutures.nim | 6 ++--- chronos/internal/asyncmacro.nim | 2 +- chronos/streams/tlsstream.nim | 8 +++---- chronos/transports/common.nim | 16 ------------- chronos/transports/datagram.nim | 10 ++++----- chronos/transports/stream.nim | 22 +++++++++--------- tests/testfut.nim | 6 +++++ 8 files changed, 67 insertions(+), 40 deletions(-) diff --git a/chronos/config.nim b/chronos/config.nim index 4055361f3..21c313206 100644 --- a/chronos/config.nim +++ b/chronos/config.nim @@ -101,3 +101,40 @@ when defined(debug) or defined(chronosConfig): printOption("chronosEventEngine", chronosEventEngine) printOption("chronosEventsCount", chronosEventsCount) printOption("chronosInitialSize", chronosInitialSize) + + +# In nim 1.6, `sink` + local variable + `move` generates the best code for +# moving a proc parameter into a closure - this only works for closure +# procedures however - in closure iterators, the parameter is always copied +# into the closure (!) meaning that non-raw `{.async.}` functions always carry +# this overhead, sink or no. See usages of chronosMoveSink for examples. +# In addition, we need to work around https://github.com/nim-lang/Nim/issues/22175 +# which has not been backported to 1.6. +# Long story short, the workaround is not needed in non-raw {.async.} because +# a copy of the literal is always made. 
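The practical effect shows up when completing futures with strings: literals stay intact under refc while dynamically built values become eligible for a move. This mirrors the `Sink with literals` test added to `tests/testfut.nim` later in this patch.

```nim
import chronos

let fut1 = newFuture[string]("literal")
fut1.complete("test")              # literal: detected by `isLiteral`, not moved
doAssert fut1.value() == "test"

var msg = "dyn" & "amic"
let fut2 = newFuture[string]("dynamic")
fut2.complete(msg)                 # non-literal: may be moved into the future
doAssert fut2.value() == "dynamic"
```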
+# TODO review the above for 2.0 / 2.0+refc +type + SeqHeader = object + length, reserved: int + +proc isLiteral(s: string): bool {.inline.} = + when defined(gcOrc) or defined(gcArc): + false + else: + s.len > 0 and (cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0 + +proc isLiteral[T](s: seq[T]): bool {.inline.} = + when defined(gcOrc) or defined(gcArc): + false + else: + s.len > 0 and (cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0 + +template chronosMoveSink*(val: auto): untyped = + bind isLiteral + when not (defined(gcOrc) or defined(gcArc)) and val is seq|string: + if isLiteral(val): + val + else: + move(val) + else: + move(val) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index f60b2d916..6a6dbb2ac 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -202,14 +202,14 @@ proc finish(fut: FutureBase, state: FutureState) = when chronosFutureTracking: scheduleDestructor(fut) -proc complete[T](future: Future[T], val: T, loc: ptr SrcLoc) = +proc complete[T](future: Future[T], val: sink T, loc: ptr SrcLoc) = if not(future.cancelled()): checkFinished(future, loc) doAssert(isNil(future.internalError)) - future.internalValue = val + future.internalValue = chronosMoveSink(val) future.finish(FutureState.Completed) -template complete*[T](future: Future[T], val: T) = +template complete*[T](future: Future[T], val: sink T) = ## Completes ``future`` with value ``val``. complete(future, val, getSrcLocation()) diff --git a/chronos/internal/asyncmacro.nim b/chronos/internal/asyncmacro.nim index 079e3bb4c..4e9b8d4b4 100644 --- a/chronos/internal/asyncmacro.nim +++ b/chronos/internal/asyncmacro.nim @@ -157,7 +157,7 @@ proc wrapInTryFinally( newCall(ident "complete", fut) ), nnkElseExpr.newTree( - newCall(ident "complete", fut, ident "result") + newCall(ident "complete", fut, newCall(ident "move", ident "result")) ) ) ) diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 26f2babe3..12ea6d3c4 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -15,7 +15,7 @@ import bearssl/[brssl, ec, errors, pem, rsa, ssl, x509], bearssl/certs/cacert -import ../asyncloop, ../timer, ../asyncsync +import ".."/[asyncloop, asyncsync, config, timer] import asyncstream, ../transports/stream, ../transports/common export asyncloop, asyncsync, timer, asyncstream @@ -62,7 +62,7 @@ type PEMContext = ref object data: seq[byte] - + TrustAnchorStore* = ref object anchors: seq[X509TrustAnchor] @@ -158,7 +158,7 @@ proc tlsWriteRec(engine: ptr SslEngineContext, var length = 0'u var buf = sslEngineSendrecBuf(engine[], length) doAssert(length != 0 and not isNil(buf)) - await writer.wsource.write(buf, int(length)) + await writer.wsource.write(chronosMoveSink(buf), int(length)) sslEngineSendrecAck(engine[], length) TLSResult.Success except AsyncStreamError as exc: @@ -481,7 +481,7 @@ proc newTLSClientAsyncStream*( ## ``minVersion`` of bigger then ``maxVersion`` you will get an error. ## ## ``flags`` - custom TLS connection flags. - ## + ## ## ``trustAnchors`` - use this if you want to use certificate trust ## anchors other than the default Mozilla trust anchors. 
If you pass ## a ``TrustAnchorStore`` you should reuse the same instance for diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index 24f9852b8..ba7568a45 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -596,22 +596,6 @@ proc raiseTransportOsError*(err: OSErrorCode) {. ## Raises transport specific OS error. raise getTransportOsError(err) -type - SeqHeader = object - length, reserved: int - -proc isLiteral*(s: string): bool {.inline.} = - when defined(gcOrc) or defined(gcArc): - false - else: - (cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0 - -proc isLiteral*[T](s: seq[T]): bool {.inline.} = - when defined(gcOrc) or defined(gcArc): - false - else: - (cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0 - template getTransportTooManyError*( code = OSErrorCode(0) ): ref TransportTooManyError = diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index 30f872d5a..fed15d362 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -11,7 +11,7 @@ import std/deques when not(defined(windows)): import ".."/selectors2 -import ".."/[asyncloop, osdefs, oserrno, osutils, handles] +import ".."/[asyncloop, config, osdefs, oserrno, osutils, handles] import "."/common type @@ -894,7 +894,7 @@ proc send*(transp: DatagramTransport, msg: sink string, transp.checkClosed(retFuture) let length = if msglen <= 0: len(msg) else: msglen - var localCopy = msg + var localCopy = chronosMoveSink(msg) retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithoutAddress, buf: addr localCopy[0], @@ -917,7 +917,7 @@ proc send*[T](transp: DatagramTransport, msg: sink seq[T], transp.checkClosed(retFuture) let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) - var localCopy = msg + var localCopy = chronosMoveSink(msg) retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithoutAddress, buf: addr localCopy[0], @@ -955,7 +955,7 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, transp.checkClosed(retFuture) let length = if msglen <= 0: len(msg) else: msglen - var localCopy = msg + var localCopy = chronosMoveSink(msg) retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithAddress, buf: addr localCopy[0], @@ -977,7 +977,7 @@ proc sendTo*[T](transp: DatagramTransport, remote: TransportAddress, var retFuture = newFuture[void]("datagram.transport.sendTo(seq)") transp.checkClosed(retFuture) let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) - var localCopy = msg + var localCopy = chronosMoveSink(msg) retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithAddress, buf: addr localCopy[0], diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index f2e7a586e..58aabc38d 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -10,8 +10,9 @@ {.push raises: [].} import std/deques -import ".."/[asyncloop, handles, osdefs, osutils, oserrno] -import common +import stew/ptrops +import ".."/[asyncloop, config, handles, osdefs, osutils, oserrno] +import ./common type VectorKind = enum @@ -770,7 +771,7 @@ when defined(windows): # Continue only if `retFuture` is not cancelled. 
if not(retFuture.finished()): let - pipeSuffix = $cast[cstring](unsafeAddr address.address_un[0]) + pipeSuffix = $cast[cstring](baseAddr address.address_un) pipeAsciiName = PipeHeaderName & pipeSuffix[1 .. ^1] pipeName = toWideString(pipeAsciiName).valueOr: retFuture.fail(getTransportOsError(error)) @@ -806,7 +807,7 @@ when defined(windows): proc createAcceptPipe(server: StreamServer): Result[AsyncFD, OSErrorCode] = let - pipeSuffix = $cast[cstring](addr server.local.address_un) + pipeSuffix = $cast[cstring](baseAddr server.local.address_un) pipeName = ? toWideString(PipeHeaderName & pipeSuffix) openMode = if FirstPipe notin server.flags: @@ -878,7 +879,7 @@ when defined(windows): if server.status notin {ServerStatus.Stopped, ServerStatus.Closed}: server.apending = true let - pipeSuffix = $cast[cstring](addr server.local.address_un) + pipeSuffix = $cast[cstring](baseAddr server.local.address_un) pipeAsciiName = PipeHeaderName & pipeSuffix pipeName = toWideString(pipeAsciiName).valueOr: raiseOsDefect(error, "acceptPipeLoop(): Unable to create name " & @@ -2011,7 +2012,7 @@ proc createStreamServer*(host: TransportAddress, elif host.family in {AddressFamily.Unix}: # We do not care about result here, because if file cannot be removed, # `bindSocket` will return EADDRINUSE. - discard osdefs.unlink(cast[cstring](unsafeAddr host.address_un[0])) + discard osdefs.unlink(cast[cstring](baseAddr host.address_un)) host.toSAddr(saddr, slen) if osdefs.bindSocket(SocketHandle(serverSocket), @@ -2240,12 +2241,11 @@ proc write*(transp: StreamTransport, msg: sink string, var retFuture = newFuture[int]("stream.transport.write(string)") transp.checkClosed(retFuture) transp.checkWriteEof(retFuture) - let nbytes = if msglen <= 0: len(msg) else: msglen var - pbytes = cast[ptr byte](unsafeAddr msg[0]) + pbytes = cast[ptr byte](baseAddr msg) rbytes = nbytes fastWrite(transp, pbytes, rbytes, nbytes) @@ -2253,7 +2253,7 @@ proc write*(transp: StreamTransport, msg: sink string, let written = nbytes - rbytes # In case fastWrite wrote some - var localCopy = msg + var localCopy = chronosMoveSink(msg) retFuture.addCallback(proc(_: pointer) = reset(localCopy)) pbytes = cast[ptr byte](addr localCopy[written]) @@ -2278,7 +2278,7 @@ proc write*[T](transp: StreamTransport, msg: sink seq[T], nbytes = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) var - pbytes = cast[ptr byte](unsafeAddr msg[0]) + pbytes = cast[ptr byte](baseAddr msg) rbytes = nbytes fastWrite(transp, pbytes, rbytes, nbytes) @@ -2286,7 +2286,7 @@ proc write*[T](transp: StreamTransport, msg: sink seq[T], let written = nbytes - rbytes # In case fastWrite wrote some - var localCopy = msg + var localCopy = chronosMoveSink(msg) retFuture.addCallback(proc(_: pointer) = reset(localCopy)) pbytes = cast[ptr byte](addr localCopy[written]) diff --git a/tests/testfut.nim b/tests/testfut.nim index 1297dc454..367b5d050 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -1997,3 +1997,9 @@ suite "Future[T] behavior test suite": check: future1.cancelled() == true future2.cancelled() == true + test "Sink with literals": + # https://github.com/nim-lang/Nim/issues/22175 + let fut = newFuture[string]() + fut.complete("test") + check: + fut.value() == "test" From fa0bf405e64b1c2c3693110aaff1881a535e5fab Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 20 Nov 2023 11:04:28 +0100 Subject: [PATCH 091/146] varargs overloads (#477) * varargs overloads for convenience and compatibility * no parameterless varargs calls with generic overloads --- 
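[Editorial note] To make the intent of these varargs overloads concrete, the following is a minimal usage sketch. It is illustrative only and not part of the patch; `worker` and `demo` are hypothetical helpers, while `allFutures`, `race`, `sleepAsync`, `waitFor` and `read` are existing chronos APIs.

    import chronos

    proc worker(n: int): Future[int] {.async.} =
      # Hypothetical helper: completes with `n` after `n` milliseconds.
      await sleepAsync(n.milliseconds)
      return n

    proc demo() {.async.} =
      # `allFutures` waits for every argument; the typed varargs overload
      # converts the arguments to FutureBase internally.
      await allFutures(worker(10), worker(20))
      # `race` completes with the first future to finish. Before this patch
      # only `varargs[FutureBase]` was accepted here; the new overloads let
      # typed Future[int] arguments be passed directly.
      let first = await race(worker(5), worker(50))
      echo "first finished future holds: ", Future[int](first).read()
      # Note: the slower future is simply abandoned in this sketch; a real
      # program would cancel or await it.

    waitFor demo()

Note also the second bullet above: with the generic overloads in place, parameterless calls such as `race()` or `race([])` no longer compile, which the updated test below checks explicitly.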
chronos/internal/asyncfutures.nim | 38 +++++++++++++++++++++++++++---- tests/testfut.nim | 13 +++++++---- 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 6a6dbb2ac..ba7eaf0ac 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1094,10 +1094,18 @@ proc allFutures*[T](futs: varargs[Future[T]]): Future[void] {. ## ## On cancel all the awaited futures ``futs`` WILL NOT BE cancelled. # Because we can't capture varargs[T] in closures we need to create copy. - var nfuts: seq[FutureBase] - for future in futs: - nfuts.add(future) - allFutures(nfuts) + allFutures(futs.mapIt(FutureBase(it))) + +proc allFutures*[T, E](futs: varargs[InternalRaisesFuture[T, E]]): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = + ## Returns a future which will complete only when all futures in ``futs`` + ## will be completed, failed or canceled. + ## + ## If the argument is empty, the returned future COMPLETES immediately. + ## + ## On cancel all the awaited futures ``futs`` WILL NOT BE cancelled. + # Because we can't capture varargs[T] in closures we need to create copy. + allFutures(futs.mapIt(FutureBase(it))) proc allFinished*[F: SomeFuture](futs: varargs[F]): Future[seq[F]] {. async: (raw: true, raises: [CancelledError]).} = @@ -1239,6 +1247,28 @@ proc race*(futs: varargs[FutureBase]): Future[FutureBase] {. return retFuture +proc race*[T](futs: varargs[Future[T]]): Future[FutureBase] {. + async: (raw: true, raises: [ValueError, CancelledError]).} = + ## Returns a future which will complete only when all futures in ``futs`` + ## will be completed, failed or canceled. + ## + ## If the argument is empty, the returned future COMPLETES immediately. + ## + ## On cancel all the awaited futures ``futs`` WILL NOT BE cancelled. + # Because we can't capture varargs[T] in closures we need to create copy. + race(futs.mapIt(FutureBase(it))) + +proc race*[T, E](futs: varargs[InternalRaisesFuture[T, E]]): Future[FutureBase] {. + async: (raw: true, raises: [ValueError, CancelledError]).} = + ## Returns a future which will complete only when all futures in ``futs`` + ## will be completed, failed or canceled. + ## + ## If the argument is empty, the returned future COMPLETES immediately. + ## + ## On cancel all the awaited futures ``futs`` WILL NOT BE cancelled. + # Because we can't capture varargs[T] in closures we need to create copy. + race(futs.mapIt(FutureBase(it))) + when (chronosEventEngine in ["epoll", "kqueue"]) or defined(windows): import std/os diff --git a/tests/testfut.nim b/tests/testfut.nim index 367b5d050..fc2401d04 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -1314,12 +1314,17 @@ suite "Future[T] behavior test suite": test "race(zero) test": var tseq = newSeq[FutureBase]() var fut1 = race(tseq) - var fut2 = race() - var fut3 = race([]) + check: + # https://github.com/nim-lang/Nim/issues/22964 + not compiles(block: + var fut2 = race()) + not compiles(block: + var fut3 = race([])) + check: fut1.failed() - fut2.failed() - fut3.failed() + # fut2.failed() + # fut3.failed() asyncTest "race(varargs) test": proc vlient1() {.async.} = From b18d471629357ac7a07aed47d74e15fc9c71c664 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 21 Nov 2023 12:01:44 +0200 Subject: [PATCH 092/146] Asyncraises HTTP client/server. (#476) * Fixes. * Make httpcommon no-raises. * Make httpbodyrw no-raises. * Make multipart no-raises. * Make httpdebug no-raises. 
* Make httpagent no-raises. * Make httpclient no-raises. * Make httpserver/shttpserver no-raises. * fix prepend/remove when E is noraises --------- Co-authored-by: Jacek Sieka --- chronos/apps/http/httpagent.nim | 3 + chronos/apps/http/httpbodyrw.nim | 12 +- chronos/apps/http/httpclient.nim | 272 +++++++++++++++------------- chronos/apps/http/httpcommon.nim | 51 +++--- chronos/apps/http/httpdebug.nim | 5 +- chronos/apps/http/httpserver.nim | 280 ++++++++++++++++------------- chronos/apps/http/multipart.nim | 103 ++++++----- chronos/apps/http/shttpserver.nim | 51 +++--- chronos/internal/asyncfutures.nim | 4 +- chronos/internal/raisesfutures.nim | 25 ++- chronos/transports/stream.nim | 15 +- 11 files changed, 468 insertions(+), 353 deletions(-) diff --git a/chronos/apps/http/httpagent.nim b/chronos/apps/http/httpagent.nim index c8cac48f6..36d13f253 100644 --- a/chronos/apps/http/httpagent.nim +++ b/chronos/apps/http/httpagent.nim @@ -6,6 +6,9 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) + +{.push raises: [].} + import strutils const diff --git a/chronos/apps/http/httpbodyrw.nim b/chronos/apps/http/httpbodyrw.nim index bb28ea643..c9ac899bf 100644 --- a/chronos/apps/http/httpbodyrw.nim +++ b/chronos/apps/http/httpbodyrw.nim @@ -6,6 +6,9 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) + +{.push raises: [].} + import ../../asyncloop, ../../asyncsync import ../../streams/[asyncstream, boundstream] import httpcommon @@ -36,7 +39,7 @@ proc newHttpBodyReader*(streams: varargs[AsyncStreamReader]): HttpBodyReader = trackCounter(HttpBodyReaderTrackerName) res -proc closeWait*(bstream: HttpBodyReader) {.async.} = +proc closeWait*(bstream: HttpBodyReader) {.async: (raises: []).} = ## Close and free resource allocated by body reader. if bstream.bstate == HttpState.Alive: bstream.bstate = HttpState.Closing @@ -61,7 +64,7 @@ proc newHttpBodyWriter*(streams: varargs[AsyncStreamWriter]): HttpBodyWriter = trackCounter(HttpBodyWriterTrackerName) res -proc closeWait*(bstream: HttpBodyWriter) {.async.} = +proc closeWait*(bstream: HttpBodyWriter) {.async: (raises: []).} = ## Close and free all the resources allocated by body writer. if bstream.bstate == HttpState.Alive: bstream.bstate = HttpState.Closing @@ -73,7 +76,7 @@ proc closeWait*(bstream: HttpBodyWriter) {.async.} = bstream.bstate = HttpState.Closed untrackCounter(HttpBodyWriterTrackerName) -proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [].} = +proc hasOverflow*(bstream: HttpBodyReader): bool = if len(bstream.streams) == 1: # If HttpBodyReader has only one stream it has ``BoundedStreamReader``, in # such case its impossible to get more bytes then expected amount. @@ -89,6 +92,5 @@ proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [].} = else: false -proc closed*(bstream: HttpBodyReader | HttpBodyWriter): bool {. 
- raises: [].} = +proc closed*(bstream: HttpBodyReader | HttpBodyWriter): bool = bstream.bstate != HttpState.Alive diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 83d1ddfc7..5f4bd71fb 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -6,6 +6,9 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) + +{.push raises: [].} + import std/[uri, tables, sequtils] import stew/[base10, base64, byteutils], httputils, results import ../../asyncloop, ../../asyncsync @@ -120,7 +123,7 @@ type headersTimeout*: Duration idleTimeout: Duration idlePeriod: Duration - watcherFut: Future[void] + watcherFut: Future[void].Raising([]) connectionBufferSize*: int maxConnections*: int connectionsCount*: int @@ -253,7 +256,7 @@ template isIdle(conn: HttpClientConnectionRef, timestamp: Moment, timeout: Duration): bool = (timestamp - conn.timestamp) >= timeout -proc sessionWatcher(session: HttpSessionRef) {.async.} +proc sessionWatcher(session: HttpSessionRef) {.async: (raises: []).} proc new*(t: typedesc[HttpSessionRef], flags: HttpClientFlags = {}, @@ -265,8 +268,7 @@ proc new*(t: typedesc[HttpSessionRef], idleTimeout = HttpConnectionIdleTimeout, idlePeriod = HttpConnectionCheckPeriod, socketFlags: set[SocketFlags] = {}, - dualstack = DualStackType.Auto): HttpSessionRef {. - raises: [] .} = + dualstack = DualStackType.Auto): HttpSessionRef = ## Create new HTTP session object. ## ## ``maxRedirections`` - maximum number of HTTP 3xx redirections @@ -292,10 +294,10 @@ proc new*(t: typedesc[HttpSessionRef], if HttpClientFlag.Http11Pipeline in flags: sessionWatcher(res) else: - newFuture[void]("session.watcher.placeholder") + Future[void].Raising([]).init("session.watcher.placeholder") res -proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] {.raises: [] .} = +proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] = var res: set[TLSFlags] if HttpClientFlag.NoVerifyHost in flags: res.incl(TLSFlags.NoVerifyHost) @@ -306,7 +308,7 @@ proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] {.raises: [] .} = proc getHttpAddress*( url: Uri, flags: HttpClientFlags = {} - ): HttpAddressResult {.raises: [].} = + ): HttpAddressResult = let scheme = if len(url.scheme) == 0: @@ -370,24 +372,23 @@ proc getHttpAddress*( proc getHttpAddress*( url: string, flags: HttpClientFlags = {} - ): HttpAddressResult {.raises: [].} = + ): HttpAddressResult = getHttpAddress(parseUri(url), flags) proc getHttpAddress*( session: HttpSessionRef, url: Uri - ): HttpAddressResult {.raises: [].} = + ): HttpAddressResult = getHttpAddress(url, session.flags) proc getHttpAddress*( session: HttpSessionRef, url: string - ): HttpAddressResult {.raises: [].} = + ): HttpAddressResult = ## Create new HTTP address using URL string ``url`` and . getHttpAddress(parseUri(url), session.flags) -proc getAddress*(session: HttpSessionRef, url: Uri): HttpResult[HttpAddress] {. - raises: [] .} = +proc getAddress*(session: HttpSessionRef, url: Uri): HttpResult[HttpAddress] = let scheme = if len(url.scheme) == 0: HttpClientScheme.NonSecure @@ -451,13 +452,13 @@ proc getAddress*(session: HttpSessionRef, url: Uri): HttpResult[HttpAddress] {. addresses: addresses)) proc getAddress*(session: HttpSessionRef, - url: string): HttpResult[HttpAddress] {.raises: [].} = + url: string): HttpResult[HttpAddress] = ## Create new HTTP address using URL string ``url`` and . 
session.getAddress(parseUri(url)) proc getAddress*(address: TransportAddress, ctype: HttpClientScheme = HttpClientScheme.NonSecure, - queryString: string = "/"): HttpAddress {.raises: [].} = + queryString: string = "/"): HttpAddress = ## Create new HTTP address using Transport address ``address``, connection ## type ``ctype`` and query string ``queryString``. let uri = parseUri(queryString) @@ -540,8 +541,12 @@ proc getUniqueConnectionId(session: HttpSessionRef): uint64 = inc(session.counter) session.counter -proc new(t: typedesc[HttpClientConnectionRef], session: HttpSessionRef, - ha: HttpAddress, transp: StreamTransport): HttpClientConnectionRef = +proc new( + t: typedesc[HttpClientConnectionRef], + session: HttpSessionRef, + ha: HttpAddress, + transp: StreamTransport + ): Result[HttpClientConnectionRef, string] = case ha.scheme of HttpClientScheme.NonSecure: let res = HttpClientConnectionRef( @@ -554,44 +559,48 @@ proc new(t: typedesc[HttpClientConnectionRef], session: HttpSessionRef, remoteHostname: ha.id ) trackCounter(HttpClientConnectionTrackerName) - res + ok(res) of HttpClientScheme.Secure: - let treader = newAsyncStreamReader(transp) - let twriter = newAsyncStreamWriter(transp) - let tls = newTLSClientAsyncStream(treader, twriter, ha.hostname, - flags = session.flags.getTLSFlags()) - let res = HttpClientConnectionRef( - id: session.getUniqueConnectionId(), - kind: HttpClientScheme.Secure, - transp: transp, - treader: treader, - twriter: twriter, - reader: tls.reader, - writer: tls.writer, - tls: tls, - state: HttpClientConnectionState.Connecting, - remoteHostname: ha.id - ) + let + treader = newAsyncStreamReader(transp) + twriter = newAsyncStreamWriter(transp) + tls = + try: + newTLSClientAsyncStream(treader, twriter, ha.hostname, + flags = session.flags.getTLSFlags()) + except TLSStreamInitError as exc: + return err(exc.msg) + + res = HttpClientConnectionRef( + id: session.getUniqueConnectionId(), + kind: HttpClientScheme.Secure, + transp: transp, + treader: treader, + twriter: twriter, + reader: tls.reader, + writer: tls.writer, + tls: tls, + state: HttpClientConnectionState.Connecting, + remoteHostname: ha.id + ) trackCounter(HttpClientConnectionTrackerName) - res + ok(res) -proc setError(request: HttpClientRequestRef, error: ref HttpError) {. - raises: [] .} = +proc setError(request: HttpClientRequestRef, error: ref HttpError) = request.error = error request.state = HttpReqRespState.Error if not(isNil(request.connection)): request.connection.state = HttpClientConnectionState.Error request.connection.error = error -proc setError(response: HttpClientResponseRef, error: ref HttpError) {. - raises: [] .} = +proc setError(response: HttpClientResponseRef, error: ref HttpError) = response.error = error response.state = HttpReqRespState.Error if not(isNil(response.connection)): response.connection.state = HttpClientConnectionState.Error response.connection.error = error -proc closeWait(conn: HttpClientConnectionRef) {.async.} = +proc closeWait(conn: HttpClientConnectionRef) {.async: (raises: []).} = ## Close HttpClientConnectionRef instance ``conn`` and free all the resources. if conn.state notin {HttpClientConnectionState.Closing, HttpClientConnectionState.Closed}: @@ -613,7 +622,8 @@ proc closeWait(conn: HttpClientConnectionRef) {.async.} = untrackCounter(HttpClientConnectionTrackerName) proc connect(session: HttpSessionRef, - ha: HttpAddress): Future[HttpClientConnectionRef] {.async.} = + ha: HttpAddress): Future[HttpClientConnectionRef] {. 
+ async: (raises: [CancelledError, HttpConnectionError]).} = ## Establish new connection with remote server using ``url`` and ``flags``. ## On success returns ``HttpClientConnectionRef`` object. var lastError = "" @@ -627,12 +637,14 @@ proc connect(session: HttpSessionRef, dualstack = session.dualstack) except CancelledError as exc: raise exc - except CatchableError: + except TransportError: nil if not(isNil(transp)): let conn = block: - let res = HttpClientConnectionRef.new(session, ha, transp) + let res = HttpClientConnectionRef.new(session, ha, transp).valueOr: + raiseHttpConnectionError( + "Could not connect to remote host, reason: " & error) if res.kind == HttpClientScheme.Secure: try: await res.tls.handshake() @@ -662,7 +674,7 @@ proc connect(session: HttpSessionRef, raiseHttpConnectionError("Could not connect to remote host") proc removeConnection(session: HttpSessionRef, - conn: HttpClientConnectionRef) {.async.} = + conn: HttpClientConnectionRef) {.async: (raises: []).} = let removeHost = block: var res = false @@ -686,7 +698,8 @@ proc acquireConnection( session: HttpSessionRef, ha: HttpAddress, flags: set[HttpClientRequestFlag] - ): Future[HttpClientConnectionRef] {.async.} = + ): Future[HttpClientConnectionRef] {. + async: (raises: [CancelledError, HttpConnectionError]).} = ## Obtain connection from ``session`` or establish a new one. var default: seq[HttpClientConnectionRef] let timestamp = Moment.now() @@ -710,10 +723,11 @@ proc acquireConnection( inc(session.connectionsCount) connection.setTimestamp(timestamp) connection.setDuration() - return connection + connection proc releaseConnection(session: HttpSessionRef, - connection: HttpClientConnectionRef) {.async.} = + connection: HttpClientConnectionRef) {. + async: (raises: []).} = ## Return connection back to the ``session``. let removeConnection = if HttpClientFlag.Http11Pipeline notin session.flags: @@ -751,7 +765,7 @@ proc releaseConnection(session: HttpSessionRef, HttpClientConnectionFlag.Response, HttpClientConnectionFlag.NoBody}) -proc releaseConnection(request: HttpClientRequestRef) {.async.} = +proc releaseConnection(request: HttpClientRequestRef) {.async: (raises: []).} = let session = request.session connection = request.connection @@ -763,7 +777,8 @@ proc releaseConnection(request: HttpClientRequestRef) {.async.} = if HttpClientConnectionFlag.Response notin connection.flags: await session.releaseConnection(connection) -proc releaseConnection(response: HttpClientResponseRef) {.async.} = +proc releaseConnection(response: HttpClientResponseRef) {. + async: (raises: []).} = let session = response.session connection = response.connection @@ -775,7 +790,7 @@ proc releaseConnection(response: HttpClientResponseRef) {.async.} = if HttpClientConnectionFlag.Request notin connection.flags: await session.releaseConnection(connection) -proc closeWait*(session: HttpSessionRef) {.async.} = +proc closeWait*(session: HttpSessionRef) {.async: (raises: []).} = ## Closes HTTP session object. ## ## This closes all the connections opened to remote servers. 
@@ -788,7 +803,7 @@ proc closeWait*(session: HttpSessionRef) {.async.} = pending.add(closeWait(conn)) await noCancel(allFutures(pending)) -proc sessionWatcher(session: HttpSessionRef) {.async.} = +proc sessionWatcher(session: HttpSessionRef) {.async: (raises: []).} = while true: let firstBreak = try: @@ -819,18 +834,19 @@ proc sessionWatcher(session: HttpSessionRef) {.async.} = var pending: seq[Future[void]] let secondBreak = try: - pending = idleConnections.mapIt(it.closeWait()) + for conn in idleConnections: + pending.add(conn.closeWait()) await allFutures(pending) false except CancelledError: # We still want to close connections to avoid socket leaks. - await allFutures(pending) + await noCancel(allFutures(pending)) true if secondBreak: break -proc closeWait*(request: HttpClientRequestRef) {.async.} = +proc closeWait*(request: HttpClientRequestRef) {.async: (raises: []).} = var pending: seq[FutureBase] if request.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}: request.state = HttpReqRespState.Closing @@ -845,7 +861,7 @@ proc closeWait*(request: HttpClientRequestRef) {.async.} = request.state = HttpReqRespState.Closed untrackCounter(HttpClientRequestTrackerName) -proc closeWait*(response: HttpClientResponseRef) {.async.} = +proc closeWait*(response: HttpClientResponseRef) {.async: (raises: []).} = var pending: seq[FutureBase] if response.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}: response.state = HttpReqRespState.Closing @@ -860,8 +876,10 @@ proc closeWait*(response: HttpClientResponseRef) {.async.} = response.state = HttpReqRespState.Closed untrackCounter(HttpClientResponseTrackerName) -proc prepareResponse(request: HttpClientRequestRef, data: openArray[byte] - ): HttpResult[HttpClientResponseRef] {.raises: [] .} = +proc prepareResponse( + request: HttpClientRequestRef, + data: openArray[byte] + ): HttpResult[HttpClientResponseRef] = ## Process response headers. let resp = parseResponse(data, false) if resp.failed(): @@ -972,7 +990,7 @@ proc prepareResponse(request: HttpClientRequestRef, data: openArray[byte] ok(res) proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpError]).} = var buffer: array[HttpMaxHeadersSize, byte] let timestamp = Moment.now() req.connection.setTimestamp(timestamp) @@ -984,8 +1002,9 @@ proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {. req.session.headersTimeout) except AsyncTimeoutError: raiseHttpReadError("Reading response headers timed out") - except AsyncStreamError: - raiseHttpReadError("Could not read response headers") + except AsyncStreamError as exc: + raiseHttpReadError( + "Could not read response headers, reason: " & $exc.msg) let response = prepareResponse(req, buffer.toOpenArray(0, bytesRead - 1)) if response.isErr(): @@ -999,8 +1018,7 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], - body: openArray[byte] = []): HttpClientRequestRef {. 
- raises: [].} = + body: openArray[byte] = []): HttpClientRequestRef = let res = HttpClientRequestRef( state: HttpReqRespState.Ready, session: session, meth: meth, version: version, flags: flags, headers: HttpTable.init(headers), @@ -1014,8 +1032,7 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], - body: openArray[byte] = []): HttpResult[HttpClientRequestRef] {. - raises: [].} = + body: openArray[byte] = []): HttpResult[HttpClientRequestRef] = let address = ? session.getAddress(parseUri(url)) let res = HttpClientRequestRef( state: HttpReqRespState.Ready, session: session, meth: meth, @@ -1029,14 +1046,14 @@ proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [] - ): HttpResult[HttpClientRequestRef] {.raises: [].} = + ): HttpResult[HttpClientRequestRef] = HttpClientRequestRef.new(session, url, MethodGet, version, flags, headers) proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [] - ): HttpClientRequestRef {.raises: [].} = + ): HttpClientRequestRef = HttpClientRequestRef.new(session, ha, MethodGet, version, flags, headers) proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, @@ -1044,7 +1061,7 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = [] - ): HttpResult[HttpClientRequestRef] {.raises: [].} = + ): HttpResult[HttpClientRequestRef] = HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers, body) @@ -1052,8 +1069,7 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], - body: openArray[char] = []): HttpResult[HttpClientRequestRef] {. - raises: [].} = + body: openArray[char] = []): HttpResult[HttpClientRequestRef] = HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers, body.toOpenArrayByte(0, len(body) - 1)) @@ -1061,8 +1077,7 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], - body: openArray[byte] = []): HttpClientRequestRef {. - raises: [].} = + body: openArray[byte] = []): HttpClientRequestRef = HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers, body) @@ -1070,13 +1085,11 @@ proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, headers: openArray[HttpHeaderTuple] = [], - body: openArray[char] = []): HttpClientRequestRef {. - raises: [].} = + body: openArray[char] = []): HttpClientRequestRef = HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers, body.toOpenArrayByte(0, len(body) - 1)) -proc prepareRequest(request: HttpClientRequestRef): string {. 
- raises: [].} = +proc prepareRequest(request: HttpClientRequestRef): string = template hasChunkedEncoding(request: HttpClientRequestRef): bool = toLowerAscii(request.headers.getString(TransferEncodingHeader)) == "chunked" @@ -1151,7 +1164,7 @@ proc prepareRequest(request: HttpClientRequestRef): string {. res proc send*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpError]).} = doAssert(request.state == HttpReqRespState.Ready, "Request's state is " & $request.state) let connection = @@ -1184,25 +1197,24 @@ proc send*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {. request.setDuration() request.setError(newHttpInterruptError()) raise exc - except AsyncStreamError: + except AsyncStreamError as exc: request.setDuration() - let error = newHttpWriteError("Could not send request headers") + let error = newHttpWriteError( + "Could not send request headers, reason: " & $exc.msg) request.setError(error) raise error - let resp = - try: - await request.getResponse() - except CancelledError as exc: - request.setError(newHttpInterruptError()) - raise exc - except HttpError as exc: - request.setError(exc) - raise exc - return resp + try: + await request.getResponse() + except CancelledError as exc: + request.setError(newHttpInterruptError()) + raise exc + except HttpError as exc: + request.setError(exc) + raise exc proc open*(request: HttpClientRequestRef): Future[HttpBodyWriter] {. - async.} = + async: (raises: [CancelledError, HttpError]).} = ## Start sending request's headers and return `HttpBodyWriter`, which can be ## used to send request's body. doAssert(request.state == HttpReqRespState.Ready, @@ -1232,8 +1244,9 @@ proc open*(request: HttpClientRequestRef): Future[HttpBodyWriter] {. request.setDuration() request.setError(newHttpInterruptError()) raise exc - except AsyncStreamError: - let error = newHttpWriteError("Could not send request headers") + except AsyncStreamError as exc: + let error = newHttpWriteError( + "Could not send request headers, reason: " & $exc.msg) request.setDuration() request.setError(error) raise error @@ -1255,10 +1268,10 @@ proc open*(request: HttpClientRequestRef): Future[HttpBodyWriter] {. request.writer = writer request.state = HttpReqRespState.Open request.connection.state = HttpClientConnectionState.RequestBodySending - return writer + writer proc finish*(request: HttpClientRequestRef): Future[HttpClientResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpError]).} = ## Finish sending request and receive response. doAssert(not(isNil(request.connection)), "Request missing connection instance") @@ -1295,7 +1308,8 @@ proc getNewLocation*(resp: HttpClientResponseRef): HttpResult[HttpAddress] = else: err("Location header is missing") -proc getBodyReader*(response: HttpClientResponseRef): HttpBodyReader = +proc getBodyReader*(response: HttpClientResponseRef): HttpBodyReader {. + raises: [HttpUseClosedError].} = ## Returns stream's reader instance which can be used to read response's body. ## ## Streams which was obtained using this procedure must be closed to avoid @@ -1324,7 +1338,8 @@ proc getBodyReader*(response: HttpClientResponseRef): HttpBodyReader = response.reader = reader response.reader -proc finish*(response: HttpClientResponseRef) {.async.} = +proc finish*(response: HttpClientResponseRef) {. + async: (raises: [HttpUseClosedError]).} = ## Finish receiving response. 
## ## Because ``finish()`` returns nothing, this operation become NOP for @@ -1343,7 +1358,7 @@ proc finish*(response: HttpClientResponseRef) {.async.} = response.setDuration() proc getBodyBytes*(response: HttpClientResponseRef): Future[seq[byte]] {. - async.} = + async: (raises: [CancelledError, HttpError]).} = ## Read all bytes from response ``response``. ## ## Note: This procedure performs automatic finishing for ``response``. @@ -1353,21 +1368,22 @@ proc getBodyBytes*(response: HttpClientResponseRef): Future[seq[byte]] {. await reader.closeWait() reader = nil await response.finish() - return data + data except CancelledError as exc: if not(isNil(reader)): await reader.closeWait() response.setError(newHttpInterruptError()) raise exc - except AsyncStreamError: + except AsyncStreamError as exc: + let error = newHttpReadError("Could not read response, reason: " & $exc.msg) if not(isNil(reader)): await reader.closeWait() - let error = newHttpReadError("Could not read response") response.setError(error) raise error proc getBodyBytes*(response: HttpClientResponseRef, - nbytes: int): Future[seq[byte]] {.async.} = + nbytes: int): Future[seq[byte]] {. + async: (raises: [CancelledError, HttpError]).} = ## Read all bytes (nbytes <= 0) or exactly `nbytes` bytes from response ## ``response``. ## @@ -1378,20 +1394,21 @@ proc getBodyBytes*(response: HttpClientResponseRef, await reader.closeWait() reader = nil await response.finish() - return data + data except CancelledError as exc: if not(isNil(reader)): await reader.closeWait() response.setError(newHttpInterruptError()) raise exc - except AsyncStreamError: + except AsyncStreamError as exc: + let error = newHttpReadError("Could not read response, reason: " & $exc.msg) if not(isNil(reader)): await reader.closeWait() - let error = newHttpReadError("Could not read response") response.setError(error) raise error -proc consumeBody*(response: HttpClientResponseRef): Future[int] {.async.} = +proc consumeBody*(response: HttpClientResponseRef): Future[int] {. + async: (raises: [CancelledError, HttpError]).} = ## Consume/discard response and return number of bytes consumed. ## ## Note: This procedure performs automatic finishing for ``response``. @@ -1401,16 +1418,17 @@ proc consumeBody*(response: HttpClientResponseRef): Future[int] {.async.} = await reader.closeWait() reader = nil await response.finish() - return res + res except CancelledError as exc: if not(isNil(reader)): await reader.closeWait() response.setError(newHttpInterruptError()) raise exc - except AsyncStreamError: + except AsyncStreamError as exc: + let error = newHttpReadError( + "Could not consume response, reason: " & $exc.msg) if not(isNil(reader)): await reader.closeWait() - let error = newHttpReadError("Could not read response") response.setError(error) raise error @@ -1460,7 +1478,7 @@ proc redirect*(request: HttpClientRequestRef, ok(res) proc fetch*(request: HttpClientRequestRef): Future[HttpResponseTuple] {. - async.} = + async: (raises: [CancelledError, HttpError]).} = var response: HttpClientResponseRef try: response = await request.send() @@ -1468,7 +1486,7 @@ proc fetch*(request: HttpClientRequestRef): Future[HttpResponseTuple] {. let status = response.status await response.closeWait() response = nil - return (status, buffer) + (status, buffer) except HttpError as exc: if not(isNil(response)): await response.closeWait() raise exc @@ -1477,7 +1495,7 @@ proc fetch*(request: HttpClientRequestRef): Future[HttpResponseTuple] {. 
raise exc proc fetch*(session: HttpSessionRef, url: Uri): Future[HttpResponseTuple] {. - async.} = + async: (raises: [CancelledError, HttpError]).} = ## Fetch resource pointed by ``url`` using HTTP GET method and ``session`` ## parameters. ## @@ -1519,28 +1537,34 @@ proc fetch*(session: HttpSessionRef, url: Uri): Future[HttpResponseTuple] {. request = redirect redirect = nil else: - let data = await response.getBodyBytes() - let code = response.status + let + data = await response.getBodyBytes() + code = response.status await response.closeWait() response = nil await request.closeWait() request = nil return (code, data) except CancelledError as exc: - if not(isNil(response)): await closeWait(response) - if not(isNil(request)): await closeWait(request) - if not(isNil(redirect)): await closeWait(redirect) + var pending: seq[Future[void]] + if not(isNil(response)): pending.add(closeWait(response)) + if not(isNil(request)): pending.add(closeWait(request)) + if not(isNil(redirect)): pending.add(closeWait(redirect)) + await noCancel(allFutures(pending)) raise exc except HttpError as exc: - if not(isNil(response)): await closeWait(response) - if not(isNil(request)): await closeWait(request) - if not(isNil(redirect)): await closeWait(redirect) + var pending: seq[Future[void]] + if not(isNil(response)): pending.add(closeWait(response)) + if not(isNil(request)): pending.add(closeWait(request)) + if not(isNil(redirect)): pending.add(closeWait(redirect)) + await noCancel(allFutures(pending)) raise exc proc getServerSentEvents*( response: HttpClientResponseRef, maxEventSize: int = -1 - ): Future[seq[ServerSentEvent]] {.async.} = + ): Future[seq[ServerSentEvent]] {. + async: (raises: [CancelledError, HttpError]).} = ## Read number of server-sent events (SSE) from HTTP response ``response``. ## ## ``maxEventSize`` - maximum size of events chunk in one message, use @@ -1628,8 +1652,14 @@ proc getServerSentEvents*( (i, false) - await reader.readMessage(predicate) + try: + await reader.readMessage(predicate) + except CancelledError as exc: + raise exc + except AsyncStreamError as exc: + raiseHttpReadError($exc.msg) + if not isNil(error): raise error - else: - return res + + res diff --git a/chronos/apps/http/httpcommon.nim b/chronos/apps/http/httpcommon.nim index da5e03f61..d2148fbf6 100644 --- a/chronos/apps/http/httpcommon.nim +++ b/chronos/apps/http/httpcommon.nim @@ -6,6 +6,9 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) + +{.push raises: [].} + import std/[strutils, uri] import results, httputils import ../../asyncloop, ../../asyncsync @@ -49,7 +52,7 @@ type HttpResultCode*[T] = Result[T, HttpCode] HttpDefect* = object of Defect - HttpError* = object of CatchableError + HttpError* = object of AsyncError HttpCriticalError* = object of HttpError code*: HttpCode HttpRecoverableError* = object of HttpError @@ -124,35 +127,43 @@ func toString*(error: HttpAddressErrorType): string = of HttpAddressErrorType.NoAddressResolved: "No address has been resolved" -proc raiseHttpCriticalError*(msg: string, - code = Http400) {.noinline, noreturn.} = +proc raiseHttpCriticalError*(msg: string, code = Http400) {. + noinline, noreturn, raises: [HttpCriticalError].} = raise (ref HttpCriticalError)(code: code, msg: msg) -proc raiseHttpDisconnectError*() {.noinline, noreturn.} = +proc raiseHttpDisconnectError*() {. 
+ noinline, noreturn, raises: [HttpDisconnectError].} = raise (ref HttpDisconnectError)(msg: "Remote peer disconnected") proc raiseHttpDefect*(msg: string) {.noinline, noreturn.} = raise (ref HttpDefect)(msg: msg) -proc raiseHttpConnectionError*(msg: string) {.noinline, noreturn.} = +proc raiseHttpConnectionError*(msg: string) {. + noinline, noreturn, raises: [HttpConnectionError].} = raise (ref HttpConnectionError)(msg: msg) -proc raiseHttpInterruptError*() {.noinline, noreturn.} = +proc raiseHttpInterruptError*() {. + noinline, noreturn, raises: [HttpInterruptError].} = raise (ref HttpInterruptError)(msg: "Connection was interrupted") -proc raiseHttpReadError*(msg: string) {.noinline, noreturn.} = +proc raiseHttpReadError*(msg: string) {. + noinline, noreturn, raises: [HttpReadError].} = raise (ref HttpReadError)(msg: msg) -proc raiseHttpProtocolError*(msg: string) {.noinline, noreturn.} = +proc raiseHttpProtocolError*(msg: string) {. + noinline, noreturn, raises: [HttpProtocolError].} = raise (ref HttpProtocolError)(msg: msg) -proc raiseHttpWriteError*(msg: string) {.noinline, noreturn.} = +proc raiseHttpWriteError*(msg: string) {. + noinline, noreturn, raises: [HttpWriteError].} = raise (ref HttpWriteError)(msg: msg) -proc raiseHttpRedirectError*(msg: string) {.noinline, noreturn.} = +proc raiseHttpRedirectError*(msg: string) {. + noinline, noreturn, raises: [HttpRedirectError].} = raise (ref HttpRedirectError)(msg: msg) -proc raiseHttpAddressError*(msg: string) {.noinline, noreturn.} = +proc raiseHttpAddressError*(msg: string) {. + noinline, noreturn, raises: [HttpAddressError].} = raise (ref HttpAddressError)(msg: msg) template newHttpInterruptError*(): ref HttpInterruptError = @@ -168,8 +179,7 @@ template newHttpUseClosedError*(): ref HttpUseClosedError = newException(HttpUseClosedError, "Connection was already closed") iterator queryParams*(query: string, - flags: set[QueryParamsFlag] = {}): KeyValueTuple {. - raises: [].} = + flags: set[QueryParamsFlag] = {}): KeyValueTuple = ## Iterate over url-encoded query string. for pair in query.split('&'): let items = pair.split('=', maxsplit = 1) @@ -182,9 +192,9 @@ iterator queryParams*(query: string, else: yield (decodeUrl(k), decodeUrl(v)) -func getTransferEncoding*(ch: openArray[string]): HttpResult[ - set[TransferEncodingFlags]] {. - raises: [].} = +func getTransferEncoding*( + ch: openArray[string] + ): HttpResult[set[TransferEncodingFlags]] = ## Parse value of multiple HTTP headers ``Transfer-Encoding`` and return ## it as set of ``TransferEncodingFlags``. var res: set[TransferEncodingFlags] = {} @@ -213,9 +223,9 @@ func getTransferEncoding*(ch: openArray[string]): HttpResult[ return err("Incorrect Transfer-Encoding value") ok(res) -func getContentEncoding*(ch: openArray[string]): HttpResult[ - set[ContentEncodingFlags]] {. - raises: [].} = +func getContentEncoding*( + ch: openArray[string] + ): HttpResult[set[ContentEncodingFlags]] = ## Parse value of multiple HTTP headers ``Content-Encoding`` and return ## it as set of ``ContentEncodingFlags``. var res: set[ContentEncodingFlags] = {} @@ -244,8 +254,7 @@ func getContentEncoding*(ch: openArray[string]): HttpResult[ return err("Incorrect Content-Encoding value") ok(res) -func getContentType*(ch: openArray[string]): HttpResult[ContentTypeData] {. - raises: [].} = +func getContentType*(ch: openArray[string]): HttpResult[ContentTypeData] = ## Check and prepare value of ``Content-Type`` header. 
if len(ch) == 0: err("No Content-Type values found") diff --git a/chronos/apps/http/httpdebug.nim b/chronos/apps/http/httpdebug.nim index d343265ba..7d52575f5 100644 --- a/chronos/apps/http/httpdebug.nim +++ b/chronos/apps/http/httpdebug.nim @@ -6,6 +6,9 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) + +{.push raises: [].} + import std/tables import results import ../../timer @@ -16,8 +19,6 @@ from ../../osdefs import SocketHandle from ../../transports/common import TransportAddress, ServerFlags export HttpClientScheme, SocketHandle, TransportAddress, ServerFlags, HttpState -{.push raises: [].} - type ConnectionType* {.pure.} = enum NonSecure, Secure diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index 1e307a07e..e8326ccd7 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -6,6 +6,9 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) + +{.push raises: [].} + import std/[tables, uri, strutils] import stew/[base10], httputils, results import ../../asyncloop, ../../asyncsync @@ -67,11 +70,11 @@ type HttpConnectionCallback* = proc(server: HttpServerRef, transp: StreamTransport): Future[HttpConnectionRef] {. - gcsafe, raises: [].} + async: (raises: [CancelledError, HttpConnectionError]), gcsafe.} HttpCloseConnectionCallback* = proc(connection: HttpConnectionRef): Future[void] {. - gcsafe, raises: [].} + async: (raises: []), gcsafe.} HttpConnectionHolder* = object of RootObj connection*: HttpConnectionRef @@ -94,7 +97,7 @@ type flags*: set[HttpServerFlags] socketFlags*: set[ServerFlags] connections*: OrderedTable[string, HttpConnectionHolderRef] - acceptLoop*: Future[void] + acceptLoop*: Future[void].Raising([]) lifetime*: Future[void] headersTimeout*: Duration bufferSize*: int @@ -157,13 +160,11 @@ type proc init(htype: typedesc[HttpProcessError], error: HttpServerError, exc: ref CatchableError, remote: Opt[TransportAddress], - code: HttpCode): HttpProcessError {. - raises: [].} = + code: HttpCode): HttpProcessError = HttpProcessError(kind: error, exc: exc, remote: remote, code: code) proc init(htype: typedesc[HttpProcessError], - error: HttpServerError): HttpProcessError {. - raises: [].} = + error: HttpServerError): HttpProcessError = HttpProcessError(kind: error) proc new(htype: typedesc[HttpConnectionHolderRef], server: HttpServerRef, @@ -176,8 +177,8 @@ proc new(htype: typedesc[HttpConnectionHolderRef], server: HttpServerRef, proc error*(e: HttpProcessError): HttpServerError = e.kind proc createConnection(server: HttpServerRef, - transp: StreamTransport): Future[HttpConnectionRef] {. - gcsafe.} + transp: StreamTransport): Future[HttpConnectionRef] {. + async: (raises: [CancelledError, HttpConnectionError]).} proc new*(htype: typedesc[HttpServerRef], address: TransportAddress, @@ -192,8 +193,7 @@ proc new*(htype: typedesc[HttpServerRef], httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, maxRequestBodySize: int = 1_048_576, - dualstack = DualStackType.Auto): HttpResult[HttpServerRef] {. 
- raises: [].} = + dualstack = DualStackType.Auto): HttpResult[HttpServerRef] = let serverUri = if len(serverUri.hostname) > 0: @@ -210,8 +210,6 @@ proc new*(htype: typedesc[HttpServerRef], backlog = backlogSize, dualstack = dualstack) except TransportOsError as exc: return err(exc.msg) - except CatchableError as exc: - return err(exc.msg) var res = HttpServerRef( address: serverInstance.localAddress(), @@ -259,13 +257,13 @@ proc getResponseFlags(req: HttpRequestRef): set[HttpResponseFlags] = else: defaultFlags -proc getResponseVersion(reqFence: RequestFence): HttpVersion {.raises: [].} = +proc getResponseVersion(reqFence: RequestFence): HttpVersion = if reqFence.isErr(): HttpVersion11 else: reqFence.get().version -proc getResponse*(req: HttpRequestRef): HttpResponseRef {.raises: [].} = +proc getResponse*(req: HttpRequestRef): HttpResponseRef = if req.response.isNone(): var resp = HttpResponseRef( status: Http200, @@ -286,30 +284,29 @@ proc getHostname*(server: HttpServerRef): string = else: server.baseUri.hostname -proc defaultResponse*(): HttpResponseRef {.raises: [].} = +proc defaultResponse*(): HttpResponseRef = ## Create an empty response to return when request processor got no request. HttpResponseRef(state: HttpResponseState.Default, version: HttpVersion11) -proc dumbResponse*(): HttpResponseRef {.raises: [], +proc dumbResponse*(): HttpResponseRef {. deprecated: "Please use defaultResponse() instead".} = ## Create an empty response to return when request processor got no request. defaultResponse() -proc getId(transp: StreamTransport): Result[string, string] {.inline.} = +proc getId(transp: StreamTransport): Result[string, string] {.inline.} = ## Returns string unique transport's identifier as string. try: ok($transp.remoteAddress() & "_" & $transp.localAddress()) except TransportOsError as exc: err($exc.msg) -proc hasBody*(request: HttpRequestRef): bool {.raises: [].} = +proc hasBody*(request: HttpRequestRef): bool = ## Returns ``true`` if request has body. request.requestFlags * {HttpRequestFlags.BoundBody, HttpRequestFlags.UnboundBody} != {} proc prepareRequest(conn: HttpConnectionRef, - req: HttpRequestHeader): HttpResultCode[HttpRequestRef] {. - raises: [].}= + req: HttpRequestHeader): HttpResultCode[HttpRequestRef] = var request = HttpRequestRef(connection: conn, state: HttpState.Alive) if req.version notin {HttpVersion10, HttpVersion11}: @@ -450,7 +447,8 @@ proc getBodyReader*(request: HttpRequestRef): HttpResult[HttpBodyReader] = else: err("Request do not have body available") -proc handleExpect*(request: HttpRequestRef) {.async.} = +proc handleExpect*(request: HttpRequestRef) {. + async: (raises: [CancelledError, HttpError]).} = ## Handle expectation for ``Expect`` header. ## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expect if HttpServerFlags.NoExpectHandler notin request.connection.server.flags: @@ -461,10 +459,12 @@ proc handleExpect*(request: HttpRequestRef) {.async.} = await request.connection.writer.write(message) except CancelledError as exc: raise exc - except AsyncStreamWriteError, AsyncStreamIncompleteError: - raiseHttpCriticalError("Unable to send `100-continue` response") + except AsyncStreamError as exc: + raiseHttpCriticalError( + "Unable to send `100-continue` response, reason: " & $exc.msg) -proc getBody*(request: HttpRequestRef): Future[seq[byte]] {.async.} = +proc getBody*(request: HttpRequestRef): Future[seq[byte]] {. + async: (raises: [CancelledError, HttpError]).} = ## Obtain request's body as sequence of bytes. 
let bodyReader = request.getBodyReader() if bodyReader.isErr(): @@ -486,12 +486,18 @@ proc getBody*(request: HttpRequestRef): Future[seq[byte]] {.async.} = if not(isNil(reader)): await reader.closeWait() raise exc - except AsyncStreamError: + except HttpError as exc: if not(isNil(reader)): await reader.closeWait() - raiseHttpCriticalError("Unable to read request's body") + raise exc + except AsyncStreamError as exc: + let msg = "Unable to read request's body, reason: " & $exc.msg + if not(isNil(reader)): + await reader.closeWait() + raiseHttpCriticalError(msg) -proc consumeBody*(request: HttpRequestRef): Future[void] {.async.} = +proc consumeBody*(request: HttpRequestRef): Future[void] {. + async: (raises: [CancelledError, HttpError]).} = ## Consume/discard request's body. let bodyReader = request.getBodyReader() if bodyReader.isErr(): @@ -513,10 +519,15 @@ proc consumeBody*(request: HttpRequestRef): Future[void] {.async.} = if not(isNil(reader)): await reader.closeWait() raise exc - except AsyncStreamError: + except HttpError as exc: + if not(isNil(reader)): + await reader.closeWait() + raise exc + except AsyncStreamError as exc: + let msg = "Unable to consume request's body, reason: " & $exc.msg if not(isNil(reader)): await reader.closeWait() - raiseHttpCriticalError("Unable to read request's body") + raiseHttpCriticalError(msg) proc getAcceptInfo*(request: HttpRequestRef): Result[AcceptInfo, cstring] = ## Returns value of `Accept` header as `AcceptInfo` object. @@ -636,7 +647,8 @@ proc preferredContentType*(request: HttpRequestRef, proc sendErrorResponse(conn: HttpConnectionRef, version: HttpVersion, code: HttpCode, keepAlive = true, datatype = "text/text", - databody = "") {.async.} = + databody = "") {. + async: (raises: [CancelledError]).} = var answer = $version & " " & $code & "\r\n" answer.add(DateHeader) answer.add(": ") @@ -664,7 +676,7 @@ proc sendErrorResponse(conn: HttpConnectionRef, version: HttpVersion, await conn.writer.write(answer) except CancelledError as exc: raise exc - except CatchableError: + except AsyncStreamError: # We ignore errors here, because we indicating error already. discard @@ -672,7 +684,7 @@ proc sendErrorResponse( conn: HttpConnectionRef, reqFence: RequestFence, respError: HttpProcessError - ): Future[HttpProcessExitType] {.async.} = + ): Future[HttpProcessExitType] {.async: (raises: []).} = let version = getResponseVersion(reqFence) try: if reqFence.isOk(): @@ -694,14 +706,12 @@ proc sendErrorResponse( HttpProcessExitType.Graceful except CancelledError: HttpProcessExitType.Immediate - except CatchableError: - HttpProcessExitType.Immediate proc sendDefaultResponse( conn: HttpConnectionRef, reqFence: RequestFence, response: HttpResponseRef - ): Future[HttpProcessExitType] {.async.} = + ): Future[HttpProcessExitType] {.async: (raises: []).} = let version = getResponseVersion(reqFence) keepConnection = @@ -772,7 +782,8 @@ proc sendDefaultResponse( except CatchableError: HttpProcessExitType.Immediate -proc getRequest(conn: HttpConnectionRef): Future[HttpRequestRef] {.async.} = +proc getRequest(conn: HttpConnectionRef): Future[HttpRequestRef] {. 
+ async: (raises: [CancelledError, HttpError]).} = try: conn.buffer.setLen(conn.server.maxHeadersSize) let res = await conn.reader.readUntil(addr conn.buffer[0], len(conn.buffer), @@ -787,10 +798,10 @@ proc getRequest(conn: HttpConnectionRef): Future[HttpRequestRef] {.async.} = raiseHttpCriticalError("Invalid request received", res.error) else: return res.get() - except AsyncStreamIncompleteError, AsyncStreamReadError: - raiseHttpDisconnectError() except AsyncStreamLimitError: raiseHttpCriticalError("Maximum size of request headers reached", Http431) + except AsyncStreamError: + raiseHttpDisconnectError() proc init*(value: var HttpConnection, server: HttpServerRef, transp: StreamTransport) = @@ -803,7 +814,7 @@ proc init*(value: var HttpConnection, server: HttpServerRef, mainWriter: newAsyncStreamWriter(transp) ) -proc closeUnsecureConnection(conn: HttpConnectionRef) {.async.} = +proc closeUnsecureConnection(conn: HttpConnectionRef) {.async: (raises: []).} = if conn.state == HttpState.Alive: conn.state = HttpState.Closing var pending: seq[Future[void]] @@ -826,14 +837,20 @@ proc new(ht: typedesc[HttpConnectionRef], server: HttpServerRef, trackCounter(HttpServerUnsecureConnectionTrackerName) res -proc gracefulCloseWait*(conn: HttpConnectionRef) {.async.} = - await noCancel(conn.transp.shutdownWait()) +proc gracefulCloseWait*(conn: HttpConnectionRef) {.async: (raises: []).} = + try: + await noCancel(conn.transp.shutdownWait()) + except TransportError: + # We try to gracefully close connection, so we ignore any errors here, + # because right after this operation we closing connection. + discard await conn.closeCb(conn) -proc closeWait*(conn: HttpConnectionRef): Future[void] = +proc closeWait*(conn: HttpConnectionRef): Future[void] {. + async: (raw: true, raises: []).} = conn.closeCb(conn) -proc closeWait*(req: HttpRequestRef) {.async.} = +proc closeWait*(req: HttpRequestRef) {.async: (raises: []).} = if req.state == HttpState.Alive: if req.response.isSome(): req.state = HttpState.Closing @@ -847,8 +864,8 @@ proc closeWait*(req: HttpRequestRef) {.async.} = proc createConnection(server: HttpServerRef, transp: StreamTransport): Future[HttpConnectionRef] {. - async.} = - return HttpConnectionRef.new(server, transp) + async: (raises: [CancelledError, HttpConnectionError]).} = + HttpConnectionRef.new(server, transp) proc `keepalive=`*(resp: HttpResponseRef, value: bool) = doAssert(resp.state == HttpResponseState.Empty) @@ -857,25 +874,23 @@ proc `keepalive=`*(resp: HttpResponseRef, value: bool) = else: resp.flags.excl(HttpResponseFlags.KeepAlive) -proc keepalive*(resp: HttpResponseRef): bool {.raises: [].} = +proc keepalive*(resp: HttpResponseRef): bool = HttpResponseFlags.KeepAlive in resp.flags -proc getRemoteAddress(transp: StreamTransport): Opt[TransportAddress] {. - raises: [].} = +proc getRemoteAddress(transp: StreamTransport): Opt[TransportAddress] = if isNil(transp): return Opt.none(TransportAddress) try: Opt.some(transp.remoteAddress()) - except CatchableError: + except TransportOsError: Opt.none(TransportAddress) -proc getRemoteAddress(connection: HttpConnectionRef): Opt[TransportAddress] {. - raises: [].} = +proc getRemoteAddress(connection: HttpConnectionRef): Opt[TransportAddress] = if isNil(connection): return Opt.none(TransportAddress) getRemoteAddress(connection.transp) proc getResponseFence*(connection: HttpConnectionRef, reqFence: RequestFence): Future[ResponseFence] {. 
- async.} = + async: (raises: []).} = try: let res = await connection.server.processCallback(reqFence) ResponseFence.ok(res) @@ -897,7 +912,7 @@ proc getResponseFence*(connection: HttpConnectionRef, proc getResponseFence*(server: HttpServerRef, connFence: ConnectionFence): Future[ResponseFence] {. - async.} = + async: (raises: []).} = doAssert(connFence.isErr()) try: let @@ -922,7 +937,7 @@ proc getResponseFence*(server: HttpServerRef, proc getRequestFence*(server: HttpServerRef, connection: HttpConnectionRef): Future[RequestFence] {. - async.} = + async: (raises: []).} = try: let res = if server.headersTimeout.isInfinite(): @@ -956,7 +971,7 @@ proc getRequestFence*(server: HttpServerRef, proc getConnectionFence*(server: HttpServerRef, transp: StreamTransport): Future[ConnectionFence] {. - async.} = + async: (raises: []).} = try: let res = await server.createConnCallback(server, transp) ConnectionFence.ok(res) @@ -975,7 +990,8 @@ proc getConnectionFence*(server: HttpServerRef, proc processRequest(server: HttpServerRef, connection: HttpConnectionRef, - connId: string): Future[HttpProcessExitType] {.async.} = + connId: string): Future[HttpProcessExitType] {. + async: (raises: []).} = let requestFence = await getRequestFence(server, connection) if requestFence.isErr(): case requestFence.error.kind @@ -1005,7 +1021,7 @@ proc processRequest(server: HttpServerRef, res -proc processLoop(holder: HttpConnectionHolderRef) {.async.} = +proc processLoop(holder: HttpConnectionHolderRef) {.async: (raises: []).} = let server = holder.server transp = holder.transp @@ -1042,7 +1058,7 @@ proc processLoop(holder: HttpConnectionHolderRef) {.async.} = server.connections.del(connectionId) -proc acceptClientLoop(server: HttpServerRef) {.async.} = +proc acceptClientLoop(server: HttpServerRef) {.async: (raises: []).} = var runLoop = true while runLoop: try: @@ -1067,7 +1083,7 @@ proc acceptClientLoop(server: HttpServerRef) {.async.} = # Critical, cancellation or unexpected error runLoop = false -proc state*(server: HttpServerRef): HttpServerState {.raises: [].} = +proc state*(server: HttpServerRef): HttpServerState = ## Returns current HTTP server's state. if server.lifetime.finished(): ServerClosed @@ -1085,12 +1101,12 @@ proc start*(server: HttpServerRef) = if server.state == ServerStopped: server.acceptLoop = acceptClientLoop(server) -proc stop*(server: HttpServerRef) {.async.} = +proc stop*(server: HttpServerRef) {.async: (raises: []).} = ## Stop HTTP server from accepting new connections. if server.state == ServerRunning: await server.acceptLoop.cancelAndWait() -proc drop*(server: HttpServerRef) {.async.} = +proc drop*(server: HttpServerRef) {.async: (raises: []).} = ## Drop all pending HTTP connections. var pending: seq[Future[void]] if server.state in {ServerStopped, ServerRunning}: @@ -1100,7 +1116,7 @@ proc drop*(server: HttpServerRef) {.async.} = await noCancel(allFutures(pending)) server.connections.clear() -proc closeWait*(server: HttpServerRef) {.async.} = +proc closeWait*(server: HttpServerRef) {.async: (raises: []).} = ## Stop HTTP server and drop all the pending connections. if server.state != ServerClosed: await server.stop() @@ -1108,7 +1124,8 @@ proc closeWait*(server: HttpServerRef) {.async.} = await server.instance.closeWait() server.lifetime.complete() -proc join*(server: HttpServerRef): Future[void] = +proc join*(server: HttpServerRef): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = ## Wait until HTTP server will not be closed. 
var retFuture = newFuture[void]("http.server.join") @@ -1128,8 +1145,7 @@ proc join*(server: HttpServerRef): Future[void] = retFuture -proc getMultipartReader*(req: HttpRequestRef): HttpResult[MultiPartReaderRef] {. - raises: [].} = +proc getMultipartReader*(req: HttpRequestRef): HttpResult[MultiPartReaderRef] = ## Create new MultiPartReader interface for specific request. if req.meth in PostMethods: if MultipartForm in req.requestFlags: @@ -1144,7 +1160,8 @@ proc getMultipartReader*(req: HttpRequestRef): HttpResult[MultiPartReaderRef] {. else: err("Request's method do not supports multipart") -proc post*(req: HttpRequestRef): Future[HttpTable] {.async.} = +proc post*(req: HttpRequestRef): Future[HttpTable] {. + async: (raises: [CancelledError, HttpError]).} = ## Return POST parameters if req.postTable.isSome(): return req.postTable.get() @@ -1224,31 +1241,28 @@ proc post*(req: HttpRequestRef): Future[HttpTable] {.async.} = elif HttpRequestFlags.UnboundBody in req.requestFlags: raiseHttpCriticalError("Unsupported request body") -proc setHeader*(resp: HttpResponseRef, key, value: string) {. - raises: [].} = +proc setHeader*(resp: HttpResponseRef, key, value: string) = ## Sets value of header ``key`` to ``value``. doAssert(resp.state == HttpResponseState.Empty) resp.headersTable.set(key, value) -proc setHeaderDefault*(resp: HttpResponseRef, key, value: string) {. - raises: [].} = +proc setHeaderDefault*(resp: HttpResponseRef, key, value: string) = ## Sets value of header ``key`` to ``value``, only if header ``key`` is not ## present in the headers table. discard resp.headersTable.hasKeyOrPut(key, value) -proc addHeader*(resp: HttpResponseRef, key, value: string) {. - raises: [].} = +proc addHeader*(resp: HttpResponseRef, key, value: string) = ## Adds value ``value`` to header's ``key`` value. doAssert(resp.state == HttpResponseState.Empty) resp.headersTable.add(key, value) proc getHeader*(resp: HttpResponseRef, key: string, - default: string = ""): string {.raises: [].} = + default: string = ""): string = ## Returns value of header with name ``name`` or ``default``, if header is ## not present in the table. resp.headersTable.getString(key, default) -proc hasHeader*(resp: HttpResponseRef, key: string): bool {.raises: [].} = +proc hasHeader*(resp: HttpResponseRef, key: string): bool = ## Returns ``true`` if header with name ``key`` present in the headers table. key in resp.headersTable @@ -1267,8 +1281,7 @@ func createHeaders(resp: HttpResponseRef): string = answer.add("\r\n") answer -proc prepareLengthHeaders(resp: HttpResponseRef, length: int): string {. - raises: [].}= +proc prepareLengthHeaders(resp: HttpResponseRef, length: int): string = if not(resp.hasHeader(DateHeader)): resp.setHeader(DateHeader, httpDate()) if length > 0: @@ -1285,8 +1298,7 @@ proc prepareLengthHeaders(resp: HttpResponseRef, length: int): string {. resp.setHeader(ConnectionHeader, "close") resp.createHeaders() -proc prepareChunkedHeaders(resp: HttpResponseRef): string {. - raises: [].} = +proc prepareChunkedHeaders(resp: HttpResponseRef): string = if not(resp.hasHeader(DateHeader)): resp.setHeader(DateHeader, httpDate()) if not(resp.hasHeader(ContentTypeHeader)): @@ -1302,8 +1314,7 @@ proc prepareChunkedHeaders(resp: HttpResponseRef): string {. resp.setHeader(ConnectionHeader, "close") resp.createHeaders() -proc prepareServerSideEventHeaders(resp: HttpResponseRef): string {. 
- raises: [].} = +proc prepareServerSideEventHeaders(resp: HttpResponseRef): string = if not(resp.hasHeader(DateHeader)): resp.setHeader(DateHeader, httpDate()) if not(resp.hasHeader(ContentTypeHeader)): @@ -1315,8 +1326,7 @@ proc prepareServerSideEventHeaders(resp: HttpResponseRef): string {. resp.setHeader(ConnectionHeader, "close") resp.createHeaders() -proc preparePlainHeaders(resp: HttpResponseRef): string {. - raises: [].} = +proc preparePlainHeaders(resp: HttpResponseRef): string = if not(resp.hasHeader(DateHeader)): resp.setHeader(DateHeader, httpDate()) if not(resp.hasHeader(ServerHeader)): @@ -1326,7 +1336,8 @@ proc preparePlainHeaders(resp: HttpResponseRef): string {. resp.setHeader(ConnectionHeader, "close") resp.createHeaders() -proc sendBody*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {.async.} = +proc sendBody*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {. + async: (raises: [CancelledError, HttpError]).} = ## Send HTTP response at once by using bytes pointer ``pbytes`` and length ## ``nbytes``. doAssert(not(isNil(pbytes)), "pbytes must not be nil") @@ -1343,11 +1354,12 @@ proc sendBody*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {.async.} = except CancelledError as exc: resp.state = HttpResponseState.Cancelled raise exc - except AsyncStreamWriteError, AsyncStreamIncompleteError: + except AsyncStreamError as exc: resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response") + raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) -proc sendBody*(resp: HttpResponseRef, data: ByteChar) {.async.} = +proc sendBody*(resp: HttpResponseRef, data: ByteChar) {. + async: (raises: [CancelledError, HttpError]).} = ## Send HTTP response at once by using data ``data``. checkPending(resp) let responseHeaders = resp.prepareLengthHeaders(len(data)) @@ -1361,11 +1373,12 @@ proc sendBody*(resp: HttpResponseRef, data: ByteChar) {.async.} = except CancelledError as exc: resp.state = HttpResponseState.Cancelled raise exc - except AsyncStreamWriteError, AsyncStreamIncompleteError: + except AsyncStreamError as exc: resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response") + raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) -proc sendError*(resp: HttpResponseRef, code: HttpCode, body = "") {.async.} = +proc sendError*(resp: HttpResponseRef, code: HttpCode, body = "") {. + async: (raises: [CancelledError, HttpError]).} = ## Send HTTP error status response. checkPending(resp) resp.status = code @@ -1380,12 +1393,13 @@ proc sendError*(resp: HttpResponseRef, code: HttpCode, body = "") {.async.} = except CancelledError as exc: resp.state = HttpResponseState.Cancelled raise exc - except AsyncStreamWriteError, AsyncStreamIncompleteError: + except AsyncStreamError as exc: resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response") + raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) proc prepare*(resp: HttpResponseRef, - streamType = HttpResponseStreamType.Chunked) {.async.} = + streamType = HttpResponseStreamType.Chunked) {. + async: (raises: [CancelledError, HttpError]).} = ## Prepare for HTTP stream response. ## ## Such responses will be sent chunk by chunk using ``chunked`` encoding. 
@@ -1412,27 +1426,31 @@ proc prepare*(resp: HttpResponseRef, except CancelledError as exc: resp.state = HttpResponseState.Cancelled raise exc - except AsyncStreamWriteError, AsyncStreamIncompleteError: + except AsyncStreamError as exc: resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response") + raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) -proc prepareChunked*(resp: HttpResponseRef): Future[void] = +proc prepareChunked*(resp: HttpResponseRef): Future[void] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = ## Prepare for HTTP chunked stream response. ## ## Such responses will be sent chunk by chunk using ``chunked`` encoding. resp.prepare(HttpResponseStreamType.Chunked) -proc preparePlain*(resp: HttpResponseRef): Future[void] = +proc preparePlain*(resp: HttpResponseRef): Future[void] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = ## Prepare for HTTP plain stream response. ## ## Such responses will be sent without any encoding. resp.prepare(HttpResponseStreamType.Plain) -proc prepareSSE*(resp: HttpResponseRef): Future[void] = +proc prepareSSE*(resp: HttpResponseRef): Future[void] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = ## Prepare for HTTP server-side event stream response. resp.prepare(HttpResponseStreamType.SSE) -proc send*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {.async.} = +proc send*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {. + async: (raises: [CancelledError, HttpError]).} = ## Send single chunk of data pointed by ``pbytes`` and ``nbytes``. doAssert(not(isNil(pbytes)), "pbytes must not be nil") doAssert(nbytes >= 0, "nbytes should be bigger or equal to zero") @@ -1447,11 +1465,12 @@ proc send*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {.async.} = except CancelledError as exc: resp.state = HttpResponseState.Cancelled raise exc - except AsyncStreamWriteError, AsyncStreamIncompleteError: + except AsyncStreamError as exc: resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response") + raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) -proc send*(resp: HttpResponseRef, data: ByteChar) {.async.} = +proc send*(resp: HttpResponseRef, data: ByteChar) {. + async: (raises: [CancelledError, HttpError]).} = ## Send single chunk of data ``data``. if HttpResponseFlags.Stream notin resp.flags: raiseHttpCriticalError("Response was not prepared") @@ -1464,19 +1483,22 @@ proc send*(resp: HttpResponseRef, data: ByteChar) {.async.} = except CancelledError as exc: resp.state = HttpResponseState.Cancelled raise exc - except AsyncStreamWriteError, AsyncStreamIncompleteError: + except AsyncStreamError as exc: resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response") + raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) proc sendChunk*(resp: HttpResponseRef, pbytes: pointer, - nbytes: int): Future[void] = + nbytes: int): Future[void] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = resp.send(pbytes, nbytes) -proc sendChunk*(resp: HttpResponseRef, data: ByteChar): Future[void] = +proc sendChunk*(resp: HttpResponseRef, data: ByteChar): Future[void] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = resp.send(data) proc sendEvent*(resp: HttpResponseRef, eventName: string, - data: string): Future[void] = + data: string): Future[void] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]).} = ## Send server-side event with name ``eventName`` and payload ``data`` to ## remote peer. let data = @@ -1492,7 +1514,8 @@ proc sendEvent*(resp: HttpResponseRef, eventName: string, res resp.send(data) -proc finish*(resp: HttpResponseRef) {.async.} = +proc finish*(resp: HttpResponseRef) {. + async: (raises: [CancelledError, HttpError]).} = ## Sending last chunk of data, so it will indicate end of HTTP response. if HttpResponseFlags.Stream notin resp.flags: raiseHttpCriticalError("Response was not prepared") @@ -1505,12 +1528,13 @@ proc finish*(resp: HttpResponseRef) {.async.} = except CancelledError as exc: resp.state = HttpResponseState.Cancelled raise exc - except AsyncStreamWriteError, AsyncStreamIncompleteError: + except AsyncStreamError as exc: resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response") + raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) proc respond*(req: HttpRequestRef, code: HttpCode, content: ByteChar, - headers: HttpTable): Future[HttpResponseRef] {.async.} = + headers: HttpTable): Future[HttpResponseRef] {. + async: (raises: [CancelledError, HttpError]).} = ## Responds to the request with the specified ``HttpCode``, HTTP ``headers`` ## and ``content``. let response = req.getResponse() @@ -1518,19 +1542,22 @@ proc respond*(req: HttpRequestRef, code: HttpCode, content: ByteChar, for k, v in headers.stringItems(): response.addHeader(k, v) await response.sendBody(content) - return response + response proc respond*(req: HttpRequestRef, code: HttpCode, - content: ByteChar): Future[HttpResponseRef] = + content: ByteChar): Future[HttpResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = ## Responds to the request with specified ``HttpCode`` and ``content``. respond(req, code, content, HttpTable.init()) -proc respond*(req: HttpRequestRef, code: HttpCode): Future[HttpResponseRef] = +proc respond*(req: HttpRequestRef, code: HttpCode): Future[HttpResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = ## Responds to the request with specified ``HttpCode`` only. respond(req, code, "", HttpTable.init()) proc redirect*(req: HttpRequestRef, code: HttpCode, - location: string, headers: HttpTable): Future[HttpResponseRef] = + location: string, headers: HttpTable): Future[HttpResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = ## Responds to the request with redirection to location ``location`` and ## additional headers ``headers``. ## @@ -1541,7 +1568,8 @@ proc redirect*(req: HttpRequestRef, code: HttpCode, respond(req, code, "", mheaders) proc redirect*(req: HttpRequestRef, code: HttpCode, - location: Uri, headers: HttpTable): Future[HttpResponseRef] = + location: Uri, headers: HttpTable): Future[HttpResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = ## Responds to the request with redirection to location ``location`` and ## additional headers ``headers``. ## @@ -1550,12 +1578,14 @@ proc redirect*(req: HttpRequestRef, code: HttpCode, redirect(req, code, $location, headers) proc redirect*(req: HttpRequestRef, code: HttpCode, - location: Uri): Future[HttpResponseRef] = + location: Uri): Future[HttpResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = ## Responds to the request with redirection to location ``location``. 
redirect(req, code, location, HttpTable.init()) proc redirect*(req: HttpRequestRef, code: HttpCode, - location: string): Future[HttpResponseRef] = + location: string): Future[HttpResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]).} = ## Responds to the request with redirection to location ``location``. redirect(req, code, location, HttpTable.init()) @@ -1569,16 +1599,20 @@ proc responded*(req: HttpRequestRef): bool = else: false -proc remoteAddress*(conn: HttpConnectionRef): TransportAddress = +proc remoteAddress*(conn: HttpConnectionRef): TransportAddress {. + raises: [HttpAddressError].} = ## Returns address of the remote host that established connection ``conn``. - conn.transp.remoteAddress() + try: + conn.transp.remoteAddress() + except TransportOsError as exc: + raiseHttpAddressError($exc.msg) -proc remoteAddress*(request: HttpRequestRef): TransportAddress = +proc remoteAddress*(request: HttpRequestRef): TransportAddress {. + raises: [HttpAddressError].} = ## Returns address of the remote host that made request ``request``. request.connection.remoteAddress() -proc requestInfo*(req: HttpRequestRef, contentType = "text/text"): string {. - raises: [].} = +proc requestInfo*(req: HttpRequestRef, contentType = "text/text"): string = ## Returns comprehensive information about request for specific content ## type. ## diff --git a/chronos/apps/http/multipart.nim b/chronos/apps/http/multipart.nim index b936996bf..83a4b566e 100644 --- a/chronos/apps/http/multipart.nim +++ b/chronos/apps/http/multipart.nim @@ -7,6 +7,9 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) + +{.push raises: [].} + import std/[monotimes, strutils] import results, httputils import ../../asyncloop @@ -71,8 +74,7 @@ type BChar* = byte | char -proc startsWith(s, prefix: openArray[byte]): bool {. - raises: [].} = +proc startsWith(s, prefix: openArray[byte]): bool = # This procedure is copy of strutils.startsWith() procedure, however, # it is intended to work with arrays of bytes, but not with strings. var i = 0 @@ -81,8 +83,7 @@ proc startsWith(s, prefix: openArray[byte]): bool {. if i >= len(s) or s[i] != prefix[i]: return false inc(i) -proc parseUntil(s, until: openArray[byte]): int {. - raises: [].} = +proc parseUntil(s, until: openArray[byte]): int = # This procedure is copy of parseutils.parseUntil() procedure, however, # it is intended to work with arrays of bytes, but not with strings. var i = 0 @@ -95,8 +96,7 @@ proc parseUntil(s, until: openArray[byte]): int {. inc(i) -1 -func setPartNames(part: var MultiPart): HttpResult[void] {. - raises: [].} = +func setPartNames(part: var MultiPart): HttpResult[void] = if part.headers.count("content-disposition") != 1: return err("Content-Disposition header is incorrect") var header = part.headers.getString("content-disposition") @@ -120,8 +120,7 @@ func setPartNames(part: var MultiPart): HttpResult[void] {. proc init*[A: BChar, B: BChar](mpt: typedesc[MultiPartReader], buffer: openArray[A], - boundary: openArray[B]): MultiPartReader {. - raises: [].} = + boundary: openArray[B]): MultiPartReader = ## Create new MultiPartReader instance with `buffer` interface. ## ## ``buffer`` - is buffer which will be used to read data. @@ -145,8 +144,7 @@ proc init*[A: BChar, B: BChar](mpt: typedesc[MultiPartReader], proc new*[B: BChar](mpt: typedesc[MultiPartReaderRef], stream: HttpBodyReader, boundary: openArray[B], - partHeadersMaxSize = 4096): MultiPartReaderRef {. 
- raises: [].} = + partHeadersMaxSize = 4096): MultiPartReaderRef = ## Create new MultiPartReader instance with `stream` interface. ## ## ``stream`` is stream used to read data. @@ -173,7 +171,8 @@ proc new*[B: BChar](mpt: typedesc[MultiPartReaderRef], stream: stream, offset: 0, boundary: fboundary, buffer: newSeq[byte](partHeadersMaxSize)) -proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {.async.} = +proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {. + async: (raises: [CancelledError, HttpCriticalError]).} = doAssert(mpr.kind == MultiPartSource.Stream) if mpr.firstTime: try: @@ -240,7 +239,8 @@ proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {.async.} = else: raiseHttpCriticalError(UnableToReadMultipartBody) -proc getBody*(mp: MultiPart): Future[seq[byte]] {.async.} = +proc getBody*(mp: MultiPart): Future[seq[byte]] {. + async: (raises: [CancelledError, HttpCriticalError]).} = ## Get multipart's ``mp`` value as sequence of bytes. case mp.kind of MultiPartSource.Stream: @@ -255,7 +255,8 @@ proc getBody*(mp: MultiPart): Future[seq[byte]] {.async.} = of MultiPartSource.Buffer: return mp.buffer -proc consumeBody*(mp: MultiPart) {.async.} = +proc consumeBody*(mp: MultiPart) {. + async: (raises: [CancelledError, HttpCriticalError]).} = ## Discard multipart's ``mp`` value. case mp.kind of MultiPartSource.Stream: @@ -269,8 +270,7 @@ proc consumeBody*(mp: MultiPart) {.async.} = of MultiPartSource.Buffer: discard -proc getBodyStream*(mp: MultiPart): HttpResult[AsyncStreamReader] {. - raises: [].} = +proc getBodyStream*(mp: MultiPart): HttpResult[AsyncStreamReader] = ## Get multipart's ``mp`` stream, which can be used to obtain value of the ## part. case mp.kind @@ -279,7 +279,7 @@ proc getBodyStream*(mp: MultiPart): HttpResult[AsyncStreamReader] {. else: err("Could not obtain stream from buffer-like part") -proc closeWait*(mp: MultiPart) {.async.} = +proc closeWait*(mp: MultiPart) {.async: (raises: []).} = ## Close and release MultiPart's ``mp`` stream and resources. case mp.kind of MultiPartSource.Stream: @@ -287,7 +287,7 @@ proc closeWait*(mp: MultiPart) {.async.} = else: discard -proc closeWait*(mpr: MultiPartReaderRef) {.async.} = +proc closeWait*(mpr: MultiPartReaderRef) {.async: (raises: []).} = ## Close and release MultiPartReader's ``mpr`` stream and resources. case mpr.kind of MultiPartSource.Stream: @@ -295,7 +295,7 @@ proc closeWait*(mpr: MultiPartReaderRef) {.async.} = else: discard -proc getBytes*(mp: MultiPart): seq[byte] {.raises: [].} = +proc getBytes*(mp: MultiPart): seq[byte] = ## Returns value for MultiPart ``mp`` as sequence of bytes. case mp.kind of MultiPartSource.Buffer: @@ -304,7 +304,7 @@ proc getBytes*(mp: MultiPart): seq[byte] {.raises: [].} = doAssert(not(mp.stream.atEof()), "Value is not obtained yet") mp.buffer -proc getString*(mp: MultiPart): string {.raises: [].} = +proc getString*(mp: MultiPart): string = ## Returns value for MultiPart ``mp`` as string. case mp.kind of MultiPartSource.Buffer: @@ -313,7 +313,7 @@ proc getString*(mp: MultiPart): string {.raises: [].} = doAssert(not(mp.stream.atEof()), "Value is not obtained yet") bytesToString(mp.buffer) -proc atEoM*(mpr: var MultiPartReader): bool {.raises: [].} = +proc atEoM*(mpr: var MultiPartReader): bool = ## Procedure returns ``true`` if MultiPartReader has reached the end of ## multipart message. 
case mpr.kind @@ -322,7 +322,7 @@ proc atEoM*(mpr: var MultiPartReader): bool {.raises: [].} = of MultiPartSource.Stream: mpr.stream.atEof() -proc atEoM*(mpr: MultiPartReaderRef): bool {.raises: [].} = +proc atEoM*(mpr: MultiPartReaderRef): bool = ## Procedure returns ``true`` if MultiPartReader has reached the end of ## multipart message. case mpr.kind @@ -331,8 +331,7 @@ proc atEoM*(mpr: MultiPartReaderRef): bool {.raises: [].} = of MultiPartSource.Stream: mpr.stream.atEof() -proc getPart*(mpr: var MultiPartReader): Result[MultiPart, string] {. - raises: [].} = +proc getPart*(mpr: var MultiPartReader): Result[MultiPart, string] = ## Get multipart part from MultiPartReader instance. ## ## This procedure will work only for MultiPartReader with buffer source. @@ -422,8 +421,7 @@ proc getPart*(mpr: var MultiPartReader): Result[MultiPart, string] {. else: err("Incorrect multipart form") -func isEmpty*(mp: MultiPart): bool {. - raises: [].} = +func isEmpty*(mp: MultiPart): bool = ## Returns ``true`` is multipart ``mp`` is not initialized/filled yet. mp.counter == 0 @@ -439,8 +437,7 @@ func validateBoundary[B: BChar](boundary: openArray[B]): HttpResult[void] = return err("Content-Type boundary alphabet incorrect") ok() -func getMultipartBoundary*(contentData: ContentTypeData): HttpResult[string] {. - raises: [].} = +func getMultipartBoundary*(contentData: ContentTypeData): HttpResult[string] = ## Returns ``multipart/form-data`` boundary value from ``Content-Type`` ## header. ## @@ -480,8 +477,7 @@ proc quoteCheck(name: string): HttpResult[string] = ok(name) proc init*[B: BChar](mpt: typedesc[MultiPartWriter], - boundary: openArray[B]): MultiPartWriter {. - raises: [].} = + boundary: openArray[B]): MultiPartWriter = ## Create new MultiPartWriter instance with `buffer` interface. ## ## ``boundary`` - is multipart boundary, this value must not be empty. @@ -510,8 +506,7 @@ proc init*[B: BChar](mpt: typedesc[MultiPartWriter], proc new*[B: BChar](mpt: typedesc[MultiPartWriterRef], stream: HttpBodyWriter, - boundary: openArray[B]): MultiPartWriterRef {. - raises: [].} = + boundary: openArray[B]): MultiPartWriterRef = doAssert(validateBoundary(boundary).isOk()) doAssert(not(isNil(stream))) @@ -576,7 +571,8 @@ proc prepareHeaders(partMark: openArray[byte], name: string, filename: string, buffer.add("\r\n") buffer -proc begin*(mpw: MultiPartWriterRef) {.async.} = +proc begin*(mpw: MultiPartWriterRef) {. + async: (raises: [CancelledError, HttpCriticalError]).} = ## Starts multipart message form and write approprate markers to output ## stream. doAssert(mpw.kind == MultiPartSource.Stream) @@ -599,7 +595,8 @@ proc begin*(mpw: var MultiPartWriter) = mpw.state = MultiPartWriterState.MessageStarted proc beginPart*(mpw: MultiPartWriterRef, name: string, - filename: string, headers: HttpTable) {.async.} = + filename: string, headers: HttpTable) {. + async: (raises: [CancelledError, HttpCriticalError]).} = ## Starts part of multipart message and write appropriate ``headers`` to the ## output stream. ## @@ -634,38 +631,44 @@ proc beginPart*(mpw: var MultiPartWriter, name: string, mpw.buffer.add(buffer.toOpenArrayByte(0, len(buffer) - 1)) mpw.state = MultiPartWriterState.PartStarted -proc write*(mpw: MultiPartWriterRef, pbytes: pointer, nbytes: int) {.async.} = +proc write*(mpw: MultiPartWriterRef, pbytes: pointer, nbytes: int) {. + async: (raises: [CancelledError, HttpCriticalError]).} = ## Write part's data ``data`` to the output stream. 
doAssert(mpw.kind == MultiPartSource.Stream) doAssert(mpw.state == MultiPartWriterState.PartStarted) try: # write of data await mpw.stream.write(pbytes, nbytes) - except AsyncStreamError: + except AsyncStreamError as exc: mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError("Unable to write multipart data") + raiseHttpCriticalError( + "Unable to write multipart data, reason: " & $exc.msg) -proc write*(mpw: MultiPartWriterRef, data: seq[byte]) {.async.} = +proc write*(mpw: MultiPartWriterRef, data: seq[byte]) {. + async: (raises: [CancelledError, HttpCriticalError]).} = ## Write part's data ``data`` to the output stream. doAssert(mpw.kind == MultiPartSource.Stream) doAssert(mpw.state == MultiPartWriterState.PartStarted) try: # write of data await mpw.stream.write(data) - except AsyncStreamError: + except AsyncStreamError as exc: mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError("Unable to write multipart data") + raiseHttpCriticalError( + "Unable to write multipart data, reason: " & $exc.msg) -proc write*(mpw: MultiPartWriterRef, data: string) {.async.} = +proc write*(mpw: MultiPartWriterRef, data: string) {. + async: (raises: [CancelledError, HttpCriticalError]).} = ## Write part's data ``data`` to the output stream. doAssert(mpw.kind == MultiPartSource.Stream) doAssert(mpw.state == MultiPartWriterState.PartStarted) try: # write of data await mpw.stream.write(data) - except AsyncStreamError: + except AsyncStreamError as exc: mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError("Unable to write multipart data") + raiseHttpCriticalError( + "Unable to write multipart data, reason: " & $exc.msg) proc write*(mpw: var MultiPartWriter, pbytes: pointer, nbytes: int) = ## Write part's data ``data`` to the output stream. @@ -688,16 +691,18 @@ proc write*(mpw: var MultiPartWriter, data: openArray[char]) = doAssert(mpw.state == MultiPartWriterState.PartStarted) mpw.buffer.add(data.toOpenArrayByte(0, len(data) - 1)) -proc finishPart*(mpw: MultiPartWriterRef) {.async.} = +proc finishPart*(mpw: MultiPartWriterRef) {. + async: (raises: [CancelledError, HttpCriticalError]).} = ## Finish multipart's message part and send proper markers to output stream. doAssert(mpw.state == MultiPartWriterState.PartStarted) try: # write "--" await mpw.stream.write(mpw.finishPartMark) mpw.state = MultiPartWriterState.PartFinished - except AsyncStreamError: + except AsyncStreamError as exc: mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError("Unable to finish multipart message part") + raiseHttpCriticalError( + "Unable to finish multipart message part, reason: " & $exc.msg) proc finishPart*(mpw: var MultiPartWriter) = ## Finish multipart's message part and send proper markers to output stream. @@ -707,7 +712,8 @@ proc finishPart*(mpw: var MultiPartWriter) = mpw.buffer.add(mpw.finishPartMark) mpw.state = MultiPartWriterState.PartFinished -proc finish*(mpw: MultiPartWriterRef) {.async.} = +proc finish*(mpw: MultiPartWriterRef) {. + async: (raises: [CancelledError, HttpCriticalError]).} = ## Finish multipart's message form and send finishing markers to the output ## stream. 
doAssert(mpw.kind == MultiPartSource.Stream) @@ -716,9 +722,10 @@ proc finish*(mpw: MultiPartWriterRef) {.async.} = # write "--" await mpw.stream.write(mpw.finishMark) mpw.state = MultiPartWriterState.MessageFinished - except AsyncStreamError: + except AsyncStreamError as exc: mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError("Unable to finish multipart message") + raiseHttpCriticalError( + "Unable to finish multipart message, reason: " & $exc.msg) proc finish*(mpw: var MultiPartWriter): seq[byte] = ## Finish multipart's message form and send finishing markers to the output diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim index 030059711..2373d9580 100644 --- a/chronos/apps/http/shttpserver.nim +++ b/chronos/apps/http/shttpserver.nim @@ -6,6 +6,9 @@ # Licensed under either of # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) + +{.push raises: [].} + import httpserver import ../../asyncloop, ../../asyncsync import ../../streams/[asyncstream, tlsstream] @@ -24,7 +27,7 @@ type SecureHttpConnectionRef* = ref SecureHttpConnection -proc closeSecConnection(conn: HttpConnectionRef) {.async.} = +proc closeSecConnection(conn: HttpConnectionRef) {.async: (raises: []).} = if conn.state == HttpState.Alive: conn.state = HttpState.Closing var pending: seq[Future[void]] @@ -38,44 +41,44 @@ proc closeSecConnection(conn: HttpConnectionRef) {.async.} = untrackCounter(HttpServerSecureConnectionTrackerName) conn.state = HttpState.Closed -proc new*(ht: typedesc[SecureHttpConnectionRef], server: SecureHttpServerRef, - transp: StreamTransport): SecureHttpConnectionRef = +proc new(ht: typedesc[SecureHttpConnectionRef], server: SecureHttpServerRef, + transp: StreamTransport): Result[SecureHttpConnectionRef, string] = var res = SecureHttpConnectionRef() HttpConnection(res[]).init(HttpServerRef(server), transp) let tlsStream = - newTLSServerAsyncStream(res.mainReader, res.mainWriter, - server.tlsPrivateKey, - server.tlsCertificate, - minVersion = TLSVersion.TLS12, - flags = server.secureFlags) + try: + newTLSServerAsyncStream(res.mainReader, res.mainWriter, + server.tlsPrivateKey, + server.tlsCertificate, + minVersion = TLSVersion.TLS12, + flags = server.secureFlags) + except TLSStreamError as exc: + return err(exc.msg) res.tlsStream = tlsStream res.reader = AsyncStreamReader(tlsStream.reader) res.writer = AsyncStreamWriter(tlsStream.writer) res.closeCb = closeSecConnection trackCounter(HttpServerSecureConnectionTrackerName) - res + ok(res) proc createSecConnection(server: HttpServerRef, transp: StreamTransport): Future[HttpConnectionRef] {. 
- async.} = - let secureServ = cast[SecureHttpServerRef](server) - var sconn = SecureHttpConnectionRef.new(secureServ, transp) + async: (raises: [CancelledError, HttpConnectionError]).} = + let + secureServ = cast[SecureHttpServerRef](server) + sconn = SecureHttpConnectionRef.new(secureServ, transp).valueOr: + raiseHttpConnectionError(error) + try: await handshake(sconn.tlsStream) - return HttpConnectionRef(sconn) + HttpConnectionRef(sconn) except CancelledError as exc: await HttpConnectionRef(sconn).closeWait() raise exc - except TLSStreamError as exc: + except AsyncStreamError as exc: await HttpConnectionRef(sconn).closeWait() - let msg = "Unable to establish secure connection, reason [" & - $exc.msg & "]" - raiseHttpCriticalError(msg) - except CatchableError as exc: - await HttpConnectionRef(sconn).closeWait() - let msg = "Unexpected error while trying to establish secure connection, " & - "reason [" & $exc.msg & "]" - raiseHttpCriticalError(msg) + let msg = "Unable to establish secure connection, reason: " & $exc.msg + raiseHttpConnectionError(msg) proc new*(htype: typedesc[SecureHttpServerRef], address: TransportAddress, @@ -94,7 +97,7 @@ proc new*(htype: typedesc[SecureHttpServerRef], maxHeadersSize: int = 8192, maxRequestBodySize: int = 1_048_576, dualstack = DualStackType.Auto - ): HttpResult[SecureHttpServerRef] {.raises: [].} = + ): HttpResult[SecureHttpServerRef] = doAssert(not(isNil(tlsPrivateKey)), "TLS private key must not be nil!") doAssert(not(isNil(tlsCertificate)), "TLS certificate must not be nil!") @@ -114,8 +117,6 @@ proc new*(htype: typedesc[SecureHttpServerRef], backlog = backlogSize, dualstack = dualstack) except TransportOsError as exc: return err(exc.msg) - except CatchableError as exc: - return err(exc.msg) let res = SecureHttpServerRef( address: address, diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index ba7eaf0ac..a7fd96124 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -983,7 +983,7 @@ template cancel*(future: FutureBase) {. cancelSoon(future, nil, nil, getSrcLocation()) proc cancelAndWait*(future: FutureBase, loc: ptr SrcLoc): Future[void] {. - async: (raw: true, raises: [CancelledError]).} = + async: (raw: true, raises: []).} = ## Perform cancellation ``future`` return Future which will be completed when ## ``future`` become finished (completed with value, failed or cancelled). ## @@ -1003,7 +1003,7 @@ proc cancelAndWait*(future: FutureBase, loc: ptr SrcLoc): Future[void] {. retFuture -template cancelAndWait*(future: FutureBase): Future[void].Raising([CancelledError]) = +template cancelAndWait*(future: FutureBase): Future[void].Raising([]) = ## Cancel ``future``. 
cancelAndWait(future, getSrcLocation()) diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim index 79384d2ee..20fa6ed0d 100644 --- a/chronos/internal/raisesfutures.nim +++ b/chronos/internal/raisesfutures.nim @@ -57,8 +57,25 @@ template init*[T, E]( res, getSrcLocation(fromProc), FutureState.Pending, flags) res +proc dig(n: NimNode): NimNode {.compileTime.} = + # Dig through the layers of type to find the raises list + if n.eqIdent("void"): + n + elif n.kind == nnkBracketExpr: + if n[0].eqIdent("tuple"): + n + elif n[0].eqIdent("typeDesc"): + dig(getType(n[1])) + else: + echo astGenRepr(n) + raiseAssert "Unkown bracket" + elif n.kind == nnkTupleConstr: + n + else: + dig(getType(getTypeInst(n))) + proc isNoRaises*(n: NimNode): bool {.compileTime.} = - n.eqIdent("void") + dig(n).eqIdent("void") iterator members(tup: NimNode): NimNode = # Given a typedesc[tuple] = (A, B, C), yields the tuple members (A, B C) @@ -79,7 +96,7 @@ proc containsSignature(members: openArray[NimNode], typ: NimNode): bool {.compil false # Utilities for working with the E part of InternalRaisesFuture - unstable -macro prepend*(tup: typedesc[tuple], typs: varargs[typed]): typedesc = +macro prepend*(tup: typedesc, typs: varargs[typed]): typedesc = result = nnkTupleConstr.newTree() for err in typs: if not tup.members().containsSignature(err): @@ -91,7 +108,7 @@ macro prepend*(tup: typedesc[tuple], typs: varargs[typed]): typedesc = if result.len == 0: result = makeNoRaises() -macro remove*(tup: typedesc[tuple], typs: varargs[typed]): typedesc = +macro remove*(tup: typedesc, typs: varargs[typed]): typedesc = result = nnkTupleConstr.newTree() for err in tup.members(): if not typs[0..^1].containsSignature(err): @@ -100,7 +117,7 @@ macro remove*(tup: typedesc[tuple], typs: varargs[typed]): typedesc = if result.len == 0: result = makeNoRaises() -macro union*(tup0: typedesc[tuple], tup1: typedesc[tuple]): typedesc = +macro union*(tup0: typedesc, tup1: typedesc): typedesc = ## Join the types of the two tuples deduplicating the entries result = nnkTupleConstr.newTree() diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 58aabc38d..107bc6e68 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -1832,10 +1832,21 @@ proc close*(server: StreamServer) = server.sock.closeSocket(continuation) proc closeWait*(server: StreamServer): Future[void] {. - async: (raw: true, raises: [CancelledError]).} = + async: (raw: true, raises: []).} = ## Close server ``server`` and release all resources. + let retFuture = newFuture[void]( + "stream.server.closeWait", {FutureFlag.OwnCancelSchedule}) + + proc continuation(udata: pointer) = + retFuture.complete() + server.close() - server.join() + + if not(server.loopFuture.finished()): + server.loopFuture.addCallback(continuation, cast[pointer](retFuture)) + else: + retFuture.complete() + retFuture proc getBacklogSize(backlog: int): cint = doAssert(backlog >= 0 and backlog <= high(int32)) From 28a100b1350d52668ce7b9c9ae8159fdc0acace2 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 28 Nov 2023 18:57:13 +0200 Subject: [PATCH 093/146] Fix processing callback missing asyncraises. 
(#479) --- chronos/apps/http/httpcommon.nim | 6 +-- chronos/apps/http/httpserver.nim | 72 ++++++++++++++++---------------- tests/testhttpclient.nim | 30 +++++++------ tests/testhttpserver.nim | 33 ++++++++------- tests/testshttpserver.nim | 4 +- 5 files changed, 77 insertions(+), 68 deletions(-) diff --git a/chronos/apps/http/httpcommon.nim b/chronos/apps/http/httpcommon.nim index d2148fbf6..3ebe3ca20 100644 --- a/chronos/apps/http/httpcommon.nim +++ b/chronos/apps/http/httpcommon.nim @@ -53,10 +53,10 @@ type HttpDefect* = object of Defect HttpError* = object of AsyncError - HttpCriticalError* = object of HttpError - code*: HttpCode - HttpRecoverableError* = object of HttpError + HttpResponseError* = object of HttpError code*: HttpCode + HttpCriticalError* = object of HttpResponseError + HttpRecoverableError* = object of HttpResponseError HttpDisconnectError* = object of HttpError HttpConnectionError* = object of HttpError HttpInterruptError* = object of HttpError diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index e8326ccd7..7d1aea0e1 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -65,7 +65,7 @@ type HttpProcessCallback* = proc(req: RequestFence): Future[HttpResponseRef] {. - gcsafe, raises: [].} + async: (raises: [CancelledError, HttpResponseError]), gcsafe.} HttpConnectionCallback* = proc(server: HttpServerRef, @@ -448,7 +448,7 @@ proc getBodyReader*(request: HttpRequestRef): HttpResult[HttpBodyReader] = err("Request do not have body available") proc handleExpect*(request: HttpRequestRef) {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Handle expectation for ``Expect`` header. ## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expect if HttpServerFlags.NoExpectHandler notin request.connection.server.flags: @@ -464,7 +464,7 @@ proc handleExpect*(request: HttpRequestRef) {. "Unable to send `100-continue` response, reason: " & $exc.msg) proc getBody*(request: HttpRequestRef): Future[seq[byte]] {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Obtain request's body as sequence of bytes. let bodyReader = request.getBodyReader() if bodyReader.isErr(): @@ -486,7 +486,7 @@ proc getBody*(request: HttpRequestRef): Future[seq[byte]] {. if not(isNil(reader)): await reader.closeWait() raise exc - except HttpError as exc: + except HttpCriticalError as exc: if not(isNil(reader)): await reader.closeWait() raise exc @@ -497,7 +497,7 @@ proc getBody*(request: HttpRequestRef): Future[seq[byte]] {. raiseHttpCriticalError(msg) proc consumeBody*(request: HttpRequestRef): Future[void] {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Consume/discard request's body. let bodyReader = request.getBodyReader() if bodyReader.isErr(): @@ -519,7 +519,7 @@ proc consumeBody*(request: HttpRequestRef): Future[void] {. 
if not(isNil(reader)): await reader.closeWait() raise exc - except HttpError as exc: + except HttpCriticalError as exc: if not(isNil(reader)): await reader.closeWait() raise exc @@ -905,10 +905,11 @@ proc getResponseFence*(connection: HttpConnectionRef, let address = connection.getRemoteAddress() ResponseFence.err(HttpProcessError.init( HttpServerError.RecoverableError, exc, address, exc.code)) - except CatchableError as exc: - let address = connection.getRemoteAddress() - ResponseFence.err(HttpProcessError.init( - HttpServerError.CatchableError, exc, address, Http503)) + except HttpResponseError as exc: + # There should be only 2 children of HttpResponseError, and all of them + # should be handled. + raiseAssert "Unexpected response error " & $exc.name & ", reason: " & + $exc.msg proc getResponseFence*(server: HttpServerRef, connFence: ConnectionFence): Future[ResponseFence] {. @@ -930,10 +931,11 @@ proc getResponseFence*(server: HttpServerRef, let address = Opt.none(TransportAddress) ResponseFence.err(HttpProcessError.init( HttpServerError.RecoverableError, exc, address, exc.code)) - except CatchableError as exc: - let address = Opt.none(TransportAddress) - ResponseFence.err(HttpProcessError.init( - HttpServerError.CatchableError, exc, address, Http503)) + except HttpResponseError as exc: + # There should be only 2 children of HttpResponseError, and all of them + # should be handled. + raiseAssert "Unexpected response error " & $exc.name & ", reason: " & + $exc.msg proc getRequestFence*(server: HttpServerRef, connection: HttpConnectionRef): Future[RequestFence] {. @@ -1161,7 +1163,7 @@ proc getMultipartReader*(req: HttpRequestRef): HttpResult[MultiPartReaderRef] = err("Request's method do not supports multipart") proc post*(req: HttpRequestRef): Future[HttpTable] {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Return POST parameters if req.postTable.isSome(): return req.postTable.get() @@ -1337,7 +1339,7 @@ proc preparePlainHeaders(resp: HttpResponseRef): string = resp.createHeaders() proc sendBody*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Send HTTP response at once by using bytes pointer ``pbytes`` and length ## ``nbytes``. doAssert(not(isNil(pbytes)), "pbytes must not be nil") @@ -1359,7 +1361,7 @@ proc sendBody*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {. raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) proc sendBody*(resp: HttpResponseRef, data: ByteChar) {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Send HTTP response at once by using data ``data``. checkPending(resp) let responseHeaders = resp.prepareLengthHeaders(len(data)) @@ -1378,7 +1380,7 @@ proc sendBody*(resp: HttpResponseRef, data: ByteChar) {. raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) proc sendError*(resp: HttpResponseRef, code: HttpCode, body = "") {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Send HTTP error status response. checkPending(resp) resp.status = code @@ -1399,7 +1401,7 @@ proc sendError*(resp: HttpResponseRef, code: HttpCode, body = "") {. proc prepare*(resp: HttpResponseRef, streamType = HttpResponseStreamType.Chunked) {. 
- async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Prepare for HTTP stream response. ## ## Such responses will be sent chunk by chunk using ``chunked`` encoding. @@ -1431,26 +1433,26 @@ proc prepare*(resp: HttpResponseRef, raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) proc prepareChunked*(resp: HttpResponseRef): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = ## Prepare for HTTP chunked stream response. ## ## Such responses will be sent chunk by chunk using ``chunked`` encoding. resp.prepare(HttpResponseStreamType.Chunked) proc preparePlain*(resp: HttpResponseRef): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = ## Prepare for HTTP plain stream response. ## ## Such responses will be sent without any encoding. resp.prepare(HttpResponseStreamType.Plain) proc prepareSSE*(resp: HttpResponseRef): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = ## Prepare for HTTP server-side event stream response. resp.prepare(HttpResponseStreamType.SSE) proc send*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Send single chunk of data pointed by ``pbytes`` and ``nbytes``. doAssert(not(isNil(pbytes)), "pbytes must not be nil") doAssert(nbytes >= 0, "nbytes should be bigger or equal to zero") @@ -1470,7 +1472,7 @@ proc send*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {. raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) proc send*(resp: HttpResponseRef, data: ByteChar) {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Send single chunk of data ``data``. if HttpResponseFlags.Stream notin resp.flags: raiseHttpCriticalError("Response was not prepared") @@ -1489,16 +1491,16 @@ proc send*(resp: HttpResponseRef, data: ByteChar) {. proc sendChunk*(resp: HttpResponseRef, pbytes: pointer, nbytes: int): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = resp.send(pbytes, nbytes) proc sendChunk*(resp: HttpResponseRef, data: ByteChar): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = resp.send(data) proc sendEvent*(resp: HttpResponseRef, eventName: string, data: string): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = ## Send server-side event with name ``eventName`` and payload ``data`` to ## remote peer. let data = @@ -1515,7 +1517,7 @@ proc sendEvent*(resp: HttpResponseRef, eventName: string, resp.send(data) proc finish*(resp: HttpResponseRef) {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Sending last chunk of data, so it will indicate end of HTTP response. if HttpResponseFlags.Stream notin resp.flags: raiseHttpCriticalError("Response was not prepared") @@ -1534,7 +1536,7 @@ proc finish*(resp: HttpResponseRef) {. 
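A minimal caller-side sketch of what the checked `raises` annotations above make possible, assuming only the `sendBody` signature declared in this patch and the fact (from `httpcommon.nim`) that `HttpCriticalError` derives from `HttpError`; the wrapper name `trySendBody` is hypothetical and not part of the patch:

```nim
# Sketch only, not part of the upstream patch. With checked raises on
# sendBody, the compiler can verify that only CancelledError escapes this
# wrapper: one `except HttpError` branch covers every send failure re-raised
# by the hunks above.
proc trySendBody(resp: HttpResponseRef, data: string): Future[bool] {.
    async: (raises: [CancelledError]).} =
  try:
    await resp.sendBody(data)
    return true
  except HttpError:
    return false
```

Because the hunks above now append `$exc.msg` to the re-raised error, a caller that wants diagnostics can instead catch `HttpError as exc` and inspect `exc.msg` for the concrete stream failure rather than a generic "Unable to send response".
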
proc respond*(req: HttpRequestRef, code: HttpCode, content: ByteChar, headers: HttpTable): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpCriticalError]).} = ## Responds to the request with the specified ``HttpCode``, HTTP ``headers`` ## and ``content``. let response = req.getResponse() @@ -1546,18 +1548,18 @@ proc respond*(req: HttpRequestRef, code: HttpCode, content: ByteChar, proc respond*(req: HttpRequestRef, code: HttpCode, content: ByteChar): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = ## Responds to the request with specified ``HttpCode`` and ``content``. respond(req, code, content, HttpTable.init()) proc respond*(req: HttpRequestRef, code: HttpCode): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = ## Responds to the request with specified ``HttpCode`` only. respond(req, code, "", HttpTable.init()) proc redirect*(req: HttpRequestRef, code: HttpCode, location: string, headers: HttpTable): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = ## Responds to the request with redirection to location ``location`` and ## additional headers ``headers``. ## @@ -1569,7 +1571,7 @@ proc redirect*(req: HttpRequestRef, code: HttpCode, proc redirect*(req: HttpRequestRef, code: HttpCode, location: Uri, headers: HttpTable): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = ## Responds to the request with redirection to location ``location`` and ## additional headers ``headers``. ## @@ -1579,13 +1581,13 @@ proc redirect*(req: HttpRequestRef, code: HttpCode, proc redirect*(req: HttpRequestRef, code: HttpCode, location: Uri): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = ## Responds to the request with redirection to location ``location``. redirect(req, code, location, HttpTable.init()) proc redirect*(req: HttpRequestRef, code: HttpCode, location: string): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpError]).} = + async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = ## Responds to the request with redirection to location ``location``. redirect(req, code, location, HttpTable.init()) diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index 1d9992f45..eb1eaacf9 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -128,7 +128,7 @@ suite "HTTP client testing suite": (MethodPatch, "/test/patch") ] proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -195,7 +195,7 @@ suite "HTTP client testing suite": "LONGCHUNKRESPONSE") ] proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -311,7 +311,7 @@ suite "HTTP client testing suite": (MethodPost, "/test/big_request", 262400) ] proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -381,7 +381,7 @@ suite "HTTP client testing suite": (MethodPost, "/test/big_chunk_request", 262400) ] proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -455,7 +455,7 @@ suite "HTTP client testing suite": ] proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -554,7 +554,7 @@ suite "HTTP client testing suite": ] proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -649,7 +649,7 @@ suite "HTTP client testing suite": var lastAddress: Uri proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -706,7 +706,7 @@ suite "HTTP client testing suite": proc testSendCancelLeaksTest(secure: bool): Future[bool] {.async.} = proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) @@ -756,7 +756,7 @@ suite "HTTP client testing suite": proc testOpenCancelLeaksTest(secure: bool): Future[bool] {.async.} = proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = return defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) @@ -867,7 +867,8 @@ suite "HTTP client testing suite": return @[(data1.status, data1.data.bytesToString(), count), (data2.status, data2.data.bytesToString(), count)] - proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + proc process(r: RequestFence): Future[HttpResponseRef] {. + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -1002,7 +1003,8 @@ suite "HTTP client testing suite": await request.closeWait() return (data.status, data.data.bytesToString(), 0) - proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + proc process(r: RequestFence): Future[HttpResponseRef] {. + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -1061,7 +1063,8 @@ suite "HTTP client testing suite": await request.closeWait() return (data.status, data.data.bytesToString(), 0) - proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + proc process(r: RequestFence): Future[HttpResponseRef] {. + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -1176,7 +1179,8 @@ suite "HTTP client testing suite": return false true - proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + proc process(r: RequestFence): Future[HttpResponseRef] {. 
+ async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() if request.uri.path.startsWith("/test/single/"): diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index 85aeee5b6..33d5ea160 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -64,7 +64,7 @@ suite "HTTP server testing suite": proc testTooBigBodyChunked(operation: TooBigTest): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() try: @@ -128,7 +128,7 @@ suite "HTTP server testing suite": proc testTimeout(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() return await request.respond(Http200, "TEST_OK", HttpTable.init()) @@ -158,7 +158,7 @@ suite "HTTP server testing suite": proc testEmpty(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() return await request.respond(Http200, "TEST_OK", HttpTable.init()) @@ -188,7 +188,7 @@ suite "HTTP server testing suite": proc testTooBig(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() return await request.respond(Http200, "TEST_OK", HttpTable.init()) @@ -219,7 +219,7 @@ suite "HTTP server testing suite": proc testTooBigBody(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): discard else: @@ -266,7 +266,7 @@ suite "HTTP server testing suite": proc testQuery(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() var kres = newSeq[string]() @@ -307,7 +307,7 @@ suite "HTTP server testing suite": proc testHeaders(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() var kres = newSeq[string]() @@ -351,7 +351,7 @@ suite "HTTP server testing suite": proc testPostUrl(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): var kres = newSeq[string]() let request = r.get() @@ -395,7 +395,7 @@ suite "HTTP server testing suite": proc testPostUrl2(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): var kres = newSeq[string]() let request = r.get() @@ -440,7 +440,7 @@ suite "HTTP server testing suite": proc testPostMultipart(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): var kres = newSeq[string]() let request = r.get() @@ -496,7 +496,7 @@ suite "HTTP server testing suite": proc testPostMultipart2(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): var kres = newSeq[string]() let request = r.get() @@ -565,7 +565,8 @@ suite "HTTP server testing suite": var eventContinue = newAsyncEvent() var count = 0 - proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + proc process(r: RequestFence): Future[HttpResponseRef] {. + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() inc(count) @@ -1229,7 +1230,7 @@ suite "HTTP server testing suite": proc testPostMultipart2(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() let response = request.getResponse() @@ -1304,7 +1305,8 @@ suite "HTTP server testing suite": {}, false, "close") ] - proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + proc process(r: RequestFence): Future[HttpResponseRef] {. + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() return await request.respond(Http200, "TEST_OK", HttpTable.init()) @@ -1357,7 +1359,8 @@ suite "HTTP server testing suite": TestsCount = 10 TestRequest = "GET /httpdebug HTTP/1.1\r\nConnection: keep-alive\r\n\r\n" - proc process(r: RequestFence): Future[HttpResponseRef] {.async.} = + proc process(r: RequestFence): Future[HttpResponseRef] {. + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() return await request.respond(Http200, "TEST_OK", HttpTable.init()) diff --git a/tests/testshttpserver.nim b/tests/testshttpserver.nim index 8aacb8e43..3ff2565a0 100644 --- a/tests/testshttpserver.nim +++ b/tests/testshttpserver.nim @@ -108,7 +108,7 @@ suite "Secure HTTP server testing suite": proc testHTTPS(address: TransportAddress): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() serverRes = true @@ -146,7 +146,7 @@ suite "Secure HTTP server testing suite": var serverRes = false var testFut = newFuture[void]() proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async.} = + async: (raises: [CancelledError, HttpResponseError]).} = if r.isOk(): let request = r.get() serverRes = false From 48b2b08cfbe057242997a8aee615155ea76f3744 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 1 Dec 2023 12:33:28 +0100 Subject: [PATCH 094/146] Update docs (#480) * new mdbook version with built-in Nim highlighting support * describe examples in a dedicated page * fixes --- .github/workflows/doc.yml | 4 +-- docs/src/SUMMARY.md | 6 ++++- docs/src/async_procs.md | 13 ++++++---- docs/src/examples.md | 18 +++++++++++++ docs/src/introduction.md | 30 +++++++++++++++++----- docs/theme/highlight.js | 53 --------------------------------------- 6 files changed, 57 insertions(+), 67 deletions(-) create mode 100644 docs/src/examples.md delete mode 100644 docs/theme/highlight.js diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml index dc718f8de..5d4022c88 100644 --- a/.github/workflows/doc.yml +++ b/.github/workflows/doc.yml @@ -22,7 +22,7 @@ jobs: with: crate: mdbook use-tool-cache: true - version: "0.4.35" + version: "0.4.36" - uses: actions-rs/install@v0.1 with: crate: mdbook-toc @@ -37,7 +37,7 @@ jobs: with: crate: mdbook-admonish use-tool-cache: true - version: "1.13.1" + version: "1.14.0" - uses: jiro4989/setup-nim-action@v1 with: diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 186fadd08..4f2ee56ff 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -1,5 +1,5 @@ - [Introduction](./introduction.md) -- [Getting started](./getting_started.md) +- [Examples](./examples.md) # User guide @@ -8,3 +8,7 @@ - [Errors and exceptions](./error_handling.md) - [Tips, tricks and best practices](./tips.md) - [Porting code to `chronos`](./porting.md) + +# Developer guide + +- [Updating this book](./book.md) diff --git a/docs/src/async_procs.md b/docs/src/async_procs.md index 648f19be1..c7ee9f335 100644 --- a/docs/src/async_procs.md +++ b/docs/src/async_procs.md @@ -2,8 +2,8 @@ Async procedures are those that interact with `chronos` to cooperatively suspend and resume their execution depending on the completion of other -async procedures which themselves may be waiting for I/O to complete, timers to -expire or tasks running on other threads to complete. +async procedures, timers, tasks on other threads or asynchronous I/O scheduled +with the operating system. Async procedures are marked with the `{.async.}` pragma and return a `Future` indicating the state of the operation. @@ -25,6 +25,9 @@ echo p().type # prints "Future[system.void]" ## `await` keyword +The `await` keyword operates on `Future` instances typically returned from an +`async` procedure. + Whenever `await` is encountered inside an async procedure, control is given back to the dispatcher for as many steps as it's necessary for the awaited future to complete, fail or be cancelled. `await` calls the @@ -53,13 +56,13 @@ waitFor p3() ```admonition warning Because `async` procedures are executed concurrently, they are subject to many -of the same risks that typically accompany multithreaded programming +of the same risks that typically accompany multithreaded programming. In particular, if two `async` procedures have access to the same mutable state, the value before and after `await` might not be the same as the order of execution is not guaranteed! ``` -## Raw procedures +## Raw async procedures Raw async procedures are those that interact with `chronos` via the `Future` type but whose body does not go through the async transformation. 
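To make the warning above concrete, here is a minimal sketch that is not part of the patch; it assumes only `import chronos` and the stock `sleepAsync`/`waitFor` helpers. Two tasks read the same mutable variable before an `await`, so one update is lost:

```nim
import chronos

var counter = 0   # mutable state shared by two concurrent tasks

proc addOne() {.async.} =
  let before = counter
  # Control returns to the dispatcher here; the other task may run and
  # update `counter` before this procedure resumes.
  await sleepAsync(10.milliseconds)
  counter = before + 1

proc main() {.async.} =
  # Start both tasks before awaiting either one, so they run concurrently.
  let fut1 = addOne()
  let fut2 = addOne()
  await fut1
  await fut2
  echo counter    # prints 1, not 2: both tasks read `counter` while it was 0

waitFor main()
```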
@@ -83,7 +86,7 @@ proc rawFailure(): Future[void] {.async: (raw: true).} = fut ``` -Raw functions can also use checked exceptions: +Raw procedures can also use checked exceptions: ```nim proc rawAsyncRaises(): Future[void] {.async: (raw: true, raises: [IOError]).} = diff --git a/docs/src/examples.md b/docs/src/examples.md new file mode 100644 index 000000000..c71247c65 --- /dev/null +++ b/docs/src/examples.md @@ -0,0 +1,18 @@ +# Examples + +Examples are available in the [`docs/examples/`](https://github.com/status-im/nim-chronos/tree/master/docs/examples/) folder. + +## Basic concepts + +* [cancellation](https://github.com/status-im/nim-chronos/tree/master/docs/examples/cancellation.nim) - Cancellation primer +* [timeoutsimple](https://github.com/status-im/nim-chronos/tree/master/docs/examples/timeoutsimple.nim) - Simple timeouts +* [timeoutcomposed](https://github.com/status-im/nim-chronos/tree/master/docs/examples/timeoutcomposed.nim) - Shared timeout of multiple tasks + +## TCP + +* [tcpserver](https://github.com/status-im/nim-chronos/tree/master/docs/examples/tcpserver.nim) - Simple TCP/IP v4/v6 echo server + +## HTTP + +* [httpget](https://github.com/status-im/nim-chronos/tree/master/docs/examples/httpget.nim) - Downloading a web page using the http client +* [twogets](https://github.com/status-im/nim-chronos/tree/master/docs/examples/twogets.nim) - Download two pages concurrently diff --git a/docs/src/introduction.md b/docs/src/introduction.md index 9c2a308aa..bc43686be 100644 --- a/docs/src/introduction.md +++ b/docs/src/introduction.md @@ -7,12 +7,34 @@ transformation features provided by Nim. Features include: * Asynchronous socket and process I/O -* HTTP server with SSL/TLS support out of the box (no OpenSSL needed) +* HTTP client / server with SSL/TLS support out of the box (no OpenSSL needed) * Synchronization primitives like queues, events and locks -* Cancellation +* [Cancellation](./concepts.md#cancellation) * Efficient dispatch pipeline with excellent multi-platform support * Exception [effect support](./guide.md#error-handling) +## Installation + +Install `chronos` using `nimble`: + +```text +nimble install chronos +``` + +or add a dependency to your `.nimble` file: + +```text +requires "chronos" +``` + +and start using it: + +```nim +{{#include ../examples/httpget.nim}} +``` + +There are more [examples](./examples.md) throughout the manual! + ## Platform support Several platforms are supported, with different backend [options](./concepts.md#compile-time-configuration): @@ -22,10 +44,6 @@ Several platforms are supported, with different backend [options](./concepts.md# * OSX / BSD: [`kqueue`](https://en.wikipedia.org/wiki/Kqueue) / `poll` * Android / Emscripten / posix: `poll` -## Examples - -Examples are available in the [`docs/examples/`](https://github.com/status-im/nim-chronos/docs/examples) folder.
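As a complement to the installation snippet above, the following hedged sketch shows the `async: (raises: [...])` annotation that the handler signatures earlier in this series and the checked-exceptions note above rely on; the procedure name, the `ValueError`, and the sleep duration are illustrative only:

```nim
import chronos

# The compiler verifies that only the listed exception types, here
# CancelledError (for cancellation) and ValueError, can escape the procedure.
proc measure(s: string): Future[int] {.
    async: (raises: [CancelledError, ValueError]).} =
  await sleepAsync(10.milliseconds)   # may raise CancelledError
  if s.len == 0:
    raise newException(ValueError, "empty input")
  return s.len

proc main() {.async.} =
  try:
    let n = await measure("hello")
    echo "length: ", n
    discard await measure("")         # fails with ValueError
  except ValueError as exc:
    echo "rejected: ", exc.msg

waitFor main()
```

Keeping `CancelledError` in the list lets cancellation propagate out of the procedure, while any exception type that is not listed is rejected at compile time.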
- ## API documentation This guide covers basic usage of chronos - for details, see the diff --git a/docs/theme/highlight.js b/docs/theme/highlight.js deleted file mode 100644 index 3256c00ed..000000000 --- a/docs/theme/highlight.js +++ /dev/null @@ -1,53 +0,0 @@ -/* - Highlight.js 10.1.1 (93fd0d73) - License: BSD-3-Clause - Copyright (c) 2006-2020, Ivan Sagalaev -*/ -var hljs=function(){"use strict";function e(n){Object.freeze(n);var t="function"==typeof n;return Object.getOwnPropertyNames(n).forEach((function(r){!Object.hasOwnProperty.call(n,r)||null===n[r]||"object"!=typeof n[r]&&"function"!=typeof n[r]||t&&("caller"===r||"callee"===r||"arguments"===r)||Object.isFrozen(n[r])||e(n[r])})),n}class n{constructor(e){void 0===e.data&&(e.data={}),this.data=e.data}ignoreMatch(){this.ignore=!0}}function t(e){return e.replace(/&/g,"&").replace(//g,">").replace(/"/g,""").replace(/'/g,"'")}function r(e,...n){var t={};for(const n in e)t[n]=e[n];return n.forEach((function(e){for(const n in e)t[n]=e[n]})),t}function a(e){return e.nodeName.toLowerCase()}var i=Object.freeze({__proto__:null,escapeHTML:t,inherit:r,nodeStream:function(e){var n=[];return function e(t,r){for(var i=t.firstChild;i;i=i.nextSibling)3===i.nodeType?r+=i.nodeValue.length:1===i.nodeType&&(n.push({event:"start",offset:r,node:i}),r=e(i,r),a(i).match(/br|hr|img|input/)||n.push({event:"stop",offset:r,node:i}));return r}(e,0),n},mergeStreams:function(e,n,r){var i=0,s="",o=[];function l(){return e.length&&n.length?e[0].offset!==n[0].offset?e[0].offset"}function u(e){s+=""}function d(e){("start"===e.event?c:u)(e.node)}for(;e.length||n.length;){var g=l();if(s+=t(r.substring(i,g[0].offset)),i=g[0].offset,g===e){o.reverse().forEach(u);do{d(g.splice(0,1)[0]),g=l()}while(g===e&&g.length&&g[0].offset===i);o.reverse().forEach(c)}else"start"===g[0].event?o.push(g[0].node):o.pop(),d(g.splice(0,1)[0])}return s+t(r.substr(i))}});const s="
",o=e=>!!e.kind;class l{constructor(e,n){this.buffer="",this.classPrefix=n.classPrefix,e.walk(this)}addText(e){this.buffer+=t(e)}openNode(e){if(!o(e))return;let n=e.kind;e.sublanguage||(n=`${this.classPrefix}${n}`),this.span(n)}closeNode(e){o(e)&&(this.buffer+=s)}value(){return this.buffer}span(e){this.buffer+=``}}class c{constructor(){this.rootNode={children:[]},this.stack=[this.rootNode]}get top(){return this.stack[this.stack.length-1]}get root(){return this.rootNode}add(e){this.top.children.push(e)}openNode(e){const n={kind:e,children:[]};this.add(n),this.stack.push(n)}closeNode(){if(this.stack.length>1)return this.stack.pop()}closeAllNodes(){for(;this.closeNode(););}toJSON(){return JSON.stringify(this.rootNode,null,4)}walk(e){return this.constructor._walk(e,this.rootNode)}static _walk(e,n){return"string"==typeof n?e.addText(n):n.children&&(e.openNode(n),n.children.forEach(n=>this._walk(e,n)),e.closeNode(n)),e}static _collapse(e){"string"!=typeof e&&e.children&&(e.children.every(e=>"string"==typeof e)?e.children=[e.children.join("")]:e.children.forEach(e=>{c._collapse(e)}))}}class u extends c{constructor(e){super(),this.options=e}addKeyword(e,n){""!==e&&(this.openNode(n),this.addText(e),this.closeNode())}addText(e){""!==e&&this.add(e)}addSublanguage(e,n){const t=e.root;t.kind=n,t.sublanguage=!0,this.add(t)}toHTML(){return new l(this,this.options).value()}finalize(){return!0}}function d(e){return e?"string"==typeof e?e:e.source:null}const g="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",h={begin:"\\\\[\\s\\S]",relevance:0},f={className:"string",begin:"'",end:"'",illegal:"\\n",contains:[h]},p={className:"string",begin:'"',end:'"',illegal:"\\n",contains:[h]},b={begin:/\b(a|an|the|are|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|they|like|more)\b/},m=function(e,n,t={}){var a=r({className:"comment",begin:e,end:n,contains:[]},t);return a.contains.push(b),a.contains.push({className:"doctag",begin:"(?:TODO|FIXME|NOTE|BUG|OPTIMIZE|HACK|XXX):",relevance:0}),a},v=m("//","$"),x=m("/\\*","\\*/"),E=m("#","$");var _=Object.freeze({__proto__:null,IDENT_RE:"[a-zA-Z]\\w*",UNDERSCORE_IDENT_RE:"[a-zA-Z_]\\w*",NUMBER_RE:"\\b\\d+(\\.\\d+)?",C_NUMBER_RE:g,BINARY_NUMBER_RE:"\\b(0b[01]+)",RE_STARTERS_RE:"!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~",SHEBANG:(e={})=>{const n=/^#![ ]*\//;return e.binary&&(e.begin=function(...e){return 
e.map(e=>d(e)).join("")}(n,/.*\b/,e.binary,/\b.*/)),r({className:"meta",begin:n,end:/$/,relevance:0,"on:begin":(e,n)=>{0!==e.index&&n.ignoreMatch()}},e)},BACKSLASH_ESCAPE:h,APOS_STRING_MODE:f,QUOTE_STRING_MODE:p,PHRASAL_WORDS_MODE:b,COMMENT:m,C_LINE_COMMENT_MODE:v,C_BLOCK_COMMENT_MODE:x,HASH_COMMENT_MODE:E,NUMBER_MODE:{className:"number",begin:"\\b\\d+(\\.\\d+)?",relevance:0},C_NUMBER_MODE:{className:"number",begin:g,relevance:0},BINARY_NUMBER_MODE:{className:"number",begin:"\\b(0b[01]+)",relevance:0},CSS_NUMBER_MODE:{className:"number",begin:"\\b\\d+(\\.\\d+)?(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?",relevance:0},REGEXP_MODE:{begin:/(?=\/[^/\n]*\/)/,contains:[{className:"regexp",begin:/\//,end:/\/[gimuy]*/,illegal:/\n/,contains:[h,{begin:/\[/,end:/\]/,relevance:0,contains:[h]}]}]},TITLE_MODE:{className:"title",begin:"[a-zA-Z]\\w*",relevance:0},UNDERSCORE_TITLE_MODE:{className:"title",begin:"[a-zA-Z_]\\w*",relevance:0},METHOD_GUARD:{begin:"\\.\\s*[a-zA-Z_]\\w*",relevance:0},END_SAME_AS_BEGIN:function(e){return Object.assign(e,{"on:begin":(e,n)=>{n.data._beginMatch=e[1]},"on:end":(e,n)=>{n.data._beginMatch!==e[1]&&n.ignoreMatch()}})}}),N="of and for in not or if then".split(" ");function w(e,n){return n?+n:function(e){return N.includes(e.toLowerCase())}(e)?0:1}const R=t,y=r,{nodeStream:k,mergeStreams:O}=i,M=Symbol("nomatch");return function(t){var a=[],i={},s={},o=[],l=!0,c=/(^(<[^>]+>|\t|)+|\n)/gm,g="Could not find the language '{}', did you forget to load/include a language module?";const h={disableAutodetect:!0,name:"Plain text",contains:[]};var f={noHighlightRe:/^(no-?highlight)$/i,languageDetectRe:/\blang(?:uage)?-([\w-]+)\b/i,classPrefix:"hljs-",tabReplace:null,useBR:!1,languages:null,__emitter:u};function p(e){return f.noHighlightRe.test(e)}function b(e,n,t,r){var a={code:n,language:e};S("before:highlight",a);var i=a.result?a.result:m(a.language,a.code,t,r);return i.code=a.code,S("after:highlight",i),i}function m(e,t,a,s){var o=t;function c(e,n){var t=E.case_insensitive?n[0].toLowerCase():n[0];return Object.prototype.hasOwnProperty.call(e.keywords,t)&&e.keywords[t]}function u(){null!=y.subLanguage?function(){if(""!==A){var e=null;if("string"==typeof y.subLanguage){if(!i[y.subLanguage])return void O.addText(A);e=m(y.subLanguage,A,!0,k[y.subLanguage]),k[y.subLanguage]=e.top}else e=v(A,y.subLanguage.length?y.subLanguage:null);y.relevance>0&&(I+=e.relevance),O.addSublanguage(e.emitter,e.language)}}():function(){if(!y.keywords)return void O.addText(A);let e=0;y.keywordPatternRe.lastIndex=0;let n=y.keywordPatternRe.exec(A),t="";for(;n;){t+=A.substring(e,n.index);const r=c(y,n);if(r){const[e,a]=r;O.addText(t),t="",I+=a,O.addKeyword(n[0],e)}else t+=n[0];e=y.keywordPatternRe.lastIndex,n=y.keywordPatternRe.exec(A)}t+=A.substr(e),O.addText(t)}(),A=""}function h(e){return e.className&&O.openNode(e.className),y=Object.create(e,{parent:{value:y}})}function p(e){return 0===y.matcher.regexIndex?(A+=e[0],1):(L=!0,0)}var b={};function x(t,r){var i=r&&r[0];if(A+=t,null==i)return u(),0;if("begin"===b.type&&"end"===r.type&&b.index===r.index&&""===i){if(A+=o.slice(r.index,r.index+1),!l){const n=Error("0 width match regex");throw n.languageName=e,n.badRule=b.rule,n}return 1}if(b=r,"begin"===r.type)return function(e){var t=e[0],r=e.rule;const a=new n(r),i=[r.__beforeBegin,r["on:begin"]];for(const n of i)if(n&&(n(e,a),a.ignore))return p(t);return 
r&&r.endSameAsBegin&&(r.endRe=RegExp(t.replace(/[-/\\^$*+?.()|[\]{}]/g,"\\$&"),"m")),r.skip?A+=t:(r.excludeBegin&&(A+=t),u(),r.returnBegin||r.excludeBegin||(A=t)),h(r),r.returnBegin?0:t.length}(r);if("illegal"===r.type&&!a){const e=Error('Illegal lexeme "'+i+'" for mode "'+(y.className||"")+'"');throw e.mode=y,e}if("end"===r.type){var s=function(e){var t=e[0],r=o.substr(e.index),a=function e(t,r,a){let i=function(e,n){var t=e&&e.exec(n);return t&&0===t.index}(t.endRe,a);if(i){if(t["on:end"]){const e=new n(t);t["on:end"](r,e),e.ignore&&(i=!1)}if(i){for(;t.endsParent&&t.parent;)t=t.parent;return t}}if(t.endsWithParent)return e(t.parent,r,a)}(y,e,r);if(!a)return M;var i=y;i.skip?A+=t:(i.returnEnd||i.excludeEnd||(A+=t),u(),i.excludeEnd&&(A=t));do{y.className&&O.closeNode(),y.skip||y.subLanguage||(I+=y.relevance),y=y.parent}while(y!==a.parent);return a.starts&&(a.endSameAsBegin&&(a.starts.endRe=a.endRe),h(a.starts)),i.returnEnd?0:t.length}(r);if(s!==M)return s}if("illegal"===r.type&&""===i)return 1;if(B>1e5&&B>3*r.index)throw Error("potential infinite loop, way more iterations than matches");return A+=i,i.length}var E=T(e);if(!E)throw console.error(g.replace("{}",e)),Error('Unknown language: "'+e+'"');var _=function(e){function n(n,t){return RegExp(d(n),"m"+(e.case_insensitive?"i":"")+(t?"g":""))}class t{constructor(){this.matchIndexes={},this.regexes=[],this.matchAt=1,this.position=0}addRule(e,n){n.position=this.position++,this.matchIndexes[this.matchAt]=n,this.regexes.push([n,e]),this.matchAt+=function(e){return RegExp(e.toString()+"|").exec("").length-1}(e)+1}compile(){0===this.regexes.length&&(this.exec=()=>null);const e=this.regexes.map(e=>e[1]);this.matcherRe=n(function(e,n="|"){for(var t=/\[(?:[^\\\]]|\\.)*\]|\(\??|\\([1-9][0-9]*)|\\./,r=0,a="",i=0;i0&&(a+=n),a+="(";o.length>0;){var l=t.exec(o);if(null==l){a+=o;break}a+=o.substring(0,l.index),o=o.substring(l.index+l[0].length),"\\"===l[0][0]&&l[1]?a+="\\"+(+l[1]+s):(a+=l[0],"("===l[0]&&r++)}a+=")"}return a}(e),!0),this.lastIndex=0}exec(e){this.matcherRe.lastIndex=this.lastIndex;const n=this.matcherRe.exec(e);if(!n)return null;const t=n.findIndex((e,n)=>n>0&&void 0!==e),r=this.matchIndexes[t];return n.splice(0,t),Object.assign(n,r)}}class a{constructor(){this.rules=[],this.multiRegexes=[],this.count=0,this.lastIndex=0,this.regexIndex=0}getMatcher(e){if(this.multiRegexes[e])return this.multiRegexes[e];const n=new t;return this.rules.slice(e).forEach(([e,t])=>n.addRule(e,t)),n.compile(),this.multiRegexes[e]=n,n}considerAll(){this.regexIndex=0}addRule(e,n){this.rules.push([e,n]),"begin"===n.type&&this.count++}exec(e){const n=this.getMatcher(this.regexIndex);n.lastIndex=this.lastIndex;const t=n.exec(e);return t&&(this.regexIndex+=t.position+1,this.regexIndex===this.count&&(this.regexIndex=0)),t}}function i(e,n){const t=e.input[e.index-1],r=e.input[e.index+e[0].length];"."!==t&&"."!==r||n.ignoreMatch()}if(e.contains&&e.contains.includes("self"))throw Error("ERR: contains `self` is not supported at the top-level of a language. 
See documentation.");return function t(s,o){const l=s;if(s.compiled)return l;s.compiled=!0,s.__beforeBegin=null,s.keywords=s.keywords||s.beginKeywords;let c=null;if("object"==typeof s.keywords&&(c=s.keywords.$pattern,delete s.keywords.$pattern),s.keywords&&(s.keywords=function(e,n){var t={};return"string"==typeof e?r("keyword",e):Object.keys(e).forEach((function(n){r(n,e[n])})),t;function r(e,r){n&&(r=r.toLowerCase()),r.split(" ").forEach((function(n){var r=n.split("|");t[r[0]]=[e,w(r[0],r[1])]}))}}(s.keywords,e.case_insensitive)),s.lexemes&&c)throw Error("ERR: Prefer `keywords.$pattern` to `mode.lexemes`, BOTH are not allowed. (see mode reference) ");return l.keywordPatternRe=n(s.lexemes||c||/\w+/,!0),o&&(s.beginKeywords&&(s.begin="\\b("+s.beginKeywords.split(" ").join("|")+")(?=\\b|\\s)",s.__beforeBegin=i),s.begin||(s.begin=/\B|\b/),l.beginRe=n(s.begin),s.endSameAsBegin&&(s.end=s.begin),s.end||s.endsWithParent||(s.end=/\B|\b/),s.end&&(l.endRe=n(s.end)),l.terminator_end=d(s.end)||"",s.endsWithParent&&o.terminator_end&&(l.terminator_end+=(s.end?"|":"")+o.terminator_end)),s.illegal&&(l.illegalRe=n(s.illegal)),void 0===s.relevance&&(s.relevance=1),s.contains||(s.contains=[]),s.contains=[].concat(...s.contains.map((function(e){return function(e){return e.variants&&!e.cached_variants&&(e.cached_variants=e.variants.map((function(n){return r(e,{variants:null},n)}))),e.cached_variants?e.cached_variants:function e(n){return!!n&&(n.endsWithParent||e(n.starts))}(e)?r(e,{starts:e.starts?r(e.starts):null}):Object.isFrozen(e)?r(e):e}("self"===e?s:e)}))),s.contains.forEach((function(e){t(e,l)})),s.starts&&t(s.starts,o),l.matcher=function(e){const n=new a;return e.contains.forEach(e=>n.addRule(e.begin,{rule:e,type:"begin"})),e.terminator_end&&n.addRule(e.terminator_end,{type:"end"}),e.illegal&&n.addRule(e.illegal,{type:"illegal"}),n}(l),l}(e)}(E),N="",y=s||_,k={},O=new f.__emitter(f);!function(){for(var e=[],n=y;n!==E;n=n.parent)n.className&&e.unshift(n.className);e.forEach(e=>O.openNode(e))}();var A="",I=0,S=0,B=0,L=!1;try{for(y.matcher.considerAll();;){B++,L?L=!1:(y.matcher.lastIndex=S,y.matcher.considerAll());const e=y.matcher.exec(o);if(!e)break;const n=x(o.substring(S,e.index),e);S=e.index+n}return x(o.substr(S)),O.closeAllNodes(),O.finalize(),N=O.toHTML(),{relevance:I,value:N,language:e,illegal:!1,emitter:O,top:y}}catch(n){if(n.message&&n.message.includes("Illegal"))return{illegal:!0,illegalBy:{msg:n.message,context:o.slice(S-100,S+100),mode:n.mode},sofar:N,relevance:0,value:R(o),emitter:O};if(l)return{illegal:!1,relevance:0,value:R(o),emitter:O,language:e,top:y,errorRaised:n};throw n}}function v(e,n){n=n||f.languages||Object.keys(i);var t=function(e){const n={relevance:0,emitter:new f.__emitter(f),value:R(e),illegal:!1,top:h};return n.emitter.addText(e),n}(e),r=t;return n.filter(T).filter(I).forEach((function(n){var a=m(n,e,!1);a.language=n,a.relevance>r.relevance&&(r=a),a.relevance>t.relevance&&(r=t,t=a)})),r.language&&(t.second_best=r),t}function x(e){return f.tabReplace||f.useBR?e.replace(c,e=>"\n"===e?f.useBR?"
":e:f.tabReplace?e.replace(/\t/g,f.tabReplace):e):e}function E(e){let n=null;const t=function(e){var n=e.className+" ";n+=e.parentNode?e.parentNode.className:"";const t=f.languageDetectRe.exec(n);if(t){var r=T(t[1]);return r||(console.warn(g.replace("{}",t[1])),console.warn("Falling back to no-highlight mode for this block.",e)),r?t[1]:"no-highlight"}return n.split(/\s+/).find(e=>p(e)||T(e))}(e);if(p(t))return;S("before:highlightBlock",{block:e,language:t}),f.useBR?(n=document.createElement("div")).innerHTML=e.innerHTML.replace(/\n/g,"").replace(//g,"\n"):n=e;const r=n.textContent,a=t?b(t,r,!0):v(r),i=k(n);if(i.length){const e=document.createElement("div");e.innerHTML=a.value,a.value=O(i,k(e),r)}a.value=x(a.value),S("after:highlightBlock",{block:e,result:a}),e.innerHTML=a.value,e.className=function(e,n,t){var r=n?s[n]:t,a=[e.trim()];return e.match(/\bhljs\b/)||a.push("hljs"),e.includes(r)||a.push(r),a.join(" ").trim()}(e.className,t,a.language),e.result={language:a.language,re:a.relevance,relavance:a.relevance},a.second_best&&(e.second_best={language:a.second_best.language,re:a.second_best.relevance,relavance:a.second_best.relevance})}const N=()=>{if(!N.called){N.called=!0;var e=document.querySelectorAll("pre code");a.forEach.call(e,E)}};function T(e){return e=(e||"").toLowerCase(),i[e]||i[s[e]]}function A(e,{languageName:n}){"string"==typeof e&&(e=[e]),e.forEach(e=>{s[e]=n})}function I(e){var n=T(e);return n&&!n.disableAutodetect}function S(e,n){var t=e;o.forEach((function(e){e[t]&&e[t](n)}))}Object.assign(t,{highlight:b,highlightAuto:v,fixMarkup:x,highlightBlock:E,configure:function(e){f=y(f,e)},initHighlighting:N,initHighlightingOnLoad:function(){window.addEventListener("DOMContentLoaded",N,!1)},registerLanguage:function(e,n){var r=null;try{r=n(t)}catch(n){if(console.error("Language definition for '{}' could not be registered.".replace("{}",e)),!l)throw n;console.error(n),r=h}r.name||(r.name=e),i[e]=r,r.rawDefinition=n.bind(null,t),r.aliases&&A(r.aliases,{languageName:e})},listLanguages:function(){return Object.keys(i)},getLanguage:T,registerAliases:A,requireLanguage:function(e){var n=T(e);if(n)return n;throw Error("The '{}' language is required, but not loaded.".replace("{}",e))},autoDetection:I,inherit:y,addPlugin:function(e){o.push(e)}}),t.debugMode=function(){l=!1},t.safeMode=function(){l=!0},t.versionString="10.1.1";for(const n in _)"object"==typeof _[n]&&e(_[n]);return Object.assign(t,_),t}({})}();"object"==typeof exports&&"undefined"!=typeof module&&(module.exports=hljs); -hljs.registerLanguage("apache",function(){"use strict";return function(e){var n={className:"number",begin:"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d{1,5})?"};return{name:"Apache config",aliases:["apacheconf"],case_insensitive:!0,contains:[e.HASH_COMMENT_MODE,{className:"section",begin:"",contains:[n,{className:"number",begin:":\\d{1,5}"},e.inherit(e.QUOTE_STRING_MODE,{relevance:0})]},{className:"attribute",begin:/\w+/,relevance:0,keywords:{nomarkup:"order deny allow setenv rewriterule rewriteengine rewritecond documentroot sethandler errordocument loadmodule options header listen serverroot servername"},starts:{end:/$/,relevance:0,keywords:{literal:"on off all deny allow"},contains:[{className:"meta",begin:"\\s\\[",end:"\\]$"},{className:"variable",begin:"[\\$%]\\{",end:"\\}",contains:["self",{className:"number",begin:"[\\$%]\\d+"}]},n,{className:"number",begin:"\\d+"},e.QUOTE_STRING_MODE]}}],illegal:/\S/}}}()); -hljs.registerLanguage("bash",function(){"use strict";return function(e){const 
s={};Object.assign(s,{className:"variable",variants:[{begin:/\$[\w\d#@][\w\d_]*/},{begin:/\$\{/,end:/\}/,contains:[{begin:/:-/,contains:[s]}]}]});const t={className:"subst",begin:/\$\(/,end:/\)/,contains:[e.BACKSLASH_ESCAPE]},n={className:"string",begin:/"/,end:/"/,contains:[e.BACKSLASH_ESCAPE,s,t]};t.contains.push(n);const a={begin:/\$\(\(/,end:/\)\)/,contains:[{begin:/\d+#[0-9a-f]+/,className:"number"},e.NUMBER_MODE,s]},i=e.SHEBANG({binary:"(fish|bash|zsh|sh|csh|ksh|tcsh|dash|scsh)",relevance:10}),c={className:"function",begin:/\w[\w\d_]*\s*\(\s*\)\s*\{/,returnBegin:!0,contains:[e.inherit(e.TITLE_MODE,{begin:/\w[\w\d_]*/})],relevance:0};return{name:"Bash",aliases:["sh","zsh"],keywords:{$pattern:/\b-?[a-z\._]+\b/,keyword:"if then else elif fi for while in do done case esac function",literal:"true false",built_in:"break cd continue eval exec exit export getopts hash pwd readonly return shift test times trap umask unset alias bind builtin caller command declare echo enable help let local logout mapfile printf read readarray source type typeset ulimit unalias set shopt autoload bg bindkey bye cap chdir clone comparguments compcall compctl compdescribe compfiles compgroups compquote comptags comptry compvalues dirs disable disown echotc echoti emulate fc fg float functions getcap getln history integer jobs kill limit log noglob popd print pushd pushln rehash sched setcap setopt stat suspend ttyctl unfunction unhash unlimit unsetopt vared wait whence where which zcompile zformat zftp zle zmodload zparseopts zprof zpty zregexparse zsocket zstyle ztcp",_:"-ne -eq -lt -gt -f -d -e -s -l -a"},contains:[i,e.SHEBANG(),c,a,e.HASH_COMMENT_MODE,n,{className:"",begin:/\\"/},{className:"string",begin:/'/,end:/'/},s]}}}()); -hljs.registerLanguage("c-like",function(){"use strict";return function(e){function t(e){return"(?:"+e+")?"}var n="(decltype\\(auto\\)|"+t("[a-zA-Z_]\\w*::")+"[a-zA-Z_]\\w*"+t("<.*?>")+")",r={className:"keyword",begin:"\\b[a-z\\d_]*_t\\b"},a={className:"string",variants:[{begin:'(u8?|U|L)?"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{begin:"(u8?|U|L)?'(\\\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4,8}|[0-7]{3}|\\S)|.)",end:"'",illegal:"."},e.END_SAME_AS_BEGIN({begin:/(?:u8?|U|L)?R"([^()\\ ]{0,16})\(/,end:/\)([^()\\ ]{0,16})"/})]},i={className:"number",variants:[{begin:"\\b(0b[01']+)"},{begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)(u|U|l|L|ul|UL|f|F|b|B)"},{begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"}],relevance:0},s={className:"meta",begin:/#\s*[a-z]+\b/,end:/$/,keywords:{"meta-keyword":"if else elif endif define undef warning error line pragma _Pragma ifdef ifndef include"},contains:[{begin:/\\\n/,relevance:0},e.inherit(a,{className:"meta-string"}),{className:"meta-string",begin:/<.*?>/,end:/$/,illegal:"\\n"},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},o={className:"title",begin:t("[a-zA-Z_]\\w*::")+e.IDENT_RE,relevance:0},c=t("[a-zA-Z_]\\w*::")+e.IDENT_RE+"\\s*\\(",l={keyword:"int float while private char char8_t char16_t char32_t catch import module export virtual operator sizeof dynamic_cast|10 typedef const_cast|10 const for static_cast|10 union namespace unsigned long volatile static protected bool template mutable if public friend do goto auto void enum else break extern using asm case typeid wchar_t short reinterpret_cast|10 default double register explicit signed typename try this switch continue inline delete alignas alignof constexpr consteval constinit decltype concept co_await co_return co_yield requires noexcept 
static_assert thread_local restrict final override atomic_bool atomic_char atomic_schar atomic_uchar atomic_short atomic_ushort atomic_int atomic_uint atomic_long atomic_ulong atomic_llong atomic_ullong new throw return and and_eq bitand bitor compl not not_eq or or_eq xor xor_eq",built_in:"std string wstring cin cout cerr clog stdin stdout stderr stringstream istringstream ostringstream auto_ptr deque list queue stack vector map set pair bitset multiset multimap unordered_set unordered_map unordered_multiset unordered_multimap priority_queue make_pair array shared_ptr abort terminate abs acos asin atan2 atan calloc ceil cosh cos exit exp fabs floor fmod fprintf fputs free frexp fscanf future isalnum isalpha iscntrl isdigit isgraph islower isprint ispunct isspace isupper isxdigit tolower toupper labs ldexp log10 log malloc realloc memchr memcmp memcpy memset modf pow printf putchar puts scanf sinh sin snprintf sprintf sqrt sscanf strcat strchr strcmp strcpy strcspn strlen strncat strncmp strncpy strpbrk strrchr strspn strstr tanh tan vfprintf vprintf vsprintf endl initializer_list unique_ptr _Bool complex _Complex imaginary _Imaginary",literal:"true false nullptr NULL"},d=[r,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,i,a],_={variants:[{begin:/=/,end:/;/},{begin:/\(/,end:/\)/},{beginKeywords:"new throw return else",end:/;/}],keywords:l,contains:d.concat([{begin:/\(/,end:/\)/,keywords:l,contains:d.concat(["self"]),relevance:0}]),relevance:0},u={className:"function",begin:"("+n+"[\\*&\\s]+)+"+c,returnBegin:!0,end:/[{;=]/,excludeEnd:!0,keywords:l,illegal:/[^\w\s\*&:<>]/,contains:[{begin:"decltype\\(auto\\)",keywords:l,relevance:0},{begin:c,returnBegin:!0,contains:[o],relevance:0},{className:"params",begin:/\(/,end:/\)/,keywords:l,relevance:0,contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,a,i,r,{begin:/\(/,end:/\)/,keywords:l,relevance:0,contains:["self",e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,a,i,r]}]},r,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,s]};return{aliases:["c","cc","h","c++","h++","hpp","hh","hxx","cxx"],keywords:l,disableAutodetect:!0,illegal:"",keywords:l,contains:["self",r]},{begin:e.IDENT_RE+"::",keywords:l},{className:"class",beginKeywords:"class struct",end:/[{;:]/,contains:[{begin://,contains:["self"]},e.TITLE_MODE]}]),exports:{preprocessor:s,strings:a,keywords:l}}}}()); -hljs.registerLanguage("c",function(){"use strict";return function(e){var n=e.getLanguage("c-like").rawDefinition();return n.name="C",n.aliases=["c","h"],n}}()); -hljs.registerLanguage("coffeescript",function(){"use strict";const 
e=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends"],n=["true","false","null","undefined","NaN","Infinity"],a=[].concat(["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape"],["arguments","this","super","console","window","document","localStorage","module","global"],["Intl","DataView","Number","Math","Date","String","RegExp","Object","Function","Boolean","Error","Symbol","Set","Map","WeakSet","WeakMap","Proxy","Reflect","JSON","Promise","Float64Array","Int16Array","Int32Array","Int8Array","Uint16Array","Uint32Array","Float32Array","Array","Uint8Array","Uint8ClampedArray","ArrayBuffer"],["EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError"]);return function(r){var t={keyword:e.concat(["then","unless","until","loop","by","when","and","or","is","isnt","not"]).filter((e=>n=>!e.includes(n))(["var","const","let","function","static"])).join(" "),literal:n.concat(["yes","no","on","off"]).join(" "),built_in:a.concat(["npm","print"]).join(" ")},i="[A-Za-z$_][0-9A-Za-z$_]*",s={className:"subst",begin:/#\{/,end:/}/,keywords:t},o=[r.BINARY_NUMBER_MODE,r.inherit(r.C_NUMBER_MODE,{starts:{end:"(\\s*/)?",relevance:0}}),{className:"string",variants:[{begin:/'''/,end:/'''/,contains:[r.BACKSLASH_ESCAPE]},{begin:/'/,end:/'/,contains:[r.BACKSLASH_ESCAPE]},{begin:/"""/,end:/"""/,contains:[r.BACKSLASH_ESCAPE,s]},{begin:/"/,end:/"/,contains:[r.BACKSLASH_ESCAPE,s]}]},{className:"regexp",variants:[{begin:"///",end:"///",contains:[s,r.HASH_COMMENT_MODE]},{begin:"//[gim]{0,3}(?=\\W)",relevance:0},{begin:/\/(?![ *]).*?(?![\\]).\/[gim]{0,3}(?=\W)/}]},{begin:"@"+i},{subLanguage:"javascript",excludeBegin:!0,excludeEnd:!0,variants:[{begin:"```",end:"```"},{begin:"`",end:"`"}]}];s.contains=o;var c=r.inherit(r.TITLE_MODE,{begin:i}),l={className:"params",begin:"\\([^\\(]",returnBegin:!0,contains:[{begin:/\(/,end:/\)/,keywords:t,contains:["self"].concat(o)}]};return{name:"CoffeeScript",aliases:["coffee","cson","iced"],keywords:t,illegal:/\/\*/,contains:o.concat([r.COMMENT("###","###"),r.HASH_COMMENT_MODE,{className:"function",begin:"^\\s*"+i+"\\s*=\\s*(\\(.*\\))?\\s*\\B[-=]>",end:"[-=]>",returnBegin:!0,contains:[c,l]},{begin:/[:\(,=]\s*/,relevance:0,contains:[{className:"function",begin:"(\\(.*\\))?\\s*\\B[-=]>",end:"[-=]>",returnBegin:!0,contains:[l]}]},{className:"class",beginKeywords:"class",end:"$",illegal:/[:="\[\]]/,contains:[{beginKeywords:"extends",endsWithParent:!0,illegal:/[:="\[\]]/,contains:[c]},c]},{begin:i+":",end:":",returnBegin:!0,returnEnd:!0,relevance:0}])}}}()); -hljs.registerLanguage("cpp",function(){"use strict";return function(e){var t=e.getLanguage("c-like").rawDefinition();return t.disableAutodetect=!1,t.name="C++",t.aliases=["cc","c++","h++","hpp","hh","hxx","cxx"],t}}()); -hljs.registerLanguage("csharp",function(){"use strict";return function(e){var n={keyword:"abstract as base bool break byte case catch char checked const continue decimal default delegate do double enum event explicit extern finally fixed float for foreach goto if implicit in int interface internal is lock long object operator out override params private protected public readonly ref 
sbyte sealed short sizeof stackalloc static string struct switch this try typeof uint ulong unchecked unsafe ushort using virtual void volatile while add alias ascending async await by descending dynamic equals from get global group into join let nameof on orderby partial remove select set value var when where yield",literal:"null false true"},i=e.inherit(e.TITLE_MODE,{begin:"[a-zA-Z](\\.?\\w)*"}),a={className:"number",variants:[{begin:"\\b(0b[01']+)"},{begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)(u|U|l|L|ul|UL|f|F|b|B)"},{begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"}],relevance:0},s={className:"string",begin:'@"',end:'"',contains:[{begin:'""'}]},t=e.inherit(s,{illegal:/\n/}),l={className:"subst",begin:"{",end:"}",keywords:n},r=e.inherit(l,{illegal:/\n/}),c={className:"string",begin:/\$"/,end:'"',illegal:/\n/,contains:[{begin:"{{"},{begin:"}}"},e.BACKSLASH_ESCAPE,r]},o={className:"string",begin:/\$@"/,end:'"',contains:[{begin:"{{"},{begin:"}}"},{begin:'""'},l]},g=e.inherit(o,{illegal:/\n/,contains:[{begin:"{{"},{begin:"}}"},{begin:'""'},r]});l.contains=[o,c,s,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,a,e.C_BLOCK_COMMENT_MODE],r.contains=[g,c,t,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,a,e.inherit(e.C_BLOCK_COMMENT_MODE,{illegal:/\n/})];var d={variants:[o,c,s,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]},E={begin:"<",end:">",contains:[{beginKeywords:"in out"},i]},_=e.IDENT_RE+"(<"+e.IDENT_RE+"(\\s*,\\s*"+e.IDENT_RE+")*>)?(\\[\\])?",b={begin:"@"+e.IDENT_RE,relevance:0};return{name:"C#",aliases:["cs","c#"],keywords:n,illegal:/::/,contains:[e.COMMENT("///","$",{returnBegin:!0,contains:[{className:"doctag",variants:[{begin:"///",relevance:0},{begin:"\x3c!--|--\x3e"},{begin:""}]}]}),e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{className:"meta",begin:"#",end:"$",keywords:{"meta-keyword":"if else elif endif define undef warning error line region endregion pragma checksum"}},d,a,{beginKeywords:"class interface",end:/[{;=]/,illegal:/[^\s:,]/,contains:[{beginKeywords:"where class"},i,E,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{beginKeywords:"namespace",end:/[{;=]/,illegal:/[^\s:]/,contains:[i,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{className:"meta",begin:"^\\s*\\[",excludeBegin:!0,end:"\\]",excludeEnd:!0,contains:[{className:"meta-string",begin:/"/,end:/"/}]},{beginKeywords:"new return throw await else",relevance:0},{className:"function",begin:"("+_+"\\s+)+"+e.IDENT_RE+"\\s*(\\<.+\\>)?\\s*\\(",returnBegin:!0,end:/\s*[{;=]/,excludeEnd:!0,keywords:n,contains:[{begin:e.IDENT_RE+"\\s*(\\<.+\\>)?\\s*\\(",returnBegin:!0,contains:[e.TITLE_MODE,E],relevance:0},{className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:n,relevance:0,contains:[d,a,e.C_BLOCK_COMMENT_MODE]},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},b]}}}()); -hljs.registerLanguage("css",function(){"use strict";return function(e){var 
n={begin:/(?:[A-Z\_\.\-]+|--[a-zA-Z0-9_-]+)\s*:/,returnBegin:!0,end:";",endsWithParent:!0,contains:[{className:"attribute",begin:/\S/,end:":",excludeEnd:!0,starts:{endsWithParent:!0,excludeEnd:!0,contains:[{begin:/[\w-]+\(/,returnBegin:!0,contains:[{className:"built_in",begin:/[\w-]+/},{begin:/\(/,end:/\)/,contains:[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,e.CSS_NUMBER_MODE]}]},e.CSS_NUMBER_MODE,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,e.C_BLOCK_COMMENT_MODE,{className:"number",begin:"#[0-9A-Fa-f]+"},{className:"meta",begin:"!important"}]}}]};return{name:"CSS",case_insensitive:!0,illegal:/[=\/|'\$]/,contains:[e.C_BLOCK_COMMENT_MODE,{className:"selector-id",begin:/#[A-Za-z0-9_-]+/},{className:"selector-class",begin:/\.[A-Za-z0-9_-]+/},{className:"selector-attr",begin:/\[/,end:/\]/,illegal:"$",contains:[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]},{className:"selector-pseudo",begin:/:(:)?[a-zA-Z0-9\_\-\+\(\)"'.]+/},{begin:"@(page|font-face)",lexemes:"@[a-z-]+",keywords:"@page @font-face"},{begin:"@",end:"[{;]",illegal:/:/,returnBegin:!0,contains:[{className:"keyword",begin:/@\-?\w[\w]*(\-\w+)*/},{begin:/\s/,endsWithParent:!0,excludeEnd:!0,relevance:0,keywords:"and or not only",contains:[{begin:/[a-z-]+:/,className:"attribute"},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,e.CSS_NUMBER_MODE]}]},{className:"selector-tag",begin:"[a-zA-Z-][a-zA-Z0-9_-]*",relevance:0},{begin:"{",end:"}",illegal:/\S/,contains:[e.C_BLOCK_COMMENT_MODE,n]}]}}}()); -hljs.registerLanguage("diff",function(){"use strict";return function(e){return{name:"Diff",aliases:["patch"],contains:[{className:"meta",relevance:10,variants:[{begin:/^@@ +\-\d+,\d+ +\+\d+,\d+ +@@$/},{begin:/^\*\*\* +\d+,\d+ +\*\*\*\*$/},{begin:/^\-\-\- +\d+,\d+ +\-\-\-\-$/}]},{className:"comment",variants:[{begin:/Index: /,end:/$/},{begin:/={3,}/,end:/$/},{begin:/^\-{3}/,end:/$/},{begin:/^\*{3} /,end:/$/},{begin:/^\+{3}/,end:/$/},{begin:/^\*{15}$/}]},{className:"addition",begin:"^\\+",end:"$"},{className:"deletion",begin:"^\\-",end:"$"},{className:"addition",begin:"^\\!",end:"$"}]}}}()); -hljs.registerLanguage("go",function(){"use strict";return function(e){var n={keyword:"break default func interface select case map struct chan else goto package switch const fallthrough if range type continue for import return var go defer bool byte complex64 complex128 float32 float64 int8 int16 int32 int64 string uint8 uint16 uint32 uint64 int uint uintptr rune",literal:"true false iota nil",built_in:"append cap close complex copy imag len make new panic print println real recover delete"};return{name:"Go",aliases:["golang"],keywords:n,illegal:"e(n)).join("")}return function(a){var s={className:"number",relevance:0,variants:[{begin:/([\+\-]+)?[\d]+_[\d_]+/},{begin:a.NUMBER_RE}]},i=a.COMMENT();i.variants=[{begin:/;/,end:/$/},{begin:/#/,end:/$/}];var t={className:"variable",variants:[{begin:/\$[\w\d"][\w\d_]*/},{begin:/\$\{(.*?)}/}]},r={className:"literal",begin:/\bon|off|true|false|yes|no\b/},l={className:"string",contains:[a.BACKSLASH_ESCAPE],variants:[{begin:"'''",end:"'''",relevance:10},{begin:'"""',end:'"""',relevance:10},{begin:'"',end:'"'},{begin:"'",end:"'"}]},c={begin:/\[/,end:/\]/,contains:[i,r,t,l,s,"self"],relevance:0},g="("+[/[A-Za-z0-9_-]+/,/"(\\"|[^"])*"/,/'[^']*'/].map(n=>e(n)).join("|")+")";return{name:"TOML, also INI",aliases:["toml"],case_insensitive:!0,illegal:/\S/,contains:[i,{className:"section",begin:/\[+/,end:/\]+/},{begin:n(g,"(\\s*\\.\\s*",g,")*",n("(?=",/\s*=\s*[^#\s]/,")")),className:"attr",starts:{end:/$/,contains:[i,c,r,t,l,s]}}]}}}()); 
-hljs.registerLanguage("java",function(){"use strict";function e(e){return e?"string"==typeof e?e:e.source:null}function n(e){return a("(",e,")?")}function a(...n){return n.map(n=>e(n)).join("")}function s(...n){return"("+n.map(n=>e(n)).join("|")+")"}return function(e){var t="false synchronized int abstract float private char boolean var static null if const for true while long strictfp finally protected import native final void enum else break transient catch instanceof byte super volatile case assert short package default double public try this switch continue throws protected public private module requires exports do",i={className:"meta",begin:"@[À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*",contains:[{begin:/\(/,end:/\)/,contains:["self"]}]},r=e=>a("[",e,"]+([",e,"_]*[",e,"]+)?"),c={className:"number",variants:[{begin:`\\b(0[bB]${r("01")})[lL]?`},{begin:`\\b(0${r("0-7")})[dDfFlL]?`},{begin:a(/\b0[xX]/,s(a(r("a-fA-F0-9"),/\./,r("a-fA-F0-9")),a(r("a-fA-F0-9"),/\.?/),a(/\./,r("a-fA-F0-9"))),/([pP][+-]?(\d+))?/,/[fFdDlL]?/)},{begin:a(/\b/,s(a(/\d*\./,r("\\d")),r("\\d")),/[eE][+-]?[\d]+[dDfF]?/)},{begin:a(/\b/,r(/\d/),n(/\.?/),n(r(/\d/)),/[dDfFlL]?/)}],relevance:0};return{name:"Java",aliases:["jsp"],keywords:t,illegal:/<\/|#/,contains:[e.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{begin:/\w+@/,relevance:0},{className:"doctag",begin:"@[A-Za-z]+"}]}),e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,{className:"class",beginKeywords:"class interface",end:/[{;=]/,excludeEnd:!0,keywords:"class interface",illegal:/[:"\[\]]/,contains:[{beginKeywords:"extends implements"},e.UNDERSCORE_TITLE_MODE]},{beginKeywords:"new throw return else",relevance:0},{className:"function",begin:"([À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*(<[À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*(\\s*,\\s*[À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*)*>)?\\s+)+"+e.UNDERSCORE_IDENT_RE+"\\s*\\(",returnBegin:!0,end:/[{;=]/,excludeEnd:!0,keywords:t,contains:[{begin:e.UNDERSCORE_IDENT_RE+"\\s*\\(",returnBegin:!0,relevance:0,contains:[e.UNDERSCORE_TITLE_MODE]},{className:"params",begin:/\(/,end:/\)/,keywords:t,relevance:0,contains:[i,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,e.C_NUMBER_MODE,e.C_BLOCK_COMMENT_MODE]},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},c,i]}}}()); -hljs.registerLanguage("javascript",function(){"use strict";const e=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends"],n=["true","false","null","undefined","NaN","Infinity"],a=[].concat(["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape"],["arguments","this","super","console","window","document","localStorage","module","global"],["Intl","DataView","Number","Math","Date","String","RegExp","Object","Function","Boolean","Error","Symbol","Set","Map","WeakSet","WeakMap","Proxy","Reflect","JSON","Promise","Float64Array","Int16Array","Int32Array","Int8Array","Uint16Array","Uint32Array","Float32Array","Array","Uint8Array","Uint8ClampedArray","ArrayBuffer"],["EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError"]);function s(e){return r("(?=",e,")")}function r(...e){return e.map(e=>(function(e){return e?"string"==typeof 
e?e:e.source:null})(e)).join("")}return function(t){var i="[A-Za-z$_][0-9A-Za-z$_]*",c={begin:/<[A-Za-z0-9\\._:-]+/,end:/\/[A-Za-z0-9\\._:-]+>|\/>/},o={$pattern:"[A-Za-z$_][0-9A-Za-z$_]*",keyword:e.join(" "),literal:n.join(" "),built_in:a.join(" ")},l={className:"number",variants:[{begin:"\\b(0[bB][01]+)n?"},{begin:"\\b(0[oO][0-7]+)n?"},{begin:t.C_NUMBER_RE+"n?"}],relevance:0},E={className:"subst",begin:"\\$\\{",end:"\\}",keywords:o,contains:[]},d={begin:"html`",end:"",starts:{end:"`",returnEnd:!1,contains:[t.BACKSLASH_ESCAPE,E],subLanguage:"xml"}},g={begin:"css`",end:"",starts:{end:"`",returnEnd:!1,contains:[t.BACKSLASH_ESCAPE,E],subLanguage:"css"}},u={className:"string",begin:"`",end:"`",contains:[t.BACKSLASH_ESCAPE,E]};E.contains=[t.APOS_STRING_MODE,t.QUOTE_STRING_MODE,d,g,u,l,t.REGEXP_MODE];var b=E.contains.concat([{begin:/\(/,end:/\)/,contains:["self"].concat(E.contains,[t.C_BLOCK_COMMENT_MODE,t.C_LINE_COMMENT_MODE])},t.C_BLOCK_COMMENT_MODE,t.C_LINE_COMMENT_MODE]),_={className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,contains:b};return{name:"JavaScript",aliases:["js","jsx","mjs","cjs"],keywords:o,contains:[t.SHEBANG({binary:"node",relevance:5}),{className:"meta",relevance:10,begin:/^\s*['"]use (strict|asm)['"]/},t.APOS_STRING_MODE,t.QUOTE_STRING_MODE,d,g,u,t.C_LINE_COMMENT_MODE,t.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{className:"doctag",begin:"@[A-Za-z]+",contains:[{className:"type",begin:"\\{",end:"\\}",relevance:0},{className:"variable",begin:i+"(?=\\s*(-)|$)",endsParent:!0,relevance:0},{begin:/(?=[^\n])\s/,relevance:0}]}]}),t.C_BLOCK_COMMENT_MODE,l,{begin:r(/[{,\n]\s*/,s(r(/(((\/\/.*)|(\/\*(.|\n)*\*\/))\s*)*/,i+"\\s*:"))),relevance:0,contains:[{className:"attr",begin:i+s("\\s*:"),relevance:0}]},{begin:"("+t.RE_STARTERS_RE+"|\\b(case|return|throw)\\b)\\s*",keywords:"return throw case",contains:[t.C_LINE_COMMENT_MODE,t.C_BLOCK_COMMENT_MODE,t.REGEXP_MODE,{className:"function",begin:"(\\([^(]*(\\([^(]*(\\([^(]*\\))?\\))?\\)|"+t.UNDERSCORE_IDENT_RE+")\\s*=>",returnBegin:!0,end:"\\s*=>",contains:[{className:"params",variants:[{begin:t.UNDERSCORE_IDENT_RE},{className:null,begin:/\(\s*\)/,skip:!0},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:o,contains:b}]}]},{begin:/,/,relevance:0},{className:"",begin:/\s/,end:/\s*/,skip:!0},{variants:[{begin:"<>",end:""},{begin:c.begin,end:c.end}],subLanguage:"xml",contains:[{begin:c.begin,end:c.end,skip:!0,contains:["self"]}]}],relevance:0},{className:"function",beginKeywords:"function",end:/\{/,excludeEnd:!0,contains:[t.inherit(t.TITLE_MODE,{begin:i}),_],illegal:/\[|%/},{begin:/\$[(.]/},t.METHOD_GUARD,{className:"class",beginKeywords:"class",end:/[{;=]/,excludeEnd:!0,illegal:/[:"\[\]]/,contains:[{beginKeywords:"extends"},t.UNDERSCORE_TITLE_MODE]},{beginKeywords:"constructor",end:/\{/,excludeEnd:!0},{begin:"(get|set)\\s+(?="+i+"\\()",end:/{/,keywords:"get set",contains:[t.inherit(t.TITLE_MODE,{begin:i}),{begin:/\(\)/},_]}],illegal:/#(?!!)/}}}()); -hljs.registerLanguage("json",function(){"use strict";return function(n){var e={literal:"true false null"},i=[n.C_LINE_COMMENT_MODE,n.C_BLOCK_COMMENT_MODE],t=[n.QUOTE_STRING_MODE,n.C_NUMBER_MODE],a={end:",",endsWithParent:!0,excludeEnd:!0,contains:t,keywords:e},l={begin:"{",end:"}",contains:[{className:"attr",begin:/"/,end:/"/,contains:[n.BACKSLASH_ESCAPE],illegal:"\\n"},n.inherit(a,{begin:/:/})].concat(i),illegal:"\\S"},s={begin:"\\[",end:"\\]",contains:[n.inherit(a)],illegal:"\\S"};return 
t.push(l,s),i.forEach((function(n){t.push(n)})),{name:"JSON",contains:t,keywords:e,illegal:"\\S"}}}()); -hljs.registerLanguage("kotlin",function(){"use strict";return function(e){var n={keyword:"abstract as val var vararg get set class object open private protected public noinline crossinline dynamic final enum if else do while for when throw try catch finally import package is in fun override companion reified inline lateinit init interface annotation data sealed internal infix operator out by constructor super tailrec where const inner suspend typealias external expect actual trait volatile transient native default",built_in:"Byte Short Char Int Long Boolean Float Double Void Unit Nothing",literal:"true false null"},a={className:"symbol",begin:e.UNDERSCORE_IDENT_RE+"@"},i={className:"subst",begin:"\\${",end:"}",contains:[e.C_NUMBER_MODE]},s={className:"variable",begin:"\\$"+e.UNDERSCORE_IDENT_RE},t={className:"string",variants:[{begin:'"""',end:'"""(?=[^"])',contains:[s,i]},{begin:"'",end:"'",illegal:/\n/,contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"',illegal:/\n/,contains:[e.BACKSLASH_ESCAPE,s,i]}]};i.contains.push(t);var r={className:"meta",begin:"@(?:file|property|field|get|set|receiver|param|setparam|delegate)\\s*:(?:\\s*"+e.UNDERSCORE_IDENT_RE+")?"},l={className:"meta",begin:"@"+e.UNDERSCORE_IDENT_RE,contains:[{begin:/\(/,end:/\)/,contains:[e.inherit(t,{className:"meta-string"})]}]},c=e.COMMENT("/\\*","\\*/",{contains:[e.C_BLOCK_COMMENT_MODE]}),o={variants:[{className:"type",begin:e.UNDERSCORE_IDENT_RE},{begin:/\(/,end:/\)/,contains:[]}]},d=o;return d.variants[1].contains=[o],o.variants[1].contains=[d],{name:"Kotlin",aliases:["kt"],keywords:n,contains:[e.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{className:"doctag",begin:"@[A-Za-z]+"}]}),e.C_LINE_COMMENT_MODE,c,{className:"keyword",begin:/\b(break|continue|return|this)\b/,starts:{contains:[{className:"symbol",begin:/@\w+/}]}},a,r,l,{className:"function",beginKeywords:"fun",end:"[(]|$",returnBegin:!0,excludeEnd:!0,keywords:n,illegal:/fun\s+(<.*>)?[^\s\(]+(\s+[^\s\(]+)\s*=/,relevance:5,contains:[{begin:e.UNDERSCORE_IDENT_RE+"\\s*\\(",returnBegin:!0,relevance:0,contains:[e.UNDERSCORE_TITLE_MODE]},{className:"type",begin://,keywords:"reified",relevance:0},{className:"params",begin:/\(/,end:/\)/,endsParent:!0,keywords:n,relevance:0,contains:[{begin:/:/,end:/[=,\/]/,endsWithParent:!0,contains:[o,e.C_LINE_COMMENT_MODE,c],relevance:0},e.C_LINE_COMMENT_MODE,c,r,l,t,e.C_NUMBER_MODE]},c]},{className:"class",beginKeywords:"class interface trait",end:/[:\{(]|$/,excludeEnd:!0,illegal:"extends implements",contains:[{beginKeywords:"public protected internal private constructor"},e.UNDERSCORE_TITLE_MODE,{className:"type",begin://,excludeBegin:!0,excludeEnd:!0,relevance:0},{className:"type",begin:/[,:]\s*/,end:/[<\(,]|$/,excludeBegin:!0,returnEnd:!0},r,l]},t,{className:"meta",begin:"^#!/usr/bin/env",end:"$",illegal:"\n"},{className:"number",begin:"\\b(0[bB]([01]+[01_]+[01]+|[01]+)|0[xX]([a-fA-F0-9]+[a-fA-F0-9_]+[a-fA-F0-9]+|[a-fA-F0-9]+)|(([\\d]+[\\d_]+[\\d]+|[\\d]+)(\\.([\\d]+[\\d_]+[\\d]+|[\\d]+))?|\\.([\\d]+[\\d_]+[\\d]+|[\\d]+))([eE][-+]?\\d+)?)[lLfF]?",relevance:0}]}}}()); -hljs.registerLanguage("less",function(){"use strict";return function(e){var 
n="([\\w-]+|@{[\\w-]+})",a=[],s=[],t=function(e){return{className:"string",begin:"~?"+e+".*?"+e}},r=function(e,n,a){return{className:e,begin:n,relevance:a}},i={begin:"\\(",end:"\\)",contains:s,relevance:0};s.push(e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,t("'"),t('"'),e.CSS_NUMBER_MODE,{begin:"(url|data-uri)\\(",starts:{className:"string",end:"[\\)\\n]",excludeEnd:!0}},r("number","#[0-9A-Fa-f]+\\b"),i,r("variable","@@?[\\w-]+",10),r("variable","@{[\\w-]+}"),r("built_in","~?`[^`]*?`"),{className:"attribute",begin:"[\\w-]+\\s*:",end:":",returnBegin:!0,excludeEnd:!0},{className:"meta",begin:"!important"});var c=s.concat({begin:"{",end:"}",contains:a}),l={beginKeywords:"when",endsWithParent:!0,contains:[{beginKeywords:"and not"}].concat(s)},o={begin:n+"\\s*:",returnBegin:!0,end:"[;}]",relevance:0,contains:[{className:"attribute",begin:n,end:":",excludeEnd:!0,starts:{endsWithParent:!0,illegal:"[<=$]",relevance:0,contains:s}}]},g={className:"keyword",begin:"@(import|media|charset|font-face|(-[a-z]+-)?keyframes|supports|document|namespace|page|viewport|host)\\b",starts:{end:"[;{}]",returnEnd:!0,contains:s,relevance:0}},d={className:"variable",variants:[{begin:"@[\\w-]+\\s*:",relevance:15},{begin:"@[\\w-]+"}],starts:{end:"[;}]",returnEnd:!0,contains:c}},b={variants:[{begin:"[\\.#:&\\[>]",end:"[;{}]"},{begin:n,end:"{"}],returnBegin:!0,returnEnd:!0,illegal:"[<='$\"]",relevance:0,contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,l,r("keyword","all\\b"),r("variable","@{[\\w-]+}"),r("selector-tag",n+"%?",0),r("selector-id","#"+n),r("selector-class","\\."+n,0),r("selector-tag","&",0),{className:"selector-attr",begin:"\\[",end:"\\]"},{className:"selector-pseudo",begin:/:(:)?[a-zA-Z0-9\_\-\+\(\)"'.]+/},{begin:"\\(",end:"\\)",contains:c},{begin:"!important"}]};return a.push(e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,g,d,o,b),{name:"Less",case_insensitive:!0,illegal:"[=>'/<($\"]",contains:a}}}()); -hljs.registerLanguage("lua",function(){"use strict";return function(e){var t={begin:"\\[=*\\[",end:"\\]=*\\]",contains:["self"]},a=[e.COMMENT("--(?!\\[=*\\[)","$"),e.COMMENT("--\\[=*\\[","\\]=*\\]",{contains:[t],relevance:10})];return{name:"Lua",keywords:{$pattern:e.UNDERSCORE_IDENT_RE,literal:"true false nil",keyword:"and break do else elseif end for goto if in local not or repeat return then until while",built_in:"_G _ENV _VERSION __index __newindex __mode __call __metatable __tostring __len __gc __add __sub __mul __div __mod __pow __concat __unm __eq __lt __le assert collectgarbage dofile error getfenv getmetatable ipairs load loadfile loadstring module next pairs pcall print rawequal rawget rawset require select setfenv setmetatable tonumber tostring type unpack xpcall arg self coroutine resume yield status wrap create running debug getupvalue debug sethook getmetatable gethook setmetatable setlocal traceback setfenv getinfo setupvalue getlocal getregistry getfenv io lines write close flush open output type read stderr stdin input stdout popen tmpfile math log max acos huge ldexp pi cos tanh pow deg tan cosh sinh random randomseed frexp ceil floor rad abs sqrt modf asin min mod fmod log10 atan2 exp sin atan os exit setlocale date getenv difftime remove time clock tmpname rename execute package preload loadlib loaded loaders cpath config path seeall string sub upper len gfind rep find match char dump gmatch reverse byte format gsub lower table setn insert getn foreachi maxn foreach concat sort 
remove"},contains:a.concat([{className:"function",beginKeywords:"function",end:"\\)",contains:[e.inherit(e.TITLE_MODE,{begin:"([_a-zA-Z]\\w*\\.)*([_a-zA-Z]\\w*:)?[_a-zA-Z]\\w*"}),{className:"params",begin:"\\(",endsWithParent:!0,contains:a}].concat(a)},e.C_NUMBER_MODE,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,{className:"string",begin:"\\[=*\\[",end:"\\]=*\\]",contains:[t],relevance:5}])}}}()); -hljs.registerLanguage("makefile",function(){"use strict";return function(e){var i={className:"variable",variants:[{begin:"\\$\\("+e.UNDERSCORE_IDENT_RE+"\\)",contains:[e.BACKSLASH_ESCAPE]},{begin:/\$[@%`]+/}]}]}]};return{name:"HTML, XML",aliases:["html","xhtml","rss","atom","xjb","xsd","xsl","plist","wsf","svg"],case_insensitive:!0,contains:[{className:"meta",begin:"",relevance:10,contains:[a,i,t,s,{begin:"\\[",end:"\\]",contains:[{className:"meta",begin:"",contains:[a,s,i,t]}]}]},e.COMMENT("\x3c!--","--\x3e",{relevance:10}),{begin:"<\\!\\[CDATA\\[",end:"\\]\\]>",relevance:10},n,{className:"meta",begin:/<\?xml/,end:/\?>/,relevance:10},{className:"tag",begin:")",end:">",keywords:{name:"style"},contains:[c],starts:{end:"",returnEnd:!0,subLanguage:["css","xml"]}},{className:"tag",begin:")",end:">",keywords:{name:"script"},contains:[c],starts:{end:"<\/script>",returnEnd:!0,subLanguage:["javascript","handlebars","xml"]}},{className:"tag",begin:"",contains:[{className:"name",begin:/[^\/><\s]+/,relevance:0},c]}]}}}()); -hljs.registerLanguage("markdown",function(){"use strict";return function(n){const e={begin:"<",end:">",subLanguage:"xml",relevance:0},a={begin:"\\[.+?\\][\\(\\[].*?[\\)\\]]",returnBegin:!0,contains:[{className:"string",begin:"\\[",end:"\\]",excludeBegin:!0,returnEnd:!0,relevance:0},{className:"link",begin:"\\]\\(",end:"\\)",excludeBegin:!0,excludeEnd:!0},{className:"symbol",begin:"\\]\\[",end:"\\]",excludeBegin:!0,excludeEnd:!0}],relevance:10},i={className:"strong",contains:[],variants:[{begin:/_{2}/,end:/_{2}/},{begin:/\*{2}/,end:/\*{2}/}]},s={className:"emphasis",contains:[],variants:[{begin:/\*(?!\*)/,end:/\*/},{begin:/_(?!_)/,end:/_/,relevance:0}]};i.contains.push(s),s.contains.push(i);var c=[e,a];return i.contains=i.contains.concat(c),s.contains=s.contains.concat(c),{name:"Markdown",aliases:["md","mkdown","mkd"],contains:[{className:"section",variants:[{begin:"^#{1,6}",end:"$",contains:c=c.concat(i,s)},{begin:"(?=^.+?\\n[=-]{2,}$)",contains:[{begin:"^[=-]*$"},{begin:"^",end:"\\n",contains:c}]}]},e,{className:"bullet",begin:"^[ \t]*([*+-]|(\\d+\\.))(?=\\s+)",end:"\\s+",excludeEnd:!0},i,s,{className:"quote",begin:"^>\\s+",contains:c,end:"$"},{className:"code",variants:[{begin:"(`{3,})(.|\\n)*?\\1`*[ ]*"},{begin:"(~{3,})(.|\\n)*?\\1~*[ ]*"},{begin:"```",end:"```+[ ]*$"},{begin:"~~~",end:"~~~+[ ]*$"},{begin:"`.+?`"},{begin:"(?=^( {4}|\\t))",contains:[{begin:"^( {4}|\\t)",end:"(\\n)$"}],relevance:0}]},{begin:"^[-\\*]{3,}",end:"$"},a,{begin:/^\[[^\n]+\]:/,returnBegin:!0,contains:[{className:"symbol",begin:/\[/,end:/\]/,excludeBegin:!0,excludeEnd:!0},{className:"link",begin:/:\s*/,end:/$/,excludeBegin:!0}]}]}}}()); -hljs.registerLanguage("nginx",function(){"use strict";return function(e){var n={className:"variable",variants:[{begin:/\$\d+/},{begin:/\$\{/,end:/}/},{begin:"[\\$\\@]"+e.UNDERSCORE_IDENT_RE}]},a={endsWithParent:!0,keywords:{$pattern:"[a-z/_]+",literal:"on off yes no true false none blocked debug info notice warn error crit select break last permanent redirect kqueue rtsig epoll poll 
/dev/poll"},relevance:0,illegal:"=>",contains:[e.HASH_COMMENT_MODE,{className:"string",contains:[e.BACKSLASH_ESCAPE,n],variants:[{begin:/"/,end:/"/},{begin:/'/,end:/'/}]},{begin:"([a-z]+):/",end:"\\s",endsWithParent:!0,excludeEnd:!0,contains:[n]},{className:"regexp",contains:[e.BACKSLASH_ESCAPE,n],variants:[{begin:"\\s\\^",end:"\\s|{|;",returnEnd:!0},{begin:"~\\*?\\s+",end:"\\s|{|;",returnEnd:!0},{begin:"\\*(\\.[a-z\\-]+)+"},{begin:"([a-z\\-]+\\.)+\\*"}]},{className:"number",begin:"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d{1,5})?\\b"},{className:"number",begin:"\\b\\d+[kKmMgGdshdwy]*\\b",relevance:0},n]};return{name:"Nginx config",aliases:["nginxconf"],contains:[e.HASH_COMMENT_MODE,{begin:e.UNDERSCORE_IDENT_RE+"\\s+{",returnBegin:!0,end:"{",contains:[{className:"section",begin:e.UNDERSCORE_IDENT_RE}],relevance:0},{begin:e.UNDERSCORE_IDENT_RE+"\\s",end:";|{",returnBegin:!0,contains:[{className:"attribute",begin:e.UNDERSCORE_IDENT_RE,starts:a}],relevance:0}],illegal:"[^\\s\\}]"}}}()); -hljs.registerLanguage("objectivec",function(){"use strict";return function(e){var n=/[a-zA-Z@][a-zA-Z0-9_]*/,_={$pattern:n,keyword:"@interface @class @protocol @implementation"};return{name:"Objective-C",aliases:["mm","objc","obj-c"],keywords:{$pattern:n,keyword:"int float while char export sizeof typedef const struct for union unsigned long volatile static bool mutable if do return goto void enum else break extern asm case short default double register explicit signed typename this switch continue wchar_t inline readonly assign readwrite self @synchronized id typeof nonatomic super unichar IBOutlet IBAction strong weak copy in out inout bycopy byref oneway __strong __weak __block __autoreleasing @private @protected @public @try @property @end @throw @catch @finally @autoreleasepool @synthesize @dynamic @selector @optional @required @encode @package @import @defs @compatibility_alias __bridge __bridge_transfer __bridge_retained __bridge_retain __covariant __contravariant __kindof _Nonnull _Nullable _Null_unspecified __FUNCTION__ __PRETTY_FUNCTION__ __attribute__ getter setter retain unsafe_unretained nonnull nullable null_unspecified null_resettable class instancetype NS_DESIGNATED_INITIALIZER NS_UNAVAILABLE NS_REQUIRES_SUPER NS_RETURNS_INNER_POINTER NS_INLINE NS_AVAILABLE NS_DEPRECATED NS_ENUM NS_OPTIONS NS_SWIFT_UNAVAILABLE NS_ASSUME_NONNULL_BEGIN NS_ASSUME_NONNULL_END NS_REFINED_FOR_SWIFT NS_SWIFT_NAME NS_SWIFT_NOTHROW NS_DURING NS_HANDLER NS_ENDHANDLER NS_VALUERETURN NS_VOIDRETURN",literal:"false true FALSE TRUE nil YES NO NULL",built_in:"BOOL dispatch_once_t dispatch_queue_t dispatch_sync dispatch_async dispatch_once"},illegal:"/,end:/$/,illegal:"\\n"},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{className:"class",begin:"("+_.keyword.split(" ").join("|")+")\\b",end:"({|$)",excludeEnd:!0,keywords:_,contains:[e.UNDERSCORE_TITLE_MODE]},{begin:"\\."+e.UNDERSCORE_IDENT_RE,relevance:0}]}}}()); -hljs.registerLanguage("perl",function(){"use strict";return function(e){var n={$pattern:/[\w.]+/,keyword:"getpwent getservent quotemeta msgrcv scalar kill dbmclose undef lc ma syswrite tr send umask sysopen shmwrite vec qx utime local oct semctl localtime readpipe do return format read sprintf dbmopen pop getpgrp not getpwnam rewinddir qq fileno qw endprotoent wait sethostent bless s|0 opendir continue each sleep endgrent shutdown dump chomp connect getsockname die socketpair close flock exists index shmget sub for endpwent redo lstat msgctl setpgrp abs exit select print ref gethostbyaddr unshift fcntl syscall 
goto getnetbyaddr join gmtime symlink semget splice x|0 getpeername recv log setsockopt cos last reverse gethostbyname getgrnam study formline endhostent times chop length gethostent getnetent pack getprotoent getservbyname rand mkdir pos chmod y|0 substr endnetent printf next open msgsnd readdir use unlink getsockopt getpriority rindex wantarray hex system getservbyport endservent int chr untie rmdir prototype tell listen fork shmread ucfirst setprotoent else sysseek link getgrgid shmctl waitpid unpack getnetbyname reset chdir grep split require caller lcfirst until warn while values shift telldir getpwuid my getprotobynumber delete and sort uc defined srand accept package seekdir getprotobyname semop our rename seek if q|0 chroot sysread setpwent no crypt getc chown sqrt write setnetent setpriority foreach tie sin msgget map stat getlogin unless elsif truncate exec keys glob tied closedir ioctl socket readlink eval xor readline binmode setservent eof ord bind alarm pipe atan2 getgrent exp time push setgrent gt lt or ne m|0 break given say state when"},t={className:"subst",begin:"[$@]\\{",end:"\\}",keywords:n},s={begin:"->{",end:"}"},r={variants:[{begin:/\$\d/},{begin:/[\$%@](\^\w\b|#\w+(::\w+)*|{\w+}|\w+(::\w*)*)/},{begin:/[\$%@][^\s\w{]/,relevance:0}]},i=[e.BACKSLASH_ESCAPE,t,r],a=[r,e.HASH_COMMENT_MODE,e.COMMENT("^\\=\\w","\\=cut",{endsWithParent:!0}),s,{className:"string",contains:i,variants:[{begin:"q[qwxr]?\\s*\\(",end:"\\)",relevance:5},{begin:"q[qwxr]?\\s*\\[",end:"\\]",relevance:5},{begin:"q[qwxr]?\\s*\\{",end:"\\}",relevance:5},{begin:"q[qwxr]?\\s*\\|",end:"\\|",relevance:5},{begin:"q[qwxr]?\\s*\\<",end:"\\>",relevance:5},{begin:"qw\\s+q",end:"q",relevance:5},{begin:"'",end:"'",contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"'},{begin:"`",end:"`",contains:[e.BACKSLASH_ESCAPE]},{begin:"{\\w+}",contains:[],relevance:0},{begin:"-?\\w+\\s*\\=\\>",contains:[],relevance:0}]},{className:"number",begin:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",relevance:0},{begin:"(\\/\\/|"+e.RE_STARTERS_RE+"|\\b(split|return|print|reverse|grep)\\b)\\s*",keywords:"split return print reverse grep",relevance:0,contains:[e.HASH_COMMENT_MODE,{className:"regexp",begin:"(s|tr|y)/(\\\\.|[^/])*/(\\\\.|[^/])*/[a-z]*",relevance:10},{className:"regexp",begin:"(m|qr)?/",end:"/[a-z]*",contains:[e.BACKSLASH_ESCAPE],relevance:0}]},{className:"function",beginKeywords:"sub",end:"(\\s*\\(.*?\\))?[;{]",excludeEnd:!0,relevance:5,contains:[e.TITLE_MODE]},{begin:"-\\w\\b",relevance:0},{begin:"^__DATA__$",end:"^__END__$",subLanguage:"mojolicious",contains:[{begin:"^@@.*",end:"$",className:"comment"}]}];return t.contains=a,s.contains=a,{name:"Perl",aliases:["pl","pm"],keywords:n,contains:a}}}()); -hljs.registerLanguage("php",function(){"use strict";return function(e){var r={begin:"\\$+[a-zA-Z_-ÿ][a-zA-Z0-9_-ÿ]*"},t={className:"meta",variants:[{begin:/<\?php/,relevance:10},{begin:/<\?[=]?/},{begin:/\?>/}]},a={className:"string",contains:[e.BACKSLASH_ESCAPE,t],variants:[{begin:'b"',end:'"'},{begin:"b'",end:"'"},e.inherit(e.APOS_STRING_MODE,{illegal:null}),e.inherit(e.QUOTE_STRING_MODE,{illegal:null})]},n={variants:[e.BINARY_NUMBER_MODE,e.C_NUMBER_MODE]},i={keyword:"__CLASS__ __DIR__ __FILE__ __FUNCTION__ __LINE__ __METHOD__ __NAMESPACE__ __TRAIT__ die echo exit include include_once print require require_once array abstract and as binary bool boolean break callable case catch class clone const continue declare default do double else elseif empty enddeclare endfor endforeach endif endswitch 
endwhile eval extends final finally float for foreach from global goto if implements instanceof insteadof int integer interface isset iterable list new object or private protected public real return string switch throw trait try unset use var void while xor yield",literal:"false null true",built_in:"Error|0 AppendIterator ArgumentCountError ArithmeticError ArrayIterator ArrayObject AssertionError BadFunctionCallException BadMethodCallException CachingIterator CallbackFilterIterator CompileError Countable DirectoryIterator DivisionByZeroError DomainException EmptyIterator ErrorException Exception FilesystemIterator FilterIterator GlobIterator InfiniteIterator InvalidArgumentException IteratorIterator LengthException LimitIterator LogicException MultipleIterator NoRewindIterator OutOfBoundsException OutOfRangeException OuterIterator OverflowException ParentIterator ParseError RangeException RecursiveArrayIterator RecursiveCachingIterator RecursiveCallbackFilterIterator RecursiveDirectoryIterator RecursiveFilterIterator RecursiveIterator RecursiveIteratorIterator RecursiveRegexIterator RecursiveTreeIterator RegexIterator RuntimeException SeekableIterator SplDoublyLinkedList SplFileInfo SplFileObject SplFixedArray SplHeap SplMaxHeap SplMinHeap SplObjectStorage SplObserver SplObserver SplPriorityQueue SplQueue SplStack SplSubject SplSubject SplTempFileObject TypeError UnderflowException UnexpectedValueException ArrayAccess Closure Generator Iterator IteratorAggregate Serializable Throwable Traversable WeakReference Directory __PHP_Incomplete_Class parent php_user_filter self static stdClass"};return{aliases:["php","php3","php4","php5","php6","php7"],case_insensitive:!0,keywords:i,contains:[e.HASH_COMMENT_MODE,e.COMMENT("//","$",{contains:[t]}),e.COMMENT("/\\*","\\*/",{contains:[{className:"doctag",begin:"@[A-Za-z]+"}]}),e.COMMENT("__halt_compiler.+?;",!1,{endsWithParent:!0,keywords:"__halt_compiler"}),{className:"string",begin:/<<<['"]?\w+['"]?$/,end:/^\w+;?$/,contains:[e.BACKSLASH_ESCAPE,{className:"subst",variants:[{begin:/\$\w+/},{begin:/\{\$/,end:/\}/}]}]},t,{className:"keyword",begin:/\$this\b/},r,{begin:/(::|->)+[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*/},{className:"function",beginKeywords:"fn function",end:/[;{]/,excludeEnd:!0,illegal:"[$%\\[]",contains:[e.UNDERSCORE_TITLE_MODE,{className:"params",begin:"\\(",end:"\\)",excludeBegin:!0,excludeEnd:!0,keywords:i,contains:["self",r,e.C_BLOCK_COMMENT_MODE,a,n]}]},{className:"class",beginKeywords:"class interface",end:"{",excludeEnd:!0,illegal:/[:\(\$"]/,contains:[{beginKeywords:"extends implements"},e.UNDERSCORE_TITLE_MODE]},{beginKeywords:"namespace",end:";",illegal:/[\.']/,contains:[e.UNDERSCORE_TITLE_MODE]},{beginKeywords:"use",end:";",contains:[e.UNDERSCORE_TITLE_MODE]},{begin:"=>"},a,n]}}}()); -hljs.registerLanguage("php-template",function(){"use strict";return function(n){return{name:"PHP template",subLanguage:"xml",contains:[{begin:/<\?(php|=)?/,end:/\?>/,subLanguage:"php",contains:[{begin:"/\\*",end:"\\*/",skip:!0},{begin:'b"',end:'"',skip:!0},{begin:"b'",end:"'",skip:!0},n.inherit(n.APOS_STRING_MODE,{illegal:null,className:null,contains:null,skip:!0}),n.inherit(n.QUOTE_STRING_MODE,{illegal:null,className:null,contains:null,skip:!0})]}]}}}()); -hljs.registerLanguage("plaintext",function(){"use strict";return function(t){return{name:"Plain text",aliases:["text","txt"],disableAutodetect:!0}}}()); -hljs.registerLanguage("properties",function(){"use strict";return function(e){var n="[ \\t\\f]*",t="("+n+"[:=]"+n+"|[ \\t\\f]+)",a="([^\\\\:= 
\\t\\f\\n]|\\\\.)+",s={end:t,relevance:0,starts:{className:"string",end:/$/,relevance:0,contains:[{begin:"\\\\\\n"}]}};return{name:".properties",case_insensitive:!0,illegal:/\S/,contains:[e.COMMENT("^\\s*[!#]","$"),{begin:"([^\\\\\\W:= \\t\\f\\n]|\\\\.)+"+t,returnBegin:!0,contains:[{className:"attr",begin:"([^\\\\\\W:= \\t\\f\\n]|\\\\.)+",endsParent:!0,relevance:0}],starts:s},{begin:a+t,returnBegin:!0,relevance:0,contains:[{className:"meta",begin:a,endsParent:!0,relevance:0}],starts:s},{className:"attr",relevance:0,begin:a+n+"$"}]}}}()); -hljs.registerLanguage("python",function(){"use strict";return function(e){var n={keyword:"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda async await nonlocal|10",built_in:"Ellipsis NotImplemented",literal:"False None True"},a={className:"meta",begin:/^(>>>|\.\.\.) /},i={className:"subst",begin:/\{/,end:/\}/,keywords:n,illegal:/#/},s={begin:/\{\{/,relevance:0},r={className:"string",contains:[e.BACKSLASH_ESCAPE],variants:[{begin:/(u|b)?r?'''/,end:/'''/,contains:[e.BACKSLASH_ESCAPE,a],relevance:10},{begin:/(u|b)?r?"""/,end:/"""/,contains:[e.BACKSLASH_ESCAPE,a],relevance:10},{begin:/(fr|rf|f)'''/,end:/'''/,contains:[e.BACKSLASH_ESCAPE,a,s,i]},{begin:/(fr|rf|f)"""/,end:/"""/,contains:[e.BACKSLASH_ESCAPE,a,s,i]},{begin:/(u|r|ur)'/,end:/'/,relevance:10},{begin:/(u|r|ur)"/,end:/"/,relevance:10},{begin:/(b|br)'/,end:/'/},{begin:/(b|br)"/,end:/"/},{begin:/(fr|rf|f)'/,end:/'/,contains:[e.BACKSLASH_ESCAPE,s,i]},{begin:/(fr|rf|f)"/,end:/"/,contains:[e.BACKSLASH_ESCAPE,s,i]},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]},l={className:"number",relevance:0,variants:[{begin:e.BINARY_NUMBER_RE+"[lLjJ]?"},{begin:"\\b(0o[0-7]+)[lLjJ]?"},{begin:e.C_NUMBER_RE+"[lLjJ]?"}]},t={className:"params",variants:[{begin:/\(\s*\)/,skip:!0,className:null},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,contains:["self",a,l,r,e.HASH_COMMENT_MODE]}]};return i.contains=[r,l,a],{name:"Python",aliases:["py","gyp","ipython"],keywords:n,illegal:/(<\/|->|\?)|=>/,contains:[a,l,{beginKeywords:"if",relevance:0},r,e.HASH_COMMENT_MODE,{variants:[{className:"function",beginKeywords:"def"},{className:"class",beginKeywords:"class"}],end:/:/,illegal:/[${=;\n,]/,contains:[e.UNDERSCORE_TITLE_MODE,t,{begin:/->/,endsWithParent:!0,keywords:"None"}]},{className:"meta",begin:/^[\t ]*@/,end:/$/},{begin:/\b(print|exec)\(/}]}}}()); -hljs.registerLanguage("python-repl",function(){"use strict";return function(n){return{aliases:["pycon"],contains:[{className:"meta",starts:{end:/ |$/,starts:{end:"$",subLanguage:"python"}},variants:[{begin:/^>>>(?=[ ]|$)/},{begin:/^\.\.\.(?=[ ]|$)/}]}]}}}()); -hljs.registerLanguage("ruby",function(){"use strict";return function(e){var n="[a-zA-Z_]\\w*[!?=]?|[-+~]\\@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?",a={keyword:"and then defined module in return redo if BEGIN retry end for self when next until do begin unless END rescue else break undef not super class case require yield alias while ensure elsif or include attr_reader attr_writer attr_accessor",literal:"true false 
nil"},s={className:"doctag",begin:"@[A-Za-z]+"},i={begin:"#<",end:">"},r=[e.COMMENT("#","$",{contains:[s]}),e.COMMENT("^\\=begin","^\\=end",{contains:[s],relevance:10}),e.COMMENT("^__END__","\\n$")],c={className:"subst",begin:"#\\{",end:"}",keywords:a},t={className:"string",contains:[e.BACKSLASH_ESCAPE,c],variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/`/,end:/`/},{begin:"%[qQwWx]?\\(",end:"\\)"},{begin:"%[qQwWx]?\\[",end:"\\]"},{begin:"%[qQwWx]?{",end:"}"},{begin:"%[qQwWx]?<",end:">"},{begin:"%[qQwWx]?/",end:"/"},{begin:"%[qQwWx]?%",end:"%"},{begin:"%[qQwWx]?-",end:"-"},{begin:"%[qQwWx]?\\|",end:"\\|"},{begin:/\B\?(\\\d{1,3}|\\x[A-Fa-f0-9]{1,2}|\\u[A-Fa-f0-9]{4}|\\?\S)\b/},{begin:/<<[-~]?'?(\w+)(?:.|\n)*?\n\s*\1\b/,returnBegin:!0,contains:[{begin:/<<[-~]?'?/},e.END_SAME_AS_BEGIN({begin:/(\w+)/,end:/(\w+)/,contains:[e.BACKSLASH_ESCAPE,c]})]}]},b={className:"params",begin:"\\(",end:"\\)",endsParent:!0,keywords:a},d=[t,i,{className:"class",beginKeywords:"class module",end:"$|;",illegal:/=/,contains:[e.inherit(e.TITLE_MODE,{begin:"[A-Za-z_]\\w*(::\\w+)*(\\?|\\!)?"}),{begin:"<\\s*",contains:[{begin:"("+e.IDENT_RE+"::)?"+e.IDENT_RE}]}].concat(r)},{className:"function",beginKeywords:"def",end:"$|;",contains:[e.inherit(e.TITLE_MODE,{begin:n}),b].concat(r)},{begin:e.IDENT_RE+"::"},{className:"symbol",begin:e.UNDERSCORE_IDENT_RE+"(\\!|\\?)?:",relevance:0},{className:"symbol",begin:":(?!\\s)",contains:[t,{begin:n}],relevance:0},{className:"number",begin:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",relevance:0},{begin:"(\\$\\W)|((\\$|\\@\\@?)(\\w+))"},{className:"params",begin:/\|/,end:/\|/,keywords:a},{begin:"("+e.RE_STARTERS_RE+"|unless)\\s*",keywords:"unless",contains:[i,{className:"regexp",contains:[e.BACKSLASH_ESCAPE,c],illegal:/\n/,variants:[{begin:"/",end:"/[a-z]*"},{begin:"%r{",end:"}[a-z]*"},{begin:"%r\\(",end:"\\)[a-z]*"},{begin:"%r!",end:"![a-z]*"},{begin:"%r\\[",end:"\\][a-z]*"}]}].concat(r),relevance:0}].concat(r);c.contains=d,b.contains=d;var g=[{begin:/^\s*=>/,starts:{end:"$",contains:d}},{className:"meta",begin:"^([>?]>|[\\w#]+\\(\\w+\\):\\d+:\\d+>|(\\w+-)?\\d+\\.\\d+\\.\\d(p\\d+)?[^>]+>)",starts:{end:"$",contains:d}}];return{name:"Ruby",aliases:["rb","gemspec","podspec","thor","irb"],keywords:a,illegal:/\/\*/,contains:r.concat(g).concat(d)}}}()); -hljs.registerLanguage("rust",function(){"use strict";return function(e){var n="([ui](8|16|32|64|128|size)|f(32|64))?",t="drop i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize f32 f64 str char bool Box Option Result String Vec Copy Send Sized Sync Drop Fn FnMut FnOnce ToOwned Clone Debug PartialEq PartialOrd Eq Ord AsRef AsMut Into From Default Iterator Extend IntoIterator DoubleEndedIterator ExactSizeIterator SliceConcatExt ToString assert! assert_eq! bitflags! bytes! cfg! col! concat! concat_idents! debug_assert! debug_assert_eq! env! panic! file! format! format_args! include_bin! include_str! line! local_data_key! module_path! option_env! print! println! select! stringify! try! unimplemented! unreachable! vec! write! writeln! macro_rules! assert_ne! 
debug_assert_ne!";return{name:"Rust",aliases:["rs"],keywords:{$pattern:e.IDENT_RE+"!?",keyword:"abstract as async await become box break const continue crate do dyn else enum extern false final fn for if impl in let loop macro match mod move mut override priv pub ref return self Self static struct super trait true try type typeof unsafe unsized use virtual where while yield",literal:"true false Some None Ok Err",built_in:t},illegal:""}]}}}()); -hljs.registerLanguage("scss",function(){"use strict";return function(e){var t={className:"variable",begin:"(\\$[a-zA-Z-][a-zA-Z0-9_-]*)\\b"},i={className:"number",begin:"#[0-9A-Fa-f]+"};return e.CSS_NUMBER_MODE,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,e.C_BLOCK_COMMENT_MODE,{name:"SCSS",case_insensitive:!0,illegal:"[=/|']",contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{className:"selector-id",begin:"\\#[A-Za-z0-9_-]+",relevance:0},{className:"selector-class",begin:"\\.[A-Za-z0-9_-]+",relevance:0},{className:"selector-attr",begin:"\\[",end:"\\]",illegal:"$"},{className:"selector-tag",begin:"\\b(a|abbr|acronym|address|area|article|aside|audio|b|base|big|blockquote|body|br|button|canvas|caption|cite|code|col|colgroup|command|datalist|dd|del|details|dfn|div|dl|dt|em|embed|fieldset|figcaption|figure|footer|form|frame|frameset|(h[1-6])|head|header|hgroup|hr|html|i|iframe|img|input|ins|kbd|keygen|label|legend|li|link|map|mark|meta|meter|nav|noframes|noscript|object|ol|optgroup|option|output|p|param|pre|progress|q|rp|rt|ruby|samp|script|section|select|small|span|strike|strong|style|sub|sup|table|tbody|td|textarea|tfoot|th|thead|time|title|tr|tt|ul|var|video)\\b",relevance:0},{className:"selector-pseudo",begin:":(visited|valid|root|right|required|read-write|read-only|out-range|optional|only-of-type|only-child|nth-of-type|nth-last-of-type|nth-last-child|nth-child|not|link|left|last-of-type|last-child|lang|invalid|indeterminate|in-range|hover|focus|first-of-type|first-line|first-letter|first-child|first|enabled|empty|disabled|default|checked|before|after|active)"},{className:"selector-pseudo",begin:"::(after|before|choices|first-letter|first-line|repeat-index|repeat-item|selection|value)"},t,{className:"attribute",begin:"\\b(src|z-index|word-wrap|word-spacing|word-break|width|widows|white-space|visibility|vertical-align|unicode-bidi|transition-timing-function|transition-property|transition-duration|transition-delay|transition|transform-style|transform-origin|transform|top|text-underline-position|text-transform|text-shadow|text-rendering|text-overflow|text-indent|text-decoration-style|text-decoration-line|text-decoration-color|text-decoration|text-align-last|text-align|tab-size|table-layout|right|resize|quotes|position|pointer-events|perspective-origin|perspective|page-break-inside|page-break-before|page-break-after|padding-top|padding-right|padding-left|padding-bottom|padding|overflow-y|overflow-x|overflow-wrap|overflow|outline-width|outline-style|outline-offset|outline-color|outline|orphans|order|opacity|object-position|object-fit|normal|none|nav-up|nav-right|nav-left|nav-index|nav-down|min-width|min-height|max-width|max-height|mask|marks|margin-top|margin-right|margin-left|margin-bottom|margin|list-style-type|list-style-position|list-style-image|list-style|line-height|letter-spacing|left|justify-content|initial|inherit|ime-mode|image-orientation|image-resolution|image-rendering|icon|hyphens|height|font-weight|font-variant-ligatures|font-variant|font-style|font-stretch|font-size-adjust|font-size|font-language-override|font-kerning|font-feature-settings|
font-family|font|float|flex-wrap|flex-shrink|flex-grow|flex-flow|flex-direction|flex-basis|flex|filter|empty-cells|display|direction|cursor|counter-reset|counter-increment|content|column-width|column-span|column-rule-width|column-rule-style|column-rule-color|column-rule|column-gap|column-fill|column-count|columns|color|clip-path|clip|clear|caption-side|break-inside|break-before|break-after|box-sizing|box-shadow|box-decoration-break|bottom|border-width|border-top-width|border-top-style|border-top-right-radius|border-top-left-radius|border-top-color|border-top|border-style|border-spacing|border-right-width|border-right-style|border-right-color|border-right|border-radius|border-left-width|border-left-style|border-left-color|border-left|border-image-width|border-image-source|border-image-slice|border-image-repeat|border-image-outset|border-image|border-color|border-collapse|border-bottom-width|border-bottom-style|border-bottom-right-radius|border-bottom-left-radius|border-bottom-color|border-bottom|border|background-size|background-repeat|background-position|background-origin|background-image|background-color|background-clip|background-attachment|background-blend-mode|background|backface-visibility|auto|animation-timing-function|animation-play-state|animation-name|animation-iteration-count|animation-fill-mode|animation-duration|animation-direction|animation-delay|animation|align-self|align-items|align-content)\\b",illegal:"[^\\s]"},{begin:"\\b(whitespace|wait|w-resize|visible|vertical-text|vertical-ideographic|uppercase|upper-roman|upper-alpha|underline|transparent|top|thin|thick|text|text-top|text-bottom|tb-rl|table-header-group|table-footer-group|sw-resize|super|strict|static|square|solid|small-caps|separate|se-resize|scroll|s-resize|rtl|row-resize|ridge|right|repeat|repeat-y|repeat-x|relative|progress|pointer|overline|outside|outset|oblique|nowrap|not-allowed|normal|none|nw-resize|no-repeat|no-drop|newspaper|ne-resize|n-resize|move|middle|medium|ltr|lr-tb|lowercase|lower-roman|lower-alpha|loose|list-item|line|line-through|line-edge|lighter|left|keep-all|justify|italic|inter-word|inter-ideograph|inside|inset|inline|inline-block|inherit|inactive|ideograph-space|ideograph-parenthesis|ideograph-numeric|ideograph-alpha|horizontal|hidden|help|hand|groove|fixed|ellipsis|e-resize|double|dotted|distribute|distribute-space|distribute-letter|distribute-all-lines|disc|disabled|default|decimal|dashed|crosshair|collapse|col-resize|circle|char|center|capitalize|break-word|break-all|bottom|both|bolder|bold|block|bidi-override|below|baseline|auto|always|all-scroll|absolute|table|table-cell)\\b"},{begin:":",end:";",contains:[t,i,e.CSS_NUMBER_MODE,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,{className:"meta",begin:"!important"}]},{begin:"@(page|font-face)",lexemes:"@[a-z-]+",keywords:"@page @font-face"},{begin:"@",end:"[{;]",returnBegin:!0,keywords:"and or not only",contains:[{begin:"@[a-z-]+",className:"keyword"},t,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,i,e.CSS_NUMBER_MODE]}]}}}()); -hljs.registerLanguage("shell",function(){"use strict";return function(s){return{name:"Shell Session",aliases:["console"],contains:[{className:"meta",begin:"^\\s{0,3}[/\\w\\d\\[\\]()@-]*[>%$#]",starts:{end:"$",subLanguage:"bash"}}]}}}()); -hljs.registerLanguage("sql",function(){"use strict";return function(e){var t=e.COMMENT("--","$");return{name:"SQL",case_insensitive:!0,illegal:/[<>{}*]/,contains:[{beginKeywords:"begin end start commit rollback savepoint lock alter create drop rename call delete do handler insert load replace select 
truncate update set show pragma grant merge describe use explain help declare prepare execute deallocate release unlock purge reset change stop analyze cache flush optimize repair kill install uninstall checksum restore check backup revoke comment values with",end:/;/,endsWithParent:!0,keywords:{$pattern:/[\w\.]+/,keyword:"as abort abs absolute acc acce accep accept access accessed accessible account acos action activate add addtime admin administer advanced advise aes_decrypt aes_encrypt after agent aggregate ali alia alias all allocate allow alter always analyze ancillary and anti any anydata anydataset anyschema anytype apply archive archived archivelog are as asc ascii asin assembly assertion associate asynchronous at atan atn2 attr attri attrib attribu attribut attribute attributes audit authenticated authentication authid authors auto autoallocate autodblink autoextend automatic availability avg backup badfile basicfile before begin beginning benchmark between bfile bfile_base big bigfile bin binary_double binary_float binlog bit_and bit_count bit_length bit_or bit_xor bitmap blob_base block blocksize body both bound bucket buffer_cache buffer_pool build bulk by byte byteordermark bytes cache caching call calling cancel capacity cascade cascaded case cast catalog category ceil ceiling chain change changed char_base char_length character_length characters characterset charindex charset charsetform charsetid check checksum checksum_agg child choose chr chunk class cleanup clear client clob clob_base clone close cluster_id cluster_probability cluster_set clustering coalesce coercibility col collate collation collect colu colum column column_value columns columns_updated comment commit compact compatibility compiled complete composite_limit compound compress compute concat concat_ws concurrent confirm conn connec connect connect_by_iscycle connect_by_isleaf connect_by_root connect_time connection consider consistent constant constraint constraints constructor container content contents context contributors controlfile conv convert convert_tz corr corr_k corr_s corresponding corruption cos cost count count_big counted covar_pop covar_samp cpu_per_call cpu_per_session crc32 create creation critical cross cube cume_dist curdate current current_date current_time current_timestamp current_user cursor curtime customdatum cycle data database databases datafile datafiles datalength date_add date_cache date_format date_sub dateadd datediff datefromparts datename datepart datetime2fromparts day day_to_second dayname dayofmonth dayofweek dayofyear days db_role_change dbtimezone ddl deallocate declare decode decompose decrement decrypt deduplicate def defa defau defaul default defaults deferred defi defin define degrees delayed delegate delete delete_all delimited demand dense_rank depth dequeue des_decrypt des_encrypt des_key_file desc descr descri describ describe descriptor deterministic diagnostics difference dimension direct_load directory disable disable_all disallow disassociate discardfile disconnect diskgroup distinct distinctrow distribute distributed div do document domain dotnet double downgrade drop dumpfile duplicate duration each edition editionable editions element ellipsis else elsif elt empty enable enable_all enclosed encode encoding encrypt end end-exec endian enforced engine engines enqueue enterprise entityescaping eomonth error errors escaped evalname evaluate event eventdata events except exception exceptions exchange exclude excluding execu execut execute exempt exists exit 
exp expire explain explode export export_set extended extent external external_1 external_2 externally extract failed failed_login_attempts failover failure far fast feature_set feature_value fetch field fields file file_name_convert filesystem_like_logging final finish first first_value fixed flash_cache flashback floor flush following follows for forall force foreign form forma format found found_rows freelist freelists freepools fresh from from_base64 from_days ftp full function general generated get get_format get_lock getdate getutcdate global global_name globally go goto grant grants greatest group group_concat group_id grouping grouping_id groups gtid_subtract guarantee guard handler hash hashkeys having hea head headi headin heading heap help hex hierarchy high high_priority hosts hour hours http id ident_current ident_incr ident_seed identified identity idle_time if ifnull ignore iif ilike ilm immediate import in include including increment index indexes indexing indextype indicator indices inet6_aton inet6_ntoa inet_aton inet_ntoa infile initial initialized initially initrans inmemory inner innodb input insert install instance instantiable instr interface interleaved intersect into invalidate invisible is is_free_lock is_ipv4 is_ipv4_compat is_not is_not_null is_used_lock isdate isnull isolation iterate java join json json_exists keep keep_duplicates key keys kill language large last last_day last_insert_id last_value lateral lax lcase lead leading least leaves left len lenght length less level levels library like like2 like4 likec limit lines link list listagg little ln load load_file lob lobs local localtime localtimestamp locate locator lock locked log log10 log2 logfile logfiles logging logical logical_reads_per_call logoff logon logs long loop low low_priority lower lpad lrtrim ltrim main make_set makedate maketime managed management manual map mapping mask master master_pos_wait match matched materialized max maxextents maximize maxinstances maxlen maxlogfiles maxloghistory maxlogmembers maxsize maxtrans md5 measures median medium member memcompress memory merge microsecond mid migration min minextents minimum mining minus minute minutes minvalue missing mod mode model modification modify module monitoring month months mount move movement multiset mutex name name_const names nan national native natural nav nchar nclob nested never new newline next nextval no no_write_to_binlog noarchivelog noaudit nobadfile nocheck nocompress nocopy nocycle nodelay nodiscardfile noentityescaping noguarantee nokeep nologfile nomapping nomaxvalue nominimize nominvalue nomonitoring none noneditionable nonschema noorder nopr nopro noprom nopromp noprompt norely noresetlogs noreverse normal norowdependencies noschemacheck noswitch not nothing notice notnull notrim novalidate now nowait nth_value nullif nulls num numb numbe nvarchar nvarchar2 object ocicoll ocidate ocidatetime ociduration ociinterval ociloblocator ocinumber ociref ocirefcursor ocirowid ocistring ocitype oct octet_length of off offline offset oid oidindex old on online only opaque open operations operator optimal optimize option optionally or oracle oracle_date oradata ord ordaudio orddicom orddoc order ordimage ordinality ordvideo organization orlany orlvary out outer outfile outline output over overflow overriding package pad parallel parallel_enable parameters parent parse partial partition partitions pascal passing password password_grace_time password_lock_time password_reuse_max password_reuse_time password_verify_function 
patch path patindex pctincrease pctthreshold pctused pctversion percent percent_rank percentile_cont percentile_disc performance period period_add period_diff permanent physical pi pipe pipelined pivot pluggable plugin policy position post_transaction pow power pragma prebuilt precedes preceding precision prediction prediction_cost prediction_details prediction_probability prediction_set prepare present preserve prior priority private private_sga privileges procedural procedure procedure_analyze processlist profiles project prompt protection public publishingservername purge quarter query quick quiesce quota quotename radians raise rand range rank raw read reads readsize rebuild record records recover recovery recursive recycle redo reduced ref reference referenced references referencing refresh regexp_like register regr_avgx regr_avgy regr_count regr_intercept regr_r2 regr_slope regr_sxx regr_sxy reject rekey relational relative relaylog release release_lock relies_on relocate rely rem remainder rename repair repeat replace replicate replication required reset resetlogs resize resource respect restore restricted result result_cache resumable resume retention return returning returns reuse reverse revoke right rlike role roles rollback rolling rollup round row row_count rowdependencies rowid rownum rows rtrim rules safe salt sample save savepoint sb1 sb2 sb4 scan schema schemacheck scn scope scroll sdo_georaster sdo_topo_geometry search sec_to_time second seconds section securefile security seed segment select self semi sequence sequential serializable server servererror session session_user sessions_per_user set sets settings sha sha1 sha2 share shared shared_pool short show shrink shutdown si_averagecolor si_colorhistogram si_featurelist si_positionalcolor si_stillimage si_texture siblings sid sign sin size size_t sizes skip slave sleep smalldatetimefromparts smallfile snapshot some soname sort soundex source space sparse spfile split sql sql_big_result sql_buffer_result sql_cache sql_calc_found_rows sql_small_result sql_variant_property sqlcode sqldata sqlerror sqlname sqlstate sqrt square standalone standby start starting startup statement static statistics stats_binomial_test stats_crosstab stats_ks_test stats_mode stats_mw_test stats_one_way_anova stats_t_test_ stats_t_test_indep stats_t_test_one stats_t_test_paired stats_wsr_test status std stddev stddev_pop stddev_samp stdev stop storage store stored str str_to_date straight_join strcmp strict string struct stuff style subdate subpartition subpartitions substitutable substr substring subtime subtring_index subtype success sum suspend switch switchoffset switchover sync synchronous synonym sys sys_xmlagg sysasm sysaux sysdate sysdatetimeoffset sysdba sysoper system system_user sysutcdatetime table tables tablespace tablesample tan tdo template temporary terminated tertiary_weights test than then thread through tier ties time time_format time_zone timediff timefromparts timeout timestamp timestampadd timestampdiff timezone_abbr timezone_minute timezone_region to to_base64 to_date to_days to_seconds todatetimeoffset trace tracking transaction transactional translate translation treat trigger trigger_nestlevel triggers trim truncate try_cast try_convert try_parse type ub1 ub2 ub4 ucase unarchived unbounded uncompress under undo unhex unicode uniform uninstall union unique unix_timestamp unknown unlimited unlock unnest unpivot unrecoverable unsafe unsigned until untrusted unusable unused update updated upgrade upped upper upsert url 
urowid usable usage use use_stored_outlines user user_data user_resources users using utc_date utc_timestamp uuid uuid_short validate validate_password_strength validation valist value values var var_samp varcharc vari varia variab variabl variable variables variance varp varraw varrawc varray verify version versions view virtual visible void wait wallet warning warnings week weekday weekofyear wellformed when whene whenev wheneve whenever where while whitespace window with within without work wrapped xdb xml xmlagg xmlattributes xmlcast xmlcolattval xmlelement xmlexists xmlforest xmlindex xmlnamespaces xmlpi xmlquery xmlroot xmlschema xmlserialize xmltable xmltype xor year year_to_month years yearweek",literal:"true false null unknown",built_in:"array bigint binary bit blob bool boolean char character date dec decimal float int int8 integer interval number numeric real record serial serial8 smallint text time timestamp tinyint varchar varchar2 varying void"},contains:[{className:"string",begin:"'",end:"'",contains:[{begin:"''"}]},{className:"string",begin:'"',end:'"',contains:[{begin:'""'}]},{className:"string",begin:"`",end:"`"},e.C_NUMBER_MODE,e.C_BLOCK_COMMENT_MODE,t,e.HASH_COMMENT_MODE]},e.C_BLOCK_COMMENT_MODE,t,e.HASH_COMMENT_MODE]}}}()); -hljs.registerLanguage("swift",function(){"use strict";return function(e){var i={keyword:"#available #colorLiteral #column #else #elseif #endif #file #fileLiteral #function #if #imageLiteral #line #selector #sourceLocation _ __COLUMN__ __FILE__ __FUNCTION__ __LINE__ Any as as! as? associatedtype associativity break case catch class continue convenience default defer deinit didSet do dynamic dynamicType else enum extension fallthrough false fileprivate final for func get guard if import in indirect infix init inout internal is lazy left let mutating nil none nonmutating open operator optional override postfix precedence prefix private protocol Protocol public repeat required rethrows return right self Self set static struct subscript super switch throw throws true try try! try? 
Type typealias unowned var weak where while willSet",literal:"true false nil",built_in:"abs advance alignof alignofValue anyGenerator assert assertionFailure bridgeFromObjectiveC bridgeFromObjectiveCUnconditional bridgeToObjectiveC bridgeToObjectiveCUnconditional c compactMap contains count countElements countLeadingZeros debugPrint debugPrintln distance dropFirst dropLast dump encodeBitsAsWords enumerate equal fatalError filter find getBridgedObjectiveCType getVaList indices insertionSort isBridgedToObjectiveC isBridgedVerbatimToObjectiveC isUniquelyReferenced isUniquelyReferencedNonObjC join lazy lexicographicalCompare map max maxElement min minElement numericCast overlaps partition posix precondition preconditionFailure print println quickSort readLine reduce reflect reinterpretCast reverse roundUpToAlignment sizeof sizeofValue sort split startsWith stride strideof strideofValue swap toString transcode underestimateCount unsafeAddressOf unsafeBitCast unsafeDowncast unsafeUnwrap unsafeReflect withExtendedLifetime withObjectAtPlusZero withUnsafePointer withUnsafePointerToObject withUnsafeMutablePointer withUnsafeMutablePointers withUnsafePointer withUnsafePointers withVaList zip"},n=e.COMMENT("/\\*","\\*/",{contains:["self"]}),t={className:"subst",begin:/\\\(/,end:"\\)",keywords:i,contains:[]},a={className:"string",contains:[e.BACKSLASH_ESCAPE,t],variants:[{begin:/"""/,end:/"""/},{begin:/"/,end:/"/}]},r={className:"number",begin:"\\b([\\d_]+(\\.[\\deE_]+)?|0x[a-fA-F0-9_]+(\\.[a-fA-F0-9p_]+)?|0b[01_]+|0o[0-7_]+)\\b",relevance:0};return t.contains=[r],{name:"Swift",keywords:i,contains:[a,e.C_LINE_COMMENT_MODE,n,{className:"type",begin:"\\b[A-Z][\\wÀ-ʸ']*[!?]"},{className:"type",begin:"\\b[A-Z][\\wÀ-ʸ']*",relevance:0},r,{className:"function",beginKeywords:"func",end:"{",excludeEnd:!0,contains:[e.inherit(e.TITLE_MODE,{begin:/[A-Za-z$_][0-9A-Za-z$_]*/}),{begin://},{className:"params",begin:/\(/,end:/\)/,endsParent:!0,keywords:i,contains:["self",r,a,e.C_BLOCK_COMMENT_MODE,{begin:":"}],illegal:/["']/}],illegal:/\[|%/},{className:"class",beginKeywords:"struct protocol class extension enum",keywords:i,end:"\\{",excludeEnd:!0,contains:[e.inherit(e.TITLE_MODE,{begin:/[A-Za-z$_][\u00C0-\u02B80-9A-Za-z$_]*/})]},{className:"meta",begin:"(@discardableResult|@warn_unused_result|@exported|@lazy|@noescape|@NSCopying|@NSManaged|@objc|@objcMembers|@convention|@required|@noreturn|@IBAction|@IBDesignable|@IBInspectable|@IBOutlet|@infix|@prefix|@postfix|@autoclosure|@testable|@available|@nonobjc|@NSApplicationMain|@UIApplicationMain|@dynamicMemberLookup|@propertyWrapper)\\b"},{beginKeywords:"import",end:/$/,contains:[e.C_LINE_COMMENT_MODE,n]}]}}}()); -hljs.registerLanguage("typescript",function(){"use strict";const 
e=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends"],n=["true","false","null","undefined","NaN","Infinity"],a=[].concat(["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape"],["arguments","this","super","console","window","document","localStorage","module","global"],["Intl","DataView","Number","Math","Date","String","RegExp","Object","Function","Boolean","Error","Symbol","Set","Map","WeakSet","WeakMap","Proxy","Reflect","JSON","Promise","Float64Array","Int16Array","Int32Array","Int8Array","Uint16Array","Uint32Array","Float32Array","Array","Uint8Array","Uint8ClampedArray","ArrayBuffer"],["EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError"]);return function(r){var t={$pattern:"[A-Za-z$_][0-9A-Za-z$_]*",keyword:e.concat(["type","namespace","typedef","interface","public","private","protected","implements","declare","abstract","readonly"]).join(" "),literal:n.join(" "),built_in:a.concat(["any","void","number","boolean","string","object","never","enum"]).join(" ")},s={className:"meta",begin:"@[A-Za-z$_][0-9A-Za-z$_]*"},i={className:"number",variants:[{begin:"\\b(0[bB][01]+)n?"},{begin:"\\b(0[oO][0-7]+)n?"},{begin:r.C_NUMBER_RE+"n?"}],relevance:0},o={className:"subst",begin:"\\$\\{",end:"\\}",keywords:t,contains:[]},c={begin:"html`",end:"",starts:{end:"`",returnEnd:!1,contains:[r.BACKSLASH_ESCAPE,o],subLanguage:"xml"}},l={begin:"css`",end:"",starts:{end:"`",returnEnd:!1,contains:[r.BACKSLASH_ESCAPE,o],subLanguage:"css"}},E={className:"string",begin:"`",end:"`",contains:[r.BACKSLASH_ESCAPE,o]};o.contains=[r.APOS_STRING_MODE,r.QUOTE_STRING_MODE,c,l,E,i,r.REGEXP_MODE];var d={begin:"\\(",end:/\)/,keywords:t,contains:["self",r.QUOTE_STRING_MODE,r.APOS_STRING_MODE,r.NUMBER_MODE]},u={className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:t,contains:[r.C_LINE_COMMENT_MODE,r.C_BLOCK_COMMENT_MODE,s,d]};return{name:"TypeScript",aliases:["ts"],keywords:t,contains:[r.SHEBANG(),{className:"meta",begin:/^\s*['"]use strict['"]/},r.APOS_STRING_MODE,r.QUOTE_STRING_MODE,c,l,E,r.C_LINE_COMMENT_MODE,r.C_BLOCK_COMMENT_MODE,i,{begin:"("+r.RE_STARTERS_RE+"|\\b(case|return|throw)\\b)\\s*",keywords:"return throw case",contains:[r.C_LINE_COMMENT_MODE,r.C_BLOCK_COMMENT_MODE,r.REGEXP_MODE,{className:"function",begin:"(\\([^(]*(\\([^(]*(\\([^(]*\\))?\\))?\\)|"+r.UNDERSCORE_IDENT_RE+")\\s*=>",returnBegin:!0,end:"\\s*=>",contains:[{className:"params",variants:[{begin:r.UNDERSCORE_IDENT_RE},{className:null,begin:/\(\s*\)/,skip:!0},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:t,contains:d.contains}]}]}],relevance:0},{className:"function",beginKeywords:"function",end:/[\{;]/,excludeEnd:!0,keywords:t,contains:["self",r.inherit(r.TITLE_MODE,{begin:"[A-Za-z$_][0-9A-Za-z$_]*"}),u],illegal:/%/,relevance:0},{beginKeywords:"constructor",end:/[\{;]/,excludeEnd:!0,contains:["self",u]},{begin:/module\./,keywords:{built_in:"module"},relevance:0},{beginKeywords:"module",end:/\{/,excludeEnd:!0},{beginKeywords:"interface",end:/\{/,excludeEnd:!0,keywords:"interface extends"},{begin:/\$[(.]/},{begin:"\\."+r.IDENT_RE,relevance:0},s,d]}}}()); 
-hljs.registerLanguage("yaml",function(){"use strict";return function(e){var n="true false yes no null",a="[\\w#;/?:@&=+$,.~*\\'()[\\]]+",s={className:"string",relevance:0,variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/\S+/}],contains:[e.BACKSLASH_ESCAPE,{className:"template-variable",variants:[{begin:"{{",end:"}}"},{begin:"%{",end:"}"}]}]},i=e.inherit(s,{variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/[^\s,{}[\]]+/}]}),l={end:",",endsWithParent:!0,excludeEnd:!0,contains:[],keywords:n,relevance:0},t={begin:"{",end:"}",contains:[l],illegal:"\\n",relevance:0},g={begin:"\\[",end:"\\]",contains:[l],illegal:"\\n",relevance:0},b=[{className:"attr",variants:[{begin:"\\w[\\w :\\/.-]*:(?=[ \t]|$)"},{begin:'"\\w[\\w :\\/.-]*":(?=[ \t]|$)'},{begin:"'\\w[\\w :\\/.-]*':(?=[ \t]|$)"}]},{className:"meta",begin:"^---s*$",relevance:10},{className:"string",begin:"[\\|>]([0-9]?[+-])?[ ]*\\n( *)[\\S ]+\\n(\\2[\\S ]+\\n?)*"},{begin:"<%[%=-]?",end:"[%-]?%>",subLanguage:"ruby",excludeBegin:!0,excludeEnd:!0,relevance:0},{className:"type",begin:"!\\w+!"+a},{className:"type",begin:"!<"+a+">"},{className:"type",begin:"!"+a},{className:"type",begin:"!!"+a},{className:"meta",begin:"&"+e.UNDERSCORE_IDENT_RE+"$"},{className:"meta",begin:"\\*"+e.UNDERSCORE_IDENT_RE+"$"},{className:"bullet",begin:"\\-(?=[ ]|$)",relevance:0},e.HASH_COMMENT_MODE,{beginKeywords:n,keywords:{literal:n}},{className:"number",begin:"\\b[0-9]{4}(-[0-9][0-9]){0,2}([Tt \\t][0-9][0-9]?(:[0-9][0-9]){2})?(\\.[0-9]*)?([ \\t])*(Z|[-+][0-9][0-9]?(:[0-9][0-9])?)?\\b"},{className:"number",begin:e.C_NUMBER_RE+"\\b"},t,g,s],c=[...b];return c.pop(),c.push(i),l.contains=c,{name:"YAML",case_insensitive:!0,aliases:["yml","YAML"],contains:b}}}()); -hljs.registerLanguage("armasm",function(){"use strict";return function(s){const e={variants:[s.COMMENT("^[ \\t]*(?=#)","$",{relevance:0,excludeBegin:!0}),s.COMMENT("[;@]","$",{relevance:0}),s.C_LINE_COMMENT_MODE,s.C_BLOCK_COMMENT_MODE]};return{name:"ARM Assembly",case_insensitive:!0,aliases:["arm"],keywords:{$pattern:"\\.?"+s.IDENT_RE,meta:".2byte .4byte .align .ascii .asciz .balign .byte .code .data .else .end .endif .endm .endr .equ .err .exitm .extern .global .hword .if .ifdef .ifndef .include .irp .long .macro .rept .req .section .set .skip .space .text .word .arm .thumb .code16 .code32 .force_thumb .thumb_func .ltorg ALIAS ALIGN ARM AREA ASSERT ATTR CN CODE CODE16 CODE32 COMMON CP DATA DCB DCD DCDU DCDO DCFD DCFDU DCI DCQ DCQU DCW DCWU DN ELIF ELSE END ENDFUNC ENDIF ENDP ENTRY EQU EXPORT EXPORTAS EXTERN FIELD FILL FUNCTION GBLA GBLL GBLS GET GLOBAL IF IMPORT INCBIN INCLUDE INFO KEEP LCLA LCLL LCLS LTORG MACRO MAP MEND MEXIT NOFP OPT PRESERVE8 PROC QN READONLY RELOC REQUIRE REQUIRE8 RLIST FN ROUT SETA SETL SETS SN SPACE SUBT THUMB THUMBX TTL WHILE WEND ",built_in:"r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 pc lr sp ip sl sb fp a1 a2 a3 a4 v1 v2 v3 v4 v5 v6 v7 v8 f0 f1 f2 f3 f4 f5 f6 f7 p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 cpsr_c cpsr_x cpsr_s cpsr_f cpsr_cx cpsr_cxs cpsr_xs cpsr_xsf cpsr_sf cpsr_cxsf spsr_c spsr_x spsr_s spsr_f spsr_cx spsr_cxs spsr_xs spsr_xsf spsr_sf spsr_cxsf s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 s16 s17 s18 s19 s20 s21 s22 s23 s24 s25 s26 s27 s28 s29 s30 s31 d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 {PC} {VAR} {TRUE} {FALSE} {OPT} {CONFIG} {ENDIAN} 
{CODESIZE} {CPU} {FPU} {ARCHITECTURE} {PCSTOREOFFSET} {ARMASM_VERSION} {INTER} {ROPI} {RWPI} {SWST} {NOSWST} . @"},contains:[{className:"keyword",begin:"\\b(adc|(qd?|sh?|u[qh]?)?add(8|16)?|usada?8|(q|sh?|u[qh]?)?(as|sa)x|and|adrl?|sbc|rs[bc]|asr|b[lx]?|blx|bxj|cbn?z|tb[bh]|bic|bfc|bfi|[su]bfx|bkpt|cdp2?|clz|clrex|cmp|cmn|cpsi[ed]|cps|setend|dbg|dmb|dsb|eor|isb|it[te]{0,3}|lsl|lsr|ror|rrx|ldm(([id][ab])|f[ds])?|ldr((s|ex)?[bhd])?|movt?|mvn|mra|mar|mul|[us]mull|smul[bwt][bt]|smu[as]d|smmul|smmla|mla|umlaal|smlal?([wbt][bt]|d)|mls|smlsl?[ds]|smc|svc|sev|mia([bt]{2}|ph)?|mrr?c2?|mcrr2?|mrs|msr|orr|orn|pkh(tb|bt)|rbit|rev(16|sh)?|sel|[su]sat(16)?|nop|pop|push|rfe([id][ab])?|stm([id][ab])?|str(ex)?[bhd]?|(qd?)?sub|(sh?|q|u[qh]?)?sub(8|16)|[su]xt(a?h|a?b(16)?)|srs([id][ab])?|swpb?|swi|smi|tst|teq|wfe|wfi|yield)(eq|ne|cs|cc|mi|pl|vs|vc|hi|ls|ge|lt|gt|le|al|hs|lo)?[sptrx]?(?=\\s)"},e,s.QUOTE_STRING_MODE,{className:"string",begin:"'",end:"[^\\\\]'",relevance:0},{className:"title",begin:"\\|",end:"\\|",illegal:"\\n",relevance:0},{className:"number",variants:[{begin:"[#$=]?0x[0-9a-f]+"},{begin:"[#$=]?0b[01]+"},{begin:"[#$=]\\d+"},{begin:"\\b\\d+"}],relevance:0},{className:"symbol",variants:[{begin:"^[ \\t]*[a-z_\\.\\$][a-z0-9_\\.\\$]+:"},{begin:"^[a-z_\\.\\$][a-z0-9_\\.\\$]+"},{begin:"[=#]\\w+"}],relevance:0}]}}}()); -hljs.registerLanguage("d",function(){"use strict";return function(e){var a={$pattern:e.UNDERSCORE_IDENT_RE,keyword:"abstract alias align asm assert auto body break byte case cast catch class const continue debug default delete deprecated do else enum export extern final finally for foreach foreach_reverse|10 goto if immutable import in inout int interface invariant is lazy macro mixin module new nothrow out override package pragma private protected public pure ref return scope shared static struct super switch synchronized template this throw try typedef typeid typeof union unittest version void volatile while with __FILE__ __LINE__ __gshared|10 __thread __traits __DATE__ __EOF__ __TIME__ __TIMESTAMP__ __VENDOR__ __VERSION__",built_in:"bool cdouble cent cfloat char creal dchar delegate double dstring float function idouble ifloat ireal long real short string ubyte ucent uint ulong ushort wchar wstring",literal:"false null 
true"},d="((0|[1-9][\\d_]*)|0[bB][01_]+|0[xX]([\\da-fA-F][\\da-fA-F_]*|_[\\da-fA-F][\\da-fA-F_]*))",n="\\\\(['\"\\?\\\\abfnrtv]|u[\\dA-Fa-f]{4}|[0-7]{1,3}|x[\\dA-Fa-f]{2}|U[\\dA-Fa-f]{8})|&[a-zA-Z\\d]{2,};",t={className:"number",begin:"\\b"+d+"(L|u|U|Lu|LU|uL|UL)?",relevance:0},_={className:"number",begin:"\\b(((0[xX](([\\da-fA-F][\\da-fA-F_]*|_[\\da-fA-F][\\da-fA-F_]*)\\.([\\da-fA-F][\\da-fA-F_]*|_[\\da-fA-F][\\da-fA-F_]*)|\\.?([\\da-fA-F][\\da-fA-F_]*|_[\\da-fA-F][\\da-fA-F_]*))[pP][+-]?(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d))|((0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d)(\\.\\d*|([eE][+-]?(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d)))|\\d+\\.(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d)(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d)|\\.(0|[1-9][\\d_]*)([eE][+-]?(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d))?))([fF]|L|i|[fF]i|Li)?|"+d+"(i|[fF]i|Li))",relevance:0},r={className:"string",begin:"'("+n+"|.)",end:"'",illegal:"."},i={className:"string",begin:'"',contains:[{begin:n,relevance:0}],end:'"[cwd]?'},s=e.COMMENT("\\/\\+","\\+\\/",{contains:["self"],relevance:10});return{name:"D",keywords:a,contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,s,{className:"string",begin:'x"[\\da-fA-F\\s\\n\\r]*"[cwd]?',relevance:10},i,{className:"string",begin:'[rq]"',end:'"[cwd]?',relevance:5},{className:"string",begin:"`",end:"`[cwd]?"},{className:"string",begin:'q"\\{',end:'\\}"'},_,t,r,{className:"meta",begin:"^#!",end:"$",relevance:5},{className:"meta",begin:"#(line)",end:"$",relevance:5},{className:"keyword",begin:"@[a-zA-Z_][a-zA-Z_\\d]*"}]}}}()); -hljs.registerLanguage("handlebars",function(){"use strict";function e(...e){return e.map(e=>(function(e){return e?"string"==typeof e?e:e.source:null})(e)).join("")}return function(n){const a={"builtin-name":"action bindattr collection component concat debugger each each-in get hash if in input link-to loc log lookup mut outlet partial query-params render template textarea unbound unless view with yield"},t=/\[.*?\]/,s=/[^\s!"#%&'()*+,.\/;<=>@\[\\\]^`{|}~]+/,i=e("(",/'.*?'/,"|",/".*?"/,"|",t,"|",s,"|",/\.|\//,")+"),r=e("(",t,"|",s,")(?==)"),l={begin:i,lexemes:/[\w.\/]+/},c=n.inherit(l,{keywords:{literal:"true false undefined null"}}),o={begin:/\(/,end:/\)/},m={className:"attr",begin:r,relevance:0,starts:{begin:/=/,end:/=/,starts:{contains:[n.NUMBER_MODE,n.QUOTE_STRING_MODE,n.APOS_STRING_MODE,c,o]}}},d={contains:[n.NUMBER_MODE,n.QUOTE_STRING_MODE,n.APOS_STRING_MODE,{begin:/as\s+\|/,keywords:{keyword:"as"},end:/\|/,contains:[{begin:/\w+/}]},m,c,o],returnEnd:!0},g=n.inherit(l,{className:"name",keywords:a,starts:n.inherit(d,{end:/\)/})});o.contains=[g];const 
u=n.inherit(l,{keywords:a,className:"name",starts:n.inherit(d,{end:/}}/})}),b=n.inherit(l,{keywords:a,className:"name"}),h=n.inherit(l,{className:"name",keywords:a,starts:n.inherit(d,{end:/}}/})});return{name:"Handlebars",aliases:["hbs","html.hbs","html.handlebars","htmlbars"],case_insensitive:!0,subLanguage:"xml",contains:[{begin:/\\\{\{/,skip:!0},{begin:/\\\\(?=\{\{)/,skip:!0},n.COMMENT(/\{\{!--/,/--\}\}/),n.COMMENT(/\{\{!/,/\}\}/),{className:"template-tag",begin:/\{\{\{\{(?!\/)/,end:/\}\}\}\}/,contains:[u],starts:{end:/\{\{\{\{\//,returnEnd:!0,subLanguage:"xml"}},{className:"template-tag",begin:/\{\{\{\{\//,end:/\}\}\}\}/,contains:[b]},{className:"template-tag",begin:/\{\{#/,end:/\}\}/,contains:[u]},{className:"template-tag",begin:/\{\{(?=else\}\})/,end:/\}\}/,keywords:"else"},{className:"template-tag",begin:/\{\{\//,end:/\}\}/,contains:[b]},{className:"template-variable",begin:/\{\{\{/,end:/\}\}\}/,contains:[h]},{className:"template-variable",begin:/\{\{/,end:/\}\}/,contains:[h]}]}}}()); -hljs.registerLanguage("haskell",function(){"use strict";return function(e){var n={variants:[e.COMMENT("--","$"),e.COMMENT("{-","-}",{contains:["self"]})]},i={className:"meta",begin:"{-#",end:"#-}"},a={className:"meta",begin:"^#",end:"$"},s={className:"type",begin:"\\b[A-Z][\\w']*",relevance:0},l={begin:"\\(",end:"\\)",illegal:'"',contains:[i,a,{className:"type",begin:"\\b[A-Z][\\w]*(\\((\\.\\.|,|\\w+)\\))?"},e.inherit(e.TITLE_MODE,{begin:"[_a-z][\\w']*"}),n]};return{name:"Haskell",aliases:["hs"],keywords:"let in if then else case of where do module import hiding qualified type data newtype deriving class instance as default infix infixl infixr foreign export ccall stdcall cplusplus jvm dotnet safe unsafe family forall mdo proc rec",contains:[{beginKeywords:"module",end:"where",keywords:"module where",contains:[l,n],illegal:"\\W\\.|;"},{begin:"\\bimport\\b",end:"$",keywords:"import qualified as hiding",contains:[l,n],illegal:"\\W\\.|;"},{className:"class",begin:"^(\\s*)?(class|instance)\\b",end:"where",keywords:"class family instance where",contains:[s,l,n]},{className:"class",begin:"\\b(data|(new)?type)\\b",end:"$",keywords:"data family type newtype deriving",contains:[i,s,l,{begin:"{",end:"}",contains:l.contains},n]},{beginKeywords:"default",end:"$",contains:[s,l,n]},{beginKeywords:"infix infixl infixr",end:"$",contains:[e.C_NUMBER_MODE,n]},{begin:"\\bforeign\\b",end:"$",keywords:"foreign import export ccall stdcall cplusplus jvm dotnet safe unsafe",contains:[s,e.QUOTE_STRING_MODE,n]},{className:"meta",begin:"#!\\/usr\\/bin\\/env runhaskell",end:"$"},i,a,e.QUOTE_STRING_MODE,e.C_NUMBER_MODE,s,e.inherit(e.TITLE_MODE,{begin:"^[_a-z][\\w']*"}),n,{begin:"->|<-"}]}}}()); -hljs.registerLanguage("julia",function(){"use strict";return function(e){var r="[A-Za-z_\\u00A1-\\uFFFF][A-Za-z_0-9\\u00A1-\\uFFFF]*",t={$pattern:r,keyword:"in isa where baremodule begin break catch ccall const continue do else elseif end export false finally for function global if import importall let local macro module quote return true try using while type immutable abstract bitstype typealias ",literal:"true false ARGS C_NULL DevNull ENDIAN_BOM ENV I Inf Inf16 Inf32 Inf64 InsertionSort JULIA_HOME LOAD_PATH MergeSort NaN NaN16 NaN32 NaN64 PROGRAM_FILE QuickSort RoundDown RoundFromZero RoundNearest RoundNearestTiesAway RoundNearestTiesUp RoundToZero RoundUp STDERR STDIN STDOUT VERSION catalan e|0 eu|0 eulergamma golden im nothing pi γ π φ ",built_in:"ANY AbstractArray AbstractChannel AbstractFloat AbstractMatrix AbstractRNG 
AbstractSerializer AbstractSet AbstractSparseArray AbstractSparseMatrix AbstractSparseVector AbstractString AbstractUnitRange AbstractVecOrMat AbstractVector Any ArgumentError Array AssertionError Associative Base64DecodePipe Base64EncodePipe Bidiagonal BigFloat BigInt BitArray BitMatrix BitVector Bool BoundsError BufferStream CachingPool CapturedException CartesianIndex CartesianRange Cchar Cdouble Cfloat Channel Char Cint Cintmax_t Clong Clonglong ClusterManager Cmd CodeInfo Colon Complex Complex128 Complex32 Complex64 CompositeException Condition ConjArray ConjMatrix ConjVector Cptrdiff_t Cshort Csize_t Cssize_t Cstring Cuchar Cuint Cuintmax_t Culong Culonglong Cushort Cwchar_t Cwstring DataType Date DateFormat DateTime DenseArray DenseMatrix DenseVecOrMat DenseVector Diagonal Dict DimensionMismatch Dims DirectIndexString Display DivideError DomainError EOFError EachLine Enum Enumerate ErrorException Exception ExponentialBackOff Expr Factorization FileMonitor Float16 Float32 Float64 Function Future GlobalRef GotoNode HTML Hermitian IO IOBuffer IOContext IOStream IPAddr IPv4 IPv6 IndexCartesian IndexLinear IndexStyle InexactError InitError Int Int128 Int16 Int32 Int64 Int8 IntSet Integer InterruptException InvalidStateException Irrational KeyError LabelNode LinSpace LineNumberNode LoadError LowerTriangular MIME Matrix MersenneTwister Method MethodError MethodTable Module NTuple NewvarNode NullException Nullable Number ObjectIdDict OrdinalRange OutOfMemoryError OverflowError Pair ParseError PartialQuickSort PermutedDimsArray Pipe PollingFileWatcher ProcessExitedException Ptr QuoteNode RandomDevice Range RangeIndex Rational RawFD ReadOnlyMemoryError Real ReentrantLock Ref Regex RegexMatch RemoteChannel RemoteException RevString RoundingMode RowVector SSAValue SegmentationFault SerializationState Set SharedArray SharedMatrix SharedVector Signed SimpleVector Slot SlotNumber SparseMatrixCSC SparseVector StackFrame StackOverflowError StackTrace StepRange StepRangeLen StridedArray StridedMatrix StridedVecOrMat StridedVector String SubArray SubString SymTridiagonal Symbol Symmetric SystemError TCPSocket Task Text TextDisplay Timer Tridiagonal Tuple Type TypeError TypeMapEntry TypeMapLevel TypeName TypeVar TypedSlot UDPSocket UInt UInt128 UInt16 UInt32 UInt64 UInt8 UndefRefError UndefVarError UnicodeError UniformScaling Union UnionAll UnitRange Unsigned UpperTriangular Val Vararg VecElement VecOrMat Vector VersionNumber Void WeakKeyDict WeakRef WorkerConfig WorkerPool "},a={keywords:t,illegal:/<\//},n={className:"subst",begin:/\$\(/,end:/\)/,keywords:t},o={className:"variable",begin:"\\$"+r},i={className:"string",contains:[e.BACKSLASH_ESCAPE,n,o],variants:[{begin:/\w*"""/,end:/"""\w*/,relevance:10},{begin:/\w*"/,end:/"\w*/}]},l={className:"string",contains:[e.BACKSLASH_ESCAPE,n,o],begin:"`",end:"`"},s={className:"meta",begin:"@"+r};return a.name="Julia",a.contains=[{className:"number",begin:/(\b0x[\d_]*(\.[\d_]*)?|0x\.\d[\d_]*)p[-+]?\d+|\b0[box][a-fA-F0-9][a-fA-F0-9_]*|(\b\d[\d_]*(\.[\d_]*)?|\.\d[\d_]*)([eEfF][-+]?\d+)?/,relevance:0},{className:"string",begin:/'(.|\\[xXuU][a-zA-Z0-9]+)'/},i,l,s,{className:"comment",variants:[{begin:"#=",end:"=#",relevance:10},{begin:"#",end:"$"}]},e.HASH_COMMENT_MODE,{className:"keyword",begin:"\\b(((abstract|primitive)\\s+)type|(mutable\\s+)?struct)\\b"},{begin:/<:/}],n.contains=a.contains,a}}()); -hljs.registerLanguage("nim",function(){"use strict";return function(e){return{name:"Nim",aliases:["nim"],keywords:{keyword:"addr and as asm bind block break case cast 
const continue converter discard distinct div do elif else end enum except export finally for from func generic if import in include interface is isnot iterator let macro method mixin mod nil not notin object of or out proc ptr raise ref return shl shr static template try tuple type using var when while with without xor yield",literal:"shared guarded stdin stdout stderr result true false",built_in:"int int8 int16 int32 int64 uint uint8 uint16 uint32 uint64 float float32 float64 bool char string cstring pointer expr stmt void auto any range array openarray varargs seq set clong culong cchar cschar cshort cint csize clonglong cfloat cdouble clongdouble cuchar cushort cuint culonglong cstringarray semistatic"},contains:[{className:"meta",begin:/{\./,end:/\.}/,relevance:10},{className:"string",begin:/[a-zA-Z]\w*"/,end:/"/,contains:[{begin:/""/}]},{className:"string",begin:/([a-zA-Z]\w*)?"""/,end:/"""/},e.QUOTE_STRING_MODE,{className:"type",begin:/\b[A-Z]\w+\b/,relevance:0},{className:"number",relevance:0,variants:[{begin:/\b(0[xX][0-9a-fA-F][_0-9a-fA-F]*)('?[iIuU](8|16|32|64))?/},{begin:/\b(0o[0-7][_0-7]*)('?[iIuUfF](8|16|32|64))?/},{begin:/\b(0(b|B)[01][_01]*)('?[iIuUfF](8|16|32|64))?/},{begin:/\b(\d[_\d]*)('?[iIuUfF](8|16|32|64))?/}]},e.HASH_COMMENT_MODE]}}}()); -hljs.registerLanguage("r",function(){"use strict";return function(e){var n="([a-zA-Z]|\\.[a-zA-Z.])[a-zA-Z0-9._]*";return{name:"R",contains:[e.HASH_COMMENT_MODE,{begin:n,keywords:{$pattern:n,keyword:"function if in break next repeat else for return switch while try tryCatch stop warning require library attach detach source setMethod setGeneric setGroupGeneric setClass ...",literal:"NULL NA TRUE FALSE T F Inf NaN NA_integer_|10 NA_real_|10 NA_character_|10 NA_complex_|10"},relevance:0},{className:"number",begin:"0[xX][0-9a-fA-F]+[Li]?\\b",relevance:0},{className:"number",begin:"\\d+(?:[eE][+\\-]?\\d*)?L\\b",relevance:0},{className:"number",begin:"\\d+\\.(?!\\d)(?:i\\b)?",relevance:0},{className:"number",begin:"\\d+(?:\\.\\d*)?(?:[eE][+\\-]?\\d*)?i?\\b",relevance:0},{className:"number",begin:"\\.\\d+(?:[eE][+\\-]?\\d*)?i?\\b",relevance:0},{begin:"`",end:"`",relevance:0},{className:"string",contains:[e.BACKSLASH_ESCAPE],variants:[{begin:'"',end:'"'},{begin:"'",end:"'"}]}]}}}()); -hljs.registerLanguage("scala",function(){"use strict";return function(e){var n={className:"subst",variants:[{begin:"\\$[A-Za-z0-9_]+"},{begin:"\\${",end:"}"}]},a={className:"string",variants:[{begin:'"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{begin:'"""',end:'"""',relevance:10},{begin:'[a-z]+"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE,n]},{className:"string",begin:'[a-z]+"""',end:'"""',contains:[n],relevance:10}]},s={className:"type",begin:"\\b[A-Z][A-Za-z0-9_]*",relevance:0},t={className:"title",begin:/[^0-9\n\t "'(),.`{}\[\]:;][^\n\t "'(),.`{}\[\]:;]+|[^0-9\n\t "'(),.`{}\[\]:;=]/,relevance:0},i={className:"class",beginKeywords:"class object trait type",end:/[:={\[\n;]/,excludeEnd:!0,contains:[{beginKeywords:"extends with",relevance:10},{begin:/\[/,end:/\]/,excludeBegin:!0,excludeEnd:!0,relevance:0,contains:[s]},{className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,relevance:0,contains:[s]},t]},l={className:"function",beginKeywords:"def",end:/[:={\[(\n;]/,excludeEnd:!0,contains:[t]};return{name:"Scala",keywords:{literal:"true false null",keyword:"type yield lazy override def with val var sealed abstract private trait object if forSome for while throw finally protected extends import final return else break new catch 
super class case package default try this match continue throws implicit"},contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,a,{className:"symbol",begin:"'\\w[\\w\\d_]*(?!')"},s,l,i,e.C_NUMBER_MODE,{className:"meta",begin:"@[A-Za-z]+"}]}}}()); -hljs.registerLanguage("x86asm",function(){"use strict";return function(s){return{name:"Intel x86 Assembly",case_insensitive:!0,keywords:{$pattern:"[.%]?"+s.IDENT_RE,keyword:"lock rep repe repz repne repnz xaquire xrelease bnd nobnd aaa aad aam aas adc add and arpl bb0_reset bb1_reset bound bsf bsr bswap bt btc btr bts call cbw cdq cdqe clc cld cli clts cmc cmp cmpsb cmpsd cmpsq cmpsw cmpxchg cmpxchg486 cmpxchg8b cmpxchg16b cpuid cpu_read cpu_write cqo cwd cwde daa das dec div dmint emms enter equ f2xm1 fabs fadd faddp fbld fbstp fchs fclex fcmovb fcmovbe fcmove fcmovnb fcmovnbe fcmovne fcmovnu fcmovu fcom fcomi fcomip fcomp fcompp fcos fdecstp fdisi fdiv fdivp fdivr fdivrp femms feni ffree ffreep fiadd ficom ficomp fidiv fidivr fild fimul fincstp finit fist fistp fisttp fisub fisubr fld fld1 fldcw fldenv fldl2e fldl2t fldlg2 fldln2 fldpi fldz fmul fmulp fnclex fndisi fneni fninit fnop fnsave fnstcw fnstenv fnstsw fpatan fprem fprem1 fptan frndint frstor fsave fscale fsetpm fsin fsincos fsqrt fst fstcw fstenv fstp fstsw fsub fsubp fsubr fsubrp ftst fucom fucomi fucomip fucomp fucompp fxam fxch fxtract fyl2x fyl2xp1 hlt ibts icebp idiv imul in inc incbin insb insd insw int int01 int1 int03 int3 into invd invpcid invlpg invlpga iret iretd iretq iretw jcxz jecxz jrcxz jmp jmpe lahf lar lds lea leave les lfence lfs lgdt lgs lidt lldt lmsw loadall loadall286 lodsb lodsd lodsq lodsw loop loope loopne loopnz loopz lsl lss ltr mfence monitor mov movd movq movsb movsd movsq movsw movsx movsxd movzx mul mwait neg nop not or out outsb outsd outsw packssdw packsswb packuswb paddb paddd paddsb paddsiw paddsw paddusb paddusw paddw pand pandn pause paveb pavgusb pcmpeqb pcmpeqd pcmpeqw pcmpgtb pcmpgtd pcmpgtw pdistib pf2id pfacc pfadd pfcmpeq pfcmpge pfcmpgt pfmax pfmin pfmul pfrcp pfrcpit1 pfrcpit2 pfrsqit1 pfrsqrt pfsub pfsubr pi2fd pmachriw pmaddwd pmagw pmulhriw pmulhrwa pmulhrwc pmulhw pmullw pmvgezb pmvlzb pmvnzb pmvzb pop popa popad popaw popf popfd popfq popfw por prefetch prefetchw pslld psllq psllw psrad psraw psrld psrlq psrlw psubb psubd psubsb psubsiw psubsw psubusb psubusw psubw punpckhbw punpckhdq punpckhwd punpcklbw punpckldq punpcklwd push pusha pushad pushaw pushf pushfd pushfq pushfw pxor rcl rcr rdshr rdmsr rdpmc rdtsc rdtscp ret retf retn rol ror rdm rsdc rsldt rsm rsts sahf sal salc sar sbb scasb scasd scasq scasw sfence sgdt shl shld shr shrd sidt sldt skinit smi smint smintold smsw stc std sti stosb stosd stosq stosw str sub svdc svldt svts swapgs syscall sysenter sysexit sysret test ud0 ud1 ud2b ud2 ud2a umov verr verw fwait wbinvd wrshr wrmsr xadd xbts xchg xlatb xlat xor cmove cmovz cmovne cmovnz cmova cmovnbe cmovae cmovnb cmovb cmovnae cmovbe cmovna cmovg cmovnle cmovge cmovnl cmovl cmovnge cmovle cmovng cmovc cmovnc cmovo cmovno cmovs cmovns cmovp cmovpe cmovnp cmovpo je jz jne jnz ja jnbe jae jnb jb jnae jbe jna jg jnle jge jnl jl jnge jle jng jc jnc jo jno js jns jpo jnp jpe jp sete setz setne setnz seta setnbe setae setnb setnc setb setnae setcset setbe setna setg setnle setge setnl setl setnge setle setng sets setns seto setno setpe setp setpo setnp addps addss andnps andps cmpeqps cmpeqss cmpleps cmpless cmpltps cmpltss cmpneqps cmpneqss cmpnleps cmpnless cmpnltps cmpnltss cmpordps cmpordss cmpunordps cmpunordss cmpps cmpss 
comiss cvtpi2ps cvtps2pi cvtsi2ss cvtss2si cvttps2pi cvttss2si divps divss ldmxcsr maxps maxss minps minss movaps movhps movlhps movlps movhlps movmskps movntps movss movups mulps mulss orps rcpps rcpss rsqrtps rsqrtss shufps sqrtps sqrtss stmxcsr subps subss ucomiss unpckhps unpcklps xorps fxrstor fxrstor64 fxsave fxsave64 xgetbv xsetbv xsave xsave64 xsaveopt xsaveopt64 xrstor xrstor64 prefetchnta prefetcht0 prefetcht1 prefetcht2 maskmovq movntq pavgb pavgw pextrw pinsrw pmaxsw pmaxub pminsw pminub pmovmskb pmulhuw psadbw pshufw pf2iw pfnacc pfpnacc pi2fw pswapd maskmovdqu clflush movntdq movnti movntpd movdqa movdqu movdq2q movq2dq paddq pmuludq pshufd pshufhw pshuflw pslldq psrldq psubq punpckhqdq punpcklqdq addpd addsd andnpd andpd cmpeqpd cmpeqsd cmplepd cmplesd cmpltpd cmpltsd cmpneqpd cmpneqsd cmpnlepd cmpnlesd cmpnltpd cmpnltsd cmpordpd cmpordsd cmpunordpd cmpunordsd cmppd comisd cvtdq2pd cvtdq2ps cvtpd2dq cvtpd2pi cvtpd2ps cvtpi2pd cvtps2dq cvtps2pd cvtsd2si cvtsd2ss cvtsi2sd cvtss2sd cvttpd2pi cvttpd2dq cvttps2dq cvttsd2si divpd divsd maxpd maxsd minpd minsd movapd movhpd movlpd movmskpd movupd mulpd mulsd orpd shufpd sqrtpd sqrtsd subpd subsd ucomisd unpckhpd unpcklpd xorpd addsubpd addsubps haddpd haddps hsubpd hsubps lddqu movddup movshdup movsldup clgi stgi vmcall vmclear vmfunc vmlaunch vmload vmmcall vmptrld vmptrst vmread vmresume vmrun vmsave vmwrite vmxoff vmxon invept invvpid pabsb pabsw pabsd palignr phaddw phaddd phaddsw phsubw phsubd phsubsw pmaddubsw pmulhrsw pshufb psignb psignw psignd extrq insertq movntsd movntss lzcnt blendpd blendps blendvpd blendvps dppd dpps extractps insertps movntdqa mpsadbw packusdw pblendvb pblendw pcmpeqq pextrb pextrd pextrq phminposuw pinsrb pinsrd pinsrq pmaxsb pmaxsd pmaxud pmaxuw pminsb pminsd pminud pminuw pmovsxbw pmovsxbd pmovsxbq pmovsxwd pmovsxwq pmovsxdq pmovzxbw pmovzxbd pmovzxbq pmovzxwd pmovzxwq pmovzxdq pmuldq pmulld ptest roundpd roundps roundsd roundss crc32 pcmpestri pcmpestrm pcmpistri pcmpistrm pcmpgtq popcnt getsec pfrcpv pfrsqrtv movbe aesenc aesenclast aesdec aesdeclast aesimc aeskeygenassist vaesenc vaesenclast vaesdec vaesdeclast vaesimc vaeskeygenassist vaddpd vaddps vaddsd vaddss vaddsubpd vaddsubps vandpd vandps vandnpd vandnps vblendpd vblendps vblendvpd vblendvps vbroadcastss vbroadcastsd vbroadcastf128 vcmpeq_ospd vcmpeqpd vcmplt_ospd vcmpltpd vcmple_ospd vcmplepd vcmpunord_qpd vcmpunordpd vcmpneq_uqpd vcmpneqpd vcmpnlt_uspd vcmpnltpd vcmpnle_uspd vcmpnlepd vcmpord_qpd vcmpordpd vcmpeq_uqpd vcmpnge_uspd vcmpngepd vcmpngt_uspd vcmpngtpd vcmpfalse_oqpd vcmpfalsepd vcmpneq_oqpd vcmpge_ospd vcmpgepd vcmpgt_ospd vcmpgtpd vcmptrue_uqpd vcmptruepd vcmplt_oqpd vcmple_oqpd vcmpunord_spd vcmpneq_uspd vcmpnlt_uqpd vcmpnle_uqpd vcmpord_spd vcmpeq_uspd vcmpnge_uqpd vcmpngt_uqpd vcmpfalse_ospd vcmpneq_ospd vcmpge_oqpd vcmpgt_oqpd vcmptrue_uspd vcmppd vcmpeq_osps vcmpeqps vcmplt_osps vcmpltps vcmple_osps vcmpleps vcmpunord_qps vcmpunordps vcmpneq_uqps vcmpneqps vcmpnlt_usps vcmpnltps vcmpnle_usps vcmpnleps vcmpord_qps vcmpordps vcmpeq_uqps vcmpnge_usps vcmpngeps vcmpngt_usps vcmpngtps vcmpfalse_oqps vcmpfalseps vcmpneq_oqps vcmpge_osps vcmpgeps vcmpgt_osps vcmpgtps vcmptrue_uqps vcmptrueps vcmplt_oqps vcmple_oqps vcmpunord_sps vcmpneq_usps vcmpnlt_uqps vcmpnle_uqps vcmpord_sps vcmpeq_usps vcmpnge_uqps vcmpngt_uqps vcmpfalse_osps vcmpneq_osps vcmpge_oqps vcmpgt_oqps vcmptrue_usps vcmpps vcmpeq_ossd vcmpeqsd vcmplt_ossd vcmpltsd vcmple_ossd vcmplesd vcmpunord_qsd vcmpunordsd vcmpneq_uqsd vcmpneqsd vcmpnlt_ussd vcmpnltsd 
vcmpnle_ussd vcmpnlesd vcmpord_qsd vcmpordsd vcmpeq_uqsd vcmpnge_ussd vcmpngesd vcmpngt_ussd vcmpngtsd vcmpfalse_oqsd vcmpfalsesd vcmpneq_oqsd vcmpge_ossd vcmpgesd vcmpgt_ossd vcmpgtsd vcmptrue_uqsd vcmptruesd vcmplt_oqsd vcmple_oqsd vcmpunord_ssd vcmpneq_ussd vcmpnlt_uqsd vcmpnle_uqsd vcmpord_ssd vcmpeq_ussd vcmpnge_uqsd vcmpngt_uqsd vcmpfalse_ossd vcmpneq_ossd vcmpge_oqsd vcmpgt_oqsd vcmptrue_ussd vcmpsd vcmpeq_osss vcmpeqss vcmplt_osss vcmpltss vcmple_osss vcmpless vcmpunord_qss vcmpunordss vcmpneq_uqss vcmpneqss vcmpnlt_usss vcmpnltss vcmpnle_usss vcmpnless vcmpord_qss vcmpordss vcmpeq_uqss vcmpnge_usss vcmpngess vcmpngt_usss vcmpngtss vcmpfalse_oqss vcmpfalsess vcmpneq_oqss vcmpge_osss vcmpgess vcmpgt_osss vcmpgtss vcmptrue_uqss vcmptruess vcmplt_oqss vcmple_oqss vcmpunord_sss vcmpneq_usss vcmpnlt_uqss vcmpnle_uqss vcmpord_sss vcmpeq_usss vcmpnge_uqss vcmpngt_uqss vcmpfalse_osss vcmpneq_osss vcmpge_oqss vcmpgt_oqss vcmptrue_usss vcmpss vcomisd vcomiss vcvtdq2pd vcvtdq2ps vcvtpd2dq vcvtpd2ps vcvtps2dq vcvtps2pd vcvtsd2si vcvtsd2ss vcvtsi2sd vcvtsi2ss vcvtss2sd vcvtss2si vcvttpd2dq vcvttps2dq vcvttsd2si vcvttss2si vdivpd vdivps vdivsd vdivss vdppd vdpps vextractf128 vextractps vhaddpd vhaddps vhsubpd vhsubps vinsertf128 vinsertps vlddqu vldqqu vldmxcsr vmaskmovdqu vmaskmovps vmaskmovpd vmaxpd vmaxps vmaxsd vmaxss vminpd vminps vminsd vminss vmovapd vmovaps vmovd vmovq vmovddup vmovdqa vmovqqa vmovdqu vmovqqu vmovhlps vmovhpd vmovhps vmovlhps vmovlpd vmovlps vmovmskpd vmovmskps vmovntdq vmovntqq vmovntdqa vmovntpd vmovntps vmovsd vmovshdup vmovsldup vmovss vmovupd vmovups vmpsadbw vmulpd vmulps vmulsd vmulss vorpd vorps vpabsb vpabsw vpabsd vpacksswb vpackssdw vpackuswb vpackusdw vpaddb vpaddw vpaddd vpaddq vpaddsb vpaddsw vpaddusb vpaddusw vpalignr vpand vpandn vpavgb vpavgw vpblendvb vpblendw vpcmpestri vpcmpestrm vpcmpistri vpcmpistrm vpcmpeqb vpcmpeqw vpcmpeqd vpcmpeqq vpcmpgtb vpcmpgtw vpcmpgtd vpcmpgtq vpermilpd vpermilps vperm2f128 vpextrb vpextrw vpextrd vpextrq vphaddw vphaddd vphaddsw vphminposuw vphsubw vphsubd vphsubsw vpinsrb vpinsrw vpinsrd vpinsrq vpmaddwd vpmaddubsw vpmaxsb vpmaxsw vpmaxsd vpmaxub vpmaxuw vpmaxud vpminsb vpminsw vpminsd vpminub vpminuw vpminud vpmovmskb vpmovsxbw vpmovsxbd vpmovsxbq vpmovsxwd vpmovsxwq vpmovsxdq vpmovzxbw vpmovzxbd vpmovzxbq vpmovzxwd vpmovzxwq vpmovzxdq vpmulhuw vpmulhrsw vpmulhw vpmullw vpmulld vpmuludq vpmuldq vpor vpsadbw vpshufb vpshufd vpshufhw vpshuflw vpsignb vpsignw vpsignd vpslldq vpsrldq vpsllw vpslld vpsllq vpsraw vpsrad vpsrlw vpsrld vpsrlq vptest vpsubb vpsubw vpsubd vpsubq vpsubsb vpsubsw vpsubusb vpsubusw vpunpckhbw vpunpckhwd vpunpckhdq vpunpckhqdq vpunpcklbw vpunpcklwd vpunpckldq vpunpcklqdq vpxor vrcpps vrcpss vrsqrtps vrsqrtss vroundpd vroundps vroundsd vroundss vshufpd vshufps vsqrtpd vsqrtps vsqrtsd vsqrtss vstmxcsr vsubpd vsubps vsubsd vsubss vtestps vtestpd vucomisd vucomiss vunpckhpd vunpckhps vunpcklpd vunpcklps vxorpd vxorps vzeroall vzeroupper pclmullqlqdq pclmulhqlqdq pclmullqhqdq pclmulhqhqdq pclmulqdq vpclmullqlqdq vpclmulhqlqdq vpclmullqhqdq vpclmulhqhqdq vpclmulqdq vfmadd132ps vfmadd132pd vfmadd312ps vfmadd312pd vfmadd213ps vfmadd213pd vfmadd123ps vfmadd123pd vfmadd231ps vfmadd231pd vfmadd321ps vfmadd321pd vfmaddsub132ps vfmaddsub132pd vfmaddsub312ps vfmaddsub312pd vfmaddsub213ps vfmaddsub213pd vfmaddsub123ps vfmaddsub123pd vfmaddsub231ps vfmaddsub231pd vfmaddsub321ps vfmaddsub321pd vfmsub132ps vfmsub132pd vfmsub312ps vfmsub312pd vfmsub213ps vfmsub213pd vfmsub123ps vfmsub123pd vfmsub231ps vfmsub231pd 
vfmsub321ps vfmsub321pd vfmsubadd132ps vfmsubadd132pd vfmsubadd312ps vfmsubadd312pd vfmsubadd213ps vfmsubadd213pd vfmsubadd123ps vfmsubadd123pd vfmsubadd231ps vfmsubadd231pd vfmsubadd321ps vfmsubadd321pd vfnmadd132ps vfnmadd132pd vfnmadd312ps vfnmadd312pd vfnmadd213ps vfnmadd213pd vfnmadd123ps vfnmadd123pd vfnmadd231ps vfnmadd231pd vfnmadd321ps vfnmadd321pd vfnmsub132ps vfnmsub132pd vfnmsub312ps vfnmsub312pd vfnmsub213ps vfnmsub213pd vfnmsub123ps vfnmsub123pd vfnmsub231ps vfnmsub231pd vfnmsub321ps vfnmsub321pd vfmadd132ss vfmadd132sd vfmadd312ss vfmadd312sd vfmadd213ss vfmadd213sd vfmadd123ss vfmadd123sd vfmadd231ss vfmadd231sd vfmadd321ss vfmadd321sd vfmsub132ss vfmsub132sd vfmsub312ss vfmsub312sd vfmsub213ss vfmsub213sd vfmsub123ss vfmsub123sd vfmsub231ss vfmsub231sd vfmsub321ss vfmsub321sd vfnmadd132ss vfnmadd132sd vfnmadd312ss vfnmadd312sd vfnmadd213ss vfnmadd213sd vfnmadd123ss vfnmadd123sd vfnmadd231ss vfnmadd231sd vfnmadd321ss vfnmadd321sd vfnmsub132ss vfnmsub132sd vfnmsub312ss vfnmsub312sd vfnmsub213ss vfnmsub213sd vfnmsub123ss vfnmsub123sd vfnmsub231ss vfnmsub231sd vfnmsub321ss vfnmsub321sd rdfsbase rdgsbase rdrand wrfsbase wrgsbase vcvtph2ps vcvtps2ph adcx adox rdseed clac stac xstore xcryptecb xcryptcbc xcryptctr xcryptcfb xcryptofb montmul xsha1 xsha256 llwpcb slwpcb lwpval lwpins vfmaddpd vfmaddps vfmaddsd vfmaddss vfmaddsubpd vfmaddsubps vfmsubaddpd vfmsubaddps vfmsubpd vfmsubps vfmsubsd vfmsubss vfnmaddpd vfnmaddps vfnmaddsd vfnmaddss vfnmsubpd vfnmsubps vfnmsubsd vfnmsubss vfrczpd vfrczps vfrczsd vfrczss vpcmov vpcomb vpcomd vpcomq vpcomub vpcomud vpcomuq vpcomuw vpcomw vphaddbd vphaddbq vphaddbw vphadddq vphaddubd vphaddubq vphaddubw vphaddudq vphadduwd vphadduwq vphaddwd vphaddwq vphsubbw vphsubdq vphsubwd vpmacsdd vpmacsdqh vpmacsdql vpmacssdd vpmacssdqh vpmacssdql vpmacsswd vpmacssww vpmacswd vpmacsww vpmadcsswd vpmadcswd vpperm vprotb vprotd vprotq vprotw vpshab vpshad vpshaq vpshaw vpshlb vpshld vpshlq vpshlw vbroadcasti128 vpblendd vpbroadcastb vpbroadcastw vpbroadcastd vpbroadcastq vpermd vpermpd vpermps vpermq vperm2i128 vextracti128 vinserti128 vpmaskmovd vpmaskmovq vpsllvd vpsllvq vpsravd vpsrlvd vpsrlvq vgatherdpd vgatherqpd vgatherdps vgatherqps vpgatherdd vpgatherqd vpgatherdq vpgatherqq xabort xbegin xend xtest andn bextr blci blcic blsi blsic blcfill blsfill blcmsk blsmsk blsr blcs bzhi mulx pdep pext rorx sarx shlx shrx tzcnt tzmsk t1mskc valignd valignq vblendmpd vblendmps vbroadcastf32x4 vbroadcastf64x4 vbroadcasti32x4 vbroadcasti64x4 vcompresspd vcompressps vcvtpd2udq vcvtps2udq vcvtsd2usi vcvtss2usi vcvttpd2udq vcvttps2udq vcvttsd2usi vcvttss2usi vcvtudq2pd vcvtudq2ps vcvtusi2sd vcvtusi2ss vexpandpd vexpandps vextractf32x4 vextractf64x4 vextracti32x4 vextracti64x4 vfixupimmpd vfixupimmps vfixupimmsd vfixupimmss vgetexppd vgetexpps vgetexpsd vgetexpss vgetmantpd vgetmantps vgetmantsd vgetmantss vinsertf32x4 vinsertf64x4 vinserti32x4 vinserti64x4 vmovdqa32 vmovdqa64 vmovdqu32 vmovdqu64 vpabsq vpandd vpandnd vpandnq vpandq vpblendmd vpblendmq vpcmpltd vpcmpled vpcmpneqd vpcmpnltd vpcmpnled vpcmpd vpcmpltq vpcmpleq vpcmpneqq vpcmpnltq vpcmpnleq vpcmpq vpcmpequd vpcmpltud vpcmpleud vpcmpnequd vpcmpnltud vpcmpnleud vpcmpud vpcmpequq vpcmpltuq vpcmpleuq vpcmpnequq vpcmpnltuq vpcmpnleuq vpcmpuq vpcompressd vpcompressq vpermi2d vpermi2pd vpermi2ps vpermi2q vpermt2d vpermt2pd vpermt2ps vpermt2q vpexpandd vpexpandq vpmaxsq vpmaxuq vpminsq vpminuq vpmovdb vpmovdw vpmovqb vpmovqd vpmovqw vpmovsdb vpmovsdw vpmovsqb vpmovsqd vpmovsqw vpmovusdb vpmovusdw vpmovusqb 
vpmovusqd vpmovusqw vpord vporq vprold vprolq vprolvd vprolvq vprord vprorq vprorvd vprorvq vpscatterdd vpscatterdq vpscatterqd vpscatterqq vpsraq vpsravq vpternlogd vpternlogq vptestmd vptestmq vptestnmd vptestnmq vpxord vpxorq vrcp14pd vrcp14ps vrcp14sd vrcp14ss vrndscalepd vrndscaleps vrndscalesd vrndscaless vrsqrt14pd vrsqrt14ps vrsqrt14sd vrsqrt14ss vscalefpd vscalefps vscalefsd vscalefss vscatterdpd vscatterdps vscatterqpd vscatterqps vshuff32x4 vshuff64x2 vshufi32x4 vshufi64x2 kandnw kandw kmovw knotw kortestw korw kshiftlw kshiftrw kunpckbw kxnorw kxorw vpbroadcastmb2q vpbroadcastmw2d vpconflictd vpconflictq vplzcntd vplzcntq vexp2pd vexp2ps vrcp28pd vrcp28ps vrcp28sd vrcp28ss vrsqrt28pd vrsqrt28ps vrsqrt28sd vrsqrt28ss vgatherpf0dpd vgatherpf0dps vgatherpf0qpd vgatherpf0qps vgatherpf1dpd vgatherpf1dps vgatherpf1qpd vgatherpf1qps vscatterpf0dpd vscatterpf0dps vscatterpf0qpd vscatterpf0qps vscatterpf1dpd vscatterpf1dps vscatterpf1qpd vscatterpf1qps prefetchwt1 bndmk bndcl bndcu bndcn bndmov bndldx bndstx sha1rnds4 sha1nexte sha1msg1 sha1msg2 sha256rnds2 sha256msg1 sha256msg2 hint_nop0 hint_nop1 hint_nop2 hint_nop3 hint_nop4 hint_nop5 hint_nop6 hint_nop7 hint_nop8 hint_nop9 hint_nop10 hint_nop11 hint_nop12 hint_nop13 hint_nop14 hint_nop15 hint_nop16 hint_nop17 hint_nop18 hint_nop19 hint_nop20 hint_nop21 hint_nop22 hint_nop23 hint_nop24 hint_nop25 hint_nop26 hint_nop27 hint_nop28 hint_nop29 hint_nop30 hint_nop31 hint_nop32 hint_nop33 hint_nop34 hint_nop35 hint_nop36 hint_nop37 hint_nop38 hint_nop39 hint_nop40 hint_nop41 hint_nop42 hint_nop43 hint_nop44 hint_nop45 hint_nop46 hint_nop47 hint_nop48 hint_nop49 hint_nop50 hint_nop51 hint_nop52 hint_nop53 hint_nop54 hint_nop55 hint_nop56 hint_nop57 hint_nop58 hint_nop59 hint_nop60 hint_nop61 hint_nop62 hint_nop63",built_in:"ip eip rip al ah bl bh cl ch dl dh sil dil bpl spl r8b r9b r10b r11b r12b r13b r14b r15b ax bx cx dx si di bp sp r8w r9w r10w r11w r12w r13w r14w r15w eax ebx ecx edx esi edi ebp esp eip r8d r9d r10d r11d r12d r13d r14d r15d rax rbx rcx rdx rsi rdi rbp rsp r8 r9 r10 r11 r12 r13 r14 r15 cs ds es fs gs ss st st0 st1 st2 st3 st4 st5 st6 st7 mm0 mm1 mm2 mm3 mm4 mm5 mm6 mm7 xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13 xmm14 xmm15 xmm16 xmm17 xmm18 xmm19 xmm20 xmm21 xmm22 xmm23 xmm24 xmm25 xmm26 xmm27 xmm28 xmm29 xmm30 xmm31 ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm8 ymm9 ymm10 ymm11 ymm12 ymm13 ymm14 ymm15 ymm16 ymm17 ymm18 ymm19 ymm20 ymm21 ymm22 ymm23 ymm24 ymm25 ymm26 ymm27 ymm28 ymm29 ymm30 ymm31 zmm0 zmm1 zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm9 zmm10 zmm11 zmm12 zmm13 zmm14 zmm15 zmm16 zmm17 zmm18 zmm19 zmm20 zmm21 zmm22 zmm23 zmm24 zmm25 zmm26 zmm27 zmm28 zmm29 zmm30 zmm31 k0 k1 k2 k3 k4 k5 k6 k7 bnd0 bnd1 bnd2 bnd3 cr0 cr1 cr2 cr3 cr4 cr8 dr0 dr1 dr2 dr3 dr8 tr3 tr4 tr5 tr6 tr7 r0 r1 r2 r3 r4 r5 r6 r7 r0b r1b r2b r3b r4b r5b r6b r7b r0w r1w r2w r3w r4w r5w r6w r7w r0d r1d r2d r3d r4d r5d r6d r7d r0h r1h r2h r3h r0l r1l r2l r3l r4l r5l r6l r7l r8l r9l r10l r11l r12l r13l r14l r15l db dw dd dq dt ddq do dy dz resb resw resd resq rest resdq reso resy resz incbin equ times byte word dword qword nosplit rel abs seg wrt strict near far a32 ptr",meta:"%define %xdefine %+ %undef %defstr %deftok %assign %strcat %strlen %substr %rotate %elif %else %endif %if %ifmacro %ifctx %ifidn %ifidni %ifid %ifnum %ifstr %iftoken %ifempty %ifenv %error %warning %fatal %rep %endrep %include %push %pop %repl %pathsearch %depend %use %arg %stacksize %local %line %comment %endcomment .nolist __FILE__ __LINE__ 
__SECT__ __BITS__ __OUTPUT_FORMAT__ __DATE__ __TIME__ __DATE_NUM__ __TIME_NUM__ __UTC_DATE__ __UTC_TIME__ __UTC_DATE_NUM__ __UTC_TIME_NUM__ __PASS__ struc endstruc istruc at iend align alignb sectalign daz nodaz up down zero default option assume public bits use16 use32 use64 default section segment absolute extern global common cpu float __utf16__ __utf16le__ __utf16be__ __utf32__ __utf32le__ __utf32be__ __float8__ __float16__ __float32__ __float64__ __float80m__ __float80e__ __float128l__ __float128h__ __Infinity__ __QNaN__ __SNaN__ Inf NaN QNaN SNaN float8 float16 float32 float64 float80m float80e float128l float128h __FLOAT_DAZ__ __FLOAT_ROUND__ __FLOAT__"},contains:[s.COMMENT(";","$",{relevance:0}),{className:"number",variants:[{begin:"\\b(?:([0-9][0-9_]*)?\\.[0-9_]*(?:[eE][+-]?[0-9_]+)?|(0[Xx])?[0-9][0-9_]*\\.?[0-9_]*(?:[pP](?:[+-]?[0-9_]+)?)?)\\b",relevance:0},{begin:"\\$[0-9][0-9A-Fa-f]*",relevance:0},{begin:"\\b(?:[0-9A-Fa-f][0-9A-Fa-f_]*[Hh]|[0-9][0-9_]*[DdTt]?|[0-7][0-7_]*[QqOo]|[0-1][0-1_]*[BbYy])\\b"},{begin:"\\b(?:0[Xx][0-9A-Fa-f_]+|0[DdTt][0-9_]+|0[QqOo][0-7_]+|0[BbYy][0-1_]+)\\b"}]},s.QUOTE_STRING_MODE,{className:"string",variants:[{begin:"'",end:"[^\\\\]'"},{begin:"`",end:"[^\\\\]`"}],relevance:0},{className:"symbol",variants:[{begin:"^\\s*[A-Za-z._?][A-Za-z0-9_$#@~.?]*(:|\\s+label)"},{begin:"^\\s*%%[A-Za-z0-9_$#@~.?]*:"}],relevance:0},{className:"subst",begin:"%[0-9]+",relevance:0},{className:"subst",begin:"%!S+",relevance:0},{className:"meta",begin:/^\s*\.[\w_-]+/}]}}}()); \ No newline at end of file From e38ceb5378e7ce945eedbe1c6fb670095cfb9cc5 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 4 Dec 2023 14:19:29 +0100 Subject: [PATCH 095/146] fix v3 backwards compatibility for callbacks (#481) Because the callback types were used explicitly in some consumers of chronos, the change of type introduces a backwards incompatibility preventing a smooth transition to v4 for code that doesn't uses `raises`. This PR restores backwards compatibility at the expense of introducing a new type with a potentially ugly name - that said, there is already precedence for using numbered names to provide new error handling strategy in chronos. --- chronos/apps/http/httpserver.nim | 59 +++++++++++++++++++++++++++---- chronos/apps/http/shttpserver.nim | 52 ++++++++++++++++++++++++++- chronos/transports/stream.nim | 23 ++++++------ tests/testhttpclient.nim | 2 +- 4 files changed, 117 insertions(+), 19 deletions(-) diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index 7d1aea0e1..3bbee0f30 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -63,18 +63,22 @@ type HttpResponseState* {.pure.} = enum Empty, Prepared, Sending, Finished, Failed, Cancelled, Default - HttpProcessCallback* = + # TODO Evaluate naming of raises-annotated callbacks + HttpProcessCallback2* = proc(req: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]), gcsafe.} + async: (raises: [CancelledError, HttpResponseError]).} + + HttpProcessCallback* {.deprecated.} = + proc(req: RequestFence): Future[HttpResponseRef] {.async.} HttpConnectionCallback* = proc(server: HttpServerRef, transp: StreamTransport): Future[HttpConnectionRef] {. - async: (raises: [CancelledError, HttpConnectionError]), gcsafe.} + async: (raises: [CancelledError, HttpConnectionError]).} HttpCloseConnectionCallback* = proc(connection: HttpConnectionRef): Future[void] {. 
- async: (raises: []), gcsafe.} + async: (raises: []).} HttpConnectionHolder* = object of RootObj connection*: HttpConnectionRef @@ -103,7 +107,7 @@ type bufferSize*: int maxHeadersSize*: int maxRequestBodySize*: int - processCallback*: HttpProcessCallback + processCallback*: HttpProcessCallback2 createConnCallback*: HttpConnectionCallback HttpServerRef* = ref HttpServer @@ -182,7 +186,7 @@ proc createConnection(server: HttpServerRef, proc new*(htype: typedesc[HttpServerRef], address: TransportAddress, - processCallback: HttpProcessCallback, + processCallback: HttpProcessCallback2, serverFlags: set[HttpServerFlags] = {}, socketFlags: set[ServerFlags] = {ReuseAddr}, serverUri = Uri(), @@ -236,6 +240,49 @@ proc new*(htype: typedesc[HttpServerRef], ) ok(res) +proc new*(htype: typedesc[HttpServerRef], + address: TransportAddress, + processCallback: HttpProcessCallback, + serverFlags: set[HttpServerFlags] = {}, + socketFlags: set[ServerFlags] = {ReuseAddr}, + serverUri = Uri(), + serverIdent = "", + maxConnections: int = -1, + bufferSize: int = 4096, + backlogSize: int = DefaultBacklogSize, + httpHeadersTimeout = 10.seconds, + maxHeadersSize: int = 8192, + maxRequestBodySize: int = 1_048_576, + dualstack = DualStackType.Auto): HttpResult[HttpServerRef] {. + deprecated: "raises missing from process callback".} = + + proc processCallback2(req: RequestFence): Future[HttpResponseRef] {. + async: (raises: [CancelledError, HttpResponseError]).} = + try: + await processCallback(req) + except CancelledError as exc: + raise exc + except HttpResponseError as exc: + raise exc + except CatchableError as exc: + # Emulate 3.x behavior + raise (ref HttpCriticalError)(msg: exc.msg, code: Http503) + + HttpServerRef.new( + address = address, + processCallback = processCallback2, + serverFlags = serverFlags, + socketFlags = socketFlags, + serverUri = serverUri, + serverIdent = serverIdent, + maxConnections = maxConnections, + bufferSize = bufferSize, + backlogSize = backlogSize, + httpHeadersTimeout = httpHeadersTimeout, + maxHeadersSize = maxHeadersSize, + maxRequestBodySize = maxRequestBodySize, + dualstack = dualstack) + proc getServerFlags(req: HttpRequestRef): set[HttpServerFlags] = var defaultFlags: set[HttpServerFlags] = {} if isNil(req): return defaultFlags diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim index 2373d9580..f7e377f93 100644 --- a/chronos/apps/http/shttpserver.nim +++ b/chronos/apps/http/shttpserver.nim @@ -82,7 +82,7 @@ proc createSecConnection(server: HttpServerRef, proc new*(htype: typedesc[SecureHttpServerRef], address: TransportAddress, - processCallback: HttpProcessCallback, + processCallback: HttpProcessCallback2, tlsPrivateKey: TLSPrivateKey, tlsCertificate: TLSCertificate, serverFlags: set[HttpServerFlags] = {}, @@ -145,3 +145,53 @@ proc new*(htype: typedesc[SecureHttpServerRef], secureFlags: secureFlags ) ok(res) + +proc new*(htype: typedesc[SecureHttpServerRef], + address: TransportAddress, + processCallback: HttpProcessCallback, + tlsPrivateKey: TLSPrivateKey, + tlsCertificate: TLSCertificate, + serverFlags: set[HttpServerFlags] = {}, + socketFlags: set[ServerFlags] = {ReuseAddr}, + serverUri = Uri(), + serverIdent = "", + secureFlags: set[TLSFlags] = {}, + maxConnections: int = -1, + bufferSize: int = 4096, + backlogSize: int = DefaultBacklogSize, + httpHeadersTimeout = 10.seconds, + maxHeadersSize: int = 8192, + maxRequestBodySize: int = 1_048_576, + dualstack = DualStackType.Auto + ): HttpResult[SecureHttpServerRef] {. 
+ deprecated: "raises missing from process callback".} = + proc processCallback2(req: RequestFence): Future[HttpResponseRef] {. + async: (raises: [CancelledError, HttpResponseError]).} = + try: + await processCallback(req) + except CancelledError as exc: + raise exc + except HttpResponseError as exc: + raise exc + except CatchableError as exc: + # Emulate 3.x behavior + raise (ref HttpCriticalError)(msg: exc.msg, code: Http503) + + SecureHttpServerRef.new( + address = address, + processCallback = processCallback2, + tlsPrivateKey = tlsPrivateKey, + tlsCertificate = tlsCertificate, + serverFlags = serverFlags, + socketFlags = socketFlags, + serverUri = serverUri, + serverIdent = serverIdent, + secureFlags = secureFlags, + maxConnections = maxConnections, + bufferSize = bufferSize, + backlogSize = backlogSize, + httpHeadersTimeout = httpHeadersTimeout, + maxHeadersSize = maxHeadersSize, + maxRequestBodySize = maxRequestBodySize, + dualstack = dualstack + ) \ No newline at end of file diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 107bc6e68..c0d1cfcdb 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -115,14 +115,15 @@ else: discard type - StreamCallback* = proc(server: StreamServer, - client: StreamTransport) {.async: (raises: []).} + # TODO evaluate naming of raises-annotated callbacks + StreamCallback2* = proc(server: StreamServer, + client: StreamTransport) {.async: (raises: []).} ## New remote client connection callback ## ``server`` - StreamServer object. ## ``client`` - accepted client transport. - UnsafeStreamCallback* = proc(server: StreamServer, - client: StreamTransport) {.async.} + StreamCallback* = proc(server: StreamServer, + client: StreamTransport) {.async.} ## Connection callback that doesn't check for exceptions at compile time ## ``server`` - StreamServer object. ## ``client`` - accepted client transport. @@ -135,7 +136,7 @@ type StreamServer* = ref object of SocketServer ## StreamServer object - function*: StreamCallback # callback which will be called after new + function*: StreamCallback2 # callback which will be called after new # client accepted init*: TransportInitCallback # callback which will be called before # transport for new client @@ -1870,7 +1871,7 @@ proc getBacklogSize(backlog: int): cint = cint(backlog) proc createStreamServer*(host: TransportAddress, - cbproc: StreamCallback, + cbproc: StreamCallback2, flags: set[ServerFlags] = {}, sock: AsyncFD = asyncInvalidSocket, backlog: int = DefaultBacklogSize, @@ -2092,7 +2093,7 @@ proc createStreamServer*(host: TransportAddress, sres proc createStreamServer*(host: TransportAddress, - cbproc: UnsafeStreamCallback, + cbproc: StreamCallback, flags: set[ServerFlags] = {}, sock: AsyncFD = asyncInvalidSocket, backlog: int = DefaultBacklogSize, @@ -2124,11 +2125,11 @@ proc createStreamServer*(host: TransportAddress, udata: pointer = nil, dualstack = DualStackType.Auto): StreamServer {. 
raises: [TransportOsError].} = - createStreamServer(host, StreamCallback(nil), flags, sock, backlog, bufferSize, + createStreamServer(host, StreamCallback2(nil), flags, sock, backlog, bufferSize, child, init, cast[pointer](udata), dualstack) proc createStreamServer*[T](host: TransportAddress, - cbproc: StreamCallback, + cbproc: StreamCallback2, flags: set[ServerFlags] = {}, udata: ref T, sock: AsyncFD = asyncInvalidSocket, @@ -2144,7 +2145,7 @@ proc createStreamServer*[T](host: TransportAddress, child, init, cast[pointer](udata), dualstack) proc createStreamServer*[T](host: TransportAddress, - cbproc: UnsafeStreamCallback, + cbproc: StreamCallback, flags: set[ServerFlags] = {}, udata: ref T, sock: AsyncFD = asyncInvalidSocket, @@ -2172,7 +2173,7 @@ proc createStreamServer*[T](host: TransportAddress, raises: [TransportOsError].} = var fflags = flags + {GCUserData} GC_ref(udata) - createStreamServer(host, StreamCallback(nil), fflags, sock, backlog, bufferSize, + createStreamServer(host, StreamCallback2(nil), fflags, sock, backlog, bufferSize, child, init, cast[pointer](udata), dualstack) proc getUserData*[T](server: StreamServer): T {.inline.} = diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index eb1eaacf9..d2a355d8c 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -85,7 +85,7 @@ suite "HTTP client testing suite": res proc createServer(address: TransportAddress, - process: HttpProcessCallback, secure: bool): HttpServerRef = + process: HttpProcessCallback2, secure: bool): HttpServerRef = let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} serverFlags = {HttpServerFlags.Http11Pipeline} From c41599a6d6d8b11c729032bf8913e06f4171e0fb Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sat, 9 Dec 2023 06:50:35 +0200 Subject: [PATCH 096/146] Asyncraises HTTP layer V3 (#482) * No Critical and Recoverable errors anymore. * Recover raiseHttpCriticalError() * Post-rebase fixes. * Remove deprecated ResponseFence and getResponseFence(). * HttpProcessCallback and 2. * Fix callback holder. * Fix test issue. * Fix backwards compatibility of `HttpResponse.state` field. 
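
For reference, a minimal handler sketch against the reworked callback type
(the name `process` is illustrative only; `RequestFence`, `respond` and
`defaultResponse` are the existing server API as touched by this change, and
the exact raises set of `respond` is assumed here, not guaranteed):

  proc process(req: RequestFence): Future[HttpResponseRef] {.
      async: (raises: [CancelledError]).} =
    # Handlers no longer signal failures by raising HttpCriticalError or
    # HttpRecoverableError; they return a response object instead.
    if req.isOk():
      let request = req.get()
      try:
        await request.respond(Http200, "ok")
      except HttpWriteError as exc:
        # Transport failures become an error-code response rather than
        # escaping the callback.
        defaultResponse(exc)
    else:
      defaultResponse()

Legacy callbacks without the `raises` annotation keep working through the
deprecated `HttpProcessCallback` overloads of `new`, which wrap the old
callback and turn other CatchableError instances into a default error
response (Http503 via `defaultResponse(exc)`).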
--- chronos/apps/http/httpcommon.nim | 79 ++- chronos/apps/http/httpserver.nim | 775 +++++++++++++----------------- chronos/apps/http/multipart.nim | 121 ++--- chronos/apps/http/shttpserver.nim | 27 +- tests/testhttpclient.nim | 395 ++++++++------- tests/testhttpserver.nim | 231 +++++---- tests/testshttpserver.nim | 25 +- 7 files changed, 838 insertions(+), 815 deletions(-) diff --git a/chronos/apps/http/httpcommon.nim b/chronos/apps/http/httpcommon.nim index 3ebe3ca20..0f5370ab4 100644 --- a/chronos/apps/http/httpcommon.nim +++ b/chronos/apps/http/httpcommon.nim @@ -43,30 +43,48 @@ const ServerHeader* = "server" LocationHeader* = "location" AuthorizationHeader* = "authorization" + ContentDispositionHeader* = "content-disposition" UrlEncodedContentType* = MediaType.init("application/x-www-form-urlencoded") MultipartContentType* = MediaType.init("multipart/form-data") type + HttpMessage* = object + code*: HttpCode + contentType*: MediaType + message*: string + HttpResult*[T] = Result[T, string] HttpResultCode*[T] = Result[T, HttpCode] + HttpResultMessage*[T] = Result[T, HttpMessage] - HttpDefect* = object of Defect HttpError* = object of AsyncError - HttpResponseError* = object of HttpError - code*: HttpCode - HttpCriticalError* = object of HttpResponseError - HttpRecoverableError* = object of HttpResponseError - HttpDisconnectError* = object of HttpError - HttpConnectionError* = object of HttpError HttpInterruptError* = object of HttpError - HttpReadError* = object of HttpError - HttpWriteError* = object of HttpError - HttpProtocolError* = object of HttpError - HttpRedirectError* = object of HttpError - HttpAddressError* = object of HttpError - HttpUseClosedError* = object of HttpError + + HttpTransportError* = object of HttpError + HttpAddressError* = object of HttpTransportError + HttpRedirectError* = object of HttpTransportError + HttpConnectionError* = object of HttpTransportError + HttpReadError* = object of HttpTransportError HttpReadLimitError* = object of HttpReadError + HttpDisconnectError* = object of HttpReadError + HttpWriteError* = object of HttpTransportError + + HttpProtocolError* = object of HttpError + code*: HttpCode + + HttpCriticalError* = object of HttpProtocolError # deprecated + HttpRecoverableError* = object of HttpProtocolError # deprecated + + HttpRequestError* = object of HttpProtocolError + HttpRequestHeadersError* = object of HttpRequestError + HttpRequestBodyError* = object of HttpRequestError + HttpRequestHeadersTooLargeError* = object of HttpRequestHeadersError + HttpRequestBodyTooLargeError* = object of HttpRequestBodyError + HttpResponseError* = object of HttpProtocolError + + HttpInvalidUsageError* = object of HttpError + HttpUseClosedError* = object of HttpInvalidUsageError KeyValueTuple* = tuple key: string @@ -127,6 +145,11 @@ func toString*(error: HttpAddressErrorType): string = of HttpAddressErrorType.NoAddressResolved: "No address has been resolved" +proc raiseHttpRequestBodyTooLargeError*() {. + noinline, noreturn, raises: [HttpRequestBodyTooLargeError].} = + raise (ref HttpRequestBodyTooLargeError)( + code: Http413, msg: MaximumBodySizeError) + proc raiseHttpCriticalError*(msg: string, code = Http400) {. noinline, noreturn, raises: [HttpCriticalError].} = raise (ref HttpCriticalError)(code: code, msg: msg) @@ -135,9 +158,6 @@ proc raiseHttpDisconnectError*() {. 
noinline, noreturn, raises: [HttpDisconnectError].} = raise (ref HttpDisconnectError)(msg: "Remote peer disconnected") -proc raiseHttpDefect*(msg: string) {.noinline, noreturn.} = - raise (ref HttpDefect)(msg: msg) - proc raiseHttpConnectionError*(msg: string) {. noinline, noreturn, raises: [HttpConnectionError].} = raise (ref HttpConnectionError)(msg: msg) @@ -152,7 +172,15 @@ proc raiseHttpReadError*(msg: string) {. proc raiseHttpProtocolError*(msg: string) {. noinline, noreturn, raises: [HttpProtocolError].} = - raise (ref HttpProtocolError)(msg: msg) + raise (ref HttpProtocolError)(code: Http400, msg: msg) + +proc raiseHttpProtocolError*(code: HttpCode, msg: string) {. + noinline, noreturn, raises: [HttpProtocolError].} = + raise (ref HttpProtocolError)(code: code, msg: msg) + +proc raiseHttpProtocolError*(msg: HttpMessage) {. + noinline, noreturn, raises: [HttpProtocolError].} = + raise (ref HttpProtocolError)(code: msg.code, msg: msg.message) proc raiseHttpWriteError*(msg: string) {. noinline, noreturn, raises: [HttpWriteError].} = @@ -178,6 +206,23 @@ template newHttpWriteError*(message: string): ref HttpWriteError = template newHttpUseClosedError*(): ref HttpUseClosedError = newException(HttpUseClosedError, "Connection was already closed") +func init*(t: typedesc[HttpMessage], code: HttpCode, message: string, + contentType: MediaType): HttpMessage = + HttpMessage(code: code, message: message, contentType: contentType) + +func init*(t: typedesc[HttpMessage], code: HttpCode, message: string, + contentType: string): HttpMessage = + HttpMessage(code: code, message: message, + contentType: MediaType.init(contentType)) + +func init*(t: typedesc[HttpMessage], code: HttpCode, + message: string): HttpMessage = + HttpMessage(code: code, message: message, + contentType: MediaType.init("text/plain")) + +func init*(t: typedesc[HttpMessage], code: HttpCode): HttpMessage = + HttpMessage(code: code) + iterator queryParams*(query: string, flags: set[QueryParamsFlag] = {}): KeyValueTuple = ## Iterate over url-encoded query string. diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index 3bbee0f30..9646956d6 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -32,8 +32,7 @@ type ## Enable HTTP/1.1 pipelining. HttpServerError* {.pure.} = enum - InterruptError, TimeoutError, CatchableError, RecoverableError, - CriticalError, DisconnectError + InterruptError, TimeoutError, ProtocolError, DisconnectError HttpServerState* {.pure.} = enum ServerRunning, ServerStopped, ServerClosed @@ -41,11 +40,10 @@ type HttpProcessError* = object kind*: HttpServerError code*: HttpCode - exc*: ref CatchableError + exc*: ref HttpError remote*: Opt[TransportAddress] ConnectionFence* = Result[HttpConnectionRef, HttpProcessError] - ResponseFence* = Result[HttpResponseRef, HttpProcessError] RequestFence* = Result[HttpRequestRef, HttpProcessError] HttpRequestFlags* {.pure.} = enum @@ -61,15 +59,15 @@ type KeepAlive, Graceful, Immediate HttpResponseState* {.pure.} = enum - Empty, Prepared, Sending, Finished, Failed, Cancelled, Default + Empty, Prepared, Sending, Finished, Failed, Cancelled, ErrorCode, Default - # TODO Evaluate naming of raises-annotated callbacks - HttpProcessCallback2* = + HttpProcessCallback* = proc(req: RequestFence): Future[HttpResponseRef] {. 
- async: (raises: [CancelledError, HttpResponseError]).} + gcsafe, raises: [].} - HttpProcessCallback* {.deprecated.} = - proc(req: RequestFence): Future[HttpResponseRef] {.async.} + HttpProcessCallback2* = + proc(req: RequestFence): Future[HttpResponseRef] {. + async: (raises: [CancelledError]).} HttpConnectionCallback* = proc(server: HttpServerRef, @@ -138,7 +136,7 @@ type headersTable: HttpTable body: seq[byte] flags: set[HttpResponseFlags] - state*: HttpResponseState + state*: HttpResponseState # TODO (cheatfate): Make this field private connection*: HttpConnectionRef streamType*: HttpResponseStreamType writer: AsyncStreamWriter @@ -163,14 +161,20 @@ type ByteChar* = string | seq[byte] proc init(htype: typedesc[HttpProcessError], error: HttpServerError, - exc: ref CatchableError, remote: Opt[TransportAddress], + exc: ref HttpError, remote: Opt[TransportAddress], code: HttpCode): HttpProcessError = HttpProcessError(kind: error, exc: exc, remote: remote, code: code) +proc init(htype: typedesc[HttpProcessError], error: HttpServerError, + remote: Opt[TransportAddress], code: HttpCode): HttpProcessError = + HttpProcessError(kind: error, remote: remote, code: code) + proc init(htype: typedesc[HttpProcessError], error: HttpServerError): HttpProcessError = HttpProcessError(kind: error) +proc defaultResponse*(exc: ref CatchableError): HttpResponseRef + proc new(htype: typedesc[HttpConnectionHolderRef], server: HttpServerRef, transp: StreamTransport, connectionId: string): HttpConnectionHolderRef = @@ -254,34 +258,22 @@ proc new*(htype: typedesc[HttpServerRef], maxHeadersSize: int = 8192, maxRequestBodySize: int = 1_048_576, dualstack = DualStackType.Auto): HttpResult[HttpServerRef] {. - deprecated: "raises missing from process callback".} = + deprecated: "Callback could raise only CancelledError, annotate with " & + "{.async: (raises: [CancelledError]).}".} = - proc processCallback2(req: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = - try: - await processCallback(req) - except CancelledError as exc: - raise exc - except HttpResponseError as exc: - raise exc - except CatchableError as exc: - # Emulate 3.x behavior - raise (ref HttpCriticalError)(msg: exc.msg, code: Http503) - - HttpServerRef.new( - address = address, - processCallback = processCallback2, - serverFlags = serverFlags, - socketFlags = socketFlags, - serverUri = serverUri, - serverIdent = serverIdent, - maxConnections = maxConnections, - bufferSize = bufferSize, - backlogSize = backlogSize, - httpHeadersTimeout = httpHeadersTimeout, - maxHeadersSize = maxHeadersSize, - maxRequestBodySize = maxRequestBodySize, - dualstack = dualstack) + proc wrap(req: RequestFence): Future[HttpResponseRef] {. 
+ async: (raises: [CancelledError]).} = + try: + await processCallback(req) + except CancelledError as exc: + raise exc + except CatchableError as exc: + defaultResponse(exc) + + HttpServerRef.new(address, wrap, serverFlags, socketFlags, serverUri, + serverIdent, maxConnections, bufferSize, backlogSize, + httpHeadersTimeout, maxHeadersSize, maxRequestBodySize, + dualstack) proc getServerFlags(req: HttpRequestRef): set[HttpServerFlags] = var defaultFlags: set[HttpServerFlags] = {} @@ -304,6 +296,12 @@ proc getResponseFlags(req: HttpRequestRef): set[HttpResponseFlags] = else: defaultFlags +proc getResponseState*(response: HttpResponseRef): HttpResponseState = + response.state + +proc setResponseState(response: HttpResponseRef, state: HttpResponseState) = + response.state = state + proc getResponseVersion(reqFence: RequestFence): HttpVersion = if reqFence.isErr(): HttpVersion11 @@ -335,6 +333,18 @@ proc defaultResponse*(): HttpResponseRef = ## Create an empty response to return when request processor got no request. HttpResponseRef(state: HttpResponseState.Default, version: HttpVersion11) +proc defaultResponse*(exc: ref CatchableError): HttpResponseRef = + ## Create response with error code based on exception type. + if exc of AsyncTimeoutError: + HttpResponseRef(state: HttpResponseState.ErrorCode, status: Http408) + elif exc of HttpTransportError: + HttpResponseRef(state: HttpResponseState.Failed) + elif exc of HttpProtocolError: + let code = cast[ref HttpProtocolError](exc).code + HttpResponseRef(state: HttpResponseState.ErrorCode, status: code) + else: + HttpResponseRef(state: HttpResponseState.ErrorCode, status: Http503) + proc dumbResponse*(): HttpResponseRef {. deprecated: "Please use defaultResponse() instead".} = ## Create an empty response to return when request processor got no request. @@ -353,11 +363,11 @@ proc hasBody*(request: HttpRequestRef): bool = HttpRequestFlags.UnboundBody} != {} proc prepareRequest(conn: HttpConnectionRef, - req: HttpRequestHeader): HttpResultCode[HttpRequestRef] = + req: HttpRequestHeader): HttpResultMessage[HttpRequestRef] = var request = HttpRequestRef(connection: conn, state: HttpState.Alive) if req.version notin {HttpVersion10, HttpVersion11}: - return err(Http505) + return err(HttpMessage.init(Http505, "Unsupported HTTP protocol version")) request.scheme = if HttpServerFlags.Secure in conn.server.flags: @@ -372,14 +382,14 @@ proc prepareRequest(conn: HttpConnectionRef, block: let res = req.uri() if len(res) == 0: - return err(Http400) + return err(HttpMessage.init(Http400, "Invalid request URI")) res request.uri = if request.rawPath != "*": let uri = parseUri(request.rawPath) if uri.scheme notin ["http", "https", ""]: - return err(Http400) + return err(HttpMessage.init(Http400, "Unsupported URI scheme")) uri else: var uri = initUri() @@ -407,59 +417,61 @@ proc prepareRequest(conn: HttpConnectionRef, # Validating HTTP request headers # Some of the headers must be present only once. if table.count(ContentTypeHeader) > 1: - return err(Http400) + return err(HttpMessage.init(Http400, "Multiple Content-Type headers")) if table.count(ContentLengthHeader) > 1: - return err(Http400) + return err(HttpMessage.init(Http400, "Multiple Content-Length headers")) if table.count(TransferEncodingHeader) > 1: - return err(Http400) + return err(HttpMessage.init(Http400, + "Multuple Transfer-Encoding headers")) table # Preprocessing "Content-Encoding" header. 
request.contentEncoding = - block: - let res = getContentEncoding( - request.headers.getList(ContentEncodingHeader)) - if res.isErr(): - return err(Http400) - else: - res.get() + getContentEncoding( + request.headers.getList(ContentEncodingHeader)).valueOr: + let msg = "Incorrect or unsupported Content-Encoding header value" + return err(HttpMessage.init(Http400, msg)) # Preprocessing "Transfer-Encoding" header. request.transferEncoding = - block: - let res = getTransferEncoding( - request.headers.getList(TransferEncodingHeader)) - if res.isErr(): - return err(Http400) - else: - res.get() + getTransferEncoding( + request.headers.getList(TransferEncodingHeader)).valueOr: + let msg = "Incorrect or unsupported Transfer-Encoding header value" + return err(HttpMessage.init(Http400, msg)) # Almost all HTTP requests could have body (except TRACE), we perform some # steps to reveal information about body. - if ContentLengthHeader in request.headers: - let length = request.headers.getInt(ContentLengthHeader) - if length >= 0: - if request.meth == MethodTrace: - return err(Http400) - # Because of coversion to `int` we should avoid unexpected OverflowError. - if length > uint64(high(int)): - return err(Http413) - if length > uint64(conn.server.maxRequestBodySize): - return err(Http413) - request.contentLength = int(length) - request.requestFlags.incl(HttpRequestFlags.BoundBody) - else: - if TransferEncodingFlags.Chunked in request.transferEncoding: - if request.meth == MethodTrace: - return err(Http400) - request.requestFlags.incl(HttpRequestFlags.UnboundBody) + request.contentLength = + if ContentLengthHeader in request.headers: + let length = request.headers.getInt(ContentLengthHeader) + if length != 0: + if request.meth == MethodTrace: + let msg = "TRACE requests could not have request body" + return err(HttpMessage.init(Http400, msg)) + # Because of coversion to `int` we should avoid unexpected OverflowError. + if length > uint64(high(int)): + return err(HttpMessage.init(Http413, "Unsupported content length")) + if length > uint64(conn.server.maxRequestBodySize): + return err(HttpMessage.init(Http413, "Content length exceeds limits")) + request.requestFlags.incl(HttpRequestFlags.BoundBody) + int(length) + else: + 0 + else: + if TransferEncodingFlags.Chunked in request.transferEncoding: + if request.meth == MethodTrace: + let msg = "TRACE requests could not have request body" + return err(HttpMessage.init(Http400, msg)) + request.requestFlags.incl(HttpRequestFlags.UnboundBody) + 0 if request.hasBody(): # If request has body, we going to understand how its encoded. 
if ContentTypeHeader in request.headers: let contentType = getContentType(request.headers.getList(ContentTypeHeader)).valueOr: - return err(Http415) + let msg = "Incorrect or missing Content-Type header" + return err(HttpMessage.init(Http415, msg)) if contentType == UrlEncodedContentType: request.requestFlags.incl(HttpRequestFlags.UrlencodedForm) elif contentType == MultipartContentType: @@ -486,16 +498,17 @@ proc getBodyReader*(request: HttpRequestRef): HttpResult[HttpBodyReader] = uint64(request.contentLength)) ok(newHttpBodyReader(bstream)) elif HttpRequestFlags.UnboundBody in request.requestFlags: - let maxBodySize = request.connection.server.maxRequestBodySize - let cstream = newChunkedStreamReader(request.connection.reader) - let bstream = newBoundedStreamReader(cstream, uint64(maxBodySize), - comparison = BoundCmp.LessOrEqual) + let + maxBodySize = request.connection.server.maxRequestBodySize + cstream = newChunkedStreamReader(request.connection.reader) + bstream = newBoundedStreamReader(cstream, uint64(maxBodySize), + comparison = BoundCmp.LessOrEqual) ok(newHttpBodyReader(bstream, cstream)) else: err("Request do not have body available") proc handleExpect*(request: HttpRequestRef) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Handle expectation for ``Expect`` header. ## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expect if HttpServerFlags.NoExpectHandler notin request.connection.server.flags: @@ -504,85 +517,50 @@ proc handleExpect*(request: HttpRequestRef) {. try: let message = $request.version & " " & $Http100 & "\r\n\r\n" await request.connection.writer.write(message) - except CancelledError as exc: - raise exc except AsyncStreamError as exc: - raiseHttpCriticalError( + raiseHttpWriteError( "Unable to send `100-continue` response, reason: " & $exc.msg) proc getBody*(request: HttpRequestRef): Future[seq[byte]] {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, + HttpTransportError, HttpProtocolError]).} = ## Obtain request's body as sequence of bytes. - let bodyReader = request.getBodyReader() - if bodyReader.isErr(): + let reader = request.getBodyReader().valueOr: return @[] - else: - var reader = bodyReader.get() - try: - await request.handleExpect() - let res = await reader.read() - if reader.hasOverflow(): - await reader.closeWait() - reader = nil - raiseHttpCriticalError(MaximumBodySizeError, Http413) - else: - await reader.closeWait() - reader = nil - return res - except CancelledError as exc: - if not(isNil(reader)): - await reader.closeWait() - raise exc - except HttpCriticalError as exc: - if not(isNil(reader)): - await reader.closeWait() - raise exc - except AsyncStreamError as exc: - let msg = "Unable to read request's body, reason: " & $exc.msg - if not(isNil(reader)): - await reader.closeWait() - raiseHttpCriticalError(msg) + try: + await request.handleExpect() + let res = await reader.read() + if reader.hasOverflow(): + raiseHttpRequestBodyTooLargeError() + res + except AsyncStreamError as exc: + let msg = "Unable to read request's body, reason: " & $exc.msg + raiseHttpReadError(msg) + finally: + await reader.closeWait() proc consumeBody*(request: HttpRequestRef): Future[void] {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpTransportError, + HttpProtocolError]).} = ## Consume/discard request's body. 
- let bodyReader = request.getBodyReader() - if bodyReader.isErr(): + let reader = request.getBodyReader().valueOr: return - else: - var reader = bodyReader.get() - try: - await request.handleExpect() - discard await reader.consume() - if reader.hasOverflow(): - await reader.closeWait() - reader = nil - raiseHttpCriticalError(MaximumBodySizeError, Http413) - else: - await reader.closeWait() - reader = nil - return - except CancelledError as exc: - if not(isNil(reader)): - await reader.closeWait() - raise exc - except HttpCriticalError as exc: - if not(isNil(reader)): - await reader.closeWait() - raise exc - except AsyncStreamError as exc: - let msg = "Unable to consume request's body, reason: " & $exc.msg - if not(isNil(reader)): - await reader.closeWait() - raiseHttpCriticalError(msg) + try: + await request.handleExpect() + discard await reader.consume() + if reader.hasOverflow(): raiseHttpRequestBodyTooLargeError() + except AsyncStreamError as exc: + let msg = "Unable to consume request's body, reason: " & $exc.msg + raiseHttpReadError(msg) + finally: + await reader.closeWait() proc getAcceptInfo*(request: HttpRequestRef): Result[AcceptInfo, cstring] = ## Returns value of `Accept` header as `AcceptInfo` object. ## ## If ``Accept`` header is missing in request headers, ``*/*`` content ## type will be returned. - let acceptHeader = request.headers.getString(AcceptHeaderName) - getAcceptInfo(acceptHeader) + getAcceptInfo(request.headers.getString(AcceptHeaderName)) proc preferredContentMediaType*(acceptHeader: string): MediaType = ## Returns preferred content-type using ``Accept`` header value specified by @@ -693,7 +671,7 @@ proc preferredContentType*(request: HttpRequestRef, proc sendErrorResponse(conn: HttpConnectionRef, version: HttpVersion, code: HttpCode, keepAlive = true, - datatype = "text/text", + datatype = "text/plain", databody = "") {. async: (raises: [CancelledError]).} = var answer = $version & " " & $code & "\r\n" @@ -721,39 +699,10 @@ proc sendErrorResponse(conn: HttpConnectionRef, version: HttpVersion, answer.add(databody) try: await conn.writer.write(answer) - except CancelledError as exc: - raise exc except AsyncStreamError: # We ignore errors here, because we indicating error already. 
discard -proc sendErrorResponse( - conn: HttpConnectionRef, - reqFence: RequestFence, - respError: HttpProcessError - ): Future[HttpProcessExitType] {.async: (raises: []).} = - let version = getResponseVersion(reqFence) - try: - if reqFence.isOk(): - case respError.kind - of HttpServerError.CriticalError: - await conn.sendErrorResponse(version, respError.code, false) - HttpProcessExitType.Graceful - of HttpServerError.RecoverableError: - await conn.sendErrorResponse(version, respError.code, true) - HttpProcessExitType.Graceful - of HttpServerError.CatchableError: - await conn.sendErrorResponse(version, respError.code, false) - HttpProcessExitType.Graceful - of HttpServerError.DisconnectError, - HttpServerError.InterruptError, - HttpServerError.TimeoutError: - raiseAssert("Unexpected response error: " & $respError.kind) - else: - HttpProcessExitType.Graceful - except CancelledError: - HttpProcessExitType.Immediate - proc sendDefaultResponse( conn: HttpConnectionRef, reqFence: RequestFence, @@ -794,6 +743,10 @@ proc sendDefaultResponse( await conn.sendErrorResponse(HttpVersion11, Http409, keepConnection.toBool()) keepConnection + of HttpResponseState.ErrorCode: + # Response with error code + await conn.sendErrorResponse(version, response.status, false) + HttpProcessExitType.Immediate of HttpResponseState.Sending, HttpResponseState.Failed, HttpResponseState.Cancelled: # Just drop connection, because we dont know at what stage we are @@ -810,27 +763,21 @@ proc sendDefaultResponse( of HttpServerError.TimeoutError: await conn.sendErrorResponse(version, reqFence.error.code, false) HttpProcessExitType.Graceful - of HttpServerError.CriticalError: - await conn.sendErrorResponse(version, reqFence.error.code, false) - HttpProcessExitType.Graceful - of HttpServerError.RecoverableError: - await conn.sendErrorResponse(version, reqFence.error.code, false) - HttpProcessExitType.Graceful - of HttpServerError.CatchableError: + of HttpServerError.ProtocolError: await conn.sendErrorResponse(version, reqFence.error.code, false) HttpProcessExitType.Graceful of HttpServerError.DisconnectError: # When `HttpServerFlags.NotifyDisconnect` is set. HttpProcessExitType.Immediate of HttpServerError.InterruptError: + # InterruptError should be handled earlier raiseAssert("Unexpected request error: " & $reqFence.error.kind) except CancelledError: HttpProcessExitType.Immediate - except CatchableError: - HttpProcessExitType.Immediate proc getRequest(conn: HttpConnectionRef): Future[HttpRequestRef] {. - async: (raises: [CancelledError, HttpError]).} = + async: (raises: [CancelledError, HttpDisconnectError, + HttpProtocolError]).} = try: conn.buffer.setLen(conn.server.maxHeadersSize) let res = await conn.reader.readUntil(addr conn.buffer[0], len(conn.buffer), @@ -838,15 +785,11 @@ proc getRequest(conn: HttpConnectionRef): Future[HttpRequestRef] {. 
conn.buffer.setLen(res) let header = parseRequest(conn.buffer) if header.failed(): - raiseHttpCriticalError("Malformed request recieved") - else: - let res = prepareRequest(conn, header) - if res.isErr(): - raiseHttpCriticalError("Invalid request received", res.error) - else: - return res.get() + raiseHttpProtocolError(Http400, "Malformed request recieved") + prepareRequest(conn, header).valueOr: + raiseHttpProtocolError(error) except AsyncStreamLimitError: - raiseHttpCriticalError("Maximum size of request headers reached", Http431) + raiseHttpProtocolError(Http431, "Maximum size of request headers reached") except AsyncStreamError: raiseHttpDisconnectError() @@ -915,7 +858,7 @@ proc createConnection(server: HttpServerRef, HttpConnectionRef.new(server, transp) proc `keepalive=`*(resp: HttpResponseRef, value: bool) = - doAssert(resp.state == HttpResponseState.Empty) + doAssert(resp.getResponseState() == HttpResponseState.Empty) if value: resp.flags.incl(HttpResponseFlags.KeepAlive) else: @@ -935,55 +878,6 @@ proc getRemoteAddress(connection: HttpConnectionRef): Opt[TransportAddress] = if isNil(connection): return Opt.none(TransportAddress) getRemoteAddress(connection.transp) -proc getResponseFence*(connection: HttpConnectionRef, - reqFence: RequestFence): Future[ResponseFence] {. - async: (raises: []).} = - try: - let res = await connection.server.processCallback(reqFence) - ResponseFence.ok(res) - except CancelledError: - ResponseFence.err(HttpProcessError.init( - HttpServerError.InterruptError)) - except HttpCriticalError as exc: - let address = connection.getRemoteAddress() - ResponseFence.err(HttpProcessError.init( - HttpServerError.CriticalError, exc, address, exc.code)) - except HttpRecoverableError as exc: - let address = connection.getRemoteAddress() - ResponseFence.err(HttpProcessError.init( - HttpServerError.RecoverableError, exc, address, exc.code)) - except HttpResponseError as exc: - # There should be only 2 children of HttpResponseError, and all of them - # should be handled. - raiseAssert "Unexpected response error " & $exc.name & ", reason: " & - $exc.msg - -proc getResponseFence*(server: HttpServerRef, - connFence: ConnectionFence): Future[ResponseFence] {. - async: (raises: []).} = - doAssert(connFence.isErr()) - try: - let - reqFence = RequestFence.err(connFence.error) - res = await server.processCallback(reqFence) - ResponseFence.ok(res) - except CancelledError: - ResponseFence.err(HttpProcessError.init( - HttpServerError.InterruptError)) - except HttpCriticalError as exc: - let address = Opt.none(TransportAddress) - ResponseFence.err(HttpProcessError.init( - HttpServerError.CriticalError, exc, address, exc.code)) - except HttpRecoverableError as exc: - let address = Opt.none(TransportAddress) - ResponseFence.err(HttpProcessError.init( - HttpServerError.RecoverableError, exc, address, exc.code)) - except HttpResponseError as exc: - # There should be only 2 children of HttpResponseError, and all of them - # should be handled. - raiseAssert "Unexpected response error " & $exc.name & ", reason: " & - $exc.msg - proc getRequestFence*(server: HttpServerRef, connection: HttpConnectionRef): Future[RequestFence] {. 
async: (raises: []).} = @@ -996,27 +890,21 @@ proc getRequestFence*(server: HttpServerRef, connection.currentRawQuery = Opt.some(res.rawPath) RequestFence.ok(res) except CancelledError: - RequestFence.err(HttpProcessError.init(HttpServerError.InterruptError)) - except AsyncTimeoutError as exc: + RequestFence.err( + HttpProcessError.init(HttpServerError.InterruptError)) + except AsyncTimeoutError: let address = connection.getRemoteAddress() - RequestFence.err(HttpProcessError.init( - HttpServerError.TimeoutError, exc, address, Http408)) - except HttpRecoverableError as exc: + RequestFence.err( + HttpProcessError.init(HttpServerError.TimeoutError, address, Http408)) + except HttpProtocolError as exc: let address = connection.getRemoteAddress() - RequestFence.err(HttpProcessError.init( - HttpServerError.RecoverableError, exc, address, exc.code)) - except HttpCriticalError as exc: + RequestFence.err( + HttpProcessError.init(HttpServerError.ProtocolError, exc, address, + exc.code)) + except HttpDisconnectError: let address = connection.getRemoteAddress() - RequestFence.err(HttpProcessError.init( - HttpServerError.CriticalError, exc, address, exc.code)) - except HttpDisconnectError as exc: - let address = connection.getRemoteAddress() - RequestFence.err(HttpProcessError.init( - HttpServerError.DisconnectError, exc, address, Http400)) - except CatchableError as exc: - let address = connection.getRemoteAddress() - RequestFence.err(HttpProcessError.init( - HttpServerError.CatchableError, exc, address, Http500)) + RequestFence.err( + HttpProcessError.init(HttpServerError.DisconnectError, address, Http400)) proc getConnectionFence*(server: HttpServerRef, transp: StreamTransport): Future[ConnectionFence] {. @@ -1026,16 +914,11 @@ proc getConnectionFence*(server: HttpServerRef, ConnectionFence.ok(res) except CancelledError: ConnectionFence.err(HttpProcessError.init(HttpServerError.InterruptError)) - except HttpCriticalError as exc: - # On error `transp` will be closed by `createConnCallback()` call. - let address = Opt.none(TransportAddress) - ConnectionFence.err(HttpProcessError.init( - HttpServerError.CriticalError, exc, address, exc.code)) - except CatchableError as exc: + except HttpConnectionError as exc: # On error `transp` will be closed by `createConnCallback()` call. 
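# A sketch, in the style of the tests later in this patch, of a process
# callback reacting to the error kinds produced above; `onError` is an
# illustrative name.
proc onError(r: RequestFence): Future[HttpResponseRef] {.
    async: (raises: [CancelledError]).} =
  if r.isErr():
    if r.error.kind == HttpServerError.TimeoutError:
      discard   # request headers did not arrive in time (Http408)
    elif r.error.kind == HttpServerError.ProtocolError:
      discard   # malformed or oversized request
    defaultResponse()
  else:
    defaultResponse()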
let address = Opt.none(TransportAddress) ConnectionFence.err(HttpProcessError.init( - HttpServerError.CriticalError, exc, address, Http503)) + HttpServerError.DisconnectError, exc, address, Http400)) proc processRequest(server: HttpServerRef, connection: HttpConnectionRef, @@ -1045,30 +928,28 @@ proc processRequest(server: HttpServerRef, if requestFence.isErr(): case requestFence.error.kind of HttpServerError.InterruptError: + # Cancelled, exiting return HttpProcessExitType.Immediate of HttpServerError.DisconnectError: + # Remote peer disconnected if HttpServerFlags.NotifyDisconnect notin server.flags: return HttpProcessExitType.Immediate else: + # Request is incorrect or unsupported, sending notification discard - let responseFence = await getResponseFence(connection, requestFence) - if responseFence.isErr() and - (responseFence.error.kind == HttpServerError.InterruptError): + try: + let response = + try: + await connection.server.processCallback(requestFence) + except CancelledError: + # Cancelled, exiting + return HttpProcessExitType.Immediate + + await connection.sendDefaultResponse(requestFence, response) + finally: if requestFence.isOk(): await requestFence.get().closeWait() - return HttpProcessExitType.Immediate - - let res = - if responseFence.isErr(): - await connection.sendErrorResponse(requestFence, responseFence.error) - else: - await connection.sendDefaultResponse(requestFence, responseFence.get()) - - if requestFence.isOk(): - await requestFence.get().closeWait() - - res proc processLoop(holder: HttpConnectionHolderRef) {.async: (raises: []).} = let @@ -1077,10 +958,11 @@ proc processLoop(holder: HttpConnectionHolderRef) {.async: (raises: []).} = connectionId = holder.connectionId connection = block: - let res = await server.getConnectionFence(transp) + let res = await getConnectionFence(server, transp) if res.isErr(): if res.error.kind != HttpServerError.InterruptError: - discard await server.getResponseFence(res) + discard await noCancel( + server.processCallback(RequestFence.err(res.error))) server.connections.del(connectionId) return res.get() @@ -1089,13 +971,7 @@ proc processLoop(holder: HttpConnectionHolderRef) {.async: (raises: []).} = var runLoop = HttpProcessExitType.KeepAlive while runLoop == HttpProcessExitType.KeepAlive: - runLoop = - try: - await server.processRequest(connection, connectionId) - except CancelledError: - HttpProcessExitType.Immediate - except CatchableError as exc: - raiseAssert "Unexpected error [" & $exc.name & "] happens: " & $exc.msg + runLoop = await server.processRequest(connection, connectionId) case runLoop of HttpProcessExitType.KeepAlive: @@ -1104,7 +980,6 @@ proc processLoop(holder: HttpConnectionHolderRef) {.async: (raises: []).} = await connection.closeWait() of HttpProcessExitType.Graceful: await connection.gracefulCloseWait() - server.connections.del(connectionId) proc acceptClientLoop(server: HttpServerRef) {.async: (raises: []).} = @@ -1210,89 +1085,84 @@ proc getMultipartReader*(req: HttpRequestRef): HttpResult[MultiPartReaderRef] = err("Request's method do not supports multipart") proc post*(req: HttpRequestRef): Future[HttpTable] {. 
- async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpTransportError, + HttpProtocolError]).} = ## Return POST parameters if req.postTable.isSome(): return req.postTable.get() - else: - if req.meth notin PostMethods: - return HttpTable.init() - if UrlencodedForm in req.requestFlags: - let queryFlags = - if QueryCommaSeparatedArray in req.connection.server.flags: - {QueryParamsFlag.CommaSeparatedArray} - else: - {} - var table = HttpTable.init() - # getBody() will handle `Expect`. - var body = await req.getBody() - # TODO (cheatfate) double copy here, because of `byte` to `char` - # conversion. - var strbody = newString(len(body)) - if len(body) > 0: - copyMem(addr strbody[0], addr body[0], len(body)) - for key, value in queryParams(strbody, queryFlags): - table.add(key, value) - req.postTable = Opt.some(table) - return table - elif MultipartForm in req.requestFlags: - var table = HttpTable.init() - let res = getMultipartReader(req) - if res.isErr(): - raiseHttpCriticalError("Unable to retrieve multipart form data") - var mpreader = res.get() + if req.meth notin PostMethods: + return HttpTable.init() - # We must handle `Expect` first. + if UrlencodedForm in req.requestFlags: + let queryFlags = + if QueryCommaSeparatedArray in req.connection.server.flags: + {QueryParamsFlag.CommaSeparatedArray} + else: + {} + var table = HttpTable.init() + # getBody() will handle `Expect`. + var body = await req.getBody() + # TODO (cheatfate) double copy here, because of `byte` to `char` + # conversion. + var strbody = newString(len(body)) + if len(body) > 0: + copyMem(addr strbody[0], addr body[0], len(body)) + for key, value in queryParams(strbody, queryFlags): + table.add(key, value) + req.postTable = Opt.some(table) + return table + elif MultipartForm in req.requestFlags: + var table = HttpTable.init() + let mpreader = getMultipartReader(req).valueOr: + raiseHttpProtocolError(Http400, + "Unable to retrieve multipart form data, reason: " & $error) + # Reading multipart/form-data parts. + var runLoop = true + while runLoop: + var part: MultiPart try: - await req.handleExpect() - except CancelledError as exc: + part = await mpreader.readPart() + var value = await part.getBody() + + # TODO (cheatfate) double copy here, because of `byte` to `char` + # conversion. + var strvalue = newString(len(value)) + if len(value) > 0: + copyMem(addr strvalue[0], addr value[0], len(value)) + table.add(part.name, strvalue) + await part.closeWait() + except MultipartEOMError: + runLoop = false + except HttpWriteError as exc: + if not(part.isEmpty()): + await part.closeWait() await mpreader.closeWait() raise exc - except HttpCriticalError as exc: + except HttpProtocolError as exc: + if not(part.isEmpty()): + await part.closeWait() await mpreader.closeWait() raise exc - - # Reading multipart/form-data parts. - var runLoop = true - while runLoop: - var part: MultiPart - try: - part = await mpreader.readPart() - var value = await part.getBody() - # TODO (cheatfate) double copy here, because of `byte` to `char` - # conversion. 
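# Usage sketch matching the form tests later in this patch: read the
# decoded POST parameters with `post()` and echo one field back; "field1"
# is an illustrative field name.
try:
  if request.hasBody():
    let form = await request.post()
    discard await request.respond(Http200, form.getString("field1"))
  else:
    discard await request.respond(Http400, "Missing content body")
except HttpProtocolError:
  discard
except HttpTransportError:
  discard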
- var strvalue = newString(len(value)) - if len(value) > 0: - copyMem(addr strvalue[0], addr value[0], len(value)) - table.add(part.name, strvalue) + except CancelledError as exc: + if not(part.isEmpty()): await part.closeWait() - except MultipartEOMError: - runLoop = false - except HttpCriticalError as exc: - if not(part.isEmpty()): - await part.closeWait() - await mpreader.closeWait() - raise exc - except CancelledError as exc: - if not(part.isEmpty()): - await part.closeWait() - await mpreader.closeWait() - raise exc - await mpreader.closeWait() - req.postTable = Opt.some(table) - return table - else: - if HttpRequestFlags.BoundBody in req.requestFlags: - if req.contentLength != 0: - raiseHttpCriticalError("Unsupported request body") - return HttpTable.init() - elif HttpRequestFlags.UnboundBody in req.requestFlags: - raiseHttpCriticalError("Unsupported request body") + await mpreader.closeWait() + raise exc + await mpreader.closeWait() + req.postTable = Opt.some(table) + return table + else: + if HttpRequestFlags.BoundBody in req.requestFlags: + if req.contentLength != 0: + raiseHttpProtocolError(Http400, "Unsupported request body") + return HttpTable.init() + elif HttpRequestFlags.UnboundBody in req.requestFlags: + raiseHttpProtocolError(Http400, "Unsupported request body") proc setHeader*(resp: HttpResponseRef, key, value: string) = ## Sets value of header ``key`` to ``value``. - doAssert(resp.state == HttpResponseState.Empty) + doAssert(resp.getResponseState() == HttpResponseState.Empty) resp.headersTable.set(key, value) proc setHeaderDefault*(resp: HttpResponseRef, key, value: string) = @@ -1302,7 +1172,7 @@ proc setHeaderDefault*(resp: HttpResponseRef, key, value: string) = proc addHeader*(resp: HttpResponseRef, key, value: string) = ## Adds value ``value`` to header's ``key`` value. - doAssert(resp.state == HttpResponseState.Empty) + doAssert(resp.getResponseState() == HttpResponseState.Empty) resp.headersTable.add(key, value) proc getHeader*(resp: HttpResponseRef, key: string, @@ -1316,8 +1186,22 @@ proc hasHeader*(resp: HttpResponseRef, key: string): bool = key in resp.headersTable template checkPending(t: untyped) = - if t.state != HttpResponseState.Empty: - raiseHttpCriticalError("Response body was already sent") + let currentState = t.getResponseState() + doAssert(currentState == HttpResponseState.Empty, + "Response body was already sent [" & $currentState & "]") + +template checkStreamResponse(t: untyped) = + doAssert(HttpResponseFlags.Stream in t.flags, + "Response was not prepared") + +template checkStreamResponseState(t: untyped) = + doAssert(t.getResponseState() in + {HttpResponseState.Prepared, HttpResponseState.Sending}, + "Response is in the wrong state") + +template checkPointerLength(t1, t2: untyped) = + doAssert(not(isNil(t1)), "pbytes must not be nil") + doAssert(t2 >= 0, "nbytes should be bigger or equal to zero") func createHeaders(resp: HttpResponseRef): string = var answer = $(resp.version) & " " & $(resp.status) & "\r\n" @@ -1386,69 +1270,68 @@ proc preparePlainHeaders(resp: HttpResponseRef): string = resp.createHeaders() proc sendBody*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Send HTTP response at once by using bytes pointer ``pbytes`` and length ## ``nbytes``. 
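# Sketch of the call order implied by the asserts above: status and
# headers are set while the response is still Empty, then the body is
# sent once; the header value here is illustrative.
let response = request.getResponse()
response.status = Http200
response.setHeader("Content-Type", "text/plain")
try:
  await response.sendBody("ok")
except HttpWriteError:
  discard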
- doAssert(not(isNil(pbytes)), "pbytes must not be nil") - doAssert(nbytes >= 0, "nbytes should be bigger or equal to zero") + checkPointerLength(pbytes, nbytes) checkPending(resp) let responseHeaders = resp.prepareLengthHeaders(nbytes) - resp.state = HttpResponseState.Prepared + resp.setResponseState(HttpResponseState.Prepared) try: - resp.state = HttpResponseState.Sending + resp.setResponseState(HttpResponseState.Sending) await resp.connection.writer.write(responseHeaders) if nbytes > 0: await resp.connection.writer.write(pbytes, nbytes) - resp.state = HttpResponseState.Finished + resp.setResponseState(HttpResponseState.Finished) except CancelledError as exc: - resp.state = HttpResponseState.Cancelled + resp.setResponseState(HttpResponseState.Cancelled) raise exc except AsyncStreamError as exc: - resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) + resp.setResponseState(HttpResponseState.Failed) + raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) proc sendBody*(resp: HttpResponseRef, data: ByteChar) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Send HTTP response at once by using data ``data``. checkPending(resp) let responseHeaders = resp.prepareLengthHeaders(len(data)) - resp.state = HttpResponseState.Prepared + resp.setResponseState(HttpResponseState.Prepared) try: - resp.state = HttpResponseState.Sending + resp.setResponseState(HttpResponseState.Sending) await resp.connection.writer.write(responseHeaders) if len(data) > 0: await resp.connection.writer.write(data) - resp.state = HttpResponseState.Finished + resp.setResponseState(HttpResponseState.Finished) except CancelledError as exc: - resp.state = HttpResponseState.Cancelled + resp.setResponseState(HttpResponseState.Cancelled) raise exc except AsyncStreamError as exc: - resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) + resp.setResponseState(HttpResponseState.Failed) + raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) proc sendError*(resp: HttpResponseRef, code: HttpCode, body = "") {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Send HTTP error status response. checkPending(resp) resp.status = code let responseHeaders = resp.prepareLengthHeaders(len(body)) - resp.state = HttpResponseState.Prepared + resp.setResponseState(HttpResponseState.Prepared) try: - resp.state = HttpResponseState.Sending + resp.setResponseState(HttpResponseState.Sending) await resp.connection.writer.write(responseHeaders) if len(body) > 0: await resp.connection.writer.write(body) - resp.state = HttpResponseState.Finished + resp.setResponseState(HttpResponseState.Finished) except CancelledError as exc: - resp.state = HttpResponseState.Cancelled + resp.setResponseState(HttpResponseState.Cancelled) raise exc except AsyncStreamError as exc: - resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) + resp.setResponseState(HttpResponseState.Failed) + raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) proc prepare*(resp: HttpResponseRef, streamType = HttpResponseStreamType.Chunked) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Prepare for HTTP stream response. 
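# State flow of the senders above: Empty -> Prepared -> Sending ->
# Finished on success; cancellation leaves the response Cancelled and a
# stream failure leaves it Failed, surfaced as HttpWriteError. A minimal
# error reply using the same machinery:
let response = request.getResponse()
try:
  await response.sendError(Http503, "try again later")
except HttpWriteError:
  discard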
## ## Such responses will be sent chunk by chunk using ``chunked`` encoding. @@ -1462,9 +1345,9 @@ proc prepare*(resp: HttpResponseRef, of HttpResponseStreamType.Chunked: resp.prepareChunkedHeaders() resp.streamType = streamType - resp.state = HttpResponseState.Prepared + resp.setResponseState(HttpResponseState.Prepared) try: - resp.state = HttpResponseState.Sending + resp.setResponseState(HttpResponseState.Sending) await resp.connection.writer.write(responseHeaders) case streamType of HttpResponseStreamType.Plain, HttpResponseStreamType.SSE: @@ -1473,117 +1356,105 @@ proc prepare*(resp: HttpResponseRef, resp.writer = newChunkedStreamWriter(resp.connection.writer) resp.flags.incl(HttpResponseFlags.Stream) except CancelledError as exc: - resp.state = HttpResponseState.Cancelled + resp.setResponseState(HttpResponseState.Cancelled) raise exc except AsyncStreamError as exc: - resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) + resp.setResponseState(HttpResponseState.Failed) + raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) proc prepareChunked*(resp: HttpResponseRef): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = ## Prepare for HTTP chunked stream response. ## ## Such responses will be sent chunk by chunk using ``chunked`` encoding. resp.prepare(HttpResponseStreamType.Chunked) proc preparePlain*(resp: HttpResponseRef): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = ## Prepare for HTTP plain stream response. ## ## Such responses will be sent without any encoding. resp.prepare(HttpResponseStreamType.Plain) proc prepareSSE*(resp: HttpResponseRef): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = ## Prepare for HTTP server-side event stream response. resp.prepare(HttpResponseStreamType.SSE) proc send*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Send single chunk of data pointed by ``pbytes`` and ``nbytes``. - doAssert(not(isNil(pbytes)), "pbytes must not be nil") - doAssert(nbytes >= 0, "nbytes should be bigger or equal to zero") - if HttpResponseFlags.Stream notin resp.flags: - raiseHttpCriticalError("Response was not prepared") - if resp.state notin {HttpResponseState.Prepared, HttpResponseState.Sending}: - raiseHttpCriticalError("Response in incorrect state") + checkPointerLength(pbytes, nbytes) + resp.checkStreamResponse() + resp.checkStreamResponseState() try: - resp.state = HttpResponseState.Sending + resp.setResponseState(HttpResponseState.Sending) await resp.writer.write(pbytes, nbytes) - resp.state = HttpResponseState.Sending except CancelledError as exc: - resp.state = HttpResponseState.Cancelled + resp.setResponseState(HttpResponseState.Cancelled) raise exc except AsyncStreamError as exc: - resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) + resp.setResponseState(HttpResponseState.Failed) + raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) proc send*(resp: HttpResponseRef, data: ByteChar) {. 
- async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Send single chunk of data ``data``. - if HttpResponseFlags.Stream notin resp.flags: - raiseHttpCriticalError("Response was not prepared") - if resp.state notin {HttpResponseState.Prepared, HttpResponseState.Sending}: - raiseHttpCriticalError("Response in incorrect state") + resp.checkStreamResponse() + resp.checkStreamResponseState() try: - resp.state = HttpResponseState.Sending + resp.setResponseState(HttpResponseState.Sending) await resp.writer.write(data) - resp.state = HttpResponseState.Sending except CancelledError as exc: - resp.state = HttpResponseState.Cancelled + resp.setResponseState(HttpResponseState.Cancelled) raise exc except AsyncStreamError as exc: - resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) + resp.setResponseState(HttpResponseState.Failed) + raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) proc sendChunk*(resp: HttpResponseRef, pbytes: pointer, nbytes: int): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = resp.send(pbytes, nbytes) proc sendChunk*(resp: HttpResponseRef, data: ByteChar): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = resp.send(data) proc sendEvent*(resp: HttpResponseRef, eventName: string, data: string): Future[void] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = ## Send server-side event with name ``eventName`` and payload ``data`` to ## remote peer. - let data = - block: - var res = "" - if len(eventName) > 0: - res.add("event: ") - res.add(eventName) - res.add("\r\n") - res.add("data: ") - res.add(data) - res.add("\r\n\r\n") - res - resp.send(data) + var res = "" + if len(eventName) > 0: + res.add("event: ") + res.add(eventName) + res.add("\r\n") + res.add("data: ") + res.add(data) + res.add("\r\n\r\n") + resp.send(res) proc finish*(resp: HttpResponseRef) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Sending last chunk of data, so it will indicate end of HTTP response. - if HttpResponseFlags.Stream notin resp.flags: - raiseHttpCriticalError("Response was not prepared") - if resp.state notin {HttpResponseState.Prepared, HttpResponseState.Sending}: - raiseHttpCriticalError("Response in incorrect state") + resp.checkStreamResponse() + resp.checkStreamResponseState() try: - resp.state = HttpResponseState.Sending + resp.setResponseState(HttpResponseState.Sending) await resp.writer.finish() - resp.state = HttpResponseState.Finished + resp.setResponseState(HttpResponseState.Finished) except CancelledError as exc: - resp.state = HttpResponseState.Cancelled + resp.setResponseState(HttpResponseState.Cancelled) raise exc except AsyncStreamError as exc: - resp.state = HttpResponseState.Failed - raiseHttpCriticalError("Unable to send response, reason: " & $exc.msg) + resp.setResponseState(HttpResponseState.Failed) + raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) proc respond*(req: HttpRequestRef, code: HttpCode, content: ByteChar, headers: HttpTable): Future[HttpResponseRef] {. 
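# A hedged sketch of the chunked-streaming flow built from the calls
# above, as also exercised by the client tests later in this patch; the
# chunk payloads are illustrative.
let response = request.getResponse()
response.status = Http200
try:
  await response.prepare()             # defaults to chunked encoding
  await response.sendChunk("chunk-1")
  await response.sendChunk("chunk-2")
  await response.finish()
except HttpWriteError:
  discard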
- async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Responds to the request with the specified ``HttpCode``, HTTP ``headers`` ## and ``content``. let response = req.getResponse() @@ -1595,18 +1466,18 @@ proc respond*(req: HttpRequestRef, code: HttpCode, content: ByteChar, proc respond*(req: HttpRequestRef, code: HttpCode, content: ByteChar): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = ## Responds to the request with specified ``HttpCode`` and ``content``. respond(req, code, content, HttpTable.init()) proc respond*(req: HttpRequestRef, code: HttpCode): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = ## Responds to the request with specified ``HttpCode`` only. respond(req, code, "", HttpTable.init()) proc redirect*(req: HttpRequestRef, code: HttpCode, location: string, headers: HttpTable): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = ## Responds to the request with redirection to location ``location`` and ## additional headers ``headers``. ## @@ -1618,7 +1489,7 @@ proc redirect*(req: HttpRequestRef, code: HttpCode, proc redirect*(req: HttpRequestRef, code: HttpCode, location: Uri, headers: HttpTable): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = ## Responds to the request with redirection to location ``location`` and ## additional headers ``headers``. ## @@ -1628,13 +1499,13 @@ proc redirect*(req: HttpRequestRef, code: HttpCode, proc redirect*(req: HttpRequestRef, code: HttpCode, location: Uri): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = ## Responds to the request with redirection to location ``location``. redirect(req, code, location, HttpTable.init()) proc redirect*(req: HttpRequestRef, code: HttpCode, location: string): Future[HttpResponseRef] {. - async: (raw: true, raises: [CancelledError, HttpCriticalError]).} = + async: (raw: true, raises: [CancelledError, HttpWriteError]).} = ## Responds to the request with redirection to location ``location``. 
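# Usage sketch matching the redirection tests later in this patch: the
# location may be given as a relative path or as a Uri.
try:
  discard await request.redirect(Http302, "/redirect/1")
except HttpWriteError:
  discard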
redirect(req, code, location, HttpTable.init()) diff --git a/chronos/apps/http/multipart.nim b/chronos/apps/http/multipart.nim index 83a4b566e..302d6efd9 100644 --- a/chronos/apps/http/multipart.nim +++ b/chronos/apps/http/multipart.nim @@ -18,7 +18,8 @@ import "."/[httptable, httpcommon, httpbodyrw] export asyncloop, httptable, httpcommon, httpbodyrw, asyncstream, httputils const - UnableToReadMultipartBody = "Unable to read multipart message body" + UnableToReadMultipartBody = "Unable to read multipart message body, reason: " + UnableToSendMultipartMessage = "Unable to send multipart message, reason: " type MultiPartSource* {.pure.} = enum @@ -69,7 +70,7 @@ type name*: string filename*: string - MultipartError* = object of HttpCriticalError + MultipartError* = object of HttpProtocolError MultipartEOMError* = object of MultipartError BChar* = byte | char @@ -105,7 +106,7 @@ func setPartNames(part: var MultiPart): HttpResult[void] = return err("Content-Disposition header value is incorrect") let dtype = disp.dispositionType(header.toOpenArrayByte(0, len(header) - 1)) if dtype.toLowerAscii() != "form-data": - return err("Content-Disposition type is incorrect") + return err("Content-Disposition header type is incorrect") for k, v in disp.fields(header.toOpenArrayByte(0, len(header) - 1)): case k.toLowerAscii() of "name": @@ -171,8 +172,17 @@ proc new*[B: BChar](mpt: typedesc[MultiPartReaderRef], stream: stream, offset: 0, boundary: fboundary, buffer: newSeq[byte](partHeadersMaxSize)) +template handleAsyncStreamReaderError(targ, excarg: untyped) = + if targ.hasOverflow(): + raiseHttpRequestBodyTooLargeError() + raiseHttpReadError(UnableToReadMultipartBody & $excarg.msg) + +template handleAsyncStreamWriterError(targ, excarg: untyped) = + targ.state = MultiPartWriterState.MessageFailure + raiseHttpWriteError(UnableToSendMultipartMessage & $excarg.msg) + proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpReadError, HttpProtocolError]).} = doAssert(mpr.kind == MultiPartSource.Stream) if mpr.firstTime: try: @@ -181,14 +191,11 @@ proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {. mpr.firstTime = false if not(startsWith(mpr.buffer.toOpenArray(0, len(mpr.boundary) - 3), mpr.boundary.toOpenArray(2, len(mpr.boundary) - 1))): - raiseHttpCriticalError("Unexpected boundary encountered") + raiseHttpProtocolError(Http400, "Unexpected boundary encountered") except CancelledError as exc: raise exc - except AsyncStreamError: - if mpr.stream.hasOverflow(): - raiseHttpCriticalError(MaximumBodySizeError, Http413) - else: - raiseHttpCriticalError(UnableToReadMultipartBody) + except AsyncStreamError as exc: + handleAsyncStreamReaderError(mpr.stream, exc) # Reading part's headers try: @@ -202,9 +209,9 @@ proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {. raise newException(MultipartEOMError, "End of multipart message") else: - raiseHttpCriticalError("Incorrect multipart header found") + raiseHttpProtocolError(Http400, "Incorrect multipart header found") if mpr.buffer[0] != 0x0D'u8 or mpr.buffer[1] != 0x0A'u8: - raiseHttpCriticalError("Incorrect multipart boundary found") + raiseHttpProtocolError(Http400, "Incorrect multipart boundary found") # If two bytes are CRLF we are at the part beginning. # Reading part's headers @@ -212,7 +219,7 @@ proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {. 
HeadersMark) var headersList = parseHeaders(mpr.buffer.toOpenArray(0, res - 1), false) if headersList.failed(): - raiseHttpCriticalError("Incorrect multipart's headers found") + raiseHttpProtocolError(Http400, "Incorrect multipart's headers found") inc(mpr.counter) var part = MultiPart( @@ -228,45 +235,35 @@ proc readPart*(mpr: MultiPartReaderRef): Future[MultiPart] {. let sres = part.setPartNames() if sres.isErr(): - raiseHttpCriticalError($sres.error) + raiseHttpProtocolError(Http400, $sres.error) return part except CancelledError as exc: raise exc - except AsyncStreamError: - if mpr.stream.hasOverflow(): - raiseHttpCriticalError(MaximumBodySizeError, Http413) - else: - raiseHttpCriticalError(UnableToReadMultipartBody) + except AsyncStreamError as exc: + handleAsyncStreamReaderError(mpr.stream, exc) proc getBody*(mp: MultiPart): Future[seq[byte]] {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpReadError, HttpProtocolError]).} = ## Get multipart's ``mp`` value as sequence of bytes. case mp.kind of MultiPartSource.Stream: try: - let res = await mp.stream.read() - return res - except AsyncStreamError: - if mp.breader.hasOverflow(): - raiseHttpCriticalError(MaximumBodySizeError, Http413) - else: - raiseHttpCriticalError(UnableToReadMultipartBody) + await mp.stream.read() + except AsyncStreamError as exc: + handleAsyncStreamReaderError(mp.breader, exc) of MultiPartSource.Buffer: - return mp.buffer + mp.buffer proc consumeBody*(mp: MultiPart) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpReadError, HttpProtocolError]).} = ## Discard multipart's ``mp`` value. case mp.kind of MultiPartSource.Stream: try: discard await mp.stream.consume() - except AsyncStreamError: - if mp.breader.hasOverflow(): - raiseHttpCriticalError(MaximumBodySizeError, Http413) - else: - raiseHttpCriticalError(UnableToReadMultipartBody) + except AsyncStreamError as exc: + handleAsyncStreamReaderError(mp.breader, exc) of MultiPartSource.Buffer: discard @@ -533,7 +530,7 @@ proc new*[B: BChar](mpt: typedesc[MultiPartWriterRef], proc prepareHeaders(partMark: openArray[byte], name: string, filename: string, headers: HttpTable): string = - const ContentDisposition = "Content-Disposition" + const ContentDispositionHeader = "Content-Disposition" let qname = block: let res = quoteCheck(name) @@ -546,10 +543,10 @@ proc prepareHeaders(partMark: openArray[byte], name: string, filename: string, res.get() var buffer = newString(len(partMark)) copyMem(addr buffer[0], unsafeAddr partMark[0], len(partMark)) - buffer.add(ContentDisposition) + buffer.add(ContentDispositionHeader) buffer.add(": ") - if ContentDisposition in headers: - buffer.add(headers.getString(ContentDisposition)) + if ContentDispositionHeader in headers: + buffer.add(headers.getString(ContentDispositionHeader)) buffer.add("\r\n") else: buffer.add("form-data; name=\"") @@ -562,7 +559,7 @@ proc prepareHeaders(partMark: openArray[byte], name: string, filename: string, buffer.add("\r\n") for k, v in headers.stringItems(): - if k != toLowerAscii(ContentDisposition): + if k != ContentDispositionHeader: if len(v) > 0: buffer.add(k) buffer.add(": ") @@ -572,7 +569,7 @@ proc prepareHeaders(partMark: openArray[byte], name: string, filename: string, buffer proc begin*(mpw: MultiPartWriterRef) {. 
- async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Starts multipart message form and write approprate markers to output ## stream. doAssert(mpw.kind == MultiPartSource.Stream) @@ -580,10 +577,9 @@ proc begin*(mpw: MultiPartWriterRef) {. # write "--" try: await mpw.stream.write(mpw.beginMark) - except AsyncStreamError: - mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError("Unable to start multipart message") - mpw.state = MultiPartWriterState.MessageStarted + mpw.state = MultiPartWriterState.MessageStarted + except AsyncStreamError as exc: + handleAsyncStreamWriterError(mpw, exc) proc begin*(mpw: var MultiPartWriter) = ## Starts multipart message form and write approprate markers to output @@ -596,7 +592,7 @@ proc begin*(mpw: var MultiPartWriter) = proc beginPart*(mpw: MultiPartWriterRef, name: string, filename: string, headers: HttpTable) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Starts part of multipart message and write appropriate ``headers`` to the ## output stream. ## @@ -611,9 +607,8 @@ proc beginPart*(mpw: MultiPartWriterRef, name: string, try: await mpw.stream.write(buffer) mpw.state = MultiPartWriterState.PartStarted - except AsyncStreamError: - mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError("Unable to start multipart part") + except AsyncStreamError as exc: + handleAsyncStreamWriterError(mpw, exc) proc beginPart*(mpw: var MultiPartWriter, name: string, filename: string, headers: HttpTable) = @@ -632,7 +627,7 @@ proc beginPart*(mpw: var MultiPartWriter, name: string, mpw.state = MultiPartWriterState.PartStarted proc write*(mpw: MultiPartWriterRef, pbytes: pointer, nbytes: int) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Write part's data ``data`` to the output stream. doAssert(mpw.kind == MultiPartSource.Stream) doAssert(mpw.state == MultiPartWriterState.PartStarted) @@ -640,12 +635,10 @@ proc write*(mpw: MultiPartWriterRef, pbytes: pointer, nbytes: int) {. # write of data await mpw.stream.write(pbytes, nbytes) except AsyncStreamError as exc: - mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError( - "Unable to write multipart data, reason: " & $exc.msg) + handleAsyncStreamWriterError(mpw, exc) proc write*(mpw: MultiPartWriterRef, data: seq[byte]) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Write part's data ``data`` to the output stream. doAssert(mpw.kind == MultiPartSource.Stream) doAssert(mpw.state == MultiPartWriterState.PartStarted) @@ -653,12 +646,10 @@ proc write*(mpw: MultiPartWriterRef, data: seq[byte]) {. # write of data await mpw.stream.write(data) except AsyncStreamError as exc: - mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError( - "Unable to write multipart data, reason: " & $exc.msg) + handleAsyncStreamWriterError(mpw, exc) proc write*(mpw: MultiPartWriterRef, data: string) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Write part's data ``data`` to the output stream. doAssert(mpw.kind == MultiPartSource.Stream) doAssert(mpw.state == MultiPartWriterState.PartStarted) @@ -666,9 +657,7 @@ proc write*(mpw: MultiPartWriterRef, data: string) {. 
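# Putting the writer calls above together: a hedged sketch assuming `mpw`
# is a stream-backed MultiPartWriterRef created elsewhere.
try:
  await mpw.begin()
  await mpw.beginPart("field1", "", HttpTable.init())   # names are illustrative
  await mpw.write("value1")
  await mpw.finishPart()
  await mpw.finish()
except HttpWriteError:
  discard   # the writer is left in MultiPartWriterState.MessageFailure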
# write of data await mpw.stream.write(data) except AsyncStreamError as exc: - mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError( - "Unable to write multipart data, reason: " & $exc.msg) + handleAsyncStreamWriterError(mpw, exc) proc write*(mpw: var MultiPartWriter, pbytes: pointer, nbytes: int) = ## Write part's data ``data`` to the output stream. @@ -692,7 +681,7 @@ proc write*(mpw: var MultiPartWriter, data: openArray[char]) = mpw.buffer.add(data.toOpenArrayByte(0, len(data) - 1)) proc finishPart*(mpw: MultiPartWriterRef) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Finish multipart's message part and send proper markers to output stream. doAssert(mpw.state == MultiPartWriterState.PartStarted) try: @@ -700,9 +689,7 @@ proc finishPart*(mpw: MultiPartWriterRef) {. await mpw.stream.write(mpw.finishPartMark) mpw.state = MultiPartWriterState.PartFinished except AsyncStreamError as exc: - mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError( - "Unable to finish multipart message part, reason: " & $exc.msg) + handleAsyncStreamWriterError(mpw, exc) proc finishPart*(mpw: var MultiPartWriter) = ## Finish multipart's message part and send proper markers to output stream. @@ -713,7 +700,7 @@ proc finishPart*(mpw: var MultiPartWriter) = mpw.state = MultiPartWriterState.PartFinished proc finish*(mpw: MultiPartWriterRef) {. - async: (raises: [CancelledError, HttpCriticalError]).} = + async: (raises: [CancelledError, HttpWriteError]).} = ## Finish multipart's message form and send finishing markers to the output ## stream. doAssert(mpw.kind == MultiPartSource.Stream) @@ -723,9 +710,7 @@ proc finish*(mpw: MultiPartWriterRef) {. await mpw.stream.write(mpw.finishMark) mpw.state = MultiPartWriterState.MessageFinished except AsyncStreamError as exc: - mpw.state = MultiPartWriterState.MessageFailure - raiseHttpCriticalError( - "Unable to finish multipart message, reason: " & $exc.msg) + handleAsyncStreamWriterError(mpw, exc) proc finish*(mpw: var MultiPartWriter): seq[byte] = ## Finish multipart's message form and send finishing markers to the output diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim index f7e377f93..6272bb2b5 100644 --- a/chronos/apps/http/shttpserver.nim +++ b/chronos/apps/http/shttpserver.nim @@ -164,22 +164,21 @@ proc new*(htype: typedesc[SecureHttpServerRef], maxRequestBodySize: int = 1_048_576, dualstack = DualStackType.Auto ): HttpResult[SecureHttpServerRef] {. - deprecated: "raises missing from process callback".} = - proc processCallback2(req: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = - try: - await processCallback(req) - except CancelledError as exc: - raise exc - except HttpResponseError as exc: - raise exc - except CatchableError as exc: - # Emulate 3.x behavior - raise (ref HttpCriticalError)(msg: exc.msg, code: Http503) + deprecated: "Callback could raise only CancelledError, annotate with " & + "{.async: (raises: [CancelledError]).}".} = + + proc wrap(req: RequestFence): Future[HttpResponseRef] {. 
+ async: (raises: [CancelledError]).} = + try: + await processCallback(req) + except CancelledError as exc: + raise exc + except CatchableError as exc: + defaultResponse(exc) SecureHttpServerRef.new( address = address, - processCallback = processCallback2, + processCallback = wrap, tlsPrivateKey = tlsPrivateKey, tlsCertificate = tlsCertificate, serverFlags = serverFlags, @@ -194,4 +193,4 @@ proc new*(htype: typedesc[SecureHttpServerRef], maxHeadersSize = maxHeadersSize, maxRequestBodySize = maxRequestBodySize, dualstack = dualstack - ) \ No newline at end of file + ) diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index d2a355d8c..967f896be 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -85,7 +85,8 @@ suite "HTTP client testing suite": res proc createServer(address: TransportAddress, - process: HttpProcessCallback2, secure: bool): HttpServerRef = + process: HttpProcessCallback2, + secure: bool): HttpServerRef = let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} serverFlags = {HttpServerFlags.Http11Pipeline} @@ -128,18 +129,24 @@ suite "HTTP client testing suite": (MethodPatch, "/test/patch") ] proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() case request.uri.path of "/test/get", "/test/post", "/test/head", "/test/put", "/test/delete", "/test/trace", "/test/options", "/test/connect", "/test/patch", "/test/error": - return await request.respond(Http200, request.uri.path) + try: + await request.respond(Http200, request.uri.path) + except HttpWriteError as exc: + defaultResponse(exc) else: - return await request.respond(Http404, "Page not found") + try: + await request.respond(Http404, "Page not found") + except HttpWriteError as exc: + defaultResponse(exc) else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -195,7 +202,7 @@ suite "HTTP client testing suite": "LONGCHUNKRESPONSE") ] proc process(r: RequestFence): Future[HttpResponseRef] {. 
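# The callback shape used throughout the updated tests below: only
# CancelledError may escape, so a failed send is converted into
# defaultResponse(exc); `handler` is an illustrative name.
proc handler(r: RequestFence): Future[HttpResponseRef] {.
    async: (raises: [CancelledError]).} =
  if r.isOk():
    try:
      await r.get().respond(Http200, "ok")
    except HttpWriteError as exc:
      defaultResponse(exc)
  else:
    defaultResponse()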
- async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() case request.uri.path @@ -203,46 +210,58 @@ suite "HTTP client testing suite": var response = request.getResponse() var data = createBigMessage(ResponseTests[0][4], ResponseTests[0][2]) response.status = Http200 - await response.sendBody(data) - return response + try: + await response.sendBody(data) + except HttpWriteError as exc: + return defaultResponse(exc) + response of "/test/long_size_response": var response = request.getResponse() var data = createBigMessage(ResponseTests[1][4], ResponseTests[1][2]) response.status = Http200 - await response.sendBody(data) - return response + try: + await response.sendBody(data) + except HttpWriteError as exc: + return defaultResponse(exc) + response of "/test/short_chunked_response": var response = request.getResponse() var data = createBigMessage(ResponseTests[2][4], ResponseTests[2][2]) response.status = Http200 - await response.prepare() - var offset = 0 - while true: - if len(data) == offset: - break - let toWrite = min(1024, len(data) - offset) - await response.sendChunk(addr data[offset], toWrite) - offset = offset + toWrite - await response.finish() - return response + try: + await response.prepare() + var offset = 0 + while true: + if len(data) == offset: + break + let toWrite = min(1024, len(data) - offset) + await response.sendChunk(addr data[offset], toWrite) + offset = offset + toWrite + await response.finish() + except HttpWriteError as exc: + return defaultResponse(exc) + response of "/test/long_chunked_response": var response = request.getResponse() var data = createBigMessage(ResponseTests[3][4], ResponseTests[3][2]) response.status = Http200 - await response.prepare() - var offset = 0 - while true: - if len(data) == offset: - break - let toWrite = min(1024, len(data) - offset) - await response.sendChunk(addr data[offset], toWrite) - offset = offset + toWrite - await response.finish() - return response + try: + await response.prepare() + var offset = 0 + while true: + if len(data) == offset: + break + let toWrite = min(1024, len(data) - offset) + await response.sendChunk(addr data[offset], toWrite) + offset = offset + toWrite + await response.finish() + except HttpWriteError as exc: + return defaultResponse(exc) + response else: - return await request.respond(Http404, "Page not found") + defaultResponse() else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -311,21 +330,26 @@ suite "HTTP client testing suite": (MethodPost, "/test/big_request", 262400) ] proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() case request.uri.path of "/test/big_request": - if request.hasBody(): - let body = await request.getBody() - let digest = $secureHash(string.fromBytes(body)) - return await request.respond(Http200, digest) - else: - return await request.respond(Http400, "Missing content body") + try: + if request.hasBody(): + let body = await request.getBody() + let digest = $secureHash(string.fromBytes(body)) + await request.respond(Http200, digest) + else: + await request.respond(Http400, "Missing content body") + except HttpProtocolError as exc: + defaultResponse(exc) + except HttpTransportError as exc: + defaultResponse(exc) else: - return await request.respond(Http404, "Page not found") + defaultResponse() else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -381,21 +405,27 @@ suite "HTTP client testing suite": (MethodPost, "/test/big_chunk_request", 262400) ] proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() case request.uri.path of "/test/big_chunk_request": - if request.hasBody(): - let body = await request.getBody() - let digest = $secureHash(string.fromBytes(body)) - return await request.respond(Http200, digest) - else: - return await request.respond(Http400, "Missing content body") + try: + if request.hasBody(): + let + body = await request.getBody() + digest = $secureHash(string.fromBytes(body)) + await request.respond(Http200, digest) + else: + await request.respond(Http400, "Missing content body") + except HttpProtocolError as exc: + defaultResponse(exc) + except HttpTransportError as exc: + defaultResponse(exc) else: - return await request.respond(Http404, "Page not found") + defaultResponse() else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -455,23 +485,28 @@ suite "HTTP client testing suite": ] proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() case request.uri.path of "/test/post/urlencoded_size", "/test/post/urlencoded_chunked": - if request.hasBody(): - var postTable = await request.post() - let body = postTable.getString("field1") & ":" & - postTable.getString("field2") & ":" & - postTable.getString("field3") - return await request.respond(Http200, body) - else: - return await request.respond(Http400, "Missing content body") + try: + if request.hasBody(): + var postTable = await request.post() + let body = postTable.getString("field1") & ":" & + postTable.getString("field2") & ":" & + postTable.getString("field3") + await request.respond(Http200, body) + else: + await request.respond(Http400, "Missing content body") + except HttpTransportError as exc: + defaultResponse(exc) + except HttpProtocolError as exc: + defaultResponse(exc) else: - return await request.respond(Http404, "Page not found") + defaultResponse() else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -554,23 +589,28 @@ suite "HTTP client testing suite": ] proc process(r: RequestFence): Future[HttpResponseRef] {. 
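# A condensed sketch of the body-reading handlers above; `handleBody` is
# an illustrative name and the error cases degrade to defaultResponse.
proc handleBody(request: HttpRequestRef): Future[HttpResponseRef] {.
    async: (raises: [CancelledError]).} =
  try:
    if request.hasBody():
      let body = await request.getBody()
      await request.respond(Http200, "received " & $len(body) & " bytes")
    else:
      await request.respond(Http400, "Missing content body")
  except HttpProtocolError as exc:
    defaultResponse(exc)
  except HttpTransportError as exc:
    defaultResponse(exc)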
- async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() case request.uri.path of "/test/post/multipart_size", "/test/post/multipart_chunked": - if request.hasBody(): - var postTable = await request.post() - let body = postTable.getString("field1") & ":" & - postTable.getString("field2") & ":" & - postTable.getString("field3") - return await request.respond(Http200, body) - else: - return await request.respond(Http400, "Missing content body") + try: + if request.hasBody(): + var postTable = await request.post() + let body = postTable.getString("field1") & ":" & + postTable.getString("field2") & ":" & + postTable.getString("field3") + await request.respond(Http200, body) + else: + await request.respond(Http400, "Missing content body") + except HttpProtocolError as exc: + defaultResponse(exc) + except HttpTransportError as exc: + defaultResponse(exc) else: - return await request.respond(Http404, "Page not found") + defaultResponse() else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -649,26 +689,29 @@ suite "HTTP client testing suite": var lastAddress: Uri proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - case request.uri.path - of "/": - return await request.redirect(Http302, "/redirect/1") - of "/redirect/1": - return await request.redirect(Http302, "/next/redirect/2") - of "/next/redirect/2": - return await request.redirect(Http302, "redirect/3") - of "/next/redirect/redirect/3": - return await request.redirect(Http302, "next/redirect/4") - of "/next/redirect/redirect/next/redirect/4": - return await request.redirect(Http302, lastAddress) - of "/final/5": - return await request.respond(Http200, "ok-5") - else: - return await request.respond(Http404, "Page not found") + try: + case request.uri.path + of "/": + await request.redirect(Http302, "/redirect/1") + of "/redirect/1": + await request.redirect(Http302, "/next/redirect/2") + of "/next/redirect/2": + await request.redirect(Http302, "redirect/3") + of "/next/redirect/redirect/3": + await request.redirect(Http302, "next/redirect/4") + of "/next/redirect/redirect/next/redirect/4": + await request.redirect(Http302, lastAddress) + of "/final/5": + await request.respond(Http200, "ok-5") + else: + await request.respond(Http404, "Page not found") + except HttpWriteError as exc: + defaultResponse(exc) else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -706,8 +749,8 @@ suite "HTTP client testing suite": proc testSendCancelLeaksTest(secure: bool): Future[bool] {.async.} = proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = - return defaultResponse() + async: (raises: [CancelledError]).} = + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -756,8 +799,8 @@ suite "HTTP client testing suite": proc testOpenCancelLeaksTest(secure: bool): Future[bool] {.async.} = proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async: (raises: [CancelledError, HttpResponseError]).} = - return defaultResponse() + async: (raises: [CancelledError]).} = + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() @@ -868,20 +911,23 @@ suite "HTTP client testing suite": (data2.status, data2.data.bytesToString(), count)] proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - case request.uri.path - of "/keep": - let headers = HttpTable.init([("connection", "keep-alive")]) - return await request.respond(Http200, "ok", headers = headers) - of "/drop": - let headers = HttpTable.init([("connection", "close")]) - return await request.respond(Http200, "ok", headers = headers) - else: - return await request.respond(Http404, "Page not found") + try: + case request.uri.path + of "/keep": + let headers = HttpTable.init([("connection", "keep-alive")]) + await request.respond(Http200, "ok", headers = headers) + of "/drop": + let headers = HttpTable.init([("connection", "close")]) + await request.respond(Http200, "ok", headers = headers) + else: + await request.respond(Http404, "Page not found") + except HttpWriteError as exc: + defaultResponse(exc) else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, false) server.start() @@ -1004,16 +1050,19 @@ suite "HTTP client testing suite": return (data.status, data.data.bytesToString(), 0) proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - case request.uri.path - of "/test": - return await request.respond(Http200, "ok") - else: - return await request.respond(Http404, "Page not found") + try: + case request.uri.path + of "/test": + await request.respond(Http200, "ok") + else: + await request.respond(Http404, "Page not found") + except HttpWriteError as exc: + defaultResponse(exc) else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, false) server.start() @@ -1064,19 +1113,22 @@ suite "HTTP client testing suite": return (data.status, data.data.bytesToString(), 0) proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - case request.uri.path - of "/test": - return await request.respond(Http200, "ok") - of "/keep-test": - let headers = HttpTable.init([("Connection", "keep-alive")]) - return await request.respond(Http200, "not-alive", headers) - else: - return await request.respond(Http404, "Page not found") + try: + case request.uri.path + of "/test": + await request.respond(Http200, "ok") + of "/keep-test": + let headers = HttpTable.init([("Connection", "keep-alive")]) + await request.respond(Http200, "not-alive", headers) + else: + await request.respond(Http404, "Page not found") + except HttpWriteError as exc: + defaultResponse(exc) else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, false) server.start() @@ -1180,58 +1232,61 @@ suite "HTTP client testing suite": true proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - if request.uri.path.startsWith("/test/single/"): - let index = - block: - var res = -1 - for index, value in SingleGoodTests.pairs(): - if value[0] == request.uri.path: - res = index - break - res - if index < 0: - return await request.respond(Http404, "Page not found") - var response = request.getResponse() - response.status = Http200 - await response.sendBody(SingleGoodTests[index][1]) - return response - elif request.uri.path.startsWith("/test/multiple/"): - let index = - block: - var res = -1 - for index, value in MultipleGoodTests.pairs(): - if value[0] == request.uri.path: - res = index - break - res - if index < 0: - return await request.respond(Http404, "Page not found") - var response = request.getResponse() - response.status = Http200 - await response.sendBody(MultipleGoodTests[index][1]) - return response - elif request.uri.path.startsWith("/test/overflow/"): - let index = - block: - var res = -1 - for index, value in OverflowTests.pairs(): - if value[0] == request.uri.path: - res = index - break - res - if index < 0: - return await request.respond(Http404, "Page not found") - var response = request.getResponse() - response.status = Http200 - await response.sendBody(OverflowTests[index][1]) - return response - else: - return await request.respond(Http404, "Page not found") + try: + if request.uri.path.startsWith("/test/single/"): + let index = + block: + var res = -1 + for index, value in SingleGoodTests.pairs(): + if value[0] == request.uri.path: + res = index + break + res + if index < 0: + return await request.respond(Http404, "Page not found") + var response = request.getResponse() + response.status = Http200 + await response.sendBody(SingleGoodTests[index][1]) + response + elif request.uri.path.startsWith("/test/multiple/"): + let index = + block: + var res = -1 + for index, value in MultipleGoodTests.pairs(): + if value[0] == request.uri.path: + res = index + break + res + if index < 0: + return await request.respond(Http404, "Page not found") + var response = request.getResponse() + response.status = Http200 + await response.sendBody(MultipleGoodTests[index][1]) + response + elif request.uri.path.startsWith("/test/overflow/"): + let index = + block: + var res = -1 + for index, value in OverflowTests.pairs(): + if value[0] == request.uri.path: + res = index + break + res + if index < 0: + return await request.respond(Http404, "Page not found") + var response = request.getResponse() + response.status = Http200 + await response.sendBody(OverflowTests[index][1]) + response + else: + defaultResponse() + except HttpWriteError as exc: + defaultResponse(exc) else: - return defaultResponse() + defaultResponse() var server = createServer(initTAddress("127.0.0.1:0"), process, secure) server.start() diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index 33d5ea160..0183f1bfe 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -64,7 +64,7 @@ suite "HTTP server testing suite": proc testTooBigBodyChunked(operation: TooBigTest): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() try: @@ -77,13 +77,15 @@ suite "HTTP server testing suite": let ptable {.used.} = await request.post() of PostMultipartTest: let ptable {.used.} = await request.post() - except HttpCriticalError as exc: + defaultResponse() + except HttpTransportError as exc: + defaultResponse(exc) + except HttpProtocolError as exc: if exc.code == Http413: serverRes = true - # Reraising exception, because processor should properly handle it. - raise exc + defaultResponse(exc) else: - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -128,14 +130,17 @@ suite "HTTP server testing suite": proc testTimeout(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - return await request.respond(Http200, "TEST_OK", HttpTable.init()) + try: + await request.respond(Http200, "TEST_OK", HttpTable.init()) + except HttpWriteError as exc: + defaultResponse(exc) else: if r.error.kind == HttpServerError.TimeoutError: serverRes = true - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), @@ -158,14 +163,17 @@ suite "HTTP server testing suite": proc testEmpty(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - return await request.respond(Http200, "TEST_OK", HttpTable.init()) + try: + await request.respond(Http200, "TEST_OK", HttpTable.init()) + except HttpWriteError as exc: + defaultResponse(exc) else: - if r.error.kind == HttpServerError.CriticalError: + if r.error.kind == HttpServerError.ProtocolError: serverRes = true - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), @@ -188,14 +196,17 @@ suite "HTTP server testing suite": proc testTooBig(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - return await request.respond(Http200, "TEST_OK", HttpTable.init()) + try: + await request.respond(Http200, "TEST_OK", HttpTable.init()) + except HttpWriteError as exc: + defaultResponse(exc) else: - if r.error.error == HttpServerError.CriticalError: + if r.error.error == HttpServerError.ProtocolError: serverRes = true - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -219,13 +230,11 @@ suite "HTTP server testing suite": proc testTooBigBody(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async: (raises: [CancelledError, HttpResponseError]).} = - if r.isOk(): - discard - else: - if r.error.error == HttpServerError.CriticalError: + async: (raises: [CancelledError]).} = + if r.isErr(): + if r.error.error == HttpServerError.ProtocolError: serverRes = true - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -266,7 +275,7 @@ suite "HTTP server testing suite": proc testQuery(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() var kres = newSeq[string]() @@ -274,11 +283,14 @@ suite "HTTP server testing suite": kres.add(k & ":" & v) sort(kres) serverRes = true - return await request.respond(Http200, "TEST_OK:" & kres.join(":"), - HttpTable.init()) + try: + await request.respond(Http200, "TEST_OK:" & kres.join(":"), + HttpTable.init()) + except HttpWriteError as exc: + serverRes = false + defaultResponse(exc) else: - serverRes = false - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -296,10 +308,9 @@ suite "HTTP server testing suite": "GET /?a=%D0%9F&%D0%A4=%D0%91&b=%D0%A6&c=%D0%AE HTTP/1.0\r\n\r\n") await server.stop() await server.closeWait() - let r = serverRes and - (data1.find("TEST_OK:a:1:a:2:b:3:c:4") >= 0) and - (data2.find("TEST_OK:a:П:b:Ц:c:Ю:Ф:Б") >= 0) - return r + serverRes and + (data1.find("TEST_OK:a:1:a:2:b:3:c:4") >= 0) and + (data2.find("TEST_OK:a:П:b:Ц:c:Ю:Ф:Б") >= 0) check waitFor(testQuery()) == true @@ -307,7 +318,7 @@ suite "HTTP server testing suite": proc testHeaders(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() var kres = newSeq[string]() @@ -315,11 +326,14 @@ suite "HTTP server testing suite": kres.add(k & ":" & v) sort(kres) serverRes = true - return await request.respond(Http200, "TEST_OK:" & kres.join(":"), - HttpTable.init()) + try: + await request.respond(Http200, "TEST_OK:" & kres.join(":"), + HttpTable.init()) + except HttpWriteError as exc: + serverRes = false + defaultResponse(exc) else: - serverRes = false - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -351,21 +365,30 @@ suite "HTTP server testing suite": proc testPostUrl(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): var kres = newSeq[string]() let request = r.get() if request.meth in PostMethods: - let post = await request.post() + let post = + try: + await request.post() + except HttpProtocolError as exc: + return defaultResponse(exc) + except HttpTransportError as exc: + return defaultResponse(exc) for k, v in post.stringItems(): kres.add(k & ":" & v) sort(kres) - serverRes = true - return await request.respond(Http200, "TEST_OK:" & kres.join(":"), - HttpTable.init()) + serverRes = true + try: + await request.respond(Http200, "TEST_OK:" & kres.join(":"), + HttpTable.init()) + except HttpWriteError as exc: + serverRes = false + defaultResponse(exc) else: - serverRes = false - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -395,21 +418,30 @@ suite "HTTP server testing suite": proc testPostUrl2(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): var kres = newSeq[string]() let request = r.get() if request.meth in PostMethods: - let post = await request.post() + let post = + try: + await request.post() + except HttpProtocolError as exc: + return defaultResponse(exc) + except HttpTransportError as exc: + return defaultResponse(exc) for k, v in post.stringItems(): kres.add(k & ":" & v) sort(kres) - serverRes = true - return await request.respond(Http200, "TEST_OK:" & kres.join(":"), - HttpTable.init()) + serverRes = true + try: + await request.respond(Http200, "TEST_OK:" & kres.join(":"), + HttpTable.init()) + except HttpWriteError as exc: + serverRes = false + defaultResponse(exc) else: - serverRes = false - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -440,21 +472,30 @@ suite "HTTP server testing suite": proc testPostMultipart(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): var kres = newSeq[string]() let request = r.get() if request.meth in PostMethods: - let post = await request.post() + let post = + try: + await request.post() + except HttpProtocolError as exc: + return defaultResponse(exc) + except HttpTransportError as exc: + return defaultResponse(exc) for k, v in post.stringItems(): kres.add(k & ":" & v) sort(kres) - serverRes = true - return await request.respond(Http200, "TEST_OK:" & kres.join(":"), - HttpTable.init()) + serverRes = true + try: + await request.respond(Http200, "TEST_OK:" & kres.join(":"), + HttpTable.init()) + except HttpWriteError as exc: + serverRes = false + defaultResponse(exc) else: - serverRes = false - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -496,21 +537,31 @@ suite "HTTP server testing suite": proc testPostMultipart2(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): var kres = newSeq[string]() let request = r.get() if request.meth in PostMethods: - let post = await request.post() + let post = + try: + await request.post() + except HttpProtocolError as exc: + return defaultResponse(exc) + except HttpTransportError as exc: + return defaultResponse(exc) for k, v in post.stringItems(): kres.add(k & ":" & v) sort(kres) serverRes = true - return await request.respond(Http200, "TEST_OK:" & kres.join(":"), - HttpTable.init()) + try: + await request.respond(Http200, "TEST_OK:" & kres.join(":"), + HttpTable.init()) + except HttpWriteError as exc: + serverRes = false + defaultResponse(exc) else: serverRes = false - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -566,16 +617,19 @@ suite "HTTP server testing suite": var count = 0 proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() inc(count) if count == ClientsCount: eventWait.fire() await eventContinue.wait() - return await request.respond(Http404, "", HttpTable.init()) + try: + await request.respond(Http404, "", HttpTable.init()) + except HttpWriteError as exc: + defaultResponse(exc) else: - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -1230,23 +1284,26 @@ suite "HTTP server testing suite": proc testPostMultipart2(): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() let response = request.getResponse() - await response.prepareSSE() - await response.send("event: event1\r\ndata: data1\r\n\r\n") - await response.send("event: event2\r\ndata: data2\r\n\r\n") - await response.sendEvent("event3", "data3") - await response.sendEvent("event4", "data4") - await response.send("data: data5\r\n\r\n") - await response.sendEvent("", "data6") - await response.finish() - serverRes = true - return response + try: + await response.prepareSSE() + await response.send("event: event1\r\ndata: data1\r\n\r\n") + await response.send("event: event2\r\ndata: data2\r\n\r\n") + await response.sendEvent("event3", "data3") + await response.sendEvent("event4", "data4") + await response.send("data: data5\r\n\r\n") + await response.sendEvent("", "data6") + await response.finish() + serverRes = true + response + except HttpWriteError as exc: + serverRes = false + defaultResponse(exc) else: - serverRes = false - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, @@ -1306,12 +1363,15 @@ suite "HTTP server testing suite": ] proc process(r: RequestFence): Future[HttpResponseRef] {. 
- async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - return await request.respond(Http200, "TEST_OK", HttpTable.init()) + try: + await request.respond(Http200, "TEST_OK", HttpTable.init()) + except HttpWriteError as exc: + defaultResponse(exc) else: - return defaultResponse() + defaultResponse() for test in TestMessages: let @@ -1360,12 +1420,15 @@ suite "HTTP server testing suite": TestRequest = "GET /httpdebug HTTP/1.1\r\nConnection: keep-alive\r\n\r\n" proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - return await request.respond(Http200, "TEST_OK", HttpTable.init()) + try: + await request.respond(Http200, "TEST_OK", HttpTable.init()) + except HttpWriteError as exc: + defaultResponse(exc) else: - return defaultResponse() + defaultResponse() proc client(address: TransportAddress, data: string): Future[StreamTransport] {.async.} = diff --git a/tests/testshttpserver.nim b/tests/testshttpserver.nim index 3ff2565a0..18e84a972 100644 --- a/tests/testshttpserver.nim +++ b/tests/testshttpserver.nim @@ -108,15 +108,18 @@ suite "Secure HTTP server testing suite": proc testHTTPS(address: TransportAddress): Future[bool] {.async.} = var serverRes = false proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() serverRes = true - return await request.respond(Http200, "TEST_OK:" & $request.meth, - HttpTable.init()) + try: + await request.respond(Http200, "TEST_OK:" & $request.meth, + HttpTable.init()) + except HttpWriteError as exc: + serverRes = false + defaultResponse(exc) else: - serverRes = false - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let serverFlags = {Secure} @@ -146,16 +149,18 @@ suite "Secure HTTP server testing suite": var serverRes = false var testFut = newFuture[void]() proc process(r: RequestFence): Future[HttpResponseRef] {. - async: (raises: [CancelledError, HttpResponseError]).} = + async: (raises: [CancelledError]).} = if r.isOk(): let request = r.get() - serverRes = false - return await request.respond(Http200, "TEST_OK:" & $request.meth, - HttpTable.init()) + try: + await request.respond(Http200, "TEST_OK:" & $request.meth, + HttpTable.init()) + except HttpWriteError as exc: + defaultResponse(exc) else: serverRes = true testFut.complete() - return defaultResponse() + defaultResponse() let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} let serverFlags = {Secure} From 1598471ed23a6e1aa60cca7601df1f3429dee223 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 21 Dec 2023 15:52:16 +0100 Subject: [PATCH 097/146] add a test for `results.?` compatibility (#484) Finally! 
(haha) --- tests/testmacro.nim | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tests/testmacro.nim b/tests/testmacro.nim index 013379379..9b19c6891 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -555,3 +555,27 @@ suite "Exceptions tracking": await raiseException() waitFor(callCatchAll()) + + test "Results compatibility": + proc returnOk(): Future[Result[int, string]] {.async: (raises: []).} = + ok(42) + + proc returnErr(): Future[Result[int, string]] {.async: (raises: []).} = + err("failed") + + proc testit(): Future[Result[void, string]] {.async: (raises: []).} = + let + v = await returnOk() + + check: + v.isOk() and v.value() == 42 + + let + vok = ?v + check: + vok == 42 + + discard ?await returnErr() + + check: + waitFor(testit()).error() == "failed" From 41f77d261ead2508acdd3bd3f88a5cbbcefff05f Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 27 Dec 2023 20:57:39 +0100 Subject: [PATCH 098/146] Better line information on effect violation We can capture the line info from the original future source and direct violation errors there --- chronos/internal/asyncfutures.nim | 98 ++++++++++++++++++------------- chronos/internal/asyncmacro.nim | 4 +- 2 files changed, 58 insertions(+), 44 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index a7fd96124..807895225 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -478,14 +478,26 @@ when chronosStackTrace: # newMsg.add "\n" & $entry error.msg = newMsg -proc internalCheckComplete*(fut: FutureBase) {.raises: [CatchableError].} = - # For internal use only. Used in asyncmacro - if not(isNil(fut.internalError)): - when chronosStackTrace: - injectStacktrace(fut.internalError) - raise fut.internalError +proc deepLineInfo(n: NimNode, p: LineInfo) = + n.setLineInfo(p) + for i in 0.. Date: Thu, 4 Jan 2024 16:17:42 +0100 Subject: [PATCH 099/146] prevent http `closeWait` future from being cancelled (#486) * simplify `closeWait` implementations * remove redundant cancellation callbacks * use `noCancel` to avoid forgetting the right future flags * add a few missing raises trackers * enforce `OwnCancelSchedule` on manually created futures that don't raise `CancelledError` * ensure cancellations don't reach internal futures --- chronos/apps/http/httpbodyrw.nim | 4 +- chronos/apps/http/httpclient.nim | 16 ++--- chronos/asyncsync.nim | 8 +-- chronos/futures.nim | 19 ++++++ chronos/internal/asyncfutures.nim | 2 + chronos/internal/raisesfutures.nim | 73 +++++++++++--------- chronos/streams/asyncstream.nim | 37 +++------- chronos/transports/common.nim | 4 +- chronos/transports/datagram.nim | 43 ++++-------- chronos/transports/stream.nim | 104 ++++++++++++----------------- 10 files changed, 143 insertions(+), 167 deletions(-) diff --git a/chronos/apps/http/httpbodyrw.nim b/chronos/apps/http/httpbodyrw.nim index c9ac899bf..9a11e85d9 100644 --- a/chronos/apps/http/httpbodyrw.nim +++ b/chronos/apps/http/httpbodyrw.nim @@ -43,7 +43,7 @@ proc closeWait*(bstream: HttpBodyReader) {.async: (raises: []).} = ## Close and free resource allocated by body reader. if bstream.bstate == HttpState.Alive: bstream.bstate = HttpState.Closing - var res = newSeq[Future[void]]() + var res = newSeq[Future[void].Raising([])]() # We closing streams in reversed order because stream at position [0], uses # data from stream at position [1]. 
for index in countdown((len(bstream.streams) - 1), 0): @@ -68,7 +68,7 @@ proc closeWait*(bstream: HttpBodyWriter) {.async: (raises: []).} = ## Close and free all the resources allocated by body writer. if bstream.bstate == HttpState.Alive: bstream.bstate = HttpState.Closing - var res = newSeq[Future[void]]() + var res = newSeq[Future[void].Raising([])]() for index in countdown(len(bstream.streams) - 1, 0): res.add(bstream.streams[index].closeWait()) await noCancel(allFutures(res)) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 5f4bd71fb..33a6b7f3f 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -294,7 +294,7 @@ proc new*(t: typedesc[HttpSessionRef], if HttpClientFlag.Http11Pipeline in flags: sessionWatcher(res) else: - Future[void].Raising([]).init("session.watcher.placeholder") + nil res proc getTLSFlags(flags: HttpClientFlags): set[TLSFlags] = @@ -607,7 +607,7 @@ proc closeWait(conn: HttpClientConnectionRef) {.async: (raises: []).} = conn.state = HttpClientConnectionState.Closing let pending = block: - var res: seq[Future[void]] + var res: seq[Future[void].Raising([])] if not(isNil(conn.reader)) and not(conn.reader.closed()): res.add(conn.reader.closeWait()) if not(isNil(conn.writer)) and not(conn.writer.closed()): @@ -847,14 +847,14 @@ proc sessionWatcher(session: HttpSessionRef) {.async: (raises: []).} = break proc closeWait*(request: HttpClientRequestRef) {.async: (raises: []).} = - var pending: seq[FutureBase] + var pending: seq[Future[void].Raising([])] if request.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}: request.state = HttpReqRespState.Closing if not(isNil(request.writer)): if not(request.writer.closed()): - pending.add(FutureBase(request.writer.closeWait())) + pending.add(request.writer.closeWait()) request.writer = nil - pending.add(FutureBase(request.releaseConnection())) + pending.add(request.releaseConnection()) await noCancel(allFutures(pending)) request.session = nil request.error = nil @@ -862,14 +862,14 @@ proc closeWait*(request: HttpClientRequestRef) {.async: (raises: []).} = untrackCounter(HttpClientRequestTrackerName) proc closeWait*(response: HttpClientResponseRef) {.async: (raises: []).} = - var pending: seq[FutureBase] + var pending: seq[Future[void].Raising([])] if response.state notin {HttpReqRespState.Closing, HttpReqRespState.Closed}: response.state = HttpReqRespState.Closing if not(isNil(response.reader)): if not(response.reader.closed()): - pending.add(FutureBase(response.reader.closeWait())) + pending.add(response.reader.closeWait()) response.reader = nil - pending.add(FutureBase(response.releaseConnection())) + pending.add(response.releaseConnection()) await noCancel(allFutures(pending)) response.session = nil response.error = nil diff --git a/chronos/asyncsync.nim b/chronos/asyncsync.nim index f77d5fe59..5fab9b2ab 100644 --- a/chronos/asyncsync.nim +++ b/chronos/asyncsync.nim @@ -523,15 +523,13 @@ proc closeWait*(ab: AsyncEventQueue): Future[void] {. {FutureFlag.OwnCancelSchedule}) proc continuation(udata: pointer) {.gcsafe.} = retFuture.complete() - proc cancellation(udata: pointer) {.gcsafe.} = - # We are not going to change the state of `retFuture` to cancelled, so we - # will prevent the entire sequence of Futures from being cancelled. 
- discard + + # Ignore cancellation requests - we'll complete the future soon enough + retFuture.cancelCallback = nil ab.close() # Schedule `continuation` to be called only after all the `reader` # notifications will be scheduled and processed. - retFuture.cancelCallback = cancellation callSoon(continuation) retFuture diff --git a/chronos/futures.nim b/chronos/futures.nim index 6fb9592a9..fd8dbfe70 100644 --- a/chronos/futures.nim +++ b/chronos/futures.nim @@ -34,6 +34,19 @@ type FutureFlag* {.pure.} = enum OwnCancelSchedule + ## When OwnCancelSchedule is set, the owner of the future is responsible + ## for implementing cancellation in one of 3 ways: + ## + ## * ensure that cancellation requests never reach the future by means of + ## not exposing it to user code, `await` and `tryCancel` + ## * set `cancelCallback` to `nil` to stop cancellation propagation - this + ## is appropriate when it is expected that the future will be completed + ## in a regular way "soon" + ## * set `cancelCallback` to a handler that implements cancellation in an + ## operation-specific way + ## + ## If `cancelCallback` is not set and the future gets cancelled, a + ## `Defect` will be raised. FutureFlags* = set[FutureFlag] @@ -104,6 +117,12 @@ proc internalInitFutureBase*(fut: FutureBase, loc: ptr SrcLoc, fut.internalState = state fut.internalLocation[LocationKind.Create] = loc fut.internalFlags = flags + if FutureFlag.OwnCancelSchedule in flags: + # Owners must replace `cancelCallback` with `nil` if they want to ignore + # cancellations + fut.internalCancelcb = proc(_: pointer) = + raiseAssert "Cancellation request for non-cancellable future" + if state != FutureState.Pending: fut.internalLocation[LocationKind.Finish] = loc diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 807895225..5ce9da484 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1013,6 +1013,7 @@ proc cancelAndWait*(future: FutureBase, loc: ptr SrcLoc): Future[void] {. 
if future.finished(): retFuture.complete() else: + retFuture.cancelCallback = nil cancelSoon(future, continuation, cast[pointer](retFuture), loc) retFuture @@ -1057,6 +1058,7 @@ proc noCancel*[F: SomeFuture](future: F): auto = # async: (raw: true, raises: as if future.finished(): completeFuture() else: + retFuture.cancelCallback = nil future.addCallback(continuation) retFuture diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim index 20fa6ed0d..5b91f4152 100644 --- a/chronos/internal/raisesfutures.nim +++ b/chronos/internal/raisesfutures.nim @@ -18,6 +18,39 @@ proc makeNoRaises*(): NimNode {.compileTime.} = ident"void" +proc dig(n: NimNode): NimNode {.compileTime.} = + # Dig through the layers of type to find the raises list + if n.eqIdent("void"): + n + elif n.kind == nnkBracketExpr: + if n[0].eqIdent("tuple"): + n + elif n[0].eqIdent("typeDesc"): + dig(getType(n[1])) + else: + echo astGenRepr(n) + raiseAssert "Unkown bracket" + elif n.kind == nnkTupleConstr: + n + else: + dig(getType(getTypeInst(n))) + +proc isNoRaises*(n: NimNode): bool {.compileTime.} = + dig(n).eqIdent("void") + +iterator members(tup: NimNode): NimNode = + # Given a typedesc[tuple] = (A, B, C), yields the tuple members (A, B C) + if not isNoRaises(tup): + for n in getType(getTypeInst(tup)[1])[1..^1]: + yield n + +proc members(tup: NimNode): seq[NimNode] {.compileTime.} = + for t in tup.members(): + result.add(t) + +macro hasException(raises: typedesc, ident: static string): bool = + newLit(raises.members.anyIt(it.eqIdent(ident))) + macro Raising*[T](F: typedesc[Future[T]], E: varargs[typedesc]): untyped = ## Given a Future type instance, return a type storing `{.raises.}` ## information @@ -41,6 +74,11 @@ template init*[T, E]( ## ## Specifying ``fromProc``, which is a string specifying the name of the proc ## that this future belongs to, is a good habit as it helps with debugging. + when not hasException(type(E), "CancelledError"): + static: + raiseAssert "Manually created futures must either own cancellation schedule or raise CancelledError" + + let res = F() internalInitFutureBase(res, getSrcLocation(fromProc), FutureState.Pending, {}) res @@ -53,40 +91,15 @@ template init*[T, E]( ## Specifying ``fromProc``, which is a string specifying the name of the proc ## that this future belongs to, is a good habit as it helps with debugging. 
let res = F() + when not hasException(type(E), "CancelledError"): + static: + doAssert FutureFlag.OwnCancelSchedule in flags, + "Manually created futures must either own cancellation schedule or raise CancelledError" + internalInitFutureBase( res, getSrcLocation(fromProc), FutureState.Pending, flags) res -proc dig(n: NimNode): NimNode {.compileTime.} = - # Dig through the layers of type to find the raises list - if n.eqIdent("void"): - n - elif n.kind == nnkBracketExpr: - if n[0].eqIdent("tuple"): - n - elif n[0].eqIdent("typeDesc"): - dig(getType(n[1])) - else: - echo astGenRepr(n) - raiseAssert "Unkown bracket" - elif n.kind == nnkTupleConstr: - n - else: - dig(getType(getTypeInst(n))) - -proc isNoRaises*(n: NimNode): bool {.compileTime.} = - dig(n).eqIdent("void") - -iterator members(tup: NimNode): NimNode = - # Given a typedesc[tuple] = (A, B, C), yields the tuple members (A, B C) - if not isNoRaises(tup): - for n in getType(getTypeInst(tup)[1])[1..^1]: - yield n - -proc members(tup: NimNode): seq[NimNode] {.compileTime.} = - for t in tup.members(): - result.add(t) - proc containsSignature(members: openArray[NimNode], typ: NimNode): bool {.compileTime.} = let typHash = signatureHash(typ) diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index a52108476..4fbe7a422 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -77,7 +77,7 @@ type udata: pointer error*: ref AsyncStreamError bytesCount*: uint64 - future: Future[void] + future: Future[void].Raising([]) AsyncStreamWriter* = ref object of RootRef wsource*: AsyncStreamWriter @@ -88,7 +88,7 @@ type error*: ref AsyncStreamError udata: pointer bytesCount*: uint64 - future: Future[void] + future: Future[void].Raising([]) AsyncStream* = object of RootObj reader*: AsyncStreamReader @@ -897,44 +897,27 @@ proc close*(rw: AsyncStreamRW) = rw.future.addCallback(continuation) rw.future.cancelSoon() -proc closeWait*(rw: AsyncStreamRW): Future[void] {. - async: (raw: true, raises: []).} = +proc closeWait*(rw: AsyncStreamRW): Future[void] {.async: (raises: []).} = ## Close and frees resources of stream ``rw``. 
- const FutureName = - when rw is AsyncStreamReader: - "async.stream.reader.closeWait" - else: - "async.stream.writer.closeWait" - - let retFuture = Future[void].Raising([]).init(FutureName) - - if rw.closed(): - retFuture.complete() - return retFuture - - proc continuation(udata: pointer) {.gcsafe, raises:[].} = - retFuture.complete() - - rw.close() - if rw.future.finished(): - retFuture.complete() - else: - rw.future.addCallback(continuation, cast[pointer](retFuture)) - retFuture + if not rw.closed(): + rw.close() + await noCancel(rw.join()) proc startReader(rstream: AsyncStreamReader) = rstream.state = Running if not isNil(rstream.readerLoop): rstream.future = rstream.readerLoop(rstream) else: - rstream.future = newFuture[void]("async.stream.empty.reader") + rstream.future = Future[void].Raising([]).init( + "async.stream.empty.reader", {FutureFlag.OwnCancelSchedule}) proc startWriter(wstream: AsyncStreamWriter) = wstream.state = Running if not isNil(wstream.writerLoop): wstream.future = wstream.writerLoop(wstream) else: - wstream.future = newFuture[void]("async.stream.empty.writer") + wstream.future = Future[void].Raising([]).init( + "async.stream.empty.writer", {FutureFlag.OwnCancelSchedule}) proc init*(child, wsource: AsyncStreamWriter, loop: StreamWriterLoop, queueSize = AsyncStreamDefaultQueueSize) = diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index ba7568a45..8fa062a5c 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -73,7 +73,7 @@ when defined(windows) or defined(nimdoc): udata*: pointer # User-defined pointer flags*: set[ServerFlags] # Flags bufferSize*: int # Size of internal transports' buffer - loopFuture*: Future[void] # Server's main Future + loopFuture*: Future[void].Raising([]) # Server's main Future domain*: Domain # Current server domain (IPv4 or IPv6) apending*: bool asock*: AsyncFD # Current AcceptEx() socket @@ -92,7 +92,7 @@ else: udata*: pointer # User-defined pointer flags*: set[ServerFlags] # Flags bufferSize*: int # Size of internal transports' buffer - loopFuture*: Future[void] # Server's main Future + loopFuture*: Future[void].Raising([]) # Server's main Future errorCode*: OSErrorCode # Current error code dualstack*: DualStackType # IPv4/IPv6 dualstack parameters diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index fed15d362..88db7ee14 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -44,7 +44,7 @@ type remote: TransportAddress # Remote address udata*: pointer # User-driven pointer function: DatagramCallback # Receive data callback - future: Future[void] # Transport's life future + future: Future[void].Raising([]) # Transport's life future raddr: Sockaddr_storage # Reader address storage ralen: SockLen # Reader address length waddr: Sockaddr_storage # Writer address storage @@ -359,7 +359,8 @@ when defined(windows): res.queue = initDeque[GramVector]() res.udata = udata res.state = {ReadPaused, WritePaused} - res.future = newFuture[void]("datagram.transport") + res.future = Future[void].Raising([]).init( + "datagram.transport", {FutureFlag.OwnCancelSchedule}) res.rovl.data = CompletionData(cb: readDatagramLoop, udata: cast[pointer](res)) res.wovl.data = CompletionData(cb: writeDatagramLoop, @@ -568,7 +569,8 @@ else: res.queue = initDeque[GramVector]() res.udata = udata res.state = {ReadPaused, WritePaused} - res.future = newFuture[void]("datagram.transport") + res.future = Future[void].Raising([]).init( + "datagram.transport", 
{FutureFlag.OwnCancelSchedule}) GC_ref(res) # Start tracking transport trackCounter(DgramTransportTrackerName) @@ -840,31 +842,16 @@ proc join*(transp: DatagramTransport): Future[void] {. return retFuture +proc closed*(transp: DatagramTransport): bool {.inline.} = + ## Returns ``true`` if transport in closed state. + {ReadClosed, WriteClosed} * transp.state != {} + proc closeWait*(transp: DatagramTransport): Future[void] {. - async: (raw: true, raises: []).} = + async: (raises: []).} = ## Close transport ``transp`` and release all resources. - let retFuture = newFuture[void]( - "datagram.transport.closeWait", {FutureFlag.OwnCancelSchedule}) - - if {ReadClosed, WriteClosed} * transp.state != {}: - retFuture.complete() - return retFuture - - proc continuation(udata: pointer) {.gcsafe.} = - retFuture.complete() - - proc cancellation(udata: pointer) {.gcsafe.} = - # We are not going to change the state of `retFuture` to cancelled, so we - # will prevent the entire sequence of Futures from being cancelled. - discard - - transp.close() - if transp.future.finished(): - retFuture.complete() - else: - transp.future.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancellation - retFuture + if not transp.closed(): + transp.close() + await noCancel(transp.join()) proc send*(transp: DatagramTransport, pbytes: pointer, nbytes: int): Future[void] {. @@ -1020,7 +1007,3 @@ proc getMessage*(transp: DatagramTransport): seq[byte] {. proc getUserData*[T](transp: DatagramTransport): T {.inline.} = ## Obtain user data stored in ``transp`` object. cast[T](transp.udata) - -proc closed*(transp: DatagramTransport): bool {.inline.} = - ## Returns ``true`` if transport in closed state. - {ReadClosed, WriteClosed} * transp.state != {} diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index c0d1cfcdb..73699a25b 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -76,7 +76,7 @@ when defined(windows): offset: int # Reading buffer offset error: ref TransportError # Current error queue: Deque[StreamVector] # Writer queue - future: Future[void] # Stream life future + future: Future[void].Raising([]) # Stream life future # Windows specific part rwsabuf: WSABUF # Reader WSABUF wwsabuf: WSABUF # Writer WSABUF @@ -103,7 +103,7 @@ else: offset: int # Reading buffer offset error: ref TransportError # Current error queue: Deque[StreamVector] # Writer queue - future: Future[void] # Stream life future + future: Future[void].Raising([]) # Stream life future case kind*: TransportKind of TransportKind.Socket: domain: Domain # Socket transport domain (IPv4/IPv6) @@ -598,7 +598,8 @@ when defined(windows): transp.buffer = newSeq[byte](bufsize) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() - transp.future = newFuture[void]("stream.socket.transport") + transp.future = Future[void].Raising([]).init( + "stream.socket.transport", {FutureFlag.OwnCancelSchedule}) GC_ref(transp) transp @@ -619,7 +620,8 @@ when defined(windows): transp.flags = flags transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() - transp.future = newFuture[void]("stream.pipe.transport") + transp.future = Future[void].Raising([]).init( + "stream.pipe.transport", {FutureFlag.OwnCancelSchedule}) GC_ref(transp) transp @@ -1457,7 +1459,8 @@ else: transp.buffer = newSeq[byte](bufsize) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() - transp.future = newFuture[void]("socket.stream.transport") + 
transp.future = Future[void].Raising([]).init( + "socket.stream.transport", {FutureFlag.OwnCancelSchedule}) GC_ref(transp) transp @@ -1473,7 +1476,8 @@ else: transp.buffer = newSeq[byte](bufsize) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() - transp.future = newFuture[void]("pipe.stream.transport") + transp.future = Future[void].Raising([]).init( + "pipe.stream.transport", {FutureFlag.OwnCancelSchedule}) GC_ref(transp) transp @@ -1806,6 +1810,9 @@ proc connect*(address: TransportAddress, if TcpNoDelay in flags: mappedFlags.incl(SocketFlags.TcpNoDelay) connect(address, bufferSize, child, localAddress, mappedFlags, dualstack) +proc closed*(server: StreamServer): bool = + server.status == ServerStatus.Closed + proc close*(server: StreamServer) = ## Release ``server`` resources. ## @@ -1832,22 +1839,11 @@ proc close*(server: StreamServer) = else: server.sock.closeSocket(continuation) -proc closeWait*(server: StreamServer): Future[void] {. - async: (raw: true, raises: []).} = +proc closeWait*(server: StreamServer): Future[void] {.async: (raises: []).} = ## Close server ``server`` and release all resources. - let retFuture = newFuture[void]( - "stream.server.closeWait", {FutureFlag.OwnCancelSchedule}) - - proc continuation(udata: pointer) = - retFuture.complete() - - server.close() - - if not(server.loopFuture.finished()): - server.loopFuture.addCallback(continuation, cast[pointer](retFuture)) - else: - retFuture.complete() - retFuture + if not server.closed(): + server.close() + await noCancel(server.join()) proc getBacklogSize(backlog: int): cint = doAssert(backlog >= 0 and backlog <= high(int32)) @@ -2058,7 +2054,9 @@ proc createStreamServer*(host: TransportAddress, sres.init = init sres.bufferSize = bufferSize sres.status = Starting - sres.loopFuture = newFuture[void]("stream.transport.server") + sres.loopFuture = asyncloop.init( + Future[void].Raising([]), "stream.transport.server", + {FutureFlag.OwnCancelSchedule}) sres.udata = udata sres.dualstack = dualstack if localAddress.family == AddressFamily.None: @@ -2630,6 +2628,23 @@ proc join*(transp: StreamTransport): Future[void] {. retFuture.complete() return retFuture +proc closed*(transp: StreamTransport): bool {.inline.} = + ## Returns ``true`` if transport in closed state. + ({ReadClosed, WriteClosed} * transp.state != {}) + +proc finished*(transp: StreamTransport): bool {.inline.} = + ## Returns ``true`` if transport in finished (EOF) state. + ({ReadEof, WriteEof} * transp.state != {}) + +proc failed*(transp: StreamTransport): bool {.inline.} = + ## Returns ``true`` if transport in error state. + ({ReadError, WriteError} * transp.state != {}) + +proc running*(transp: StreamTransport): bool {.inline.} = + ## Returns ``true`` if transport is still pending. + ({ReadClosed, ReadEof, ReadError, + WriteClosed, WriteEof, WriteError} * transp.state == {}) + proc close*(transp: StreamTransport) = ## Closes and frees resources of transport ``transp``. ## @@ -2672,31 +2687,11 @@ proc close*(transp: StreamTransport) = elif transp.kind == TransportKind.Socket: closeSocket(transp.fd, continuation) -proc closeWait*(transp: StreamTransport): Future[void] {. - async: (raw: true, raises: []).} = +proc closeWait*(transp: StreamTransport): Future[void] {.async: (raises: []).} = ## Close and frees resources of transport ``transp``. 
- let retFuture = newFuture[void]( - "stream.transport.closeWait", {FutureFlag.OwnCancelSchedule}) - - if {ReadClosed, WriteClosed} * transp.state != {}: - retFuture.complete() - return retFuture - - proc continuation(udata: pointer) {.gcsafe.} = - retFuture.complete() - - proc cancellation(udata: pointer) {.gcsafe.} = - # We are not going to change the state of `retFuture` to cancelled, so we - # will prevent the entire sequence of Futures from being cancelled. - discard - - transp.close() - if transp.future.finished(): - retFuture.complete() - else: - transp.future.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancellation - retFuture + if not transp.closed(): + transp.close() + await noCancel(transp.join()) proc shutdownWait*(transp: StreamTransport): Future[void] {. async: (raw: true, raises: [TransportError, CancelledError]).} = @@ -2756,23 +2751,6 @@ proc shutdownWait*(transp: StreamTransport): Future[void] {. callSoon(continuation, nil) retFuture -proc closed*(transp: StreamTransport): bool {.inline.} = - ## Returns ``true`` if transport in closed state. - ({ReadClosed, WriteClosed} * transp.state != {}) - -proc finished*(transp: StreamTransport): bool {.inline.} = - ## Returns ``true`` if transport in finished (EOF) state. - ({ReadEof, WriteEof} * transp.state != {}) - -proc failed*(transp: StreamTransport): bool {.inline.} = - ## Returns ``true`` if transport in error state. - ({ReadError, WriteError} * transp.state != {}) - -proc running*(transp: StreamTransport): bool {.inline.} = - ## Returns ``true`` if transport is still pending. - ({ReadClosed, ReadEof, ReadError, - WriteClosed, WriteEof, WriteError} * transp.state == {}) - proc fromPipe2*(fd: AsyncFD, child: StreamTransport = nil, bufferSize = DefaultStreamBufferSize ): Result[StreamTransport, OSErrorCode] = From f0a2d4df61302d24baa6c0f1c257f92045c9ee57 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 8 Jan 2024 14:54:50 +0100 Subject: [PATCH 100/146] Feature flag for raises support (#488) Feature flags allow consumers of chronos to target versions with and without certain features via compile-time selection. The first feature flag added is for raise tracking support. --- chronos/config.nim | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/chronos/config.nim b/chronos/config.nim index 21c313206..47bf6698c 100644 --- a/chronos/config.nim +++ b/chronos/config.nim @@ -11,6 +11,15 @@ ## `chronosDebug` can be defined to enable several debugging helpers that come ## with a runtime cost - it is recommeneded to not enable these in production ## code. +## +## In this file we also declare feature flags starting with `chronosHas...` - +## these constants are declared when a feature exists in a particular release - +## each flag is declared as an integer starting at 0 during experimental +## development, 1 when feature complete and higher numbers when significant +## functionality has been added. If a feature ends up being removed (or changed +## in a backwards-incompatible way), the feature flag will be removed or renamed +## also - you can use `when declared(chronosHasXxx): when chronosHasXxx >= N:` +## to require a particular version. const chronosHandleException* {.booldefine.}: bool = false ## Remap `Exception` to `AsyncExceptionError` for all `async` functions. @@ -79,6 +88,9 @@ const "" ## OS polling engine type which is going to be used by chronos. 
+ chronosHasRaises* = 0 + ## raises effect support via `async: (raises: [])` + when defined(chronosStrictException): {.warning: "-d:chronosStrictException has been deprecated in favor of handleException".} # In chronos v3, this setting was used as the opposite of From b02b9608c3c4a4815da39583847dad026d89781d Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 12 Jan 2024 15:27:36 +0200 Subject: [PATCH 101/146] HTTP server middleware implementation. (#483) * HTTP server middleware implementation and test. * Address review comments. * Address review comments. --- chronos/apps/http/httpserver.nim | 329 ++++++++++++++++++++--------- docs/examples/middleware.nim | 130 ++++++++++++ docs/src/SUMMARY.md | 1 + docs/src/examples.md | 2 + docs/src/http_server_middleware.md | 102 +++++++++ tests/testhttpserver.nim | 299 +++++++++++++++++++++++++- 6 files changed, 766 insertions(+), 97 deletions(-) create mode 100644 docs/examples/middleware.nim create mode 100644 docs/src/http_server_middleware.md diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index 9646956d6..c716d14ac 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -11,11 +11,14 @@ import std/[tables, uri, strutils] import stew/[base10], httputils, results -import ../../asyncloop, ../../asyncsync +import ../../[asyncloop, asyncsync] import ../../streams/[asyncstream, boundstream, chunkstream] import "."/[httptable, httpcommon, multipart] +from ../../transports/common import TransportAddress, ServerFlags, `$`, `==` + export asyncloop, asyncsync, httptable, httpcommon, httputils, multipart, asyncstream, boundstream, chunkstream, uri, tables, results +export TransportAddress, ServerFlags, `$`, `==` type HttpServerFlags* {.pure.} = enum @@ -107,6 +110,7 @@ type maxRequestBodySize*: int processCallback*: HttpProcessCallback2 createConnCallback*: HttpConnectionCallback + middlewares: seq[HttpProcessCallback2] HttpServerRef* = ref HttpServer @@ -158,6 +162,16 @@ type HttpConnectionRef* = ref HttpConnection + MiddlewareHandleCallback* = proc( + middleware: HttpServerMiddlewareRef, request: RequestFence, + handler: HttpProcessCallback2): Future[HttpResponseRef] {. + async: (raises: [CancelledError]).} + + HttpServerMiddleware* = object of RootObj + handler*: MiddlewareHandleCallback + + HttpServerMiddlewareRef* = ref HttpServerMiddleware + ByteChar* = string | seq[byte] proc init(htype: typedesc[HttpProcessError], error: HttpServerError, @@ -175,6 +189,8 @@ proc init(htype: typedesc[HttpProcessError], proc defaultResponse*(exc: ref CatchableError): HttpResponseRef +proc defaultResponse*(msg: HttpMessage): HttpResponseRef + proc new(htype: typedesc[HttpConnectionHolderRef], server: HttpServerRef, transp: StreamTransport, connectionId: string): HttpConnectionHolderRef = @@ -188,20 +204,54 @@ proc createConnection(server: HttpServerRef, transp: StreamTransport): Future[HttpConnectionRef] {. 
async: (raises: [CancelledError, HttpConnectionError]).} -proc new*(htype: typedesc[HttpServerRef], - address: TransportAddress, - processCallback: HttpProcessCallback2, - serverFlags: set[HttpServerFlags] = {}, - socketFlags: set[ServerFlags] = {ReuseAddr}, - serverUri = Uri(), - serverIdent = "", - maxConnections: int = -1, - bufferSize: int = 4096, - backlogSize: int = DefaultBacklogSize, - httpHeadersTimeout = 10.seconds, - maxHeadersSize: int = 8192, - maxRequestBodySize: int = 1_048_576, - dualstack = DualStackType.Auto): HttpResult[HttpServerRef] = +proc prepareMiddlewares( + requestProcessCallback: HttpProcessCallback2, + middlewares: openArray[HttpServerMiddlewareRef] + ): seq[HttpProcessCallback2] = + var + handlers: seq[HttpProcessCallback2] + currentHandler = requestProcessCallback + + if len(middlewares) == 0: + return handlers + + let mws = @middlewares + handlers = newSeq[HttpProcessCallback2](len(mws)) + + for index in countdown(len(mws) - 1, 0): + let processor = + block: + var res: HttpProcessCallback2 + closureScope: + let + middleware = mws[index] + realHandler = currentHandler + res = + proc(request: RequestFence): Future[HttpResponseRef] {. + async: (raises: [CancelledError], raw: true).} = + middleware.handler(middleware, request, realHandler) + res + handlers[index] = processor + currentHandler = processor + handlers + +proc new*( + htype: typedesc[HttpServerRef], + address: TransportAddress, + processCallback: HttpProcessCallback2, + serverFlags: set[HttpServerFlags] = {}, + socketFlags: set[ServerFlags] = {ReuseAddr}, + serverUri = Uri(), + serverIdent = "", + maxConnections: int = -1, + bufferSize: int = 4096, + backlogSize: int = DefaultBacklogSize, + httpHeadersTimeout = 10.seconds, + maxHeadersSize: int = 8192, + maxRequestBodySize: int = 1_048_576, + dualstack = DualStackType.Auto, + middlewares: openArray[HttpServerMiddlewareRef] = [] + ): HttpResult[HttpServerRef] = let serverUri = if len(serverUri.hostname) > 0: @@ -240,24 +290,28 @@ proc new*(htype: typedesc[HttpServerRef], # else: # nil lifetime: newFuture[void]("http.server.lifetime"), - connections: initOrderedTable[string, HttpConnectionHolderRef]() + connections: initOrderedTable[string, HttpConnectionHolderRef](), + middlewares: prepareMiddlewares(processCallback, middlewares) ) ok(res) -proc new*(htype: typedesc[HttpServerRef], - address: TransportAddress, - processCallback: HttpProcessCallback, - serverFlags: set[HttpServerFlags] = {}, - socketFlags: set[ServerFlags] = {ReuseAddr}, - serverUri = Uri(), - serverIdent = "", - maxConnections: int = -1, - bufferSize: int = 4096, - backlogSize: int = DefaultBacklogSize, - httpHeadersTimeout = 10.seconds, - maxHeadersSize: int = 8192, - maxRequestBodySize: int = 1_048_576, - dualstack = DualStackType.Auto): HttpResult[HttpServerRef] {. +proc new*( + htype: typedesc[HttpServerRef], + address: TransportAddress, + processCallback: HttpProcessCallback, + serverFlags: set[HttpServerFlags] = {}, + socketFlags: set[ServerFlags] = {ReuseAddr}, + serverUri = Uri(), + serverIdent = "", + maxConnections: int = -1, + bufferSize: int = 4096, + backlogSize: int = DefaultBacklogSize, + httpHeadersTimeout = 10.seconds, + maxHeadersSize: int = 8192, + maxRequestBodySize: int = 1_048_576, + dualstack = DualStackType.Auto, + middlewares: openArray[HttpServerMiddlewareRef] = [] + ): HttpResult[HttpServerRef] {. 
deprecated: "Callback could raise only CancelledError, annotate with " & "{.async: (raises: [CancelledError]).}".} = @@ -273,7 +327,7 @@ proc new*(htype: typedesc[HttpServerRef], HttpServerRef.new(address, wrap, serverFlags, socketFlags, serverUri, serverIdent, maxConnections, bufferSize, backlogSize, httpHeadersTimeout, maxHeadersSize, maxRequestBodySize, - dualstack) + dualstack, middlewares) proc getServerFlags(req: HttpRequestRef): set[HttpServerFlags] = var defaultFlags: set[HttpServerFlags] = {} @@ -345,6 +399,18 @@ proc defaultResponse*(exc: ref CatchableError): HttpResponseRef = else: HttpResponseRef(state: HttpResponseState.ErrorCode, status: Http503) +proc defaultResponse*(msg: HttpMessage): HttpResponseRef = + HttpResponseRef(state: HttpResponseState.ErrorCode, status: msg.code) + +proc defaultResponse*(err: HttpProcessError): HttpResponseRef = + HttpResponseRef(state: HttpResponseState.ErrorCode, status: err.code) + +proc dropResponse*(): HttpResponseRef = + HttpResponseRef(state: HttpResponseState.Failed) + +proc codeResponse*(status: HttpCode): HttpResponseRef = + HttpResponseRef(state: HttpResponseState.ErrorCode, status: status) + proc dumbResponse*(): HttpResponseRef {. deprecated: "Please use defaultResponse() instead".} = ## Create an empty response to return when request processor got no request. @@ -362,29 +428,21 @@ proc hasBody*(request: HttpRequestRef): bool = request.requestFlags * {HttpRequestFlags.BoundBody, HttpRequestFlags.UnboundBody} != {} -proc prepareRequest(conn: HttpConnectionRef, - req: HttpRequestHeader): HttpResultMessage[HttpRequestRef] = - var request = HttpRequestRef(connection: conn, state: HttpState.Alive) - - if req.version notin {HttpVersion10, HttpVersion11}: - return err(HttpMessage.init(Http505, "Unsupported HTTP protocol version")) +func new(t: typedesc[HttpRequestRef], conn: HttpConnectionRef): HttpRequestRef = + HttpRequestRef(connection: conn, state: HttpState.Alive) - request.scheme = - if HttpServerFlags.Secure in conn.server.flags: - "https" - else: - "http" +proc updateRequest*(request: HttpRequestRef, scheme: string, meth: HttpMethod, + version: HttpVersion, requestUri: string, + headers: HttpTable): HttpResultMessage[void] = + ## Update HTTP request object using base request object with new properties. - request.version = req.version - request.meth = req.meth - - request.rawPath = - block: - let res = req.uri() - if len(res) == 0: - return err(HttpMessage.init(Http400, "Invalid request URI")) - res + # Store request version and call method. + request.scheme = scheme + request.version = version + request.meth = meth + # Processing request's URI + request.rawPath = requestUri request.uri = if request.rawPath != "*": let uri = parseUri(request.rawPath) @@ -396,10 +454,11 @@ proc prepareRequest(conn: HttpConnectionRef, uri.path = "*" uri + # Conversion of request query string to HttpTable. request.query = block: let queryFlags = - if QueryCommaSeparatedArray in conn.server.flags: + if QueryCommaSeparatedArray in request.connection.server.flags: {QueryParamsFlag.CommaSeparatedArray} else: {} @@ -408,22 +467,8 @@ proc prepareRequest(conn: HttpConnectionRef, table.add(key, value) table - request.headers = - block: - var table = HttpTable.init() - # Retrieve headers and values - for key, value in req.headers(): - table.add(key, value) - # Validating HTTP request headers - # Some of the headers must be present only once. 
- if table.count(ContentTypeHeader) > 1: - return err(HttpMessage.init(Http400, "Multiple Content-Type headers")) - if table.count(ContentLengthHeader) > 1: - return err(HttpMessage.init(Http400, "Multiple Content-Length headers")) - if table.count(TransferEncodingHeader) > 1: - return err(HttpMessage.init(Http400, - "Multuple Transfer-Encoding headers")) - table + # Store request headers + request.headers = headers # Preprocessing "Content-Encoding" header. request.contentEncoding = @@ -443,15 +488,17 @@ proc prepareRequest(conn: HttpConnectionRef, # steps to reveal information about body. request.contentLength = if ContentLengthHeader in request.headers: + # Request headers has `Content-Length` header present. let length = request.headers.getInt(ContentLengthHeader) if length != 0: if request.meth == MethodTrace: let msg = "TRACE requests could not have request body" return err(HttpMessage.init(Http400, msg)) - # Because of coversion to `int` we should avoid unexpected OverflowError. + # Because of coversion to `int` we should avoid unexpected + # OverflowError. if length > uint64(high(int)): return err(HttpMessage.init(Http413, "Unsupported content length")) - if length > uint64(conn.server.maxRequestBodySize): + if length > uint64(request.connection.server.maxRequestBodySize): return err(HttpMessage.init(Http413, "Content length exceeds limits")) request.requestFlags.incl(HttpRequestFlags.BoundBody) int(length) @@ -459,6 +506,7 @@ proc prepareRequest(conn: HttpConnectionRef, 0 else: if TransferEncodingFlags.Chunked in request.transferEncoding: + # Request headers has "Transfer-Encoding: chunked" header present. if request.meth == MethodTrace: let msg = "TRACE requests could not have request body" return err(HttpMessage.init(Http400, msg)) @@ -466,8 +514,9 @@ proc prepareRequest(conn: HttpConnectionRef, 0 if request.hasBody(): - # If request has body, we going to understand how its encoded. + # If the request has a body, we will determine how it is encoded. if ContentTypeHeader in request.headers: + # Request headers has "Content-Type" header present. let contentType = getContentType(request.headers.getList(ContentTypeHeader)).valueOr: let msg = "Incorrect or missing Content-Type header" @@ -477,12 +526,67 @@ proc prepareRequest(conn: HttpConnectionRef, elif contentType == MultipartContentType: request.requestFlags.incl(HttpRequestFlags.MultipartForm) request.contentTypeData = Opt.some(contentType) - + # If `Expect` header is present, we will handle expectation procedure. if ExpectHeader in request.headers: let expectHeader = request.headers.getString(ExpectHeader) if strip(expectHeader).toLowerAscii() == "100-continue": request.requestFlags.incl(HttpRequestFlags.ClientExpect) + ok() + +proc updateRequest*(request: HttpRequestRef, meth: HttpMethod, + requestUri: string, + headers: HttpTable): HttpResultMessage[void] = + ## Update HTTP request object using base request object with new properties. + updateRequest(request, request.scheme, meth, request.version, requestUri, + headers) + +proc updateRequest*(request: HttpRequestRef, requestUri: string, + headers: HttpTable): HttpResultMessage[void] = + ## Update HTTP request object using base request object with new properties. + updateRequest(request, request.scheme, request.meth, request.version, + requestUri, headers) + +proc updateRequest*(request: HttpRequestRef, + requestUri: string): HttpResultMessage[void] = + ## Update HTTP request object using base request object with new properties. 
+ updateRequest(request, request.scheme, request.meth, request.version, + requestUri, request.headers) + +proc updateRequest*(request: HttpRequestRef, + headers: HttpTable): HttpResultMessage[void] = + ## Update HTTP request object using base request object with new properties. + updateRequest(request, request.scheme, request.meth, request.version, + request.rawPath, headers) + +proc prepareRequest(conn: HttpConnectionRef, + req: HttpRequestHeader): HttpResultMessage[HttpRequestRef] = + let + request = HttpRequestRef.new(conn) + scheme = + if HttpServerFlags.Secure in conn.server.flags: + "https" + else: + "http" + headers = + block: + var table = HttpTable.init() + # Retrieve headers and values + for key, value in req.headers(): + table.add(key, value) + # Validating HTTP request headers + # Some of the headers must be present only once. + if table.count(ContentTypeHeader) > 1: + return err(HttpMessage.init(Http400, + "Multiple Content-Type headers")) + if table.count(ContentLengthHeader) > 1: + return err(HttpMessage.init(Http400, + "Multiple Content-Length headers")) + if table.count(TransferEncodingHeader) > 1: + return err(HttpMessage.init(Http400, + "Multuple Transfer-Encoding headers")) + table + ? updateRequest(request, scheme, req.meth, req.version, req.uri(), headers) trackCounter(HttpServerRequestTrackerName) ok(request) @@ -736,16 +840,19 @@ proc sendDefaultResponse( # Response was ignored, so we respond with not found. await conn.sendErrorResponse(version, Http404, keepConnection.toBool()) + response.setResponseState(HttpResponseState.Finished) keepConnection of HttpResponseState.Prepared: # Response was prepared but not sent, so we can respond with some # error code await conn.sendErrorResponse(HttpVersion11, Http409, keepConnection.toBool()) + response.setResponseState(HttpResponseState.Finished) keepConnection of HttpResponseState.ErrorCode: # Response with error code await conn.sendErrorResponse(version, response.status, false) + response.setResponseState(HttpResponseState.Finished) HttpProcessExitType.Immediate of HttpResponseState.Sending, HttpResponseState.Failed, HttpResponseState.Cancelled: @@ -755,6 +862,7 @@ proc sendDefaultResponse( # Response was ignored, so we respond with not found. await conn.sendErrorResponse(version, Http404, keepConnection.toBool()) + response.setResponseState(HttpResponseState.Finished) keepConnection of HttpResponseState.Finished: keepConnection @@ -878,6 +986,25 @@ proc getRemoteAddress(connection: HttpConnectionRef): Opt[TransportAddress] = if isNil(connection): return Opt.none(TransportAddress) getRemoteAddress(connection.transp) +proc getLocalAddress(transp: StreamTransport): Opt[TransportAddress] = + if isNil(transp): return Opt.none(TransportAddress) + try: + Opt.some(transp.localAddress()) + except TransportOsError: + Opt.none(TransportAddress) + +proc getLocalAddress(connection: HttpConnectionRef): Opt[TransportAddress] = + if isNil(connection): return Opt.none(TransportAddress) + getLocalAddress(connection.transp) + +proc remote*(request: HttpRequestRef): Opt[TransportAddress] = + ## Returns remote address of HTTP request's connection. + request.connection.getRemoteAddress() + +proc local*(request: HttpRequestRef): Opt[TransportAddress] = + ## Returns local address of HTTP request's connection. + request.connection.getLocalAddress() + proc getRequestFence*(server: HttpServerRef, connection: HttpConnectionRef): Future[RequestFence] {. 
async: (raises: []).} = @@ -920,6 +1047,14 @@ proc getConnectionFence*(server: HttpServerRef, ConnectionFence.err(HttpProcessError.init( HttpServerError.DisconnectError, exc, address, Http400)) +proc invokeProcessCallback(server: HttpServerRef, + req: RequestFence): Future[HttpResponseRef] {. + async: (raw: true, raises: [CancelledError]).} = + if len(server.middlewares) > 0: + server.middlewares[0](req) + else: + server.processCallback(req) + proc processRequest(server: HttpServerRef, connection: HttpConnectionRef, connId: string): Future[HttpProcessExitType] {. @@ -941,7 +1076,7 @@ proc processRequest(server: HttpServerRef, try: let response = try: - await connection.server.processCallback(requestFence) + await invokeProcessCallback(connection.server, requestFence) except CancelledError: # Cancelled, exiting return HttpProcessExitType.Immediate @@ -962,7 +1097,7 @@ proc processLoop(holder: HttpConnectionHolderRef) {.async: (raises: []).} = if res.isErr(): if res.error.kind != HttpServerError.InterruptError: discard await noCancel( - server.processCallback(RequestFence.err(res.error))) + invokeProcessCallback(server, RequestFence.err(res.error))) server.connections.del(connectionId) return res.get() @@ -1160,19 +1295,43 @@ proc post*(req: HttpRequestRef): Future[HttpTable] {. elif HttpRequestFlags.UnboundBody in req.requestFlags: raiseHttpProtocolError(Http400, "Unsupported request body") +template checkPending(t: untyped) = + let currentState = t.getResponseState() + doAssert(currentState == HttpResponseState.Empty, + "Response body was already sent [" & $currentState & "]") + +template checkStreamResponse(t: untyped) = + doAssert(HttpResponseFlags.Stream in t.flags, + "Response was not prepared") + +template checkStreamResponseState(t: untyped) = + doAssert(t.getResponseState() in + {HttpResponseState.Prepared, HttpResponseState.Sending}, + "Response is in the wrong state") + +template checkResponseCanBeModified(t: untyped) = + doAssert(t.getResponseState() in + {HttpResponseState.Empty, HttpResponseState.ErrorCode}, + "Response could not be modified at this stage") + +template checkPointerLength(t1, t2: untyped) = + doAssert(not(isNil(t1)), "pbytes must not be nil") + doAssert(t2 >= 0, "nbytes should be bigger or equal to zero") + proc setHeader*(resp: HttpResponseRef, key, value: string) = ## Sets value of header ``key`` to ``value``. - doAssert(resp.getResponseState() == HttpResponseState.Empty) + checkResponseCanBeModified(resp) resp.headersTable.set(key, value) proc setHeaderDefault*(resp: HttpResponseRef, key, value: string) = ## Sets value of header ``key`` to ``value``, only if header ``key`` is not ## present in the headers table. + checkResponseCanBeModified(resp) discard resp.headersTable.hasKeyOrPut(key, value) proc addHeader*(resp: HttpResponseRef, key, value: string) = ## Adds value ``value`` to header's ``key`` value. - doAssert(resp.getResponseState() == HttpResponseState.Empty) + checkResponseCanBeModified(resp) resp.headersTable.add(key, value) proc getHeader*(resp: HttpResponseRef, key: string, @@ -1185,24 +1344,6 @@ proc hasHeader*(resp: HttpResponseRef, key: string): bool = ## Returns ``true`` if header with name ``key`` present in the headers table. 
key in resp.headersTable -template checkPending(t: untyped) = - let currentState = t.getResponseState() - doAssert(currentState == HttpResponseState.Empty, - "Response body was already sent [" & $currentState & "]") - -template checkStreamResponse(t: untyped) = - doAssert(HttpResponseFlags.Stream in t.flags, - "Response was not prepared") - -template checkStreamResponseState(t: untyped) = - doAssert(t.getResponseState() in - {HttpResponseState.Prepared, HttpResponseState.Sending}, - "Response is in the wrong state") - -template checkPointerLength(t1, t2: untyped) = - doAssert(not(isNil(t1)), "pbytes must not be nil") - doAssert(t2 >= 0, "nbytes should be bigger or equal to zero") - func createHeaders(resp: HttpResponseRef): string = var answer = $(resp.version) & " " & $(resp.status) & "\r\n" for k, v in resp.headersTable.stringItems(): diff --git a/docs/examples/middleware.nim b/docs/examples/middleware.nim new file mode 100644 index 000000000..9d06a8970 --- /dev/null +++ b/docs/examples/middleware.nim @@ -0,0 +1,130 @@ +import chronos/apps/http/httpserver + +{.push raises: [].} + +proc firstMiddlewareHandler( + middleware: HttpServerMiddlewareRef, + reqfence: RequestFence, + nextHandler: HttpProcessCallback2 +): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + if reqfence.isErr(): + # Ignore request errors + return await nextHandler(reqfence) + + let request = reqfence.get() + var headers = request.headers + + if request.uri.path.startsWith("/path/to/hidden/resources"): + headers.add("X-Filter", "drop") + elif request.uri.path.startsWith("/path/to/blocked/resources"): + headers.add("X-Filter", "block") + else: + headers.add("X-Filter", "pass") + + # Updating request by adding new HTTP header `X-Filter`. + let res = request.updateRequest(headers) + if res.isErr(): + # We use default error handler in case of error which will respond with + # proper HTTP status code error. + return defaultResponse(res.error) + + # Calling next handler. + await nextHandler(reqfence) + +proc secondMiddlewareHandler( + middleware: HttpServerMiddlewareRef, + reqfence: RequestFence, + nextHandler: HttpProcessCallback2 +): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + if reqfence.isErr(): + # Ignore request errors + return await nextHandler(reqfence) + + let + request = reqfence.get() + filtered = request.headers.getString("X-Filter", "pass") + + if filtered == "drop": + # Force HTTP server to drop connection with remote peer. + dropResponse() + elif filtered == "block": + # Force HTTP server to respond with HTTP `404 Not Found` error code. + codeResponse(Http404) + else: + # Calling next handler. + await nextHandler(reqfence) + +proc thirdMiddlewareHandler( + middleware: HttpServerMiddlewareRef, + reqfence: RequestFence, + nextHandler: HttpProcessCallback2 +): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + if reqfence.isErr(): + # Ignore request errors + return await nextHandler(reqfence) + + let request = reqfence.get() + echo "QUERY = [", request.rawPath, "]" + echo request.headers + try: + if request.uri.path == "/path/to/plugin/resources/page1": + await request.respond(Http200, "PLUGIN PAGE1") + elif request.uri.path == "/path/to/plugin/resources/page2": + await request.respond(Http200, "PLUGIN PAGE2") + else: + # Calling next handler. + await nextHandler(reqfence) + except HttpWriteError as exc: + # We use default error handler if we unable to send response. 
+ defaultResponse(exc) + +proc mainHandler( + reqfence: RequestFence +): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + if reqfence.isErr(): + return defaultResponse() + + let request = reqfence.get() + try: + if request.uri.path == "/path/to/original/page1": + await request.respond(Http200, "ORIGINAL PAGE1") + elif request.uri.path == "/path/to/original/page2": + await request.respond(Http200, "ORIGINAL PAGE2") + else: + # Force HTTP server to respond with `404 Not Found` status code. + codeResponse(Http404) + except HttpWriteError as exc: + defaultResponse(exc) + +proc middlewareExample() {.async: (raises: []).} = + let + middlewares = [ + HttpServerMiddlewareRef(handler: firstMiddlewareHandler), + HttpServerMiddlewareRef(handler: secondMiddlewareHandler), + HttpServerMiddlewareRef(handler: thirdMiddlewareHandler) + ] + socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} + boundAddress = + if isAvailable(AddressFamily.IPv6): + AnyAddress6 + else: + AnyAddress + res = HttpServerRef.new(boundAddress, mainHandler, + socketFlags = socketFlags, + middlewares = middlewares) + + doAssert(res.isOk(), "Unable to start HTTP server") + let server = res.get() + server.start() + let address = server.instance.localAddress() + echo "HTTP server running on ", address + try: + await server.join() + except CancelledError: + discard + finally: + await server.stop() + await server.closeWait() + +when isMainModule: + waitFor(middlewareExample()) diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 4f2ee56ff..f8343670b 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -8,6 +8,7 @@ - [Errors and exceptions](./error_handling.md) - [Tips, tricks and best practices](./tips.md) - [Porting code to `chronos`](./porting.md) +- [HTTP server middleware](./http_server_middleware.md) # Developer guide diff --git a/docs/src/examples.md b/docs/src/examples.md index c71247c65..49c6dc45e 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -16,3 +16,5 @@ Examples are available in the [`docs/examples/`](https://github.com/status-im/ni * [httpget](https://github.com/status-im/nim-chronos/tree/master/docs/examples/httpget.nim) - Downloading a web page using the http client * [twogets](https://github.com/status-im/nim-chronos/tree/master/docs/examples/twogets.nim) - Download two pages concurrently +* [middleware](https://github.com/status-im/nim-chronos/tree/master/docs/examples/middleware.nim) +- Deploy multiple HTTP server middlewares diff --git a/docs/src/http_server_middleware.md b/docs/src/http_server_middleware.md new file mode 100644 index 000000000..6edd9b5c1 --- /dev/null +++ b/docs/src/http_server_middleware.md @@ -0,0 +1,102 @@ +## HTTP server middleware + +Chronos provides a powerful mechanism for customizing HTTP request handlers via +middlewares. + +A middleware is a coroutine that can modify, block or filter HTTP request. + +Single HTTP server could support unlimited number of middlewares, but you need to consider that each request in worst case could go through all the middlewares, and therefore a huge number of middlewares can have a significant impact on HTTP server performance. + +Order of middlewares is also important: right after HTTP server has received request, it will be sent to the first middleware in list, and each middleware will be responsible for passing control to other middlewares. 
Therefore, when building a list, it would be a good idea to place the request handlers at the end of the list, while keeping the middleware that could block or modify the request at the beginning of the list. + +Middleware could also modify HTTP server request, and these changes will be visible to all handlers (either middlewares or the original request handler). This can be done using the following helpers: + +```nim + proc updateRequest*(request: HttpRequestRef, scheme: string, meth: HttpMethod, + version: HttpVersion, requestUri: string, + headers: HttpTable): HttpResultMessage[void] + + proc updateRequest*(request: HttpRequestRef, meth: HttpMethod, + requestUri: string, + headers: HttpTable): HttpResultMessage[void] + + proc updateRequest*(request: HttpRequestRef, requestUri: string, + headers: HttpTable): HttpResultMessage[void] + + proc updateRequest*(request: HttpRequestRef, + requestUri: string): HttpResultMessage[void] + + proc updateRequest*(request: HttpRequestRef, + headers: HttpTable): HttpResultMessage[void] +``` + +As you can see all the HTTP request parameters could be modified: request method, version, request path and request headers. + +Middleware could also use helpers to obtain more information about remote and local addresses of request's connection (this could be helpful when you need to do some IP address filtering). + +```nim + proc remote*(request: HttpRequestRef): Opt[TransportAddress] + ## Returns remote address of HTTP request's connection. + proc local*(request: HttpRequestRef): Opt[TransportAddress] = + ## Returns local address of HTTP request's connection. +``` + +Every middleware is the coroutine which looks like this: + +```nim + proc middlewareHandler( + middleware: HttpServerMiddlewareRef, + reqfence: RequestFence, + nextHandler: HttpProcessCallback2 + ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = +``` + +Where `middleware` argument is the object which could hold some specific values, `reqfence` is HTTP request which is enclosed with HTTP server error information and `nextHandler` is reference to next request handler, it could be either middleware handler or the original request processing callback handler. + +```nim + await nextHandler(reqfence) +``` + +You should perform await for the response from the `nextHandler(reqfence)`. Usually you should call next handler when you dont want to handle request or you dont know how to handle it, for example: + +```nim + proc middlewareHandler( + middleware: HttpServerMiddlewareRef, + reqfence: RequestFence, + nextHandler: HttpProcessCallback2 + ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + if reqfence.isErr(): + # We dont know or do not want to handle failed requests, so we call next handler. + return await nextHandler(reqfence) + let request = reqfence.get() + if request.uri.path == "/path/we/able/to/respond": + try: + # Sending some response. + await request.respond(Http200, "TEST") + except HttpWriteError as exc: + # We could also return default response for exception or other types of error. + defaultResponse(exc) + elif request.uri.path == "/path/for/rewrite": + # We going to modify request object for this request, next handler will receive it with different request path. 
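+      # Note that `updateRequest` returns `HttpResultMessage[void]`, so the
+      # result is checked before the (modified) request is passed on.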
+ let res = request.updateRequest("/path/to/new/location") + if res.isErr(): + return defaultResponse(res.error) + await nextHandler(reqfence) + elif request.uri.path == "/restricted/path": + if request.remote().isNone(): + # We can't obtain remote address, so we force HTTP server to respond with `401 Unauthorized` status code. + return codeResponse(Http401) + if $(request.remote().get()).startsWith("127.0.0.1"): + # Remote peer's address starts with "127.0.0.1", sending proper response. + await request.respond(Http200, "AUTHORIZED") + else: + # Force HTTP server to respond with `403 Forbidden` status code. + codeResponse(Http403) + elif request.uri.path == "/blackhole": + # Force HTTP server to drop connection with remote peer. + dropResponse() + else: + # All other requests should be handled by somebody else. + await nextHandler(reqfence) +``` + diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index 0183f1bfe..91064f5ab 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -18,9 +18,16 @@ suite "HTTP server testing suite": TooBigTest = enum GetBodyTest, ConsumeBodyTest, PostUrlTest, PostMultipartTest TestHttpResponse = object + status: int headers: HttpTable data: string + FirstMiddlewareRef = ref object of HttpServerMiddlewareRef + someInteger: int + + SecondMiddlewareRef = ref object of HttpServerMiddlewareRef + someString: string + proc httpClient(address: TransportAddress, data: string): Future[string] {.async.} = var transp: StreamTransport @@ -50,7 +57,7 @@ suite "HTTP server testing suite": zeroMem(addr buffer[0], len(buffer)) await transp.readExactly(addr buffer[0], length) let data = bytesToString(buffer.toOpenArray(0, length - 1)) - let headers = + let (status, headers) = block: let resp = parseResponse(hdata, false) if resp.failed(): @@ -58,8 +65,38 @@ suite "HTTP server testing suite": var res = HttpTable.init() for key, value in resp.headers(hdata): res.add(key, value) - res - return TestHttpResponse(headers: headers, data: data) + (resp.code, res) + TestHttpResponse(status: status, headers: headers, data: data) + + proc httpClient3(address: TransportAddress, + data: string): Future[TestHttpResponse] {.async.} = + var + transp: StreamTransport + buffer = newSeq[byte](4096) + sep = @[0x0D'u8, 0x0A'u8, 0x0D'u8, 0x0A'u8] + try: + transp = await connect(address) + if len(data) > 0: + let wres = await transp.write(data) + if wres != len(data): + raise newException(ValueError, "Unable to write full request") + let hres = await transp.readUntil(addr buffer[0], len(buffer), sep) + var hdata = @buffer + hdata.setLen(hres) + var rres = bytesToString(await transp.read()) + let (status, headers) = + block: + let resp = parseResponse(hdata, false) + if resp.failed(): + raise newException(ValueError, "Unable to decode response headers") + var res = HttpTable.init() + for key, value in resp.headers(hdata): + res.add(key, value) + (resp.code, res) + TestHttpResponse(status: status, headers: headers, data: rres) + finally: + if not(isNil(transp)): + await closeWait(transp) proc testTooBigBodyChunked(operation: TooBigTest): Future[bool] {.async.} = var serverRes = false @@ -1490,5 +1527,261 @@ suite "HTTP server testing suite": await server.stop() await server.closeWait() + asyncTest "HTTP middleware request filtering test": + proc init(t: typedesc[FirstMiddlewareRef], + data: int): HttpServerMiddlewareRef = + proc shandler( + middleware: HttpServerMiddlewareRef, + reqfence: RequestFence, + nextHandler: HttpProcessCallback2 + ): Future[HttpResponseRef] 
{.async: (raises: [CancelledError]).} = + let mw = FirstMiddlewareRef(middleware) + if reqfence.isErr(): + # Our handler is not supposed to handle request errors, so we + # call next handler in sequence which could process errors. + return await nextHandler(reqfence) + + let request = reqfence.get() + if request.uri.path == "/first": + # This is request we are waiting for, so we going to process it. + try: + await request.respond(Http200, $mw.someInteger) + except HttpWriteError as exc: + defaultResponse(exc) + else: + # We know nothing about request's URI, so we pass this request to the + # next handler which could process such request. + await nextHandler(reqfence) + + HttpServerMiddlewareRef( + FirstMiddlewareRef(someInteger: data, handler: shandler)) + + proc init(t: typedesc[SecondMiddlewareRef], + data: string): HttpServerMiddlewareRef = + proc shandler( + middleware: HttpServerMiddlewareRef, + reqfence: RequestFence, + nextHandler: HttpProcessCallback2 + ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + let mw = SecondMiddlewareRef(middleware) + if reqfence.isErr(): + # Our handler is not supposed to handle request errors, so we + # call next handler in sequence which could process errors. + return await nextHandler(reqfence) + + let request = reqfence.get() + + if request.uri.path == "/second": + # This is request we are waiting for, so we going to process it. + try: + await request.respond(Http200, mw.someString) + except HttpWriteError as exc: + defaultResponse(exc) + else: + # We know nothing about request's URI, so we pass this request to the + # next handler which could process such request. + await nextHandler(reqfence) + + HttpServerMiddlewareRef( + SecondMiddlewareRef(someString: data, handler: shandler)) + + proc process(r: RequestFence): Future[HttpResponseRef] {. 
+ async: (raises: [CancelledError]).} = + if r.isOk(): + let request = r.get() + if request.uri.path == "/test": + try: + await request.respond(Http200, "ORIGIN") + except HttpWriteError as exc: + defaultResponse(exc) + else: + defaultResponse() + else: + defaultResponse() + + let + middlewares = [FirstMiddlewareRef.init(655370), + SecondMiddlewareRef.init("SECOND")] + socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} + res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, + socketFlags = socketFlags, + middlewares = middlewares) + check res.isOk() + + let server = res.get() + server.start() + let + address = server.instance.localAddress() + req1 = "GET /test HTTP/1.1\r\n\r\n" + req2 = "GET /first HTTP/1.1\r\n\r\n" + req3 = "GET /second HTTP/1.1\r\n\r\n" + req4 = "GET /noway HTTP/1.1\r\n\r\n" + resp1 = await httpClient3(address, req1) + resp2 = await httpClient3(address, req2) + resp3 = await httpClient3(address, req3) + resp4 = await httpClient3(address, req4) + + check: + resp1.status == 200 + resp1.data == "ORIGIN" + resp2.status == 200 + resp2.data == "655370" + resp3.status == 200 + resp3.data == "SECOND" + resp4.status == 404 + + await server.stop() + await server.closeWait() + + asyncTest "HTTP middleware request modification test": + proc init(t: typedesc[FirstMiddlewareRef], + data: int): HttpServerMiddlewareRef = + proc shandler( + middleware: HttpServerMiddlewareRef, + reqfence: RequestFence, + nextHandler: HttpProcessCallback2 + ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + let mw = FirstMiddlewareRef(middleware) + if reqfence.isErr(): + # Our handler is not supposed to handle request errors, so we + # call next handler in sequence which could process errors. + return await nextHandler(reqfence) + + let + request = reqfence.get() + modifiedUri = "/modified/" & $mw.someInteger & request.rawPath + var modifiedHeaders = request.headers + modifiedHeaders.add("X-Modified", "test-value") + + let res = request.updateRequest(modifiedUri, modifiedHeaders) + if res.isErr(): + return defaultResponse(res.error) + + # We sending modified request to the next handler. + await nextHandler(reqfence) + + HttpServerMiddlewareRef( + FirstMiddlewareRef(someInteger: data, handler: shandler)) + + proc process(r: RequestFence): Future[HttpResponseRef] {. 
+ async: (raises: [CancelledError]).} = + if r.isOk(): + let request = r.get() + try: + await request.respond(Http200, request.rawPath & ":" & + request.headers.getString("x-modified")) + except HttpWriteError as exc: + defaultResponse(exc) + else: + defaultResponse() + + let + middlewares = [FirstMiddlewareRef.init(655370)] + socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} + res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, + socketFlags = socketFlags, + middlewares = middlewares) + check res.isOk() + + let server = res.get() + server.start() + let + address = server.instance.localAddress() + req1 = "GET /test HTTP/1.1\r\n\r\n" + req2 = "GET /first HTTP/1.1\r\n\r\n" + req3 = "GET /second HTTP/1.1\r\n\r\n" + req4 = "GET /noway HTTP/1.1\r\n\r\n" + resp1 = await httpClient3(address, req1) + resp2 = await httpClient3(address, req2) + resp3 = await httpClient3(address, req3) + resp4 = await httpClient3(address, req4) + + check: + resp1.status == 200 + resp1.data == "/modified/655370/test:test-value" + resp2.status == 200 + resp2.data == "/modified/655370/first:test-value" + resp3.status == 200 + resp3.data == "/modified/655370/second:test-value" + resp4.status == 200 + resp4.data == "/modified/655370/noway:test-value" + + await server.stop() + await server.closeWait() + + asyncTest "HTTP middleware request blocking test": + proc init(t: typedesc[FirstMiddlewareRef], + data: int): HttpServerMiddlewareRef = + proc shandler( + middleware: HttpServerMiddlewareRef, + reqfence: RequestFence, + nextHandler: HttpProcessCallback2 + ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + if reqfence.isErr(): + # Our handler is not supposed to handle request errors, so we + # call next handler in sequence which could process errors. + return await nextHandler(reqfence) + + let request = reqfence.get() + if request.uri.path == "/first": + # Blocking request by disconnecting remote peer. + dropResponse() + elif request.uri.path == "/second": + # Blocking request by sending HTTP error message with 401 code. + codeResponse(Http401) + else: + # Allow all other requests to be processed by next handler. + await nextHandler(reqfence) + + HttpServerMiddlewareRef( + FirstMiddlewareRef(someInteger: data, handler: shandler)) + + proc process(r: RequestFence): Future[HttpResponseRef] {. 
+ async: (raises: [CancelledError]).} = + if r.isOk(): + let request = r.get() + try: + await request.respond(Http200, "ORIGIN") + except HttpWriteError as exc: + defaultResponse(exc) + else: + defaultResponse() + + let + middlewares = [FirstMiddlewareRef.init(655370)] + socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr} + res = HttpServerRef.new(initTAddress("127.0.0.1:0"), process, + socketFlags = socketFlags, + middlewares = middlewares) + check res.isOk() + + let server = res.get() + server.start() + let + address = server.instance.localAddress() + req1 = "GET /test HTTP/1.1\r\n\r\n" + req2 = "GET /first HTTP/1.1\r\n\r\n" + req3 = "GET /second HTTP/1.1\r\n\r\n" + resp1 = await httpClient3(address, req1) + resp3 = await httpClient3(address, req3) + + check: + resp1.status == 200 + resp1.data == "ORIGIN" + resp3.status == 401 + + let checked = + try: + let res {.used.} = await httpClient3(address, req2) + false + except TransportIncompleteError: + true + + check: + checked == true + + await server.stop() + await server.closeWait() + test "Leaks test": checkLeaks() From 92acf68b04070dfe8eb65bab71fbf63804979a16 Mon Sep 17 00:00:00 2001 From: cheatfate Date: Fri, 12 Jan 2024 15:39:45 +0200 Subject: [PATCH 102/146] Fix examples documentation. --- docs/src/examples.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/src/examples.md b/docs/src/examples.md index 49c6dc45e..0bcfc74ba 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -16,5 +16,4 @@ Examples are available in the [`docs/examples/`](https://github.com/status-im/ni * [httpget](https://github.com/status-im/nim-chronos/tree/master/docs/examples/httpget.nim) - Downloading a web page using the http client * [twogets](https://github.com/status-im/nim-chronos/tree/master/docs/examples/twogets.nim) - Download two pages concurrently -* [middleware](https://github.com/status-im/nim-chronos/tree/master/docs/examples/middleware.nim) -- Deploy multiple HTTP server middlewares +* [middleware](https://github.com/status-im/nim-chronos/tree/master/docs/examples/middleware.nim) - Deploy multiple HTTP server middlewares From 1021a7d29453ac184cc406483ff5fcdb73d48472 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 18 Jan 2024 13:34:16 +0100 Subject: [PATCH 103/146] check leaks after every test (#487) --- chronos/unittest2/asynctests.nim | 3 ++- tests/testasyncstream.nim | 22 +++++++++------------- tests/testdatagram.nim | 5 +++-- tests/testhttpclient.nim | 5 ++--- tests/testhttpserver.nim | 6 +++--- tests/testproc.nim | 6 +++--- tests/testshttpserver.nim | 5 ++--- tests/teststream.nim | 11 +++-------- 8 files changed, 27 insertions(+), 36 deletions(-) diff --git a/chronos/unittest2/asynctests.nim b/chronos/unittest2/asynctests.nim index 758e0a6b0..9e01dba19 100644 --- a/chronos/unittest2/asynctests.nim +++ b/chronos/unittest2/asynctests.nim @@ -26,6 +26,7 @@ template checkLeaks*(name: string): untyped = ", closed = " & $ counter.closed check counter.opened == counter.closed -template checkLeaks*(): untyped = +proc checkLeaks*() = for key in getThreadDispatcher().trackerCounterKeys(): checkLeaks(key) + GC_fullCollect() diff --git a/tests/testasyncstream.nim b/tests/testasyncstream.nim index bd0207f8d..399eb63b6 100644 --- a/tests/testasyncstream.nim +++ b/tests/testasyncstream.nim @@ -84,6 +84,9 @@ proc createBigMessage(message: string, size: int): seq[byte] = res suite "AsyncStream test suite": + teardown: + checkLeaks() + test "AsyncStream(StreamTransport) readExactly() test": proc 
testReadExactly(): Future[bool] {.async.} = proc serveClient(server: StreamServer, @@ -256,9 +259,6 @@ suite "AsyncStream test suite": result = true check waitFor(testConsume()) == true - test "AsyncStream(StreamTransport) leaks test": - checkLeaks() - test "AsyncStream(AsyncStream) readExactly() test": proc testReadExactly2(): Future[bool] {.async.} = proc serveClient(server: StreamServer, @@ -581,10 +581,10 @@ suite "AsyncStream test suite": check waitFor(testWriteEof()) == true - test "AsyncStream(AsyncStream) leaks test": +suite "ChunkedStream test suite": + teardown: checkLeaks() -suite "ChunkedStream test suite": test "ChunkedStream test vectors": const ChunkedVectors = [ ["4\r\nWiki\r\n5\r\npedia\r\nE\r\n in\r\n\r\nchunks.\r\n0\r\n\r\n", @@ -890,10 +890,10 @@ suite "ChunkedStream test suite": check waitFor(testSmallChunk(262400, 4096, 61)) == true check waitFor(testSmallChunk(767309, 4457, 173)) == true - test "ChunkedStream leaks test": +suite "TLSStream test suite": + teardown: checkLeaks() -suite "TLSStream test suite": const HttpHeadersMark = @[byte(0x0D), byte(0x0A), byte(0x0D), byte(0x0A)] test "Simple HTTPS connection": proc headerClient(address: TransportAddress, @@ -1023,10 +1023,9 @@ suite "TLSStream test suite": let res = waitFor checkTrustAnchors("Some message") check res == "Some message\r\n" - test "TLSStream leaks test": - checkLeaks() - suite "BoundedStream test suite": + teardown: + checkLeaks() type BoundarySizeTest = enum @@ -1402,6 +1401,3 @@ suite "BoundedStream test suite": return (writer1Res and writer2Res and readerRes) check waitFor(checkEmptyStreams()) == true - - test "BoundedStream leaks test": - checkLeaks() diff --git a/tests/testdatagram.nim b/tests/testdatagram.nim index bd33ef365..7b27c3431 100644 --- a/tests/testdatagram.nim +++ b/tests/testdatagram.nim @@ -13,6 +13,9 @@ import ".."/chronos {.used.} suite "Datagram Transport test suite": + teardown: + checkLeaks() + const TestsCount = 2000 ClientsCount = 20 @@ -727,5 +730,3 @@ suite "Datagram Transport test suite": DualStackType.Auto, initTAddress("[::1]:0"))) == true else: skip() - test "Transports leak test": - checkLeaks() diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index 967f896be..a468aaed9 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -74,6 +74,8 @@ N8r5CwGcIX/XPC3lKazzbZ8baA== """ suite "HTTP client testing suite": + teardown: + checkLeaks() type TestResponseTuple = tuple[status: int, data: string, count: int] @@ -1516,6 +1518,3 @@ suite "HTTP client testing suite": res.isErr() and res.error == HttpAddressErrorType.NameLookupFailed res.error.isRecoverableError() not(res.error.isCriticalError()) - - test "Leaks test": - checkLeaks() diff --git a/tests/testhttpserver.nim b/tests/testhttpserver.nim index 91064f5ab..70cca33ed 100644 --- a/tests/testhttpserver.nim +++ b/tests/testhttpserver.nim @@ -14,6 +14,9 @@ import stew/base10 {.used.} suite "HTTP server testing suite": + teardown: + checkLeaks() + type TooBigTest = enum GetBodyTest, ConsumeBodyTest, PostUrlTest, PostMultipartTest @@ -1782,6 +1785,3 @@ suite "HTTP server testing suite": await server.stop() await server.closeWait() - - test "Leaks test": - checkLeaks() diff --git a/tests/testproc.nim b/tests/testproc.nim index 588e30877..4d8accf89 100644 --- a/tests/testproc.nim +++ b/tests/testproc.nim @@ -16,6 +16,9 @@ when defined(posix): when defined(nimHasUsed): {.used.} suite "Asynchronous process management test suite": + teardown: + checkLeaks() + const OutputTests = when defined(windows): 
[ @@ -463,6 +466,3 @@ suite "Asynchronous process management test suite": skip() else: check getCurrentFD() == markFD - - test "Leaks test": - checkLeaks() diff --git a/tests/testshttpserver.nim b/tests/testshttpserver.nim index 18e84a972..f846d8d56 100644 --- a/tests/testshttpserver.nim +++ b/tests/testshttpserver.nim @@ -75,6 +75,8 @@ N8r5CwGcIX/XPC3lKazzbZ8baA== suite "Secure HTTP server testing suite": + teardown: + checkLeaks() proc httpsClient(address: TransportAddress, data: string, flags = {NoVerifyHost, NoVerifyServerName} @@ -184,6 +186,3 @@ suite "Secure HTTP server testing suite": return serverRes and data == "EXCEPTION" check waitFor(testHTTPS2(initTAddress("127.0.0.1:30080"))) == true - - test "Leaks test": - checkLeaks() diff --git a/tests/teststream.nim b/tests/teststream.nim index fb5534b5c..bf4c455e7 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -16,6 +16,9 @@ when defined(windows): importc: "_get_osfhandle", header:"".} suite "Stream Transport test suite": + teardown: + checkLeaks() + const ConstantMessage = "SOMEDATA" BigMessagePattern = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" @@ -1555,12 +1558,6 @@ suite "Stream Transport test suite": check waitFor(testAccept(addresses[i])) == true test prefixes[i] & "close() while in accept() waiting test": check waitFor(testAcceptClose(addresses[i])) == true - test prefixes[i] & "Intermediate transports leak test #1": - checkLeaks() - when defined(windows): - skip() - else: - checkLeaks(StreamTransportTrackerName) test prefixes[i] & "accept() too many file descriptors test": when defined(windows): skip() @@ -1671,8 +1668,6 @@ suite "Stream Transport test suite": DualStackType.Disabled, initTAddress("[::1]:0"))) == true else: skip() - test "Leaks test": - checkLeaks() test "File descriptors leak test": when defined(windows): # Windows handle numbers depends on many conditions, so we can't use From 3ca2c5e6b510c15ce88c94ed25731b30f7ad46b5 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 19 Jan 2024 09:21:10 +0100 Subject: [PATCH 104/146] deprecate `callback=`, UDP fixes (fixes #491) (#492) Using the callback setter may lead to callbacks owned by others being reset, which is unexpected. * don't crash on zero-length UDP writes --- chronos/internal/asyncfutures.nim | 8 +++++-- chronos/transports/datagram.nim | 38 +++++++++++++++---------------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 5ce9da484..496a77604 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -330,7 +330,8 @@ proc removeCallback*(future: FutureBase, cb: CallbackFunc, proc removeCallback*(future: FutureBase, cb: CallbackFunc) = future.removeCallback(cb, cast[pointer](future)) -proc `callback=`*(future: FutureBase, cb: CallbackFunc, udata: pointer) = +proc `callback=`*(future: FutureBase, cb: CallbackFunc, udata: pointer) {. + deprecated: "use addCallback/removeCallback/clearCallbacks to manage the callback list".} = ## Clears the list of callbacks and sets the callback proc to be called when ## the future completes. ## @@ -341,11 +342,14 @@ proc `callback=`*(future: FutureBase, cb: CallbackFunc, udata: pointer) = future.clearCallbacks future.addCallback(cb, udata) -proc `callback=`*(future: FutureBase, cb: CallbackFunc) = +proc `callback=`*(future: FutureBase, cb: CallbackFunc) {. 
+ deprecated: "use addCallback/removeCallback/clearCallbacks instead to manage the callback list".} = ## Sets the callback proc to be called when the future completes. ## ## If future has already completed then ``cb`` will be called immediately. + {.push warning[Deprecated]: off.} `callback=`(future, cb, cast[pointer](future)) + {.pop.} proc `cancelCallback=`*(future: FutureBase, cb: CallbackFunc) = ## Sets the callback procedure to be called when the future is cancelled. diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index 88db7ee14..cd335dfd9 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -13,6 +13,7 @@ import std/deques when not(defined(windows)): import ".."/selectors2 import ".."/[asyncloop, config, osdefs, oserrno, osutils, handles] import "."/common +import stew/ptrops type VectorKind = enum @@ -119,7 +120,7 @@ when defined(windows): ## Initiation transp.state.incl(WritePending) let fd = SocketHandle(transp.fd) - var vector = transp.queue.popFirst() + let vector = transp.queue.popFirst() transp.setWriterWSABuffer(vector) let ret = if vector.kind == WithAddress: @@ -365,7 +366,7 @@ when defined(windows): udata: cast[pointer](res)) res.wovl.data = CompletionData(cb: writeDatagramLoop, udata: cast[pointer](res)) - res.rwsabuf = WSABUF(buf: cast[cstring](addr res.buffer[0]), + res.rwsabuf = WSABUF(buf: cast[cstring](baseAddr res.buffer), len: ULONG(len(res.buffer))) GC_ref(res) # Start tracking transport @@ -392,7 +393,7 @@ else: else: while true: transp.ralen = SockLen(sizeof(Sockaddr_storage)) - var res = osdefs.recvfrom(fd, addr transp.buffer[0], + var res = osdefs.recvfrom(fd, baseAddr transp.buffer, cint(len(transp.buffer)), cint(0), cast[ptr SockAddr](addr transp.raddr), addr transp.ralen) @@ -424,7 +425,7 @@ else: transp.state.incl({WritePaused}) else: if len(transp.queue) > 0: - var vector = transp.queue.popFirst() + let vector = transp.queue.popFirst() while true: if vector.kind == WithAddress: toSAddr(vector.address, transp.waddr, transp.walen) @@ -826,7 +827,7 @@ proc newDatagramTransport6*[T](cbproc: UnsafeDatagramCallback, proc join*(transp: DatagramTransport): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Wait until the transport ``transp`` will be closed. - var retFuture = newFuture[void]("datagram.transport.join") + let retFuture = newFuture[void]("datagram.transport.join") proc continuation(udata: pointer) {.gcsafe.} = retFuture.complete() @@ -858,12 +859,12 @@ proc send*(transp: DatagramTransport, pbytes: pointer, async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send buffer with pointer ``pbytes`` and size ``nbytes`` using transport ## ``transp`` to remote destination address which was bounded on transport. 
- var retFuture = newFuture[void]("datagram.transport.send(pointer)") + let retFuture = newFuture[void]("datagram.transport.send(pointer)") transp.checkClosed(retFuture) if transp.remote.port == Port(0): retFuture.fail(newException(TransportError, "Remote peer not set!")) return retFuture - var vector = GramVector(kind: WithoutAddress, buf: pbytes, buflen: nbytes, + let vector = GramVector(kind: WithoutAddress, buf: pbytes, buflen: nbytes, writer: retFuture) transp.queue.addLast(vector) if WritePaused in transp.state: @@ -877,14 +878,14 @@ proc send*(transp: DatagramTransport, msg: sink string, async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination ## address which was bounded on transport. - var retFuture = newFuture[void]("datagram.transport.send(string)") + let retFuture = newFuture[void]("datagram.transport.send(string)") transp.checkClosed(retFuture) let length = if msglen <= 0: len(msg) else: msglen var localCopy = chronosMoveSink(msg) retFuture.addCallback(proc(_: pointer) = reset(localCopy)) - let vector = GramVector(kind: WithoutAddress, buf: addr localCopy[0], + let vector = GramVector(kind: WithoutAddress, buf: baseAddr localCopy, buflen: length, writer: retFuture) @@ -900,14 +901,14 @@ proc send*[T](transp: DatagramTransport, msg: sink seq[T], async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination ## address which was bounded on transport. - var retFuture = newFuture[void]("datagram.transport.send(seq)") + let retFuture = newFuture[void]("datagram.transport.send(seq)") transp.checkClosed(retFuture) let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) var localCopy = chronosMoveSink(msg) retFuture.addCallback(proc(_: pointer) = reset(localCopy)) - let vector = GramVector(kind: WithoutAddress, buf: addr localCopy[0], + let vector = GramVector(kind: WithoutAddress, buf: baseAddr localCopy, buflen: length, writer: retFuture) transp.queue.addLast(vector) @@ -922,7 +923,7 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send buffer with pointer ``pbytes`` and size ``nbytes`` using transport ## ``transp`` to remote destination address ``remote``. - var retFuture = newFuture[void]("datagram.transport.sendTo(pointer)") + let retFuture = newFuture[void]("datagram.transport.sendTo(pointer)") transp.checkClosed(retFuture) let vector = GramVector(kind: WithAddress, buf: pbytes, buflen: nbytes, writer: retFuture, address: remote) @@ -938,14 +939,14 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination ## address ``remote``. 
- var retFuture = newFuture[void]("datagram.transport.sendTo(string)") + let retFuture = newFuture[void]("datagram.transport.sendTo(string)") transp.checkClosed(retFuture) let length = if msglen <= 0: len(msg) else: msglen var localCopy = chronosMoveSink(msg) retFuture.addCallback(proc(_: pointer) = reset(localCopy)) - let vector = GramVector(kind: WithAddress, buf: addr localCopy[0], + let vector = GramVector(kind: WithAddress, buf: baseAddr localCopy, buflen: length, writer: retFuture, address: remote) @@ -961,15 +962,15 @@ proc sendTo*[T](transp: DatagramTransport, remote: TransportAddress, async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send sequence ``msg`` using transport ``transp`` to remote destination ## address ``remote``. - var retFuture = newFuture[void]("datagram.transport.sendTo(seq)") + let retFuture = newFuture[void]("datagram.transport.sendTo(seq)") transp.checkClosed(retFuture) let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) var localCopy = chronosMoveSink(msg) retFuture.addCallback(proc(_: pointer) = reset(localCopy)) - let vector = GramVector(kind: WithAddress, buf: addr localCopy[0], + let vector = GramVector(kind: WithAddress, buf: baseAddr localCopy, buflen: length, - writer: cast[Future[void]](retFuture), + writer: retFuture, address: remote) transp.queue.addLast(vector) if WritePaused in transp.state: @@ -993,7 +994,6 @@ proc peekMessage*(transp: DatagramTransport, msg: var seq[byte], proc getMessage*(transp: DatagramTransport): seq[byte] {. raises: [TransportError].} = ## Copy data from internal message buffer and return result. - var default: seq[byte] if ReadError in transp.state: transp.state.excl(ReadError) raise transp.getError() @@ -1002,7 +1002,7 @@ proc getMessage*(transp: DatagramTransport): seq[byte] {. copyMem(addr res[0], addr transp.buffer[0], transp.buflen) res else: - default + default(seq[byte]) proc getUserData*[T](transp: DatagramTransport): T {.inline.} = ## Obtain user data stored in ``transp`` object. From e296ae30c84bdd1f0b12c50ab551ed080f8a815c Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Sat, 20 Jan 2024 16:56:57 +0100 Subject: [PATCH 105/146] asyncraises for threadsync (#495) * asyncraises for threadsync * missing bracket * missing exception --- chronos/internal/asyncfutures.nim | 2 +- chronos/threadsync.nim | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 496a77604..1a2be7577 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1553,7 +1553,7 @@ when defined(windows): proc waitForSingleObject*(handle: HANDLE, timeout: Duration): Future[WaitableResult] {. - raises: [].} = + async: (raises: [AsyncError, CancelledError], raw: true).} = ## Waits until the specified object is in the signaled state or the ## time-out interval elapses. WaitForSingleObject() for asynchronous world. let flags = WT_EXECUTEONLYONCE diff --git a/chronos/threadsync.nim b/chronos/threadsync.nim index bbff18bd1..f922c1263 100644 --- a/chronos/threadsync.nim +++ b/chronos/threadsync.nim @@ -272,7 +272,8 @@ proc waitSync*(signal: ThreadSignalPtr, else: return ok(true) -proc fire*(signal: ThreadSignalPtr): Future[void] = +proc fire*(signal: ThreadSignalPtr): Future[void] {. + async: (raises: [AsyncError, CancelledError], raw: true).} = ## Set state of ``signal`` to signaled in asynchronous way. 
var retFuture = newFuture[void]("asyncthreadsignal.fire") when defined(windows): @@ -356,14 +357,17 @@ proc fire*(signal: ThreadSignalPtr): Future[void] = retFuture when defined(windows): - proc wait*(signal: ThreadSignalPtr) {.async.} = + proc wait*(signal: ThreadSignalPtr) {. + async: (raises: [AsyncError, CancelledError]).} = let handle = signal[].event let res = await waitForSingleObject(handle, InfiniteDuration) # There should be no other response, because we use `InfiniteDuration`. doAssert(res == WaitableResult.Ok) else: - proc wait*(signal: ThreadSignalPtr): Future[void] = - var retFuture = newFuture[void]("asyncthreadsignal.wait") + proc wait*(signal: ThreadSignalPtr): Future[void] {. + async: (raises: [AsyncError, CancelledError], raw: true).} = + let retFuture = Future[void].Raising([AsyncError, CancelledError]).init( + "asyncthreadsignal.wait") var data = 1'u64 let eventFd = when defined(linux): From 09a0b117194ed41ee6cebf628404698006d238b4 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 23 Jan 2024 09:34:10 +0200 Subject: [PATCH 106/146] Make asyncproc use asyncraises. (#497) * Make asyncproc use asyncraises. * Fix missing asyncraises for waitForExit(). --- chronos/asyncproc.nim | 75 ++++++++++++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 25 deletions(-) diff --git a/chronos/asyncproc.nim b/chronos/asyncproc.nim index 8615c570a..f00877675 100644 --- a/chronos/asyncproc.nim +++ b/chronos/asyncproc.nim @@ -231,8 +231,9 @@ proc closeProcessHandles(pipes: var AsyncProcessPipes, lastError: OSErrorCode): OSErrorCode {.apforward.} proc closeProcessStreams(pipes: AsyncProcessPipes, options: set[AsyncProcessOption]): Future[void] {. - apforward.} -proc closeWait(holder: AsyncStreamHolder): Future[void] {.apforward.} + async: (raises: []).} +proc closeWait(holder: AsyncStreamHolder): Future[void] {. + async: (raises: []).} template isOk(code: OSErrorCode): bool = when defined(windows): @@ -391,7 +392,8 @@ when defined(windows): stdinHandle = ProcessStreamHandle(), stdoutHandle = ProcessStreamHandle(), stderrHandle = ProcessStreamHandle(), - ): Future[AsyncProcessRef] {.async.} = + ): Future[AsyncProcessRef] {. + async: (raises: [AsyncProcessError, CancelledError]).} = var pipes = preparePipes(options, stdinHandle, stdoutHandle, stderrHandle).valueOr: @@ -517,14 +519,16 @@ when defined(windows): ok(false) proc waitForExit*(p: AsyncProcessRef, - timeout = InfiniteDuration): Future[int] {.async.} = + timeout = InfiniteDuration): Future[int] {. + async: (raises: [AsyncProcessError, AsyncProcessTimeoutError, + CancelledError]).} = if p.exitStatus.isSome(): return p.exitStatus.get() let wres = try: await waitForSingleObject(p.processHandle, timeout) - except ValueError as exc: + except AsyncError as exc: raiseAsyncProcessError("Unable to wait for process handle", exc) if wres == WaitableResult.Timeout: @@ -537,7 +541,8 @@ when defined(windows): if exitCode >= 0: p.exitStatus = Opt.some(exitCode) - return exitCode + + exitCode proc peekExitCode(p: AsyncProcessRef): AsyncProcessResult[int] = if p.exitStatus.isSome(): @@ -787,7 +792,8 @@ else: stdinHandle = ProcessStreamHandle(), stdoutHandle = ProcessStreamHandle(), stderrHandle = ProcessStreamHandle(), - ): Future[AsyncProcessRef] {.async.} = + ): Future[AsyncProcessRef] {. 
+ async: (raises: [AsyncProcessError, CancelledError]).} = var pid: Pid pipes = preparePipes(options, stdinHandle, stdoutHandle, @@ -887,7 +893,7 @@ else: ) trackCounter(AsyncProcessTrackerName) - return process + process proc peekProcessExitCode(p: AsyncProcessRef, reap = false): AsyncProcessResult[int] = @@ -948,7 +954,9 @@ else: ok(false) proc waitForExit*(p: AsyncProcessRef, - timeout = InfiniteDuration): Future[int] = + timeout = InfiniteDuration): Future[int] {. + async: (raw: true, raises: [ + AsyncProcessError, AsyncProcessTimeoutError, CancelledError]).} = var retFuture = newFuture[int]("chronos.waitForExit()") processHandle: ProcessHandle @@ -1050,7 +1058,7 @@ else: # Process is still running, so we going to wait for SIGCHLD. retFuture.cancelCallback = cancellation - return retFuture + retFuture proc peekExitCode(p: AsyncProcessRef): AsyncProcessResult[int] = let res = ? p.peekProcessExitCode() @@ -1155,7 +1163,7 @@ proc preparePipes(options: set[AsyncProcessOption], stderrHandle: remoteStderr )) -proc closeWait(holder: AsyncStreamHolder) {.async.} = +proc closeWait(holder: AsyncStreamHolder) {.async: (raises: []).} = let (future, transp) = case holder.kind of StreamKind.None: @@ -1182,10 +1190,11 @@ proc closeWait(holder: AsyncStreamHolder) {.async.} = res if len(pending) > 0: - await allFutures(pending) + await noCancel allFutures(pending) proc closeProcessStreams(pipes: AsyncProcessPipes, - options: set[AsyncProcessOption]): Future[void] = + options: set[AsyncProcessOption]): Future[void] {. + async: (raw: true, raises: []).} = let pending = block: var res: seq[Future[void]] @@ -1196,10 +1205,12 @@ proc closeProcessStreams(pipes: AsyncProcessPipes, if ProcessFlag.AutoStderr in pipes.flags: res.add(pipes.stderrHolder.closeWait()) res - allFutures(pending) + noCancel allFutures(pending) proc opAndWaitForExit(p: AsyncProcessRef, op: WaitOperation, - timeout = InfiniteDuration): Future[int] {.async.} = + timeout = InfiniteDuration): Future[int] {. + async: (raises: [ + AsyncProcessError, AsyncProcessTimeoutError, CancelledError]).} = let timerFut = if timeout == InfiniteDuration: newFuture[void]("chronos.killAndwaitForExit") @@ -1223,7 +1234,10 @@ proc opAndWaitForExit(p: AsyncProcessRef, op: WaitOperation, return exitCode let waitFut = p.waitForExit().wait(100.milliseconds) - discard await race(FutureBase(waitFut), FutureBase(timerFut)) + try: + discard await race(FutureBase(waitFut), FutureBase(timerFut)) + except ValueError: + raiseAssert "This should not be happened!" if waitFut.finished() and not(waitFut.failed()): let res = p.peekExitCode() @@ -1237,25 +1251,28 @@ proc opAndWaitForExit(p: AsyncProcessRef, op: WaitOperation, await waitFut.cancelAndWait() raiseAsyncProcessTimeoutError() -proc closeWait*(p: AsyncProcessRef) {.async.} = +proc closeWait*(p: AsyncProcessRef) {.async: (raises: []).} = # Here we ignore all possible errrors, because we do not want to raise # exceptions. discard closeProcessHandles(p.pipes, p.options, OSErrorCode(0)) - await noCancel(p.pipes.closeProcessStreams(p.options)) + await p.pipes.closeProcessStreams(p.options) discard p.closeThreadAndProcessHandle() untrackCounter(AsyncProcessTrackerName) proc stdinStream*(p: AsyncProcessRef): AsyncStreamWriter = + ## Returns STDIN async stream associated with process `p`. 
doAssert(p.pipes.stdinHolder.kind == StreamKind.Writer, "StdinStreamWriter is not available") p.pipes.stdinHolder.writer proc stdoutStream*(p: AsyncProcessRef): AsyncStreamReader = + ## Returns STDOUT async stream associated with process `p`. doAssert(p.pipes.stdoutHolder.kind == StreamKind.Reader, "StdoutStreamReader is not available") p.pipes.stdoutHolder.reader proc stderrStream*(p: AsyncProcessRef): AsyncStreamReader = + ## Returns STDERR async stream associated with process `p`. doAssert(p.pipes.stderrHolder.kind == StreamKind.Reader, "StderrStreamReader is not available") p.pipes.stderrHolder.reader @@ -1263,7 +1280,9 @@ proc stderrStream*(p: AsyncProcessRef): AsyncStreamReader = proc execCommand*(command: string, options = {AsyncProcessOption.EvalCommand}, timeout = InfiniteDuration - ): Future[int] {.async.} = + ): Future[int] {. + async: (raises: [ + AsyncProcessError, AsyncProcessTimeoutError, CancelledError]).} = let poptions = options + {AsyncProcessOption.EvalCommand} process = await startProcess(command, options = poptions) @@ -1277,7 +1296,9 @@ proc execCommand*(command: string, proc execCommandEx*(command: string, options = {AsyncProcessOption.EvalCommand}, timeout = InfiniteDuration - ): Future[CommandExResponse] {.async.} = + ): Future[CommandExResponse] {. + async: (raises: [ + AsyncProcessError, AsyncProcessTimeoutError, CancelledError]).} = let process = await startProcess(command, options = options, stdoutHandle = AsyncProcess.Pipe, @@ -1291,13 +1312,13 @@ proc execCommandEx*(command: string, status = await process.waitForExit(timeout) output = try: - string.fromBytes(outputReader.read()) + string.fromBytes(await outputReader) except AsyncStreamError as exc: raiseAsyncProcessError("Unable to read process' stdout channel", exc) error = try: - string.fromBytes(errorReader.read()) + string.fromBytes(await errorReader) except AsyncStreamError as exc: raiseAsyncProcessError("Unable to read process' stderr channel", exc) @@ -1308,13 +1329,15 @@ proc execCommandEx*(command: string, res proc pid*(p: AsyncProcessRef): int = - ## Returns process ``p`` identifier. + ## Returns process ``p`` unique process identifier. int(p.processId) template processId*(p: AsyncProcessRef): int = pid(p) proc killAndWaitForExit*(p: AsyncProcessRef, - timeout = InfiniteDuration): Future[int] = + timeout = InfiniteDuration): Future[int] {. + async: (raw: true, raises: [ + AsyncProcessError, AsyncProcessTimeoutError, CancelledError]).} = ## Perform continuous attempts to kill the ``p`` process for specified period ## of time ``timeout``. ## @@ -1330,7 +1353,9 @@ proc killAndWaitForExit*(p: AsyncProcessRef, opAndWaitForExit(p, WaitOperation.Kill, timeout) proc terminateAndWaitForExit*(p: AsyncProcessRef, - timeout = InfiniteDuration): Future[int] = + timeout = InfiniteDuration): Future[int] {. + async: (raw: true, raises: [ + AsyncProcessError, AsyncProcessTimeoutError, CancelledError]).} = ## Perform continuous attempts to terminate the ``p`` process for specified ## period of time ``timeout``. 
## From 672db137b7cad9b384b8f4fb551fb6bbeaabfe1b Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 24 Jan 2024 18:33:13 +0100 Subject: [PATCH 107/146] v4.0.0 (#494) Features: * Exception effects / raises for async procedures helping you write more efficient leak-free code * Cross-thread notification mechanism for suitable building channels, queues and other multithreaded primitives * Async process I/O * IPv6 dual stack support * HTTP middleware support alloing multiple services to share a single http server * A new [documentation web site](https://status-im.github.io/nim-chronos/) covering the basics, with several simple examples for getting started * Implicit returns, support for `results.?` and other conveniences * Rate limiter * Revamped cancellation support with more control over the cancellation process * Efficiency improvements with `lent` and `sink` See the [porting](https://status-im.github.io/nim-chronos/porting.html) guides for porting code from earlier chronos releases (as well as asyncdispatch) --- chronos.nimble | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chronos.nimble b/chronos.nimble index e43588329..01117b6ba 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -1,13 +1,13 @@ mode = ScriptMode.Verbose packageName = "chronos" -version = "3.2.0" +version = "4.0.0" author = "Status Research & Development GmbH" description = "Networking framework with async/await support" license = "MIT or Apache License 2.0" skipDirs = @["tests"] -requires "nim >= 1.6.0", +requires "nim >= 1.6.16", "results", "stew", "bearssl", From 08db79fe6354cc3e381daaf81e769b07334ecfc9 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 14 Feb 2024 00:03:12 +0200 Subject: [PATCH 108/146] Disable memory hungry tests in 32bit tests. (#503) * Disable memory hungry tests in 32bit tests. * Limit threadsync tests for 32bit. 
--- tests/testproc.nim | 53 +++++++++++++++++++++------------------- tests/teststream.nim | 4 +-- tests/testthreadsync.nim | 5 +++- 3 files changed, 34 insertions(+), 28 deletions(-) diff --git a/tests/testproc.nim b/tests/testproc.nim index 4d8accf89..57ef5a67b 100644 --- a/tests/testproc.nim +++ b/tests/testproc.nim @@ -209,31 +209,34 @@ suite "Asynchronous process management test suite": await process.closeWait() asyncTest "Capture big amount of bytes from STDOUT stream test": - let options = {AsyncProcessOption.EvalCommand} - let command = - when defined(windows): - "tests\\testproc.bat bigdata" - else: - "tests/testproc.sh bigdata" - let expect = - when defined(windows): - 100_000 * (64 + 2) - else: - 100_000 * (64 + 1) - let process = await startProcess(command, options = options, - stdoutHandle = AsyncProcess.Pipe, - stderrHandle = AsyncProcess.Pipe) - try: - let outBytesFut = process.stdoutStream.read() - let errBytesFut = process.stderrStream.read() - let res = await process.waitForExit(InfiniteDuration) - await allFutures(outBytesFut, errBytesFut) - check: - res == 0 - len(outBytesFut.read()) == expect - len(errBytesFut.read()) == 0 - finally: - await process.closeWait() + when sizeof(int) == 4: + skip() + else: + let options = {AsyncProcessOption.EvalCommand} + let command = + when defined(windows): + "tests\\testproc.bat bigdata" + else: + "tests/testproc.sh bigdata" + let expect = + when defined(windows): + 100_000 * (64 + 2) + else: + 100_000 * (64 + 1) + let process = await startProcess(command, options = options, + stdoutHandle = AsyncProcess.Pipe, + stderrHandle = AsyncProcess.Pipe) + try: + let outBytesFut = process.stdoutStream.read() + let errBytesFut = process.stderrStream.read() + let res = await process.waitForExit(InfiniteDuration) + await allFutures(outBytesFut, errBytesFut) + check: + res == 0 + len(outBytesFut.read()) == expect + len(errBytesFut.read()) == 0 + finally: + await process.closeWait() asyncTest "Long-waiting waitForExit() test": let command = diff --git a/tests/teststream.nim b/tests/teststream.nim index bf4c455e7..340575c62 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -1520,12 +1520,12 @@ suite "Stream Transport test suite": check waitFor(testWCR(addresses[i])) == ClientsCount * MessagesCount test prefixes[i] & "writeFile() multiple clients (" & $FilesCount & " files)": when defined(windows): - if addresses[i].family == AddressFamily.IPv4: + if addresses[i].family == AddressFamily.IPv4 and (sizeof(int) == 8): check waitFor(testSendFile(addresses[i])) == FilesCount else: skip() else: - if defined(emscripten): + if defined(emscripten) or (sizeof(int) == 4): skip() else: check waitFor(testSendFile(addresses[i])) == FilesCount diff --git a/tests/testthreadsync.nim b/tests/testthreadsync.nim index fc85dc8c4..b5273975b 100644 --- a/tests/testthreadsync.nim +++ b/tests/testthreadsync.nim @@ -39,9 +39,12 @@ type Sync, Async const - TestsCount = 1000 + TestsCount = when sizeof(int) == 8: 1000 else: 100 suite "Asynchronous multi-threading sync primitives test suite": + teardown: + checkLeaks() + proc setResult(thr: ThreadResultPtr, value: int) = thr[].value = value From 8cf2d69aaa9c49306b7abe5914f385bc8a35f27a Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 14 Feb 2024 08:27:09 +0100 Subject: [PATCH 109/146] Minimal threading docs (#493) * Minimal threading docs * compile examples with threads * links --- chronos.nimble | 2 +- docs/examples/signalling.nim | 38 ++++++++++++++++++++++++++++++++++++ docs/src/SUMMARY.md | 1 + 
docs/src/examples.md | 4 ++++ docs/src/threads.md | 18 +++++++++++++++++ 5 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 docs/examples/signalling.nim create mode 100644 docs/src/threads.md diff --git a/chronos.nimble b/chronos.nimble index 01117b6ba..e063ac357 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -50,7 +50,7 @@ task examples, "Build examples": # Build book examples for file in listFiles("docs/examples"): if file.endsWith(".nim"): - build "", file + build "--threads:on", file task test, "Run all tests": for args in testArguments: diff --git a/docs/examples/signalling.nim b/docs/examples/signalling.nim new file mode 100644 index 000000000..1d4e932ef --- /dev/null +++ b/docs/examples/signalling.nim @@ -0,0 +1,38 @@ +import chronos, chronos/threadsync +import os + +type + Context = object + # Context allocated by `createShared` should contain no garbage-collected + # types! + signal: ThreadSignalPtr + value: int + +proc myThread(ctx: ptr Context) {.thread.} = + echo "Doing some work in a thread" + sleep(3000) + ctx.value = 42 + echo "Done, firing the signal" + discard ctx.signal.fireSync().expect("correctly initialized signal should not fail") + +proc main() {.async.} = + let + signal = ThreadSignalPtr.new().expect("free file descriptor for signal") + context = createShared(Context) + context.signal = signal + + var thread: Thread[ptr Context] + + echo "Starting thread" + createThread(thread, myThread, context) + + await signal.wait() + + echo "Work done: ", context.value + + joinThread(thread) + + signal.close().expect("closing once works") + deallocShared(context) + +waitFor main() diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index f8343670b..9bd22f675 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -6,6 +6,7 @@ - [Core concepts](./concepts.md) - [`async` functions](async_procs.md) - [Errors and exceptions](./error_handling.md) +- [Threads](./threads.md) - [Tips, tricks and best practices](./tips.md) - [Porting code to `chronos`](./porting.md) - [HTTP server middleware](./http_server_middleware.md) diff --git a/docs/src/examples.md b/docs/src/examples.md index 0bcfc74ba..2670570d9 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -8,6 +8,10 @@ Examples are available in the [`docs/examples/`](https://github.com/status-im/ni * [timeoutsimple](https://github.com/status-im/nim-chronos/tree/master/docs/examples/timeoutsimple.nim) - Simple timeouts * [timeoutcomposed](https://github.com/status-im/nim-chronos/tree/master/docs/examples/examples/timeoutcomposed.nim) - Shared timeout of multiple tasks +## Threads + +* [signalling](https://github.com/status-im/nim-chronos/tree/master/docs/examples/signalling.nim) - Cross-thread signalling + ## TCP * [tcpserver](https://github.com/status-im/nim-chronos/tree/master/docs/examples/tcpserver.nim) - Simple TCP/IP v4/v6 echo server diff --git a/docs/src/threads.md b/docs/src/threads.md new file mode 100644 index 000000000..a28590406 --- /dev/null +++ b/docs/src/threads.md @@ -0,0 +1,18 @@ +# Threads + +While the cooperative [`async`](./concepts.md) model offers an efficient model +for dealing with many tasks that often are blocked on I/O, it is not suitable +for long-running computations that would prevent concurrent tasks from progressing. + +Multithreading offers a way to offload heavy computations to be executed in +parallel with the async work, or, in cases where a single event loop gets +overloaded, to manage multiple event loops in parallel. 
+ +For interaction between threads, the `ThreadSignalPtr` type (found in the +(`chronos/threadsync`)(https://github.com/status-im/nim-chronos/blob/master/chronos/threadsync.nim) +module) is used - both to wait for notifications coming from other threads and +to notify other threads of progress from within an async procedure. + +```nim +{{#include ../examples/signalling.nim}} +``` From a81961a3c64f64e576c913563547b7149a743a8e Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 14 Feb 2024 14:05:19 +0200 Subject: [PATCH 110/146] Fix HTTP server accept() loop exiting under heavy load. (#502) * Add more specific accept() exceptions raised. Add some refactoring to HTTP server code. * Refactor acceptLoop. * Print GC statistics in every failing test. * Try to disable failing tests. --- chronos/apps/http/httpserver.nim | 160 ++++++++++++++++--------------- chronos/transports/stream.nim | 8 +- tests/testthreadsync.nim | 21 +++- 3 files changed, 104 insertions(+), 85 deletions(-) diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index c716d14ac..92ed35671 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -833,45 +833,33 @@ proc sendDefaultResponse( if reqFence.isOk(): if isNil(response): await conn.sendErrorResponse(version, Http404, keepConnection.toBool()) + return keepConnection + + case response.state + of HttpResponseState.Empty, HttpResponseState.Default: + # Response was ignored, so we respond with not found. + await conn.sendErrorResponse(version, Http404, + keepConnection.toBool()) + keepConnection + of HttpResponseState.Prepared: + # Response was prepared but not sent, so we can respond with some + # error code + await conn.sendErrorResponse(version, Http409, + keepConnection.toBool()) + keepConnection + of HttpResponseState.ErrorCode: + # Response with error code + await conn.sendErrorResponse(version, response.status, false) + HttpProcessExitType.Immediate + of HttpResponseState.Sending, HttpResponseState.Failed, + HttpResponseState.Cancelled: + # Just drop connection, because we dont know at what stage we are + HttpProcessExitType.Immediate + of HttpResponseState.Finished: keepConnection - else: - case response.state - of HttpResponseState.Empty: - # Response was ignored, so we respond with not found. - await conn.sendErrorResponse(version, Http404, - keepConnection.toBool()) - response.setResponseState(HttpResponseState.Finished) - keepConnection - of HttpResponseState.Prepared: - # Response was prepared but not sent, so we can respond with some - # error code - await conn.sendErrorResponse(HttpVersion11, Http409, - keepConnection.toBool()) - response.setResponseState(HttpResponseState.Finished) - keepConnection - of HttpResponseState.ErrorCode: - # Response with error code - await conn.sendErrorResponse(version, response.status, false) - response.setResponseState(HttpResponseState.Finished) - HttpProcessExitType.Immediate - of HttpResponseState.Sending, HttpResponseState.Failed, - HttpResponseState.Cancelled: - # Just drop connection, because we dont know at what stage we are - HttpProcessExitType.Immediate - of HttpResponseState.Default: - # Response was ignored, so we respond with not found. 
- await conn.sendErrorResponse(version, Http404, - keepConnection.toBool()) - response.setResponseState(HttpResponseState.Finished) - keepConnection - of HttpResponseState.Finished: - keepConnection else: case reqFence.error.kind - of HttpServerError.TimeoutError: - await conn.sendErrorResponse(version, reqFence.error.code, false) - HttpProcessExitType.Graceful - of HttpServerError.ProtocolError: + of HttpServerError.TimeoutError, HttpServerError.ProtocolError: await conn.sendErrorResponse(version, reqFence.error.code, false) HttpProcessExitType.Graceful of HttpServerError.DisconnectError: @@ -1017,8 +1005,7 @@ proc getRequestFence*(server: HttpServerRef, connection.currentRawQuery = Opt.some(res.rawPath) RequestFence.ok(res) except CancelledError: - RequestFence.err( - HttpProcessError.init(HttpServerError.InterruptError)) + RequestFence.err(HttpProcessError.init(HttpServerError.InterruptError)) except AsyncTimeoutError: let address = connection.getRemoteAddress() RequestFence.err( @@ -1073,18 +1060,19 @@ proc processRequest(server: HttpServerRef, # Request is incorrect or unsupported, sending notification discard - try: - let response = - try: - await invokeProcessCallback(connection.server, requestFence) - except CancelledError: - # Cancelled, exiting - return HttpProcessExitType.Immediate + let response = + try: + await invokeProcessCallback(connection.server, requestFence) + except CancelledError: + # Cancelled, exiting + if requestFence.isOk(): + await requestFence.get().closeWait() + return HttpProcessExitType.Immediate - await connection.sendDefaultResponse(requestFence, response) - finally: - if requestFence.isOk(): - await requestFence.get().closeWait() + let res = await connection.sendDefaultResponse(requestFence, response) + if requestFence.isOk(): + await requestFence.get().closeWait() + res proc processLoop(holder: HttpConnectionHolderRef) {.async: (raises: []).} = let @@ -1118,29 +1106,42 @@ proc processLoop(holder: HttpConnectionHolderRef) {.async: (raises: []).} = server.connections.del(connectionId) proc acceptClientLoop(server: HttpServerRef) {.async: (raises: []).} = - var runLoop = true - while runLoop: - try: - # if server.maxConnections > 0: - # await server.semaphore.acquire() - let transp = await server.instance.accept() - let resId = transp.getId() - if resId.isErr(): - # We are unable to identify remote peer, it means that remote peer - # disconnected before identification. - await transp.closeWait() - runLoop = false - else: - let connId = resId.get() - let holder = HttpConnectionHolderRef.new(server, transp, resId.get()) - server.connections[connId] = holder + block mainLoop: + while true: + block clientLoop: + # if server.maxConnections > 0: + # await server.semaphore.acquire() + let transp = + try: + await server.instance.accept() + except TransportTooManyError: + # Too many FDs used by process + break clientLoop + except TransportAbortedError: + # Remote peer disconnected + break clientLoop + except TransportUseClosedError: + # accept() call invoked when server is stopped + break mainLoop + except TransportOsError: + # Critical OS error + break mainLoop + except CancelledError: + # Server being closed, exiting + break mainLoop + + doAssert(not(isNil(transp)), "Stream transport should be present!") + + let + connectionId = transp.getId().valueOr: + # We are unable to identify remote peer, it means that remote peer + # disconnected before. 
+ await transp.closeWait() + break clientLoop + holder = HttpConnectionHolderRef.new(server, transp, connectionId) + + server.connections[connectionId] = holder holder.future = processLoop(holder) - except TransportTooManyError, TransportAbortedError: - # Non-critical error - discard - except CancelledError, TransportOsError, CatchableError: - # Critical, cancellation or unexpected error - runLoop = false proc state*(server: HttpServerRef): HttpServerState = ## Returns current HTTP server's state. @@ -1429,7 +1430,7 @@ proc sendBody*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {. raise exc except AsyncStreamError as exc: resp.setResponseState(HttpResponseState.Failed) - raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) + raiseHttpWriteError("Unable to send response body, reason: " & $exc.msg) proc sendBody*(resp: HttpResponseRef, data: ByteChar) {. async: (raises: [CancelledError, HttpWriteError]).} = @@ -1448,7 +1449,7 @@ proc sendBody*(resp: HttpResponseRef, data: ByteChar) {. raise exc except AsyncStreamError as exc: resp.setResponseState(HttpResponseState.Failed) - raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) + raiseHttpWriteError("Unable to send response body, reason: " & $exc.msg) proc sendError*(resp: HttpResponseRef, code: HttpCode, body = "") {. async: (raises: [CancelledError, HttpWriteError]).} = @@ -1468,7 +1469,8 @@ proc sendError*(resp: HttpResponseRef, code: HttpCode, body = "") {. raise exc except AsyncStreamError as exc: resp.setResponseState(HttpResponseState.Failed) - raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) + raiseHttpWriteError( + "Unable to send error response body, reason: " & $exc.msg) proc prepare*(resp: HttpResponseRef, streamType = HttpResponseStreamType.Chunked) {. @@ -1501,7 +1503,7 @@ proc prepare*(resp: HttpResponseRef, raise exc except AsyncStreamError as exc: resp.setResponseState(HttpResponseState.Failed) - raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) + raiseHttpWriteError("Unable to send response headers, reason: " & $exc.msg) proc prepareChunked*(resp: HttpResponseRef): Future[void] {. async: (raw: true, raises: [CancelledError, HttpWriteError]).} = @@ -1536,7 +1538,7 @@ proc send*(resp: HttpResponseRef, pbytes: pointer, nbytes: int) {. raise exc except AsyncStreamError as exc: resp.setResponseState(HttpResponseState.Failed) - raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) + raiseHttpWriteError("Unable to send response data, reason: " & $exc.msg) proc send*(resp: HttpResponseRef, data: ByteChar) {. async: (raises: [CancelledError, HttpWriteError]).} = @@ -1551,7 +1553,7 @@ proc send*(resp: HttpResponseRef, data: ByteChar) {. raise exc except AsyncStreamError as exc: resp.setResponseState(HttpResponseState.Failed) - raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) + raiseHttpWriteError("Unable to send response data, reason: " & $exc.msg) proc sendChunk*(resp: HttpResponseRef, pbytes: pointer, nbytes: int): Future[void] {. @@ -1591,7 +1593,7 @@ proc finish*(resp: HttpResponseRef) {. raise exc except AsyncStreamError as exc: resp.setResponseState(HttpResponseState.Failed) - raiseHttpWriteError("Unable to send response, reason: " & $exc.msg) + raiseHttpWriteError("Unable to finish response data, reason: " & $exc.msg) proc respond*(req: HttpRequestRef, code: HttpCode, content: ByteChar, headers: HttpTable): Future[HttpResponseRef] {. @@ -1673,7 +1675,7 @@ proc remoteAddress*(request: HttpRequestRef): TransportAddress {. 
## Returns address of the remote host that made request ``request``. request.connection.remoteAddress() -proc requestInfo*(req: HttpRequestRef, contentType = "text/text"): string = +proc requestInfo*(req: HttpRequestRef, contentType = "text/plain"): string = ## Returns comprehensive information about request for specific content ## type. ## diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 73699a25b..33a863133 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -1044,7 +1044,9 @@ when defined(windows): ok() proc accept*(server: StreamServer): Future[StreamTransport] {. - async: (raw: true, raises: [TransportError, CancelledError]).} = + async: (raw: true, raises: [TransportUseClosedError, + TransportTooManyError, TransportAbortedError, TransportOsError, + CancelledError]).} = var retFuture = newFuture[StreamTransport]("stream.server.accept") doAssert(server.status != ServerStatus.Running, @@ -1675,7 +1677,9 @@ else: ok() proc accept*(server: StreamServer): Future[StreamTransport] {. - async: (raw: true, raises: [TransportError, CancelledError]).} = + async: (raw: true, raises: [TransportUseClosedError, + TransportTooManyError, TransportAbortedError, TransportOsError, + CancelledError]).} = var retFuture = newFuture[StreamTransport]("stream.server.accept") doAssert(server.status != ServerStatus.Running, diff --git a/tests/testthreadsync.nim b/tests/testthreadsync.nim index b5273975b..cf7aada79 100644 --- a/tests/testthreadsync.nim +++ b/tests/testthreadsync.nim @@ -43,6 +43,7 @@ const suite "Asynchronous multi-threading sync primitives test suite": teardown: + echo GC_getStatistics() checkLeaks() proc setResult(thr: ThreadResultPtr, value: int) = @@ -325,19 +326,31 @@ suite "Asynchronous multi-threading sync primitives test suite": asyncTest "ThreadSignal: Multiple thread switches [" & $TestsCount & "] test [sync -> sync]": - threadSignalTest2(TestsCount, WaitSendKind.Sync, WaitSendKind.Sync) + when sizeof(int) == 8: + threadSignalTest2(TestsCount, WaitSendKind.Sync, WaitSendKind.Sync) + else: + skip() asyncTest "ThreadSignal: Multiple thread switches [" & $TestsCount & "] test [async -> async]": - threadSignalTest2(TestsCount, WaitSendKind.Async, WaitSendKind.Async) + when sizeof(int) == 8: + threadSignalTest2(TestsCount, WaitSendKind.Async, WaitSendKind.Async) + else: + skip() asyncTest "ThreadSignal: Multiple thread switches [" & $TestsCount & "] test [sync -> async]": - threadSignalTest2(TestsCount, WaitSendKind.Sync, WaitSendKind.Async) + when sizeof(int) == 8: + threadSignalTest2(TestsCount, WaitSendKind.Sync, WaitSendKind.Async) + else: + skip() asyncTest "ThreadSignal: Multiple thread switches [" & $TestsCount & "] test [async -> sync]": - threadSignalTest2(TestsCount, WaitSendKind.Async, WaitSendKind.Sync) + when sizeof(int) == 8: + threadSignalTest2(TestsCount, WaitSendKind.Async, WaitSendKind.Sync) + else: + skip() asyncTest "ThreadSignal: Multiple signals [" & $TestsCount & "] to multiple threads [" & $numProcs & "] test [sync -> sync]": From be4923be19867c24fd916aa7437fe78784ce3b9d Mon Sep 17 00:00:00 2001 From: cheatfate Date: Wed, 14 Feb 2024 14:09:01 +0200 Subject: [PATCH 111/146] Strip debugging echo in threadsync tests. 
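With the narrowed `accept()` raises list from PATCH 110 above, a caller can state exactly which failures are transient and which are fatal. A minimal sketch mirroring the new `acceptClientLoop` logic (`handleClient` and `acceptLoop` are placeholder names, not part of the patch):

```nim
import chronos

proc handleClient(transp: StreamTransport) {.async: (raises: []).} =
  # Placeholder: a real handler would read from / write to `transp` here.
  await transp.closeWait()

proc acceptLoop(server: StreamServer) {.async: (raises: []).} =
  while true:
    var transp: StreamTransport
    try:
      transp = await server.accept()
    except TransportTooManyError, TransportAbortedError:
      # Transient: out of file descriptors / peer disconnected during accept.
      continue
    except TransportUseClosedError, TransportOsError, CancelledError:
      # Server stopped, fatal OS error or cancellation - leave the loop.
      break
    asyncSpawn handleClient(transp)
```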
--- tests/testthreadsync.nim | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/testthreadsync.nim b/tests/testthreadsync.nim index cf7aada79..f6b6bccf4 100644 --- a/tests/testthreadsync.nim +++ b/tests/testthreadsync.nim @@ -43,7 +43,6 @@ const suite "Asynchronous multi-threading sync primitives test suite": teardown: - echo GC_getStatistics() checkLeaks() proc setResult(thr: ThreadResultPtr, value: int) = From 2e37a6e26c903ba4c2f9cb9f3eb850cbbfad3cfe Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 14 Feb 2024 19:23:01 +0200 Subject: [PATCH 112/146] Increase AsyncStream and Transport default buffer size from 4096 to 16384 bytes. (#506) Make buffer sizes configurable at compile time. --- chronos/config.nim | 11 ++++++++++- chronos/streams/asyncstream.nim | 6 +++--- chronos/transports/common.nim | 10 +++++----- 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/chronos/config.nim b/chronos/config.nim index 47bf6698c..cf500dba1 100644 --- a/chronos/config.nim +++ b/chronos/config.nim @@ -91,6 +91,12 @@ const chronosHasRaises* = 0 ## raises effect support via `async: (raises: [])` + chronosTransportDefaultBufferSize* {.intdefine.} = 16384 + ## Default size of chronos transport internal buffer. + + chronosStreamDefaultBufferSize* {.intdefine.} = 16384 + ## Default size of chronos async stream internal buffer. + when defined(chronosStrictException): {.warning: "-d:chronosStrictException has been deprecated in favor of handleException".} # In chronos v3, this setting was used as the opposite of @@ -113,7 +119,10 @@ when defined(debug) or defined(chronosConfig): printOption("chronosEventEngine", chronosEventEngine) printOption("chronosEventsCount", chronosEventsCount) printOption("chronosInitialSize", chronosInitialSize) - + printOption("chronosTransportDefaultBufferSize", + chronosTransportDefaultBufferSize) + printOption("chronosStreamDefaultBufferSize", + chronosStreamDefaultBufferSize) # In nim 1.6, `sink` + local variable + `move` generates the best code for # moving a proc parameter into a closure - this only works for closure diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 4fbe7a422..e688f2889 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -9,12 +9,12 @@ {.push raises: [].} -import ../asyncloop, ../asyncsync -import ../transports/common, ../transports/stream +import ../[config, asyncloop, asyncsync] +import ../transports/[common, stream] export asyncloop, asyncsync, stream, common const - AsyncStreamDefaultBufferSize* = 4096 + AsyncStreamDefaultBufferSize* = chronosStreamDefaultBufferSize ## Default reading stream internal buffer size. AsyncStreamDefaultQueueSize* = 0 ## Default writing stream internal queue size. 
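The new constants above are `{.intdefine.}`, so the defaults can be overridden at compile time with `-d:` switches, for example `nim c -d:chronosTransportDefaultBufferSize=65536 -d:chronosStreamDefaultBufferSize=65536 app.nim`. A small sketch that only prints the effective values:

```nim
import chronos/config

echo "transport buffer size: ", chronosTransportDefaultBufferSize  # 16384 unless overridden
echo "stream buffer size:    ", chronosStreamDefaultBufferSize     # 16384 unless overridden
```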
diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index 8fa062a5c..8fcf0eb63 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -11,7 +11,7 @@ import std/[strutils] import stew/[base10, byteutils] -import ".."/[asyncloop, osdefs, oserrno, handles] +import ".."/[config, asyncloop, osdefs, oserrno, handles] from std/net import Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, SockType, Protocol, Port, `$` @@ -21,10 +21,10 @@ export Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, SockType, Protocol, Port, toInt, `$` const - DefaultStreamBufferSize* = 4096 ## Default buffer size for stream - ## transports - DefaultDatagramBufferSize* = 65536 ## Default buffer size for datagram - ## transports + DefaultStreamBufferSize* = chronosTransportDefaultBufferSize + ## Default buffer size for stream transports + DefaultDatagramBufferSize* = 65536 + ## Default buffer size for datagram transports type ServerFlags* = enum ## Server's flags From 7b02247ce74d5ad5630013334f2e347680b02f65 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 14 Feb 2024 19:23:15 +0200 Subject: [PATCH 113/146] Add `--mm:refc` to `libbacktrace` test. (#505) * Add `--mm:refc` to `libbacktrace` test. * Make tests with `refc` to run before tests with default memory manager. --- chronos.nimble | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/chronos.nimble b/chronos.nimble index e063ac357..48c778175 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -54,17 +54,20 @@ task examples, "Build examples": task test, "Run all tests": for args in testArguments: - run args, "tests/testall" if (NimMajor, NimMinor) > (1, 6): + # First run tests with `refc` memory manager. run args & " --mm:refc", "tests/testall" - + run args, "tests/testall" task test_libbacktrace, "test with libbacktrace": - var allArgs = @[ - "-d:release --debugger:native -d:chronosStackTrace -d:nimStackTraceOverride --import:libbacktrace", - ] + let allArgs = @[ + "-d:release --debugger:native -d:chronosStackTrace -d:nimStackTraceOverride --import:libbacktrace", + ] for args in allArgs: + if (NimMajor, NimMinor) > (1, 6): + # First run tests with `refc` memory manager. 
+ run args & " --mm:refc", "tests/testall" run args, "tests/testall" task docs, "Generate API documentation": From 5dfa3fd7fa0b7c3f6e7e8bc54a233f8b07d9356c Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 5 Mar 2024 13:53:12 +0100 Subject: [PATCH 114/146] fix conversion error with `or` on futures with `{.async: (raises: []).}` (#515) ```nim import chronos proc f(): Future[void] {.async: (raises: []).} = discard discard f() or f() or f() ``` ``` /Users/etan/Documents/Repos/nimbus-eth2/vendor/nim-chronos/chronos/internal/raisesfutures.nim(145, 44) union /Users/etan/Documents/Repos/nimbus-eth2/vendor/nimbus-build-system/vendor/Nim/lib/core/macros.nim(185, 28) [] /Users/etan/Documents/Repos/nimbus-eth2/test.nim(6, 13) template/generic instantiation of `or` from here /Users/etan/Documents/Repos/nimbus-eth2/vendor/nim-chronos/chronos/internal/asyncfutures.nim(1668, 39) template/generic instantiation of `union` from here /Users/etan/Documents/Repos/nimbus-eth2/vendor/nimbus-build-system/vendor/Nim/lib/core/macros.nim(185, 28) Error: illegal conversion from '-1' to '[0..9223372036854775807]' ``` Fix by checking for `void` before trying to access `raises` --- chronos/internal/raisesfutures.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim index 5b91f4152..ed85c036e 100644 --- a/chronos/internal/raisesfutures.nim +++ b/chronos/internal/raisesfutures.nim @@ -142,7 +142,7 @@ macro union*(tup0: typedesc, tup1: typedesc): typedesc = if not found: result.add err - for err2 in getType(getTypeInst(tup1)[1])[1..^1]: + for err2 in tup1.members(): result.add err2 if result.len == 0: result = makeNoRaises() From 1eb834a2f98c31677e899ffcc80259a10d78cfe7 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 5 Mar 2024 18:33:46 +0200 Subject: [PATCH 115/146] Fix `or` deadlock issue. (#517) * Fix `or` should not create future with OwnCancelSchedule flag set. * Fix `CancelledError` missing from raises list when both futures has empty raises list. * Fix macros tests. --- chronos/internal/asyncfutures.nim | 9 ++++----- tests/testbugs.nim | 13 +++++++++++++ tests/testmacro.nim | 2 +- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 1a2be7577..de6debaf5 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -786,7 +786,7 @@ template orImpl*[T, Y](fut1: Future[T], fut2: Future[Y]): untyped = fut2.addCallback(cb) retFuture.cancelCallback = cancellation - return retFuture + retFuture proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] = ## Returns a future which will complete once either ``fut1`` or ``fut2`` @@ -801,7 +801,7 @@ proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] = ## completed, the result future will also be completed. ## ## If cancelled, ``fut1`` and ``fut2`` futures WILL NOT BE cancelled. 
- var retFuture = newFuture[void]("chronos.or") + var retFuture = newFuture[void]("chronos.or()") orImpl(fut1, fut2) @@ -1665,10 +1665,9 @@ proc `or`*[T, Y, E1, E2]( fut1: InternalRaisesFuture[T, E1], fut2: InternalRaisesFuture[Y, E2]): auto = type - InternalRaisesFutureRaises = union(E1, E2) + InternalRaisesFutureRaises = union(E1, E2).union((CancelledError,)) - let - retFuture = newFuture[void]("chronos.wait()", {FutureFlag.OwnCancelSchedule}) + let retFuture = newFuture[void]("chronos.or()", {}) orImpl(fut1, fut2) proc wait*(fut: InternalRaisesFuture, timeout = InfiniteDuration): auto = diff --git a/tests/testbugs.nim b/tests/testbugs.nim index fc4af3a45..3f2f4e420 100644 --- a/tests/testbugs.nim +++ b/tests/testbugs.nim @@ -135,6 +135,16 @@ suite "Asynchronous issues test suite": await server.closeWait() return true + proc testOrDeadlock(): Future[bool] {.async.} = + proc f(): Future[void] {.async.} = + await sleepAsync(2.seconds) or sleepAsync(1.seconds) + let fx = f() + try: + await fx.cancelAndWait().wait(2.seconds) + except AsyncTimeoutError: + return false + true + test "Issue #6": check waitFor(issue6()) == true @@ -152,3 +162,6 @@ suite "Asynchronous issues test suite": test "IndexError crash test": check waitFor(testIndexError()) == true + + test "`or` deadlock [#516] test": + check waitFor(testOrDeadlock()) == true diff --git a/tests/testmacro.nim b/tests/testmacro.nim index 9b19c6891..d646303a3 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -491,7 +491,7 @@ suite "Exceptions tracking": proc testit2 {.async: (raises: [IOError]).} = raise (ref IOError)() - proc test {.async: (raises: [ValueError, IOError]).} = + proc test {.async: (raises: [CancelledError, ValueError, IOError]).} = await testit() or testit2() proc noraises() {.raises: [].} = From 4ed0cd6be723c6709a7d1d9a72a5aa5916f6871d Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 5 Mar 2024 18:34:53 +0200 Subject: [PATCH 116/146] Ensure that `OwnCancelSchedule` flag will not be removed from `wait()` and `withTimeout()`. (#519) --- chronos/internal/asyncfutures.nim | 8 +++++++- tests/testfut.nim | 26 ++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index de6debaf5..d08425202 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1410,6 +1410,8 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. var retFuture = newFuture[bool]("chronos.withTimeout", {FutureFlag.OwnCancelSchedule}) + # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture` + # manually at proper time. moment: Moment timer: TimerCallback timeouted = false @@ -1536,6 +1538,8 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] = ## should return, because it can't be cancelled too. var retFuture = newFuture[T]("chronos.wait()", {FutureFlag.OwnCancelSchedule}) + # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture` + # manually at proper time. waitImpl(fut, retFuture, timeout) @@ -1677,6 +1681,8 @@ proc wait*(fut: InternalRaisesFuture, timeout = InfiniteDuration): auto = InternalRaisesFutureRaises = E.prepend(CancelledError, AsyncTimeoutError) let - retFuture = newFuture[T]("chronos.wait()", {FutureFlag.OwnCancelSchedule}) + retFuture = newFuture[T]("chronos.wait()", {OwnCancelSchedule}) + # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture` + # manually at proper time. 
waitImpl(fut, retFuture, timeout) diff --git a/tests/testfut.nim b/tests/testfut.nim index fc2401d04..aee3b1537 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -1594,6 +1594,19 @@ suite "Future[T] behavior test suite": discard someFut.tryCancel() await someFut + asyncTest "wait() should allow cancellation test (depends on race())": + proc testFoo(): Future[bool] {.async.} = + let + resFut = sleepAsync(2.seconds).wait(3.seconds) + timeFut = sleepAsync(1.seconds) + cancelFut = cancelAndWait(resFut) + discard await race(cancelFut, timeFut) + if cancelFut.finished(): + return (resFut.cancelled() and cancelFut.completed()) + false + + check (await testFoo()) == true + asyncTest "withTimeout() cancellation undefined behavior test #1": proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. async.} = @@ -1654,6 +1667,19 @@ suite "Future[T] behavior test suite": discard someFut.tryCancel() await someFut + asyncTest "withTimeout() should allow cancellation test (depends on race())": + proc testFoo(): Future[bool] {.async.} = + let + resFut = sleepAsync(2.seconds).withTimeout(3.seconds) + timeFut = sleepAsync(1.seconds) + cancelFut = cancelAndWait(resFut) + discard await race(cancelFut, timeFut) + if cancelFut.finished(): + return (resFut.cancelled() and cancelFut.completed()) + false + + check (await testFoo()) == true + asyncTest "Cancellation behavior test": proc testInnerFoo(fooFut: Future[void]) {.async.} = await fooFut From f6c7ecfa0a3af7fdffcabe9101b44125a1c93a6d Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 6 Mar 2024 01:56:40 +0200 Subject: [PATCH 117/146] Add missing parts of defaults buffer size increase. (#513) --- chronos/apps/http/httpserver.nim | 6 +++--- chronos/apps/http/multipart.nim | 10 ++++++---- chronos/apps/http/shttpserver.nim | 6 +++--- chronos/config.nim | 5 +++++ chronos/streams/boundstream.nim | 6 +++--- chronos/streams/chunkstream.nim | 6 +++--- chronos/streams/tlsstream.nim | 10 +++++++--- 7 files changed, 30 insertions(+), 19 deletions(-) diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index 92ed35671..c1e52793b 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -11,7 +11,7 @@ import std/[tables, uri, strutils] import stew/[base10], httputils, results -import ../../[asyncloop, asyncsync] +import ../../[asyncloop, asyncsync, config] import ../../streams/[asyncstream, boundstream, chunkstream] import "."/[httptable, httpcommon, multipart] from ../../transports/common import TransportAddress, ServerFlags, `$`, `==` @@ -244,7 +244,7 @@ proc new*( serverUri = Uri(), serverIdent = "", maxConnections: int = -1, - bufferSize: int = 4096, + bufferSize: int = chronosTransportDefaultBufferSize, backlogSize: int = DefaultBacklogSize, httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, @@ -304,7 +304,7 @@ proc new*( serverUri = Uri(), serverIdent = "", maxConnections: int = -1, - bufferSize: int = 4096, + bufferSize: int = chronosTransportDefaultBufferSize, backlogSize: int = DefaultBacklogSize, httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, diff --git a/chronos/apps/http/multipart.nim b/chronos/apps/http/multipart.nim index 302d6efd9..5c50e4610 100644 --- a/chronos/apps/http/multipart.nim +++ b/chronos/apps/http/multipart.nim @@ -20,6 +20,7 @@ export asyncloop, httptable, httpcommon, httpbodyrw, asyncstream, httputils const UnableToReadMultipartBody = "Unable to read multipart message body, reason: " UnableToSendMultipartMessage = "Unable to send 
multipart message, reason: " + MaxMultipartHeaderSize = 4096 type MultiPartSource* {.pure.} = enum @@ -142,10 +143,11 @@ proc init*[A: BChar, B: BChar](mpt: typedesc[MultiPartReader], MultiPartReader(kind: MultiPartSource.Buffer, buffer: buf, offset: 0, boundary: fboundary) -proc new*[B: BChar](mpt: typedesc[MultiPartReaderRef], - stream: HttpBodyReader, - boundary: openArray[B], - partHeadersMaxSize = 4096): MultiPartReaderRef = +proc new*[B: BChar]( + mpt: typedesc[MultiPartReaderRef], + stream: HttpBodyReader, + boundary: openArray[B], + partHeadersMaxSize = MaxMultipartHeaderSize): MultiPartReaderRef = ## Create new MultiPartReader instance with `stream` interface. ## ## ``stream`` is stream used to read data. diff --git a/chronos/apps/http/shttpserver.nim b/chronos/apps/http/shttpserver.nim index 6272bb2b5..532839d2a 100644 --- a/chronos/apps/http/shttpserver.nim +++ b/chronos/apps/http/shttpserver.nim @@ -10,7 +10,7 @@ {.push raises: [].} import httpserver -import ../../asyncloop, ../../asyncsync +import ../../[asyncloop, asyncsync, config] import ../../streams/[asyncstream, tlsstream] export asyncloop, asyncsync, httpserver, asyncstream, tlsstream @@ -91,7 +91,7 @@ proc new*(htype: typedesc[SecureHttpServerRef], serverIdent = "", secureFlags: set[TLSFlags] = {}, maxConnections: int = -1, - bufferSize: int = 4096, + bufferSize: int = chronosTransportDefaultBufferSize, backlogSize: int = DefaultBacklogSize, httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, @@ -157,7 +157,7 @@ proc new*(htype: typedesc[SecureHttpServerRef], serverIdent = "", secureFlags: set[TLSFlags] = {}, maxConnections: int = -1, - bufferSize: int = 4096, + bufferSize: int = chronosTransportDefaultBufferSize, backlogSize: int = DefaultBacklogSize, httpHeadersTimeout = 10.seconds, maxHeadersSize: int = 8192, diff --git a/chronos/config.nim b/chronos/config.nim index cf500dba1..26d110f1b 100644 --- a/chronos/config.nim +++ b/chronos/config.nim @@ -97,6 +97,9 @@ const chronosStreamDefaultBufferSize* {.intdefine.} = 16384 ## Default size of chronos async stream internal buffer. + chronosTLSSessionCacheBufferSize* {.intdefine.} = 4096 + ## Default size of chronos TLS Session cache's internal buffer. 
+ when defined(chronosStrictException): {.warning: "-d:chronosStrictException has been deprecated in favor of handleException".} # In chronos v3, this setting was used as the opposite of @@ -123,6 +126,8 @@ when defined(debug) or defined(chronosConfig): chronosTransportDefaultBufferSize) printOption("chronosStreamDefaultBufferSize", chronosStreamDefaultBufferSize) + printOption("chronosTLSSessionCacheBufferSize", + chronosTLSSessionCacheBufferSize) # In nim 1.6, `sink` + local variable + `move` generates the best code for # moving a proc parameter into a closure - this only works for closure diff --git a/chronos/streams/boundstream.nim b/chronos/streams/boundstream.nim index ce6957198..8d2e52c5c 100644 --- a/chronos/streams/boundstream.nim +++ b/chronos/streams/boundstream.nim @@ -18,8 +18,8 @@ {.push raises: [].} import results -import ../asyncloop, ../timer -import asyncstream, ../transports/stream, ../transports/common +import ../[asyncloop, timer, config] +import asyncstream, ../transports/[stream, common] export asyncloop, asyncstream, stream, timer, common type @@ -44,7 +44,7 @@ type BoundedStreamRW* = BoundedStreamReader | BoundedStreamWriter const - BoundedBufferSize* = 4096 + BoundedBufferSize* = chronosStreamDefaultBufferSize BoundarySizeDefectMessage = "Boundary must not be empty array" template newBoundedStreamIncompleteError(): ref BoundedStreamError = diff --git a/chronos/streams/chunkstream.nim b/chronos/streams/chunkstream.nim index 773920769..f3e73e0cc 100644 --- a/chronos/streams/chunkstream.nim +++ b/chronos/streams/chunkstream.nim @@ -11,13 +11,13 @@ {.push raises: [].} -import ../asyncloop, ../timer -import asyncstream, ../transports/stream, ../transports/common +import ../[asyncloop, timer, config] +import asyncstream, ../transports/[stream, common] import results export asyncloop, asyncstream, stream, timer, common, results const - ChunkBufferSize = 4096 + ChunkBufferSize = chronosStreamDefaultBufferSize MaxChunkHeaderSize = 1024 ChunkHeaderValueSize = 8 # This is limit for chunk size to 8 hexadecimal digits, so maximum diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 12ea6d3c4..6c019f11d 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -16,9 +16,12 @@ import bearssl/[brssl, ec, errors, pem, rsa, ssl, x509], bearssl/certs/cacert import ".."/[asyncloop, asyncsync, config, timer] -import asyncstream, ../transports/stream, ../transports/common +import asyncstream, ../transports/[stream, common] export asyncloop, asyncsync, timer, asyncstream +const + TLSSessionCacheBufferSize* = chronosTLSSessionCacheBufferSize + type TLSStreamKind {.pure.} = enum Client, Server @@ -777,11 +780,12 @@ proc init*(tt: typedesc[TLSCertificate], raiseTLSStreamProtocolError("Could not find any certificates") res -proc init*(tt: typedesc[TLSSessionCache], size: int = 4096): TLSSessionCache = +proc init*(tt: typedesc[TLSSessionCache], + size: int = TLSSessionCacheBufferSize): TLSSessionCache = ## Create new TLS session cache with size ``size``. ## ## One cached item is near 100 bytes size. 
- var rsize = min(size, 4096) + let rsize = min(size, 4096) var res = TLSSessionCache(storage: newSeq[byte](rsize)) sslSessionCacheLruInit(addr res.context, addr res.storage[0], rsize) res From 03d82475d91c09d35faace9077c8f2050a0bfc2e Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 6 Mar 2024 06:42:22 +0100 Subject: [PATCH 118/146] Avoid `ValueError` effect in varargs `race`/`one` (#520) We can check at compile-time that at least one parameter is passed * clean up closure environment explicitly in some callbacks to release memory earlier --- chronos/internal/asyncfutures.nim | 140 ++++++++++++++++++++---------- tests/testfut.nim | 31 +++++-- 2 files changed, 119 insertions(+), 52 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index d08425202..d364dc550 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -734,8 +734,8 @@ proc `and`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] {. retFuture.fail(fut2.error) else: retFuture.complete() - fut1.callback = cb - fut2.callback = cb + fut1.addCallback(cb) + fut2.addCallback(cb) proc cancellation(udata: pointer) = # On cancel we remove all our callbacks only. @@ -1086,12 +1086,14 @@ proc allFutures*(futs: varargs[FutureBase]): Future[void] {. inc(finishedFutures) if finishedFutures == totalFutures: retFuture.complete() + reset(nfuts) proc cancellation(udata: pointer) = # On cancel we remove all our callbacks only. for i in 0.. Date: Thu, 7 Mar 2024 08:07:53 +0100 Subject: [PATCH 119/146] fix circular reference in timer (#510) --- chronos/asyncproc.nim | 12 +++++++----- chronos/internal/asyncfutures.nim | 11 +++++++++-- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/chronos/asyncproc.nim b/chronos/asyncproc.nim index f00877675..572e3828f 100644 --- a/chronos/asyncproc.nim +++ b/chronos/asyncproc.nim @@ -1010,12 +1010,14 @@ else: retFuture.fail(newException(AsyncProcessError, osErrorMsg(res.error()))) + timer = nil + proc cancellation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): - if not(isNil(timer)): - clearTimer(timer) - # Ignore any errors because of cancellation. - discard removeProcess2(processHandle) + if not(isNil(timer)): + clearTimer(timer) + timer = nil + # Ignore any errors because of cancellation. + discard removeProcess2(processHandle) if timeout != InfiniteDuration: timer = setTimer(Moment.fromNow(timeout), continuation, cast[pointer](2)) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index d364dc550..206f89c1e 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1386,13 +1386,15 @@ proc sleepAsync*(duration: Duration): Future[void] {. proc completion(data: pointer) {.gcsafe.} = if not(retFuture.finished()): retFuture.complete() + timer = nil # Release circular reference (for gc:arc) proc cancellation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): + if not isNil(timer): clearTimer(timer) + timer = nil # Release circular reference (for gc:arc) retFuture.cancelCallback = cancellation - timer = setTimer(moment, completion, cast[pointer](retFuture)) + timer = setTimer(moment, completion) return retFuture proc sleepAsync*(ms: int): Future[void] {. @@ -1487,6 +1489,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. 
if not(isNil(timer)): clearTimer(timer) fut.completeFuture() + timer = nil # TODO: raises annotation shouldn't be needed, but likely similar issue as # https://github.com/nim-lang/Nim/issues/17369 @@ -1497,6 +1500,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. fut.cancelSoon() else: fut.completeFuture() + timer = nil if fut.finished(): retFuture.complete(true) @@ -1549,6 +1553,7 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = if not(isNil(timer)): clearTimer(timer) fut.completeFuture() + timer = nil var cancellation: proc(udata: pointer) {.gcsafe, raises: [].} cancellation = proc(udata: pointer) {.gcsafe, raises: [].} = @@ -1559,6 +1564,8 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = else: fut.completeFuture() + timer = nil + if fut.finished(): fut.completeFuture() else: From 17b7a76c7e40c89f31351cd1a5faf76f177b30ac Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 7 Mar 2024 08:09:16 +0100 Subject: [PATCH 120/146] Ensure `transp.reader` is reset to `nil` on error (#508) In `stream.readLoop`, a finished `Future` was left in `transp.reader` if there was an error in `resumeRead`. Set it to `nil` as well. Co-authored-by: Jacek Sieka --- chronos/transports/stream.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 33a863133..fa3cbacd4 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -2372,7 +2372,7 @@ template readLoop(name, body: untyped): untyped = # resumeRead() could not return any error. raiseOsDefect(errorCode, "readLoop(): Unable to resume reading") else: - transp.reader.complete() + transp.completeReader() if errorCode == oserrno.ESRCH: # ESRCH 3 "No such process" # This error could be happened on pipes only, when process which From 47cc17719f4293bf80a22ebe28e3bfc54b2a59a1 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 8 Mar 2024 14:43:42 +0100 Subject: [PATCH 121/146] print warning when calling failed (#521) `failed` cannot return true for futures that don't forward exceptions --- chronos/internal/raisesfutures.nim | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim index ed85c036e..2e09a1db8 100644 --- a/chronos/internal/raisesfutures.nim +++ b/chronos/internal/raisesfutures.nim @@ -2,6 +2,8 @@ import std/[macros, sequtils], ../futures +{.push raises: [].} + type InternalRaisesFuture*[T, E] = ref object of Future[T] ## Future with a tuple of possible exception types @@ -205,13 +207,20 @@ macro checkRaises*[T: CatchableError]( `warning` assert(`runtimeChecker`, `errorMsg`) -proc error*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {. +func failed*[T](future: InternalRaisesFuture[T, void]): bool {.inline.} = + ## Determines whether ``future`` finished with an error. + static: + warning("No exceptions possible with this operation, `failed` always returns false") + + false + +func error*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {. raises: [].} = static: warning("No exceptions possible with this operation, `error` always returns nil") nil -proc readError*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {. +func readError*[T](future: InternalRaisesFuture[T, void]): ref CatchableError {. 
raises: [ValueError].} = static: warning("No exceptions possible with this operation, `readError` always raises") From d4f1487b0cd51a90b76c83f633c02dcae4a84610 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 19 Mar 2024 18:28:52 +0200 Subject: [PATCH 122/146] Disable libbacktrace enabled test on X86 platforms. (#523) * Disable libbacktrace enabled test on X86 platforms. * Fix mistype. * Use macos-12 workers from now. --- .github/workflows/ci.yml | 2 +- chronos.nimble | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e64f75439..cab855580 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -34,7 +34,7 @@ jobs: shell: bash - target: os: macos - builder: macos-11 + builder: macos-12 shell: bash - target: os: windows diff --git a/chronos.nimble b/chronos.nimble index 48c778175..c754a5c0a 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -20,6 +20,7 @@ let nimc = getEnv("NIMC", "nim") # Which nim compiler to use let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js) let flags = getEnv("NIMFLAGS", "") # Extra flags for the compiler let verbose = getEnv("V", "") notin ["", "0"] +let platform = getEnv("PLATFORM", "") let testArguments = when defined(windows): [ @@ -60,15 +61,16 @@ task test, "Run all tests": run args, "tests/testall" task test_libbacktrace, "test with libbacktrace": - let allArgs = @[ - "-d:release --debugger:native -d:chronosStackTrace -d:nimStackTraceOverride --import:libbacktrace", - ] + if platform != "x86": + let allArgs = @[ + "-d:release --debugger:native -d:chronosStackTrace -d:nimStackTraceOverride --import:libbacktrace", + ] - for args in allArgs: - if (NimMajor, NimMinor) > (1, 6): - # First run tests with `refc` memory manager. - run args & " --mm:refc", "tests/testall" - run args, "tests/testall" + for args in allArgs: + if (NimMajor, NimMinor) > (1, 6): + # First run tests with `refc` memory manager. + run args & " --mm:refc", "tests/testall" + run args, "tests/testall" task docs, "Generate API documentation": exec "mdbook build docs" From 035288f3f08370d83e83c0ee4b01c1d957240138 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 20 Mar 2024 08:47:59 +0200 Subject: [PATCH 123/146] Remove `sink` and `chronosMoveSink()` usage. (#524) --- chronos/internal/asyncfutures.nim | 6 +++--- chronos/streams/asyncstream.nim | 8 ++++---- chronos/streams/tlsstream.nim | 2 +- chronos/transports/datagram.nim | 18 +++++++++--------- chronos/transports/stream.nim | 8 ++++---- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 206f89c1e..49c6acd7c 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -202,14 +202,14 @@ proc finish(fut: FutureBase, state: FutureState) = when chronosFutureTracking: scheduleDestructor(fut) -proc complete[T](future: Future[T], val: sink T, loc: ptr SrcLoc) = +proc complete[T](future: Future[T], val: T, loc: ptr SrcLoc) = if not(future.cancelled()): checkFinished(future, loc) doAssert(isNil(future.internalError)) - future.internalValue = chronosMoveSink(val) + future.internalValue = val future.finish(FutureState.Completed) -template complete*[T](future: Future[T], val: sink T) = +template complete*[T](future: Future[T], val: T) = ## Completes ``future`` with value ``val``. 
complete(future, val, getSrcLocation()) diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index e688f2889..3d2f858d8 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -736,7 +736,7 @@ proc write*(wstream: AsyncStreamWriter, pbytes: pointer, await item.future wstream.bytesCount = wstream.bytesCount + uint64(item.size) -proc write*(wstream: AsyncStreamWriter, sbytes: sink seq[byte], +proc write*(wstream: AsyncStreamWriter, sbytes: seq[byte], msglen = -1) {. async: (raises: [CancelledError, AsyncStreamError]).} = ## Write sequence of bytes ``sbytes`` of length ``msglen`` to writer @@ -771,14 +771,14 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink seq[byte], wstream.bytesCount = wstream.bytesCount + uint64(length) else: let item = WriteItem( - kind: Sequence, dataSeq: move(sbytes), size: length, + kind: Sequence, dataSeq: sbytes, size: length, future: Future[void].Raising([CancelledError, AsyncStreamError]) .init("async.stream.write(seq)")) await wstream.queue.put(item) await item.future wstream.bytesCount = wstream.bytesCount + uint64(item.size) -proc write*(wstream: AsyncStreamWriter, sbytes: sink string, +proc write*(wstream: AsyncStreamWriter, sbytes: string, msglen = -1) {. async: (raises: [CancelledError, AsyncStreamError]).} = ## Write string ``sbytes`` of length ``msglen`` to writer stream ``wstream``. @@ -812,7 +812,7 @@ proc write*(wstream: AsyncStreamWriter, sbytes: sink string, wstream.bytesCount = wstream.bytesCount + uint64(length) else: let item = WriteItem( - kind: String, dataStr: move(sbytes), size: length, + kind: String, dataStr: sbytes, size: length, future: Future[void].Raising([CancelledError, AsyncStreamError]) .init("async.stream.write(string)")) await wstream.queue.put(item) diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 6c019f11d..86f6d4c3f 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -161,7 +161,7 @@ proc tlsWriteRec(engine: ptr SslEngineContext, var length = 0'u var buf = sslEngineSendrecBuf(engine[], length) doAssert(length != 0 and not isNil(buf)) - await writer.wsource.write(chronosMoveSink(buf), int(length)) + await writer.wsource.write(buf, int(length)) sslEngineSendrecAck(engine[], length) TLSResult.Success except AsyncStreamError as exc: diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index cd335dfd9..f89a6b445 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -873,7 +873,7 @@ proc send*(transp: DatagramTransport, pbytes: pointer, retFuture.fail(getTransportOsError(wres.error())) return retFuture -proc send*(transp: DatagramTransport, msg: sink string, +proc send*(transp: DatagramTransport, msg: string, msglen = -1): Future[void] {. 
async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination @@ -882,7 +882,7 @@ proc send*(transp: DatagramTransport, msg: sink string, transp.checkClosed(retFuture) let length = if msglen <= 0: len(msg) else: msglen - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithoutAddress, buf: baseAddr localCopy, @@ -896,16 +896,16 @@ proc send*(transp: DatagramTransport, msg: sink string, retFuture.fail(getTransportOsError(wres.error())) return retFuture -proc send*[T](transp: DatagramTransport, msg: sink seq[T], +proc send*[T](transp: DatagramTransport, msg: seq[T], msglen = -1): Future[void] {. - async: (raw: true, raises: [TransportError, CancelledError]).} = + async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination ## address which was bounded on transport. let retFuture = newFuture[void]("datagram.transport.send(seq)") transp.checkClosed(retFuture) let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithoutAddress, buf: baseAddr localCopy, @@ -935,7 +935,7 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, return retFuture proc sendTo*(transp: DatagramTransport, remote: TransportAddress, - msg: sink string, msglen = -1): Future[void] {. + msg: string, msglen = -1): Future[void] {. async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send string ``msg`` using transport ``transp`` to remote destination ## address ``remote``. @@ -943,7 +943,7 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, transp.checkClosed(retFuture) let length = if msglen <= 0: len(msg) else: msglen - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithAddress, buf: baseAddr localCopy, @@ -958,14 +958,14 @@ proc sendTo*(transp: DatagramTransport, remote: TransportAddress, return retFuture proc sendTo*[T](transp: DatagramTransport, remote: TransportAddress, - msg: sink seq[T], msglen = -1): Future[void] {. + msg: seq[T], msglen = -1): Future[void] {. async: (raw: true, raises: [TransportError, CancelledError]).} = ## Send sequence ``msg`` using transport ``transp`` to remote destination ## address ``remote``. let retFuture = newFuture[void]("datagram.transport.sendTo(seq)") transp.checkClosed(retFuture) let length = if msglen <= 0: (len(msg) * sizeof(T)) else: (msglen * sizeof(T)) - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) let vector = GramVector(kind: WithAddress, buf: baseAddr localCopy, diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index fa3cbacd4..c80d99285 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -2248,7 +2248,7 @@ proc write*(transp: StreamTransport, pbytes: pointer, retFuture.fail(getTransportOsError(wres.error())) return retFuture -proc write*(transp: StreamTransport, msg: sink string, +proc write*(transp: StreamTransport, msg: string, msglen = -1): Future[int] {. async: (raw: true, raises: [TransportError, CancelledError]).} = ## Write data from string ``msg`` using transport ``transp``. 
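# Usage sketch (not part of the patch) for the overloads touched here: with
# `sink` removed, the string/seq `write()` variants operate on a copy, so the
# caller may keep using `msg` after the call. `sendGreeting` is a hypothetical
# helper; `transp` is assumed to be an already-connected transport.
proc sendGreeting(transp: StreamTransport) {.async.} =
  let msg = "hello\r\n"
  let written = await transp.write(msg)  # Future[int]: number of bytes written
  doAssert written == len(msg)
  # `msg` is still valid here because write() no longer moves its argument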
@@ -2267,7 +2267,7 @@ proc write*(transp: StreamTransport, msg: sink string, let written = nbytes - rbytes # In case fastWrite wrote some - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) pbytes = cast[ptr byte](addr localCopy[written]) @@ -2280,7 +2280,7 @@ proc write*(transp: StreamTransport, msg: sink string, retFuture.fail(getTransportOsError(wres.error())) return retFuture -proc write*[T](transp: StreamTransport, msg: sink seq[T], +proc write*[T](transp: StreamTransport, msg: seq[T], msglen = -1): Future[int] {. async: (raw: true, raises: [TransportError, CancelledError]).} = ## Write sequence ``msg`` using transport ``transp``. @@ -2300,7 +2300,7 @@ proc write*[T](transp: StreamTransport, msg: sink seq[T], let written = nbytes - rbytes # In case fastWrite wrote some - var localCopy = chronosMoveSink(msg) + var localCopy = msg retFuture.addCallback(proc(_: pointer) = reset(localCopy)) pbytes = cast[ptr byte](addr localCopy[written]) From d5bc90fef22cc32ef5ce4a037a852fd081dd5c0c Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 20 Mar 2024 12:08:26 +0100 Subject: [PATCH 124/146] Work around type resolution with empty generic (#522) * Work around type resolution with empty generic * workaround --- chronos/internal/raisesfutures.nim | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim index 2e09a1db8..07e3438e1 100644 --- a/chronos/internal/raisesfutures.nim +++ b/chronos/internal/raisesfutures.nim @@ -12,6 +12,12 @@ type ## This type gets injected by `async: (raises: ...)` and similar utilities ## and should not be used manually as the internal exception representation ## is subject to change in future chronos versions. + # TODO https://github.com/nim-lang/Nim/issues/23418 + # TODO https://github.com/nim-lang/Nim/issues/23419 + when E is void: + dummy: E + else: + dummy: array[0, E] proc makeNoRaises*(): NimNode {.compileTime.} = # An empty tuple would have been easier but... 
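Since `InternalRaisesFuture` (patched above) is an internal type, user code names such futures through the `Future[T].Raising([...])` helper instead. A minimal sketch, with `Job` and `work` as illustrative names only:

```nim
import chronos

type
  Job = object
    # The field type spells out the same raises set as `work()` below.
    fut: Future[int].Raising([CancelledError])

proc work(): Future[int] {.async: (raises: [CancelledError]).} =
  await sleepAsync(1.milliseconds)
  return 7

let job = Job(fut: work())
echo waitFor(job.fut)
```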
From 0e806d59aea5e49a65524567cfa18de34e37ccca Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 21 Mar 2024 09:21:51 +0100 Subject: [PATCH 125/146] v4.0.1 --- chronos.nimble | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos.nimble b/chronos.nimble index c754a5c0a..5aa51b828 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -1,7 +1,7 @@ mode = ScriptMode.Verbose packageName = "chronos" -version = "4.0.0" +version = "4.0.1" author = "Status Research & Development GmbH" description = "Networking framework with async/await support" license = "MIT or Apache License 2.0" From b8b4e1fc477edeb1ca0b0ae641d0e8fa5c3416ab Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 25 Mar 2024 10:37:42 +0100 Subject: [PATCH 126/146] make `Raising` compatible with 2.0 (#526) * make `Raising` compatible with 2.0 See https://github.com/nim-lang/Nim/issues/23432 * Update tests/testfut.nim * Update tests/testfut.nim --- chronos/internal/raisesfutures.nim | 23 +++++++++++++++++++---- tests/testfut.nim | 18 ++++++++++++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/chronos/internal/raisesfutures.nim b/chronos/internal/raisesfutures.nim index 07e3438e1..546f3b731 100644 --- a/chronos/internal/raisesfutures.nim +++ b/chronos/internal/raisesfutures.nim @@ -59,17 +59,32 @@ proc members(tup: NimNode): seq[NimNode] {.compileTime.} = macro hasException(raises: typedesc, ident: static string): bool = newLit(raises.members.anyIt(it.eqIdent(ident))) -macro Raising*[T](F: typedesc[Future[T]], E: varargs[typedesc]): untyped = +macro Raising*[T](F: typedesc[Future[T]], E: typed): untyped = ## Given a Future type instance, return a type storing `{.raises.}` ## information ## ## Note; this type may change in the future - E.expectKind(nnkBracket) - let raises = if E.len == 0: + # An earlier version used `E: varargs[typedesc]` here but this is buggyt/no + # longer supported in 2.0 in certain cases: + # https://github.com/nim-lang/Nim/issues/23432 + let + e = + case E.getTypeInst().typeKind() + of ntyTypeDesc: @[E] + of ntyArray: + for x in E: + if x.getTypeInst().typeKind != ntyTypeDesc: + error("Expected typedesc, got " & repr(x), x) + E.mapIt(it) + else: + error("Expected typedesc, got " & repr(E), E) + @[] + + let raises = if e.len == 0: makeNoRaises() else: - nnkTupleConstr.newTree(E.mapIt(it)) + nnkTupleConstr.newTree(e) nnkBracketExpr.newTree( ident "InternalRaisesFuture", nnkDotExpr.newTree(F, ident"T"), diff --git a/tests/testfut.nim b/tests/testfut.nim index 8c0829317..c2231f126 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -2047,9 +2047,27 @@ suite "Future[T] behavior test suite": check: future1.cancelled() == true future2.cancelled() == true + test "Sink with literals": # https://github.com/nim-lang/Nim/issues/22175 let fut = newFuture[string]() fut.complete("test") check: fut.value() == "test" + + test "Raising type matching": + type X[E] = Future[void].Raising(E) + + proc f(x: X) = discard + + var v: Future[void].Raising([ValueError]) + f(v) + + type Object = object + # TODO cannot use X[[ValueError]] here.. 
+ field: Future[void].Raising([ValueError]) + discard Object(field: v) + + check: + not compiles(Future[void].Raising([42])) + not compiles(Future[void].Raising(42)) From ef1b077adfdc803fcce880e81a5740b964bac0bc Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 25 Mar 2024 10:38:17 +0100 Subject: [PATCH 127/146] v4.0.2 --- chronos.nimble | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos.nimble b/chronos.nimble index 5aa51b828..ba92ac657 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -1,7 +1,7 @@ mode = ScriptMode.Verbose packageName = "chronos" -version = "4.0.1" +version = "4.0.2" author = "Status Research & Development GmbH" description = "Networking framework with async/await support" license = "MIT or Apache License 2.0" From 402914f4cfb82a35a44511230bf8a9ab06aa3a8f Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Tue, 26 Mar 2024 22:33:19 +0200 Subject: [PATCH 128/146] Add custom ring buffer into chronos streams and transports. (#485) * Add custom ring buffer into chronos stream transport. * Rename BipBuffer.decommit() to BipBuffer.consume() Make asyncstream's using BipBuffer. * Address review comments part 1. * Address review comments part 2. * Address review comments. * Remove unused import results. * Address review comments. --- chronos/bipbuffer.nim | 140 ++++++++++++++++++++ chronos/streams/asyncstream.nim | 221 ++++++++++++++++---------------- chronos/streams/boundstream.nim | 10 +- chronos/streams/chunkstream.nim | 12 +- chronos/streams/tlsstream.nim | 2 +- chronos/transports/datagram.nim | 2 +- chronos/transports/stream.nim | 209 +++++++++++++++++------------- 7 files changed, 378 insertions(+), 218 deletions(-) create mode 100644 chronos/bipbuffer.nim diff --git a/chronos/bipbuffer.nim b/chronos/bipbuffer.nim new file mode 100644 index 000000000..5aa34c4dd --- /dev/null +++ b/chronos/bipbuffer.nim @@ -0,0 +1,140 @@ +# +# Chronos +# +# (c) Copyright 2018-Present Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +## This module implements Bip Buffer (bi-partite circular buffer) by Simone +## Cooke. +## +## The Bip-Buffer is like a circular buffer, but slightly different. Instead of +## keeping one head and tail pointer to the data in the buffer, it maintains two +## revolving regions, allowing for fast data access without having to worry +## about wrapping at the end of the buffer. Buffer allocations are always +## maintained as contiguous blocks, allowing the buffer to be used in a highly +## efficient manner with API calls, and also reducing the amount of copying +## which needs to be performed to put data into the buffer. Finally, a two-phase +## allocation system allows the user to pessimistically reserve an area of +## buffer space, and then trim back the buffer to commit to only the space which +## was used. +## +## https://www.codeproject.com/Articles/3479/The-Bip-Buffer-The-Circular-Buffer-with-a-Twist + +{.push raises: [].} + +type + BipPos = object + start: Natural + finish: Natural + + BipBuffer* = object + a, b, r: BipPos + data: seq[byte] + +proc init*(t: typedesc[BipBuffer], size: int): BipBuffer = + ## Creates new Bip Buffer with size `size`. 
+ BipBuffer(data: newSeq[byte](size)) + +template len(pos: BipPos): Natural = + pos.finish - pos.start + +template reset(pos: var BipPos) = + pos = BipPos() + +func init(t: typedesc[BipPos], start, finish: Natural): BipPos = + BipPos(start: start, finish: finish) + +func calcReserve(bp: BipBuffer): tuple[space: Natural, start: Natural] = + if len(bp.b) > 0: + (Natural(bp.a.start - bp.b.finish), bp.b.finish) + else: + let spaceAfterA = Natural(len(bp.data) - bp.a.finish) + if spaceAfterA >= bp.a.start: + (spaceAfterA, bp.a.finish) + else: + (bp.a.start, Natural(0)) + +func availSpace*(bp: BipBuffer): Natural = + ## Returns amount of space available for reserve in buffer `bp`. + let (res, _) = bp.calcReserve() + res + +func len*(bp: BipBuffer): Natural = + ## Returns amount of used space in buffer `bp`. + len(bp.b) + len(bp.a) + +proc reserve*(bp: var BipBuffer, + size: Natural = 0): tuple[data: ptr byte, size: Natural] = + ## Reserve `size` bytes in buffer. + ## + ## If `size == 0` (default) reserve all available space from buffer. + ## + ## If there is not enough space in buffer for resevation - error will be + ## returned. + ## + ## Returns current reserved range as pointer of type `pt` and size of + ## type `st`. + const ErrorMessage = "Not enough space available" + doAssert(size <= len(bp.data)) + let (availableSpace, reserveStart) = bp.calcReserve() + if availableSpace == 0: + raiseAssert ErrorMessage + let reserveLength = + if size == 0: + availableSpace + else: + if size < availableSpace: + raiseAssert ErrorMessage + size + bp.r = BipPos.init(reserveStart, Natural(reserveStart + reserveLength)) + (addr bp.data[bp.r.start], len(bp.r)) + +proc commit*(bp: var BipBuffer, size: Natural) = + ## Updates structure's pointers when new data inserted into buffer. + doAssert(len(bp.r) >= size, + "Committed size could not be larger than the previously reserved one") + if size == 0: + bp.r.reset() + return + + let toCommit = min(size, len(bp.r)) + if len(bp.a) == 0 and len(bp.b) == 0: + bp.a.start = bp.r.start + bp.a.finish = bp.r.start + toCommit + elif bp.r.start == bp.a.finish: + bp.a.finish += toCommit + else: + bp.b.finish += toCommit + bp.r.reset() + +proc consume*(bp: var BipBuffer, size: Natural) = + ## The procedure removes/frees `size` bytes from the buffer ``bp``. + var currentSize = size + if currentSize >= len(bp.a): + currentSize -= len(bp.a) + bp.a = bp.b + bp.b.reset() + if currentSize >= len(bp.a): + currentSize -= len(bp.a) + bp.a.reset() + else: + bp.a.start += currentSize + else: + bp.a.start += currentSize + +iterator items*(bp: BipBuffer): byte = + ## Iterates over all the bytes in the buffer. + for index in bp.a.start ..< bp.a.finish: + yield bp.data[index] + for index in bp.b.start ..< bp.b.finish: + yield bp.data[index] + +iterator regions*(bp: var BipBuffer): tuple[data: ptr byte, size: Natural] = + ## Iterates over all the regions (`a` and `b`) in the buffer. 
+ if len(bp.a) > 0: + yield (addr bp.data[bp.a.start], len(bp.a)) + if len(bp.b) > 0: + yield (addr bp.data[bp.b.start], len(bp.b)) diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 3d2f858d8..473cc38bc 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -9,7 +9,7 @@ {.push raises: [].} -import ../[config, asyncloop, asyncsync] +import ../[config, asyncloop, asyncsync, bipbuffer] import ../transports/[common, stream] export asyncloop, asyncsync, stream, common @@ -34,10 +34,11 @@ type AsyncStreamWriteEOFError* = object of AsyncStreamWriteError AsyncBuffer* = object - offset*: int - buffer*: seq[byte] + backend*: BipBuffer events*: array[2, AsyncEvent] + AsyncBufferRef* = ref AsyncBuffer + WriteType* = enum Pointer, Sequence, String @@ -73,7 +74,7 @@ type tsource*: StreamTransport readerLoop*: StreamReaderLoop state*: AsyncStreamState - buffer*: AsyncBuffer + buffer*: AsyncBufferRef udata: pointer error*: ref AsyncStreamError bytesCount*: uint64 @@ -96,85 +97,51 @@ type AsyncStreamRW* = AsyncStreamReader | AsyncStreamWriter -proc init*(t: typedesc[AsyncBuffer], size: int): AsyncBuffer = - AsyncBuffer( - buffer: newSeq[byte](size), - events: [newAsyncEvent(), newAsyncEvent()], - offset: 0 +proc new*(t: typedesc[AsyncBufferRef], size: int): AsyncBufferRef = + AsyncBufferRef( + backend: BipBuffer.init(size), + events: [newAsyncEvent(), newAsyncEvent()] ) -proc getBuffer*(sb: AsyncBuffer): pointer {.inline.} = - unsafeAddr sb.buffer[sb.offset] - -proc bufferLen*(sb: AsyncBuffer): int {.inline.} = - len(sb.buffer) - sb.offset - -proc getData*(sb: AsyncBuffer): pointer {.inline.} = - unsafeAddr sb.buffer[0] - -template dataLen*(sb: AsyncBuffer): int = - sb.offset - -proc `[]`*(sb: AsyncBuffer, index: int): byte {.inline.} = - doAssert(index < sb.offset) - sb.buffer[index] - -proc update*(sb: var AsyncBuffer, size: int) {.inline.} = - sb.offset += size - -template wait*(sb: var AsyncBuffer): untyped = +template wait*(sb: AsyncBufferRef): untyped = sb.events[0].clear() sb.events[1].fire() sb.events[0].wait() -template transfer*(sb: var AsyncBuffer): untyped = +template transfer*(sb: AsyncBufferRef): untyped = sb.events[1].clear() sb.events[0].fire() sb.events[1].wait() -proc forget*(sb: var AsyncBuffer) {.inline.} = +proc forget*(sb: AsyncBufferRef) {.inline.} = sb.events[1].clear() sb.events[0].fire() -proc shift*(sb: var AsyncBuffer, size: int) {.inline.} = - if sb.offset > size: - moveMem(addr sb.buffer[0], addr sb.buffer[size], sb.offset - size) - sb.offset = sb.offset - size - else: - sb.offset = 0 - -proc copyData*(sb: AsyncBuffer, dest: pointer, offset, length: int) {.inline.} = - copyMem(cast[pointer](cast[uint](dest) + cast[uint](offset)), - unsafeAddr sb.buffer[0], length) - -proc upload*(sb: ptr AsyncBuffer, pbytes: ptr byte, +proc upload*(sb: AsyncBufferRef, pbytes: ptr byte, nbytes: int): Future[void] {. async: (raises: [CancelledError]).} = ## You can upload any amount of bytes to the buffer. If size of internal ## buffer is not enough to fit all the data at once, data will be uploaded ## via chunks of size up to internal buffer size. - var length = nbytes - var srcBuffer = cast[ptr UncheckedArray[byte]](pbytes) - var srcOffset = 0 + var + length = nbytes + srcBuffer = pbytes.toUnchecked() + offset = 0 + while length > 0: - let size = min(length, sb[].bufferLen()) + let size = min(length, sb.backend.availSpace()) if size == 0: - # Internal buffer is full, we need to transfer data to consumer. 
- await sb[].transfer() + # Internal buffer is full, we need to notify consumer. + await sb.transfer() else: + let (data, _) = sb.backend.reserve() # Copy data from `pbytes` to internal buffer. - copyMem(addr sb[].buffer[sb.offset], addr srcBuffer[srcOffset], size) - sb[].offset = sb[].offset + size - srcOffset = srcOffset + size + copyMem(data, addr srcBuffer[offset], size) + sb.backend.commit(size) + offset = offset + size length = length - size # We notify consumers that new data is available. - sb[].forget() - -template toDataOpenArray*(sb: AsyncBuffer): auto = - toOpenArray(sb.buffer, 0, sb.offset - 1) - -template toBufferOpenArray*(sb: AsyncBuffer): auto = - toOpenArray(sb.buffer, sb.offset, len(sb.buffer) - 1) + sb.forget() template copyOut*(dest: pointer, item: WriteItem, length: int) = if item.kind == Pointer: @@ -243,7 +210,7 @@ proc atEof*(rstream: AsyncStreamReader): bool = rstream.rsource.atEof() else: (rstream.state != AsyncStreamState.Running) and - (rstream.buffer.dataLen() == 0) + (len(rstream.buffer.backend) == 0) proc atEof*(wstream: AsyncStreamWriter): bool = ## Returns ``true`` is writing stream ``wstream`` closed or finished. @@ -331,12 +298,12 @@ template checkStreamFinished*(t: untyped) = template readLoop(body: untyped): untyped = while true: - if rstream.buffer.dataLen() == 0: + if len(rstream.buffer.backend) == 0: if rstream.state == AsyncStreamState.Error: raise rstream.error let (consumed, done) = body - rstream.buffer.shift(consumed) + rstream.buffer.backend.consume(consumed) rstream.bytesCount = rstream.bytesCount + uint64(consumed) if done: break @@ -373,17 +340,23 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer, if isNil(rstream.readerLoop): await readExactly(rstream.rsource, pbytes, nbytes) else: - var index = 0 - var pbuffer = cast[ptr UncheckedArray[byte]](pbytes) + var + index = 0 + pbuffer = pbytes.toUnchecked() readLoop(): - if rstream.buffer.dataLen() == 0: + if len(rstream.buffer.backend) == 0: if rstream.atEof(): raise newAsyncStreamIncompleteError() - let count = min(nbytes - index, rstream.buffer.dataLen()) - if count > 0: - rstream.buffer.copyData(addr pbuffer[index], 0, count) - index += count - (consumed: count, done: index == nbytes) + var readed = 0 + for (region, rsize) in rstream.buffer.backend.regions(): + let count = min(nbytes - index, rsize) + readed += count + if count > 0: + copyMem(addr pbuffer[index], region, count) + index += count + if index == nbytes: + break + (consumed: readed, done: index == nbytes) proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int): Future[int] {. @@ -407,15 +380,21 @@ proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer, if isNil(rstream.readerLoop): return await readOnce(rstream.rsource, pbytes, nbytes) else: - var count = 0 + var + pbuffer = pbytes.toUnchecked() + index = 0 readLoop(): - if rstream.buffer.dataLen() == 0: + if len(rstream.buffer.backend) == 0: (0, rstream.atEof()) else: - count = min(rstream.buffer.dataLen(), nbytes) - rstream.buffer.copyData(pbytes, 0, count) - (count, true) - return count + for (region, rsize) in rstream.buffer.backend.regions(): + let size = min(rsize, nbytes - index) + copyMem(addr pbuffer[index], region, size) + index += size + if index >= nbytes: + break + (index, true) + index proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int, sep: seq[byte]): Future[int] {. 
@@ -456,28 +435,32 @@ proc readUntil*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int, if isNil(rstream.readerLoop): return await readUntil(rstream.rsource, pbytes, nbytes, sep) else: - var pbuffer = cast[ptr UncheckedArray[byte]](pbytes) - var state = 0 - var k = 0 + var + pbuffer = pbytes.toUnchecked() + state = 0 + k = 0 readLoop(): if rstream.atEof(): raise newAsyncStreamIncompleteError() + var index = 0 - while index < rstream.buffer.dataLen(): + for ch in rstream.buffer.backend: if k >= nbytes: raise newAsyncStreamLimitError() - let ch = rstream.buffer[index] + inc(index) pbuffer[k] = ch inc(k) + if sep[state] == ch: inc(state) if state == len(sep): break else: state = 0 + (index, state == len(sep)) - return k + k proc readLine*(rstream: AsyncStreamReader, limit = 0, sep = "\r\n"): Future[string] {. @@ -507,18 +490,19 @@ proc readLine*(rstream: AsyncStreamReader, limit = 0, return await readLine(rstream.rsource, limit, sep) else: let lim = if limit <= 0: -1 else: limit - var state = 0 - var res = "" + var + state = 0 + res = "" + readLoop(): if rstream.atEof(): (0, true) else: var index = 0 - while index < rstream.buffer.dataLen(): - let ch = char(rstream.buffer[index]) + for ch in rstream.buffer.backend: inc(index) - if sep[state] == ch: + if sep[state] == char(ch): inc(state) if state == len(sep): break @@ -529,11 +513,14 @@ proc readLine*(rstream: AsyncStreamReader, limit = 0, res.add(sep[0 ..< missing]) else: res.add(sep[0 ..< state]) - res.add(ch) + state = 0 + + res.add(char(ch)) if len(res) == lim: break + (index, (state == len(sep)) or (lim == len(res))) - return res + res proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {. async: (raises: [CancelledError, AsyncStreamError]).} = @@ -555,15 +542,17 @@ proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {. if isNil(rstream.readerLoop): return await read(rstream.rsource) else: - var res = newSeq[byte]() + var res: seq[byte] readLoop(): if rstream.atEof(): (0, true) else: - let count = rstream.buffer.dataLen() - res.add(rstream.buffer.buffer.toOpenArray(0, count - 1)) - (count, false) - return res + var readed = 0 + for (region, rsize) in rstream.buffer.backend.regions(): + readed += rsize + res.add(region.toUnchecked().toOpenArray(0, rsize - 1)) + (readed, false) + res proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {. async: (raises: [CancelledError, AsyncStreamError]).} = @@ -592,10 +581,13 @@ proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {. if rstream.atEof(): (0, true) else: - let count = min(rstream.buffer.dataLen(), n - len(res)) - res.add(rstream.buffer.buffer.toOpenArray(0, count - 1)) - (count, len(res) == n) - return res + var readed = 0 + for (region, rsize) in rstream.buffer.backend.regions(): + let count = min(rsize, n - len(res)) + readed += count + res.add(region.toUnchecked().toOpenArray(0, count - 1)) + (readed, len(res) == n) + res proc consume*(rstream: AsyncStreamReader): Future[int] {. async: (raises: [CancelledError, AsyncStreamError]).} = @@ -622,9 +614,10 @@ proc consume*(rstream: AsyncStreamReader): Future[int] {. if rstream.atEof(): (0, true) else: - res += rstream.buffer.dataLen() - (rstream.buffer.dataLen(), false) - return res + let used = len(rstream.buffer.backend) + res += used + (used, false) + res proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {. async: (raises: [CancelledError, AsyncStreamError]).} = @@ -652,13 +645,12 @@ proc consume*(rstream: AsyncStreamReader, n: int): Future[int] {. 
else: var res = 0 readLoop(): - if rstream.atEof(): - (0, true) - else: - let count = min(rstream.buffer.dataLen(), n - res) - res += count - (count, res == n) - return res + let + used = len(rstream.buffer.backend) + count = min(used, n - res) + res += count + (count, res == n) + res proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {. async: (raises: [CancelledError, AsyncStreamError]).} = @@ -689,15 +681,18 @@ proc readMessage*(rstream: AsyncStreamReader, pred: ReadMessagePredicate) {. await readMessage(rstream.rsource, pred) else: readLoop(): - let count = rstream.buffer.dataLen() - if count == 0: + if len(rstream.buffer.backend) == 0: if rstream.atEof(): pred([]) else: # Case, when transport's buffer is not yet filled with data. (0, false) else: - pred(rstream.buffer.buffer.toOpenArray(0, count - 1)) + var res: tuple[consumed: int, done: bool] + for (region, rsize) in rstream.buffer.backend.regions(): + res = pred(region.toUnchecked().toOpenArray(0, rsize - 1)) + break + res proc write*(wstream: AsyncStreamWriter, pbytes: pointer, nbytes: int) {. @@ -951,7 +946,7 @@ proc init*(child, rsource: AsyncStreamReader, loop: StreamReaderLoop, child.readerLoop = loop child.rsource = rsource child.tsource = rsource.tsource - child.buffer = AsyncBuffer.init(bufferSize) + child.buffer = AsyncBufferRef.new(bufferSize) trackCounter(AsyncStreamReaderTrackerName) child.startReader() @@ -963,7 +958,7 @@ proc init*[T](child, rsource: AsyncStreamReader, loop: StreamReaderLoop, child.readerLoop = loop child.rsource = rsource child.tsource = rsource.tsource - child.buffer = AsyncBuffer.init(bufferSize) + child.buffer = AsyncBufferRef.new(bufferSize) if not isNil(udata): GC_ref(udata) child.udata = cast[pointer](udata) diff --git a/chronos/streams/boundstream.nim b/chronos/streams/boundstream.nim index 8d2e52c5c..0f7eba1c3 100644 --- a/chronos/streams/boundstream.nim +++ b/chronos/streams/boundstream.nim @@ -18,7 +18,7 @@ {.push raises: [].} import results -import ../[asyncloop, timer, config] +import ../[asyncloop, timer, bipbuffer, config] import asyncstream, ../transports/[stream, common] export asyncloop, asyncstream, stream, timer, common @@ -103,7 +103,7 @@ func endsWith(s, suffix: openArray[byte]): bool = proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = var rstream = BoundedStreamReader(stream) rstream.state = AsyncStreamState.Running - var buffer = newSeq[byte](rstream.buffer.bufferLen()) + var buffer = newSeq[byte](rstream.buffer.backend.availSpace()) while true: let toRead = if rstream.boundSize.isNone(): @@ -127,7 +127,7 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = # There should be one step between transferring last bytes to the # consumer and declaring stream EOF. Otherwise could not be # consumed. - await upload(addr rstream.buffer, addr buffer[0], length) + await upload(rstream.buffer, addr buffer[0], length) if rstream.state == AsyncStreamState.Running: rstream.state = AsyncStreamState.Finished else: @@ -135,7 +135,7 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = # There should be one step between transferring last bytes to the # consumer and declaring stream EOF. Otherwise could not be # consumed. 
- await upload(addr rstream.buffer, addr buffer[0], res) + await upload(rstream.buffer, addr buffer[0], res) if (res < toRead) and rstream.rsource.atEof(): case rstream.cmpop @@ -151,7 +151,7 @@ proc boundedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = # There should be one step between transferring last bytes to the # consumer and declaring stream EOF. Otherwise could not be # consumed. - await upload(addr rstream.buffer, addr buffer[0], res) + await upload(rstream.buffer, addr buffer[0], res) if (res < toRead) and rstream.rsource.atEof(): case rstream.cmpop diff --git a/chronos/streams/chunkstream.nim b/chronos/streams/chunkstream.nim index f3e73e0cc..b9475d58d 100644 --- a/chronos/streams/chunkstream.nim +++ b/chronos/streams/chunkstream.nim @@ -11,7 +11,7 @@ {.push raises: [].} -import ../[asyncloop, timer, config] +import ../[asyncloop, timer, bipbuffer, config] import asyncstream, ../transports/[stream, common] import results export asyncloop, asyncstream, stream, timer, common, results @@ -118,11 +118,11 @@ proc chunkedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} = var chunksize = cres.get() if chunksize > 0'u64: while chunksize > 0'u64: - let toRead = int(min(chunksize, - uint64(rstream.buffer.bufferLen()))) - await rstream.rsource.readExactly(rstream.buffer.getBuffer(), - toRead) - rstream.buffer.update(toRead) + let + (data, rsize) = rstream.buffer.backend.reserve() + toRead = int(min(chunksize, uint64(rsize))) + await rstream.rsource.readExactly(data, toRead) + rstream.buffer.backend.commit(toRead) await rstream.buffer.transfer() chunksize = chunksize - uint64(toRead) diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 86f6d4c3f..9d90ab718 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -242,7 +242,7 @@ proc tlsReadApp(engine: ptr SslEngineContext, try: var length = 0'u var buf = sslEngineRecvappBuf(engine[], length) - await upload(addr reader.buffer, buf, int(length)) + await upload(reader.buffer, buf, int(length)) sslEngineRecvappAck(engine[], length) TLSResult.Success except CancelledError: diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index f89a6b445..d6391219c 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -11,7 +11,7 @@ import std/deques when not(defined(windows)): import ".."/selectors2 -import ".."/[asyncloop, config, osdefs, oserrno, osutils, handles] +import ".."/[asyncloop, osdefs, oserrno, osutils, handles] import "."/common import stew/ptrops diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index c80d99285..b81a512d9 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -11,7 +11,7 @@ import std/deques import stew/ptrops -import ".."/[asyncloop, config, handles, osdefs, osutils, oserrno] +import ".."/[asyncloop, config, handles, bipbuffer, osdefs, osutils, oserrno] import ./common type @@ -72,8 +72,7 @@ when defined(windows): fd*: AsyncFD # File descriptor state: set[TransportState] # Current Transport state reader: ReaderFuture # Current reader Future - buffer: seq[byte] # Reading buffer - offset: int # Reading buffer offset + buffer: BipBuffer # Reading buffer error: ref TransportError # Current error queue: Deque[StreamVector] # Writer queue future: Future[void].Raising([]) # Stream life future @@ -82,7 +81,6 @@ when defined(windows): wwsabuf: WSABUF # Writer WSABUF rovl: CustomOverlapped # Reader OVERLAPPED structure wovl: CustomOverlapped # 
Writer OVERLAPPED structure - roffset: int # Pending reading offset flags: set[TransportFlags] # Internal flags case kind*: TransportKind of TransportKind.Socket: @@ -99,8 +97,7 @@ else: fd*: AsyncFD # File descriptor state: set[TransportState] # Current Transport state reader: ReaderFuture # Current reader Future - buffer: seq[byte] # Reading buffer - offset: int # Reading buffer offset + buffer: BipBuffer # Reading buffer error: ref TransportError # Current error queue: Deque[StreamVector] # Writer queue future: Future[void].Raising([]) # Stream life future @@ -184,14 +181,6 @@ template checkPending(t: untyped) = if not(isNil((t).reader)): raise newException(TransportError, "Read operation already pending!") -template shiftBuffer(t, c: untyped) = - if (t).offset > c: - if c > 0: - moveMem(addr((t).buffer[0]), addr((t).buffer[(c)]), (t).offset - (c)) - (t).offset = (t).offset - (c) - else: - (t).offset = 0 - template shiftVectorBuffer(v: var StreamVector, o: untyped) = (v).buf = cast[pointer](cast[uint]((v).buf) + uint(o)) (v).buflen -= int(o) @@ -228,6 +217,9 @@ proc clean(transp: StreamTransport) {.inline.} = transp.future.complete() GC_unref(transp) +template toUnchecked*(a: untyped): untyped = + cast[ptr UncheckedArray[byte]](a) + when defined(windows): template zeroOvelappedOffset(t: untyped) = @@ -245,9 +237,9 @@ when defined(windows): cast[HANDLE]((v).buflen) template setReaderWSABuffer(t: untyped) = - (t).rwsabuf.buf = cast[cstring]( - cast[uint](addr t.buffer[0]) + uint((t).roffset)) - (t).rwsabuf.len = ULONG(len((t).buffer) - (t).roffset) + let res = (t).buffer.reserve() + (t).rwsabuf.buf = cast[cstring](res.data) + (t).rwsabuf.len = uint32(res.size) template setWriterWSABuffer(t, v: untyped) = (t).wwsabuf.buf = cast[cstring](v.buf) @@ -381,8 +373,9 @@ when defined(windows): else: transp.queue.addFirst(vector) else: - let loop = getThreadDispatcher() - let size = min(uint32(getFileSize(vector)), 2_147_483_646'u32) + let + loop = getThreadDispatcher() + size = min(uint32(getFileSize(vector)), 2_147_483_646'u32) transp.wovl.setOverlappedOffset(vector.offset) var ret = loop.transmitFile(sock, getFileHandle(vector), size, @@ -481,29 +474,28 @@ when defined(windows): if bytesCount == 0: transp.state.incl({ReadEof, ReadPaused}) else: - if transp.offset != transp.roffset: - moveMem(addr transp.buffer[transp.offset], - addr transp.buffer[transp.roffset], - bytesCount) - transp.offset += int(bytesCount) - transp.roffset = transp.offset - if transp.offset == len(transp.buffer): + transp.buffer.commit(bytesCount) + if transp.buffer.availSpace() == 0: transp.state.incl(ReadPaused) of ERROR_OPERATION_ABORTED, ERROR_CONNECTION_ABORTED, ERROR_BROKEN_PIPE: # CancelIO() interrupt or closeSocket() call. 
+ transp.buffer.commit(0) transp.state.incl(ReadPaused) of ERROR_NETNAME_DELETED, WSAECONNABORTED: + transp.buffer.commit(0) if transp.kind == TransportKind.Socket: transp.state.incl({ReadEof, ReadPaused}) else: transp.setReadError(err) of ERROR_PIPE_NOT_CONNECTED: + transp.buffer.commit(0) if transp.kind == TransportKind.Pipe: transp.state.incl({ReadEof, ReadPaused}) else: transp.setReadError(err) else: + transp.buffer.commit(0) transp.setReadError(err) transp.completeReader() @@ -524,7 +516,6 @@ when defined(windows): transp.state.incl(ReadPending) if transp.kind == TransportKind.Socket: let sock = SocketHandle(transp.fd) - transp.roffset = transp.offset transp.setReaderWSABuffer() let ret = wsaRecv(sock, addr transp.rwsabuf, 1, addr bytesCount, addr flags, @@ -549,7 +540,6 @@ when defined(windows): transp.completeReader() elif transp.kind == TransportKind.Pipe: let pipe = HANDLE(transp.fd) - transp.roffset = transp.offset transp.setReaderWSABuffer() let ret = readFile(pipe, cast[pointer](transp.rwsabuf.buf), DWORD(transp.rwsabuf.len), addr bytesCount, @@ -595,7 +585,7 @@ when defined(windows): udata: cast[pointer](transp)) transp.wovl.data = CompletionData(cb: writeStreamLoop, udata: cast[pointer](transp)) - transp.buffer = newSeq[byte](bufsize) + transp.buffer = BipBuffer.init(bufsize) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( @@ -616,7 +606,7 @@ when defined(windows): udata: cast[pointer](transp)) transp.wovl.data = CompletionData(cb: writeStreamLoop, udata: cast[pointer](transp)) - transp.buffer = newSeq[byte](bufsize) + transp.buffer = BipBuffer.init(bufsize) transp.flags = flags transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() @@ -1390,11 +1380,12 @@ else: else: if transp.kind == TransportKind.Socket: while true: - let res = handleEintr( - osdefs.recv(fd, addr transp.buffer[transp.offset], - len(transp.buffer) - transp.offset, cint(0))) + let + (data, size) = transp.buffer.reserve() + res = handleEintr(osdefs.recv(fd, data, size, cint(0))) if res < 0: let err = osLastError() + transp.buffer.commit(0) case err of oserrno.ECONNRESET: transp.state.incl({ReadEof, ReadPaused}) @@ -1408,13 +1399,14 @@ else: discard removeReader2(transp.fd) elif res == 0: transp.state.incl({ReadEof, ReadPaused}) + transp.buffer.commit(0) let rres = removeReader2(transp.fd) if rres.isErr(): transp.state.incl(ReadError) transp.setReadError(rres.error()) else: - transp.offset += res - if transp.offset == len(transp.buffer): + transp.buffer.commit(res) + if transp.buffer.availSpace() == 0: transp.state.incl(ReadPaused) let rres = removeReader2(transp.fd) if rres.isErr(): @@ -1424,23 +1416,25 @@ else: break elif transp.kind == TransportKind.Pipe: while true: - let res = handleEintr( - osdefs.read(cint(fd), addr transp.buffer[transp.offset], - len(transp.buffer) - transp.offset)) + let + (data, size) = transp.buffer.reserve() + res = handleEintr(osdefs.read(cint(fd), data, size)) if res < 0: let err = osLastError() + transp.buffer.commit(0) transp.state.incl(ReadPaused) transp.setReadError(err) discard removeReader2(transp.fd) elif res == 0: transp.state.incl({ReadEof, ReadPaused}) + transp.buffer.commit(0) let rres = removeReader2(transp.fd) if rres.isErr(): transp.state.incl(ReadError) transp.setReadError(rres.error()) else: - transp.offset += res - if transp.offset == len(transp.buffer): + transp.buffer.commit(res) + if transp.buffer.availSpace() == 0: transp.state.incl(ReadPaused) let rres 
= removeReader2(transp.fd) if rres.isErr(): @@ -1458,7 +1452,7 @@ else: transp = StreamTransport(kind: TransportKind.Socket) transp.fd = sock - transp.buffer = newSeq[byte](bufsize) + transp.buffer = BipBuffer.init(bufsize) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( @@ -1475,7 +1469,7 @@ else: transp = StreamTransport(kind: TransportKind.Pipe) transp.fd = fd - transp.buffer = newSeq[byte](bufsize) + transp.buffer = BipBuffer.init(bufsize) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( @@ -2339,7 +2333,7 @@ proc writeFile*(transp: StreamTransport, handle: int, proc atEof*(transp: StreamTransport): bool {.inline.} = ## Returns ``true`` if ``transp`` is at EOF. - (transp.offset == 0) and (ReadEof in transp.state) and + (len(transp.buffer) == 0) and (ReadEof in transp.state) and (ReadPaused in transp.state) template readLoop(name, body: untyped): untyped = @@ -2351,16 +2345,17 @@ template readLoop(name, body: untyped): untyped = if ReadClosed in transp.state: raise newException(TransportUseClosedError, "Attempt to read data from closed stream") - if transp.offset == 0: + if len(transp.buffer) == 0: # We going to raise an error, only if transport buffer is empty. if ReadError in transp.state: raise transp.getError() let (consumed, done) = body - transp.shiftBuffer(consumed) + transp.buffer.consume(consumed) if done: break - else: + + if len(transp.buffer) == 0: checkPending(transp) let fut = ReaderFuture.init(name) transp.reader = fut @@ -2403,17 +2398,23 @@ proc readExactly*(transp: StreamTransport, pbytes: pointer, if nbytes == 0: return - var index = 0 - var pbuffer = cast[ptr UncheckedArray[byte]](pbytes) + var + index = 0 + pbuffer = pbytes.toUnchecked() readLoop("stream.transport.readExactly"): - if transp.offset == 0: + if len(transp.buffer) == 0: if transp.atEof(): raise newException(TransportIncompleteError, "Data incomplete!") - let count = min(nbytes - index, transp.offset) - if count > 0: - copyMem(addr pbuffer[index], addr(transp.buffer[0]), count) - index += count - (consumed: count, done: index == nbytes) + var readed = 0 + for (region, rsize) in transp.buffer.regions(): + let count = min(nbytes - index, rsize) + readed += count + if count > 0: + copyMem(addr pbuffer[index], region, count) + index += count + if index == nbytes: + break + (consumed: readed, done: index == nbytes) proc readOnce*(transp: StreamTransport, pbytes: pointer, nbytes: int): Future[int] {. @@ -2425,15 +2426,21 @@ proc readOnce*(transp: StreamTransport, pbytes: pointer, doAssert(not(isNil(pbytes)), "pbytes must not be nil") doAssert(nbytes > 0, "nbytes must be positive integer") - var count = 0 + var + pbuffer = pbytes.toUnchecked() + index = 0 readLoop("stream.transport.readOnce"): - if transp.offset == 0: + if len(transp.buffer) == 0: (0, transp.atEof()) else: - count = min(transp.offset, nbytes) - copyMem(pbytes, addr(transp.buffer[0]), count) - (count, true) - return count + for (region, rsize) in transp.buffer.regions(): + let size = min(rsize, nbytes - index) + copyMem(addr pbuffer[index], region, size) + index += size + if index >= nbytes: + break + (index, true) + index proc readUntil*(transp: StreamTransport, pbytes: pointer, nbytes: int, sep: seq[byte]): Future[int] {. 
@@ -2457,7 +2464,7 @@ proc readUntil*(transp: StreamTransport, pbytes: pointer, nbytes: int, if nbytes == 0: raise newException(TransportLimitError, "Limit reached!") - var pbuffer = cast[ptr UncheckedArray[byte]](pbytes) + var pbuffer = pbytes.toUnchecked() var state = 0 var k = 0 @@ -2466,14 +2473,11 @@ proc readUntil*(transp: StreamTransport, pbytes: pointer, nbytes: int, raise newException(TransportIncompleteError, "Data incomplete!") var index = 0 - - while index < transp.offset: + for ch in transp.buffer: if k >= nbytes: raise newException(TransportLimitError, "Limit reached!") - let ch = transp.buffer[index] inc(index) - pbuffer[k] = ch inc(k) @@ -2485,8 +2489,7 @@ proc readUntil*(transp: StreamTransport, pbytes: pointer, nbytes: int, state = 0 (index, state == len(sep)) - - return k + k proc readLine*(transp: StreamTransport, limit = 0, sep = "\r\n"): Future[string] {. @@ -2503,46 +2506,52 @@ proc readLine*(transp: StreamTransport, limit = 0, ## If ``limit`` more then 0, then read is limited to ``limit`` bytes. let lim = if limit <= 0: -1 else: limit var state = 0 + var res: string readLoop("stream.transport.readLine"): if transp.atEof(): (0, true) else: var index = 0 - while index < transp.offset: - let ch = char(transp.buffer[index]) - index += 1 + for ch in transp.buffer: + inc(index) - if sep[state] == ch: + if sep[state] == char(ch): inc(state) if state == len(sep): break else: if state != 0: if limit > 0: - let missing = min(state, lim - len(result) - 1) - result.add(sep[0 ..< missing]) + let missing = min(state, lim - len(res) - 1) + res.add(sep[0 ..< missing]) else: - result.add(sep[0 ..< state]) + res.add(sep[0 ..< state]) state = 0 - result.add(ch) - if len(result) == lim: + res.add(char(ch)) + if len(res) == lim: break - (index, (state == len(sep)) or (lim == len(result))) + (index, (state == len(sep)) or (lim == len(res))) + res proc read*(transp: StreamTransport): Future[seq[byte]] {. async: (raises: [TransportError, CancelledError]).} = ## Read all bytes from transport ``transp``. ## ## This procedure allocates buffer seq[byte] and return it as result. + var res: seq[byte] readLoop("stream.transport.read"): if transp.atEof(): (0, true) else: - result.add(transp.buffer.toOpenArray(0, transp.offset - 1)) - (transp.offset, false) + var readed = 0 + for (region, rsize) in transp.buffer.regions(): + readed += rsize + res.add(region.toUnchecked().toOpenArray(0, rsize - 1)) + (readed, false) + res proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {. async: (raises: [TransportError, CancelledError]).} = @@ -2550,27 +2559,35 @@ proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {. ## ## This procedure allocates buffer seq[byte] and return it as result. if n <= 0: - return await transp.read() + await transp.read() else: + var res: seq[byte] readLoop("stream.transport.read"): if transp.atEof(): (0, true) else: - let count = min(transp.offset, n - len(result)) - result.add(transp.buffer.toOpenArray(0, count - 1)) - (count, len(result) == n) + var readed = 0 + for (region, rsize) in transp.buffer.regions(): + let count = min(rsize, n - len(res)) + readed += count + res.add(region.toUnchecked().toOpenArray(0, count - 1)) + (readed, len(res) == n) + res proc consume*(transp: StreamTransport): Future[int] {. async: (raises: [TransportError, CancelledError]).} = ## Consume all bytes from transport ``transp`` and discard it. ## ## Return number of bytes actually consumed and discarded. 
+ var res = 0 readLoop("stream.transport.consume"): if transp.atEof(): (0, true) else: - result += transp.offset - (transp.offset, false) + let used = len(transp.buffer) + res += used + (used, false) + res proc consume*(transp: StreamTransport, n: int): Future[int] {. async: (raises: [TransportError, CancelledError]).} = @@ -2579,15 +2596,19 @@ proc consume*(transp: StreamTransport, n: int): Future[int] {. ## ## Return number of bytes actually consumed and discarded. if n <= 0: - return await transp.consume() + await transp.consume() else: + var res = 0 readLoop("stream.transport.consume"): if transp.atEof(): (0, true) else: - let count = min(transp.offset, n - result) - result += count - (count, result == n) + let + used = len(transp.buffer) + count = min(used, n - res) + res += count + (count, res == n) + res proc readMessage*(transp: StreamTransport, predicate: ReadMessagePredicate) {. @@ -2605,14 +2626,18 @@ proc readMessage*(transp: StreamTransport, ## ``predicate`` callback will receive (zero-length) openArray, if transport ## is at EOF. readLoop("stream.transport.readMessage"): - if transp.offset == 0: + if len(transp.buffer) == 0: if transp.atEof(): predicate([]) else: # Case, when transport's buffer is not yet filled with data. (0, false) else: - predicate(transp.buffer.toOpenArray(0, transp.offset - 1)) + var res: tuple[consumed: int, done: bool] + for (region, rsize) in transp.buffer.regions(): + res = predicate(region.toUnchecked().toOpenArray(0, rsize - 1)) + break + res proc join*(transp: StreamTransport): Future[void] {. async: (raw: true, raises: [CancelledError]).} = @@ -2630,7 +2655,7 @@ proc join*(transp: StreamTransport): Future[void] {. retFuture.cancelCallback = cancel else: retFuture.complete() - return retFuture + retFuture proc closed*(transp: StreamTransport): bool {.inline.} = ## Returns ``true`` if transport in closed state. From 2d85229dce6a2c0229d5c1985c6dce211ed9e8ee Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 4 Apr 2024 00:30:01 +0300 Subject: [PATCH 129/146] Add `join()` operation to wait for future completion. (#525) * Add `join()` operation to wait for future completion without cancelling it when `join()` got cancelled. * Start using join() operation. --- chronos/apps/http/httpserver.nim | 18 +---- chronos/internal/asyncfutures.nim | 33 ++++++++++ chronos/streams/asyncstream.nim | 19 +----- chronos/transports/datagram.nim | 16 +---- chronos/transports/stream.nim | 15 +---- tests/testfut.nim | 106 ++++++++++++++++++++++++++++++ 6 files changed, 143 insertions(+), 64 deletions(-) diff --git a/chronos/apps/http/httpserver.nim b/chronos/apps/http/httpserver.nim index c1e52793b..1adb8fc8b 100644 --- a/chronos/apps/http/httpserver.nim +++ b/chronos/apps/http/httpserver.nim @@ -1187,23 +1187,7 @@ proc closeWait*(server: HttpServerRef) {.async: (raises: []).} = proc join*(server: HttpServerRef): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Wait until HTTP server will not be closed. 
- var retFuture = newFuture[void]("http.server.join") - - proc continuation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): - retFuture.complete() - - proc cancellation(udata: pointer) {.gcsafe.} = - if not(retFuture.finished()): - server.lifetime.removeCallback(continuation, cast[pointer](retFuture)) - - if server.state == ServerClosed: - retFuture.complete() - else: - server.lifetime.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancellation - - retFuture + server.lifetime.join() proc getMultipartReader*(req: HttpRequestRef): HttpResult[MultiPartReaderRef] = ## Create new MultiPartReader interface for specific request. diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 49c6acd7c..7f93b0e15 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1607,6 +1607,39 @@ proc wait*[T](fut: Future[T], timeout = -1): Future[T] {. else: wait(fut, timeout.milliseconds()) +proc join*(future: FutureBase): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = + ## Returns a future which will complete once future ``future`` completes. + ## + ## This primitive helps to carefully monitor ``future`` state, in case of + ## cancellation ``join`` operation it will not going to cancel ``future``. + ## + ## If ``future`` is already completed - ``join`` will return completed + ## future immediately. + let retFuture = newFuture[void]("chronos.join()") + + proc continuation(udata: pointer) {.gcsafe.} = + retFuture.complete() + + proc cancellation(udata: pointer) {.gcsafe.} = + future.removeCallback(continuation, cast[pointer](retFuture)) + + if not(future.finished()): + future.addCallback(continuation, cast[pointer](retFuture)) + retFuture.cancelCallback = cancellation + else: + retFuture.complete() + + retFuture + +proc join*(future: SomeFuture): Future[void] {. + async: (raw: true, raises: [CancelledError]).} = + ## Returns a future which will complete once future ``future`` completes. + ## + ## This primitive helps to carefully monitor ``future`` state, in case of + ## cancellation ``join`` operation it will not going to cancel ``future``. + join(FutureBase(future)) + when defined(windows): import ../osdefs diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 473cc38bc..bb878dbcb 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -836,24 +836,7 @@ proc join*(rw: AsyncStreamRW): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Get Future[void] which will be completed when stream become finished or ## closed. - when rw is AsyncStreamReader: - var retFuture = newFuture[void]("async.stream.reader.join") - else: - var retFuture = newFuture[void]("async.stream.writer.join") - - proc continuation(udata: pointer) {.gcsafe, raises:[].} = - retFuture.complete() - - proc cancellation(udata: pointer) {.gcsafe, raises:[].} = - rw.future.removeCallback(continuation, cast[pointer](retFuture)) - - if not(rw.future.finished()): - rw.future.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancellation - else: - retFuture.complete() - - return retFuture + rw.future.join() proc close*(rw: AsyncStreamRW) = ## Close and frees resources of stream ``rw``. 
diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index d6391219c..7f471424a 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -827,21 +827,7 @@ proc newDatagramTransport6*[T](cbproc: UnsafeDatagramCallback, proc join*(transp: DatagramTransport): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Wait until the transport ``transp`` will be closed. - let retFuture = newFuture[void]("datagram.transport.join") - - proc continuation(udata: pointer) {.gcsafe.} = - retFuture.complete() - - proc cancel(udata: pointer) {.gcsafe.} = - transp.future.removeCallback(continuation, cast[pointer](retFuture)) - - if not(transp.future.finished()): - transp.future.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancel - else: - retFuture.complete() - - return retFuture + transp.future.join() proc closed*(transp: DatagramTransport): bool {.inline.} = ## Returns ``true`` if transport in closed state. diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index b81a512d9..7b5925b70 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -1780,20 +1780,7 @@ proc stop*(server: StreamServer) {.raises: [TransportOsError].} = proc join*(server: StreamServer): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Waits until ``server`` is not closed. - var retFuture = newFuture[void]("stream.transport.server.join") - - proc continuation(udata: pointer) = - retFuture.complete() - - proc cancel(udata: pointer) = - server.loopFuture.removeCallback(continuation, cast[pointer](retFuture)) - - if not(server.loopFuture.finished()): - server.loopFuture.addCallback(continuation, cast[pointer](retFuture)) - retFuture.cancelCallback = cancel - else: - retFuture.complete() - return retFuture + server.loopFuture.join() proc connect*(address: TransportAddress, bufferSize = DefaultStreamBufferSize, diff --git a/tests/testfut.nim b/tests/testfut.nim index c2231f126..1cf0aed5f 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -2048,6 +2048,112 @@ suite "Future[T] behavior test suite": future1.cancelled() == true future2.cancelled() == true + asyncTest "join() test": + proc joinFoo0(future: FutureBase) {.async.} = + await join(future) + + proc joinFoo1(future: Future[void]) {.async.} = + await join(future) + + proc joinFoo2(future: Future[void]) {. + async: (raises: [CancelledError]).} = + await join(future) + + let + future0 = newFuture[void]() + future1 = newFuture[void]() + future2 = Future[void].Raising([CancelledError]).init() + + let + resfut0 = joinFoo0(future0) + resfut1 = joinFoo1(future1) + resfut2 = joinFoo2(future2) + + check: + resfut0.finished() == false + resfut1.finished() == false + resfut2.finished() == false + + future0.complete() + future1.complete() + future2.complete() + + let res = + try: + await noCancel allFutures(resfut0, resfut1, resfut2).wait(1.seconds) + true + except AsyncTimeoutError: + false + + check: + res == true + resfut0.finished() == true + resfut1.finished() == true + resfut2.finished() == true + future0.finished() == true + future1.finished() == true + future2.finished() == true + + asyncTest "join() cancellation test": + proc joinFoo0(future: FutureBase) {.async.} = + await join(future) + + proc joinFoo1(future: Future[void]) {.async.} = + await join(future) + + proc joinFoo2(future: Future[void]) {. 
+ async: (raises: [CancelledError]).} = + await join(future) + + let + future0 = newFuture[void]() + future1 = newFuture[void]() + future2 = Future[void].Raising([CancelledError]).init() + + let + resfut0 = joinFoo0(future0) + resfut1 = joinFoo1(future1) + resfut2 = joinFoo2(future2) + + check: + resfut0.finished() == false + resfut1.finished() == false + resfut2.finished() == false + + let + cancelfut0 = cancelAndWait(resfut0) + cancelfut1 = cancelAndWait(resfut1) + cancelfut2 = cancelAndWait(resfut2) + + let res = + try: + await noCancel allFutures(cancelfut0, cancelfut1, + cancelfut2).wait(1.seconds) + true + except AsyncTimeoutError: + false + + check: + res == true + cancelfut0.finished() == true + cancelfut1.finished() == true + cancelfut2.finished() == true + resfut0.cancelled() == true + resfut1.cancelled() == true + resfut2.cancelled() == true + future0.finished() == false + future1.finished() == false + future2.finished() == false + + future0.complete() + future1.complete() + future2.complete() + + check: + future0.finished() == true + future1.finished() == true + future2.finished() == true + test "Sink with literals": # https://github.com/nim-lang/Nim/issues/22175 let fut = newFuture[string]() From 8e49df14007e27370cd1ce77edb2843783b45e6d Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sun, 7 Apr 2024 07:03:12 +0300 Subject: [PATCH 130/146] Ensure that all buffers used inside HTTP client will follow original buffer size. (#530) Ensure that buffer size cannot be lower than default size. --- chronos/apps/http/httpclient.nim | 18 ++++++++++++------ chronos/streams/asyncstream.nim | 6 ++++-- chronos/transports/stream.nim | 12 ++++++++---- 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 33a6b7f3f..414b1d3fd 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -567,7 +567,8 @@ proc new( tls = try: newTLSClientAsyncStream(treader, twriter, ha.hostname, - flags = session.flags.getTLSFlags()) + flags = session.flags.getTLSFlags(), + bufferSize = session.connectionBufferSize) except TLSStreamInitError as exc: return err(exc.msg) @@ -1327,13 +1328,18 @@ proc getBodyReader*(response: HttpClientResponseRef): HttpBodyReader {. 
let reader = case response.bodyFlag of HttpClientBodyFlag.Sized: - let bstream = newBoundedStreamReader(response.connection.reader, - response.contentLength) - newHttpBodyReader(bstream) + newHttpBodyReader( + newBoundedStreamReader( + response.connection.reader, response.contentLength, + bufferSize = response.session.connectionBufferSize)) of HttpClientBodyFlag.Chunked: - newHttpBodyReader(newChunkedStreamReader(response.connection.reader)) + newHttpBodyReader( + newChunkedStreamReader( + response.connection.reader, + bufferSize = response.session.connectionBufferSize)) of HttpClientBodyFlag.Custom: - newHttpBodyReader(newAsyncStreamReader(response.connection.reader)) + newHttpBodyReader( + newAsyncStreamReader(response.connection.reader)) response.connection.state = HttpClientConnectionState.ResponseBodyReceiving response.reader = reader response.reader diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index bb878dbcb..301b0868d 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -929,7 +929,8 @@ proc init*(child, rsource: AsyncStreamReader, loop: StreamReaderLoop, child.readerLoop = loop child.rsource = rsource child.tsource = rsource.tsource - child.buffer = AsyncBufferRef.new(bufferSize) + let size = max(AsyncStreamDefaultBufferSize, bufferSize) + child.buffer = AsyncBufferRef.new(size) trackCounter(AsyncStreamReaderTrackerName) child.startReader() @@ -941,7 +942,8 @@ proc init*[T](child, rsource: AsyncStreamReader, loop: StreamReaderLoop, child.readerLoop = loop child.rsource = rsource child.tsource = rsource.tsource - child.buffer = AsyncBufferRef.new(bufferSize) + let size = max(AsyncStreamDefaultBufferSize, bufferSize) + child.buffer = AsyncBufferRef.new(size) if not isNil(udata): GC_ref(udata) child.udata = cast[pointer](udata) diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 7b5925b70..0f006b8d7 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -585,7 +585,8 @@ when defined(windows): udata: cast[pointer](transp)) transp.wovl.data = CompletionData(cb: writeStreamLoop, udata: cast[pointer](transp)) - transp.buffer = BipBuffer.init(bufsize) + let size = max(bufsize, DefaultStreamBufferSize) + transp.buffer = BipBuffer.init(size) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( @@ -606,7 +607,8 @@ when defined(windows): udata: cast[pointer](transp)) transp.wovl.data = CompletionData(cb: writeStreamLoop, udata: cast[pointer](transp)) - transp.buffer = BipBuffer.init(bufsize) + let size = max(bufsize, DefaultStreamBufferSize) + transp.buffer = BipBuffer.init(size) transp.flags = flags transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() @@ -1452,7 +1454,8 @@ else: transp = StreamTransport(kind: TransportKind.Socket) transp.fd = sock - transp.buffer = BipBuffer.init(bufsize) + let size = max(bufsize, DefaultStreamBufferSize) + transp.buffer = BipBuffer.init(size) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( @@ -1469,7 +1472,8 @@ else: transp = StreamTransport(kind: TransportKind.Pipe) transp.fd = fd - transp.buffer = BipBuffer.init(bufsize) + let size = max(bufsize, DefaultStreamBufferSize) + transp.buffer = BipBuffer.init(size) transp.state = {ReadPaused, WritePaused} transp.queue = initDeque[StreamVector]() transp.future = Future[void].Raising([]).init( From 
0d050d582306e8c521c3a4a6f6dcb3c83c93a90f Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sat, 13 Apr 2024 03:04:42 +0300 Subject: [PATCH 131/146] Add automatic constructors for TCP and UDP transports. (#512) * Add automatic constructors for TCP and UDP transports. * Add port number argument. Add some documentation comments. Fix tests. * Make datagram test use request/response scheme. * Add helper. * Fix issue with non-zero port setups. Add test. * Fix tests to probe ports. * Attempt to fix MacOS issue. * Add Opt[IpAddress]. Make IPv4 mapping to IPv6 space automatic. * Add tests. * Add stream capabilities. * Fix Linux issues. * Make getTransportFlags() available for all OSes. * Fix one more compilation issue. * Workaround weird compiler bug. * Fix forgotten typed version of constructor. * Make single source for addresses calculation. * Add one more check into tests. * Fix flags not being set in transport constructor. * Fix post-rebase issues with flags not being set. * Address review comments. --- chronos/transports/common.nim | 36 +- chronos/transports/datagram.nim | 279 ++++++++++++--- chronos/transports/stream.nim | 589 +++++++++++++++++++++----------- tests/testdatagram.nim | 342 +++++++++++++++++++ tests/teststream.nim | 254 ++++++++++++++ 5 files changed, 1250 insertions(+), 250 deletions(-) diff --git a/chronos/transports/common.nim b/chronos/transports/common.nim index 8fcf0eb63..6419b003c 100644 --- a/chronos/transports/common.nim +++ b/chronos/transports/common.nim @@ -10,6 +10,7 @@ {.push raises: [].} import std/[strutils] +import results import stew/[base10, byteutils] import ".."/[config, asyncloop, osdefs, oserrno, handles] @@ -18,7 +19,7 @@ from std/net import Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, from std/nativesockets import toInt, `$` export Domain, `==`, IpAddress, IpAddressFamily, parseIpAddress, SockType, - Protocol, Port, toInt, `$` + Protocol, Port, toInt, `$`, results const DefaultStreamBufferSize* = chronosTransportDefaultBufferSize @@ -29,7 +30,7 @@ type ServerFlags* = enum ## Server's flags ReuseAddr, ReusePort, TcpNoDelay, NoAutoRead, GCUserData, FirstPipe, - NoPipeFlash, Broadcast + NoPipeFlash, Broadcast, V4Mapped DualStackType* {.pure.} = enum Auto, Enabled, Disabled, Default @@ -200,6 +201,15 @@ proc `$`*(address: TransportAddress): string = of AddressFamily.None: "None" +proc toIpAddress*(address: TransportAddress): IpAddress = + case address.family + of AddressFamily.IPv4: + IpAddress(family: IpAddressFamily.IPv4, address_v4: address.address_v4) + of AddressFamily.IPv6: + IpAddress(family: IpAddressFamily.IPv6, address_v6: address.address_v6) + else: + raiseAssert "IpAddress do not support address family " & $address.family + proc toHex*(address: TransportAddress): string = ## Returns hexadecimal representation of ``address``. case address.family @@ -783,3 +793,25 @@ proc setDualstack*(socket: AsyncFD, else: ? 
getDomain(socket) setDualstack(socket, family, flag) + +proc getAutoAddress*(port: Port): TransportAddress = + var res = + if isAvailable(AddressFamily.IPv6): + AnyAddress6 + else: + AnyAddress + res.port = port + res + +proc getAutoAddresses*( + localPort: Port, + remotePort: Port +): tuple[local: TransportAddress, remote: TransportAddress] = + var (local, remote) = + if isAvailable(AddressFamily.IPv6): + (AnyAddress6, AnyAddress6) + else: + (AnyAddress, AnyAddress) + local.port = localPort + remote.port = remotePort + (local, remote) diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index 7f471424a..fdb406ba4 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -10,11 +10,14 @@ {.push raises: [].} import std/deques +import results when not(defined(windows)): import ".."/selectors2 import ".."/[asyncloop, osdefs, oserrno, osutils, handles] -import "."/common +import "."/[common, ipnet] import stew/ptrops +export results + type VectorKind = enum WithoutAddress, WithAddress @@ -60,29 +63,78 @@ type const DgramTransportTrackerName* = "datagram.transport" -proc remoteAddress*(transp: DatagramTransport): TransportAddress {. - raises: [TransportOsError].} = +proc getRemoteAddress(transp: DatagramTransport, + address: Sockaddr_storage, length: SockLen, + ): TransportAddress = + var raddr: TransportAddress + fromSAddr(unsafeAddr address, length, raddr) + if ServerFlags.V4Mapped in transp.flags: + if raddr.isV4Mapped(): raddr.toIPv4() else: raddr + else: + raddr + +proc getRemoteAddress(transp: DatagramTransport): TransportAddress = + transp.getRemoteAddress(transp.raddr, transp.ralen) + +proc setRemoteAddress(transp: DatagramTransport, + address: TransportAddress): TransportAddress = + let + fixedAddress = + when defined(windows): + windowsAnyAddressFix(address) + else: + address + remoteAddress = + if ServerFlags.V4Mapped in transp.flags: + if address.family == AddressFamily.IPv4: + fixedAddress.toIPv6() + else: + fixedAddress + else: + fixedAddress + toSAddr(remoteAddress, transp.waddr, transp.walen) + remoteAddress + +proc remoteAddress2*( + transp: DatagramTransport +): Result[TransportAddress, OSErrorCode] = ## Returns ``transp`` remote socket address. if transp.remote.family == AddressFamily.None: - var saddr: Sockaddr_storage - var slen = SockLen(sizeof(saddr)) + var + saddr: Sockaddr_storage + slen = SockLen(sizeof(saddr)) if getpeername(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr), addr slen) != 0: - raiseTransportOsError(osLastError()) - fromSAddr(addr saddr, slen, transp.remote) - transp.remote + return err(osLastError()) + transp.remote = transp.getRemoteAddress(saddr, slen) + ok(transp.remote) -proc localAddress*(transp: DatagramTransport): TransportAddress {. - raises: [TransportOsError].} = +proc localAddress2*( + transp: DatagramTransport +): Result[TransportAddress, OSErrorCode] = ## Returns ``transp`` local socket address. if transp.local.family == AddressFamily.None: - var saddr: Sockaddr_storage - var slen = SockLen(sizeof(saddr)) + var + saddr: Sockaddr_storage + slen = SockLen(sizeof(saddr)) if getsockname(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr), addr slen) != 0: - raiseTransportOsError(osLastError()) + return err(osLastError()) fromSAddr(addr saddr, slen, transp.local) - transp.local + ok(transp.local) + +func toException(v: OSErrorCode): ref TransportOsError = + getTransportOsError(v) + +proc remoteAddress*(transp: DatagramTransport): TransportAddress {. 
+ raises: [TransportOsError].} = + ## Returns ``transp`` remote socket address. + remoteAddress2(transp).tryGet() + +proc localAddress*(transp: DatagramTransport): TransportAddress {. + raises: [TransportOsError].} = + ## Returns ``transp`` remote socket address. + localAddress2(transp).tryGet() template setReadError(t, e: untyped) = (t).state.incl(ReadError) @@ -124,8 +176,8 @@ when defined(windows): transp.setWriterWSABuffer(vector) let ret = if vector.kind == WithAddress: - var fixedAddress = windowsAnyAddressFix(vector.address) - toSAddr(fixedAddress, transp.waddr, transp.walen) + # We only need `Sockaddr_storage` data here, so result discarded. + discard transp.setRemoteAddress(vector.address) wsaSendTo(fd, addr transp.wwsabuf, DWORD(1), addr bytesCount, DWORD(0), cast[ptr SockAddr](addr transp.waddr), cint(transp.walen), @@ -159,22 +211,24 @@ when defined(windows): proc readDatagramLoop(udata: pointer) = var bytesCount: uint32 - raddr: TransportAddress - var ovl = cast[PtrCustomOverlapped](udata) - var transp = cast[DatagramTransport](ovl.data.udata) + ovl = cast[PtrCustomOverlapped](udata) + + let transp = cast[DatagramTransport](ovl.data.udata) + while true: if ReadPending in transp.state: ## Continuation transp.state.excl(ReadPending) - let err = transp.rovl.data.errCode + let + err = transp.rovl.data.errCode + remoteAddress = transp.getRemoteAddress() case err of OSErrorCode(-1): let bytesCount = transp.rovl.data.bytesCount if bytesCount == 0: transp.state.incl({ReadEof, ReadPaused}) - fromSAddr(addr transp.raddr, transp.ralen, raddr) transp.buflen = int(bytesCount) - asyncSpawn transp.function(transp, raddr) + asyncSpawn transp.function(transp, remoteAddress) of ERROR_OPERATION_ABORTED: # CancelIO() interrupt or closeSocket() call. transp.state.incl(ReadPaused) @@ -189,7 +243,7 @@ when defined(windows): transp.setReadError(err) transp.state.incl(ReadPaused) transp.buflen = 0 - asyncSpawn transp.function(transp, raddr) + asyncSpawn transp.function(transp, remoteAddress) else: ## Initiation if transp.state * {ReadEof, ReadClosed, ReadError} == {}: @@ -220,7 +274,7 @@ when defined(windows): transp.state.incl(ReadPaused) transp.setReadError(err) transp.buflen = 0 - asyncSpawn transp.function(transp, raddr) + asyncSpawn transp.function(transp, transp.getRemoteAddress()) else: # Transport closure happens in callback, and we not started new # WSARecvFrom session. @@ -341,18 +395,25 @@ when defined(windows): closeSocket(localSock) raiseTransportOsError(err) + res.flags = + block: + # Add `V4Mapped` flag when `::` address is used and dualstack is + # set to enabled or auto. 
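+        # With `V4Mapped` set, outgoing IPv4 destinations are rewritten to
+        # their `::ffff:` mapped IPv6 form before being passed to the OS,
+        # and mapped peer addresses reported by the OS are converted back
+        # to plain IPv4 before reaching the user callback.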
+ var res = flags + if (local.family == AddressFamily.IPv6) and local.isAnyLocal(): + if dualstack in {DualStackType.Enabled, DualStackType.Auto}: + res.incl(ServerFlags.V4Mapped) + res + if remote.port != Port(0): - var fixedAddress = windowsAnyAddressFix(remote) - var saddr: Sockaddr_storage - var slen: SockLen - toSAddr(fixedAddress, saddr, slen) - if connect(SocketHandle(localSock), cast[ptr SockAddr](addr saddr), - slen) != 0: + let remoteAddress = res.setRemoteAddress(remote) + if connect(SocketHandle(localSock), cast[ptr SockAddr](addr res.waddr), + res.walen) != 0: let err = osLastError() if sock == asyncInvalidSocket: closeSocket(localSock) raiseTransportOsError(err) - res.remote = fixedAddress + res.remote = remoteAddress res.fd = localSock res.function = cbproc @@ -362,12 +423,12 @@ when defined(windows): res.state = {ReadPaused, WritePaused} res.future = Future[void].Raising([]).init( "datagram.transport", {FutureFlag.OwnCancelSchedule}) - res.rovl.data = CompletionData(cb: readDatagramLoop, - udata: cast[pointer](res)) - res.wovl.data = CompletionData(cb: writeDatagramLoop, - udata: cast[pointer](res)) - res.rwsabuf = WSABUF(buf: cast[cstring](baseAddr res.buffer), - len: ULONG(len(res.buffer))) + res.rovl.data = CompletionData( + cb: readDatagramLoop, udata: cast[pointer](res)) + res.wovl.data = CompletionData( + cb: writeDatagramLoop, udata: cast[pointer](res)) + res.rwsabuf = WSABUF( + buf: cast[cstring](baseAddr res.buffer), len: ULONG(len(res.buffer))) GC_ref(res) # Start tracking transport trackCounter(DgramTransportTrackerName) @@ -380,10 +441,10 @@ else: # Linux/BSD/MacOS part proc readDatagramLoop(udata: pointer) {.raises: [].}= - var raddr: TransportAddress doAssert(not isNil(udata)) - let transp = cast[DatagramTransport](udata) - let fd = SocketHandle(transp.fd) + let + transp = cast[DatagramTransport](udata) + fd = SocketHandle(transp.fd) if int(fd) == 0: ## This situation can be happen, when there events present ## after transport was closed. @@ -398,9 +459,8 @@ else: cast[ptr SockAddr](addr transp.raddr), addr transp.ralen) if res >= 0: - fromSAddr(addr transp.raddr, transp.ralen, raddr) transp.buflen = res - asyncSpawn transp.function(transp, raddr) + asyncSpawn transp.function(transp, transp.getRemoteAddress()) else: let err = osLastError() case err @@ -409,14 +469,15 @@ else: else: transp.buflen = 0 transp.setReadError(err) - asyncSpawn transp.function(transp, raddr) + asyncSpawn transp.function(transp, transp.getRemoteAddress()) break proc writeDatagramLoop(udata: pointer) = var res: int doAssert(not isNil(udata)) - var transp = cast[DatagramTransport](udata) - let fd = SocketHandle(transp.fd) + let + transp = cast[DatagramTransport](udata) + fd = SocketHandle(transp.fd) if int(fd) == 0: ## This situation can be happen, when there events present ## after transport was closed. @@ -428,7 +489,8 @@ else: let vector = transp.queue.popFirst() while true: if vector.kind == WithAddress: - toSAddr(vector.address, transp.waddr, transp.walen) + # We only need `Sockaddr_storage` data here, so result discarded. + discard transp.setRemoteAddress(vector.address) res = osdefs.sendto(fd, vector.buf, vector.buflen, MSG_NOSIGNAL, cast[ptr SockAddr](addr transp.waddr), transp.walen) @@ -551,21 +613,28 @@ else: closeSocket(localSock) raiseTransportOsError(err) + res.flags = + block: + # Add `V4Mapped` flag when `::` address is used and dualstack is + # set to enabled or auto. 
+ var res = flags + if (local.family == AddressFamily.IPv6) and local.isAnyLocal(): + if dualstack != DualStackType.Disabled: + res.incl(ServerFlags.V4Mapped) + res + if remote.port != Port(0): - var saddr: Sockaddr_storage - var slen: SockLen - toSAddr(remote, saddr, slen) - if connect(SocketHandle(localSock), cast[ptr SockAddr](addr saddr), - slen) != 0: + let remoteAddress = res.setRemoteAddress(remote) + if connect(SocketHandle(localSock), cast[ptr SockAddr](addr res.waddr), + res.walen) != 0: let err = osLastError() if sock == asyncInvalidSocket: closeSocket(localSock) raiseTransportOsError(err) - res.remote = remote + res.remote = remoteAddress res.fd = localSock res.function = cbproc - res.flags = flags res.buffer = newSeq[byte](bufferSize) res.queue = initDeque[GramVector]() res.udata = udata @@ -605,6 +674,24 @@ proc close*(transp: DatagramTransport) = transp.state.incl({WriteClosed, ReadClosed}) closeSocket(transp.fd, continuation) +proc getTransportAddresses( + local, remote: Opt[IpAddress], + localPort, remotePort: Port +): tuple[local: TransportAddress, remote: TransportAddress] = + let + (localAuto, remoteAuto) = getAutoAddresses(localPort, remotePort) + lres = + if local.isSome(): + initTAddress(local.get(), localPort) + else: + localAuto + rres = + if remote.isSome(): + initTAddress(remote.get(), remotePort) + else: + remoteAuto + (lres, rres) + proc newDatagramTransportCommon(cbproc: UnsafeDatagramCallback, remote: TransportAddress, local: TransportAddress, @@ -824,6 +911,92 @@ proc newDatagramTransport6*[T](cbproc: UnsafeDatagramCallback, cast[pointer](udata), child, bufSize, ttl, dualstack) +proc newDatagramTransport*(cbproc: DatagramCallback, + localPort: Port, + remotePort: Port, + local: Opt[IpAddress] = Opt.none(IpAddress), + remote: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + udata: pointer = nil, + child: DatagramTransport = nil, + bufSize: int = DefaultDatagramBufferSize, + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. + raises: [TransportOsError].} = + ## Create new UDP datagram transport (IPv6) and bind it to ANY_ADDRESS. + ## Depending on OS settings procedure perform an attempt to create transport + ## using IPv6 ANY_ADDRESS, if its not available it will try to bind transport + ## to IPv4 ANY_ADDRESS. + ## + ## ``cbproc`` - callback which will be called, when new datagram received. + ## ``localPort`` - local peer's port number. + ## ``remotePort`` - remote peer's port number. + ## ``local`` - optional local peer's IPv4/IPv6 address. + ## ``remote`` - optional remote peer's IPv4/IPv6 address. + ## ``sock`` - application-driven socket to use. + ## ``flags`` - flags that will be applied to socket. + ## ``udata`` - custom argument which will be passed to ``cbproc``. + ## ``bufSize`` - size of internal buffer. + ## ``ttl`` - TTL for UDP datagram packet (only usable when flags has + ## ``Broadcast`` option). + let + (localHost, remoteHost) = + getTransportAddresses(local, remote, localPort, remotePort) + newDatagramTransportCommon(cbproc, remoteHost, localHost, asyncInvalidSocket, + flags, cast[pointer](udata), child, bufSize, + ttl, dualstack) + +proc newDatagramTransport*(cbproc: DatagramCallback, + localPort: Port, + local: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + udata: pointer = nil, + child: DatagramTransport = nil, + bufSize: int = DefaultDatagramBufferSize, + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. 
+ raises: [TransportOsError].} = + newDatagramTransport(cbproc, localPort, Port(0), local, Opt.none(IpAddress), + flags, udata, child, bufSize, ttl, dualstack) + +proc newDatagramTransport*[T](cbproc: DatagramCallback, + localPort: Port, + remotePort: Port, + local: Opt[IpAddress] = Opt.none(IpAddress), + remote: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + udata: ref T, + child: DatagramTransport = nil, + bufSize: int = DefaultDatagramBufferSize, + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. + raises: [TransportOsError].} = + let + (localHost, remoteHost) = + getTransportAddresses(local, remote, localPort, remotePort) + fflags = flags + {GCUserData} + GC_ref(udata) + newDatagramTransportCommon(cbproc, remoteHost, localHost, asyncInvalidSocket, + fflags, cast[pointer](udata), child, bufSize, ttl, + dualstack) + +proc newDatagramTransport*[T](cbproc: DatagramCallback, + localPort: Port, + local: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + udata: ref T, + child: DatagramTransport = nil, + bufSize: int = DefaultDatagramBufferSize, + ttl: int = 0, + dualstack = DualStackType.Auto + ): DatagramTransport {. + raises: [TransportOsError].} = + newDatagramTransport(cbproc, localPort, Port(0), local, Opt.none(IpAddress), + flags, udata, child, bufSize, ttl, dualstack) + proc join*(transp: DatagramTransport): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Wait until the transport ``transp`` will be closed. diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 0f006b8d7..f4ef1adf7 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -11,8 +11,11 @@ import std/deques import stew/ptrops +import results import ".."/[asyncloop, config, handles, bipbuffer, osdefs, osutils, oserrno] -import ./common +import ./[common, ipnet] + +export results type VectorKind = enum @@ -48,7 +51,8 @@ type # get stuck on transport `close()`. # Please use this flag only if you are making both client and server in # the same thread. - TcpNoDelay # deprecated: Use SocketFlags.TcpNoDelay + TcpNoDelay, # deprecated: Use SocketFlags.TcpNoDelay + V4Mapped SocketFlags* {.pure.} = enum TcpNoDelay, @@ -101,6 +105,7 @@ else: error: ref TransportError # Current error queue: Deque[StreamVector] # Writer queue future: Future[void].Raising([]) # Stream life future + flags: set[TransportFlags] # Internal flags case kind*: TransportKind of TransportKind.Socket: domain: Domain # Socket transport domain (IPv4/IPv6) @@ -138,31 +143,59 @@ type init*: TransportInitCallback # callback which will be called before # transport for new client -proc remoteAddress*(transp: StreamTransport): TransportAddress {. - raises: [TransportOsError].} = +proc getRemoteAddress(transp: StreamTransport, + address: Sockaddr_storage, length: SockLen, + ): TransportAddress = + var raddr: TransportAddress + fromSAddr(unsafeAddr address, length, raddr) + if TransportFlags.V4Mapped in transp.flags: + if raddr.isV4Mapped(): raddr.toIPv4() else: raddr + else: + raddr + +proc remoteAddress2*( + transp: StreamTransport +): Result[TransportAddress, OSErrorCode] = ## Returns ``transp`` remote socket address. 
- doAssert(transp.kind == TransportKind.Socket, "Socket transport required!") if transp.remote.family == AddressFamily.None: - var saddr: Sockaddr_storage - var slen = SockLen(sizeof(saddr)) + var + saddr: Sockaddr_storage + slen = SockLen(sizeof(saddr)) if getpeername(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr), addr slen) != 0: - raiseTransportOsError(osLastError()) - fromSAddr(addr saddr, slen, transp.remote) - transp.remote + return err(osLastError()) + transp.remote = transp.getRemoteAddress(saddr, slen) + ok(transp.remote) -proc localAddress*(transp: StreamTransport): TransportAddress {. - raises: [TransportOsError].} = +proc localAddress2*( + transp: StreamTransport +): Result[TransportAddress, OSErrorCode] = ## Returns ``transp`` local socket address. - doAssert(transp.kind == TransportKind.Socket, "Socket transport required!") if transp.local.family == AddressFamily.None: - var saddr: Sockaddr_storage - var slen = SockLen(sizeof(saddr)) + var + saddr: Sockaddr_storage + slen = SockLen(sizeof(saddr)) if getsockname(SocketHandle(transp.fd), cast[ptr SockAddr](addr saddr), addr slen) != 0: - raiseTransportOsError(osLastError()) + return err(osLastError()) fromSAddr(addr saddr, slen, transp.local) - transp.local + ok(transp.local) + +# TODO(cheatfate): This function should not be public, but for some weird +# reason if we will make it non-public it start generate +# Hint: 'toException' is declared but not used [XDeclaredButNotUsed] +func toException*(v: OSErrorCode): ref TransportOsError = + getTransportOsError(v) + +proc remoteAddress*(transp: StreamTransport): TransportAddress {. + raises: [TransportOsError].} = + ## Returns ``transp`` remote socket address. + remoteAddress2(transp).tryGet() + +proc localAddress*(transp: StreamTransport): TransportAddress {. + raises: [TransportOsError].} = + ## Returns ``transp`` remote socket address. + localAddress2(transp).tryGet() proc localAddress*(server: StreamServer): TransportAddress = ## Returns ``server`` bound local socket address. 
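A minimal usage sketch of the Result-based accessors introduced above, assuming `transp` is an already established `StreamTransport`; the `logEndpoints` helper is illustrative only and not part of the patch:

  import chronos

  proc logEndpoints(transp: StreamTransport) =
    # remoteAddress2()/localAddress2() return the OS error code instead of
    # raising TransportOsError, so callers can handle failures inline.
    let raddr = transp.remoteAddress2().valueOr:
      echo "getpeername() failed, error code = ", error
      return
    let laddr = transp.localAddress2().valueOr:
      echo "getsockname() failed, error code = ", error
      return
    echo "connection ", laddr, " -> ", raddr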
@@ -220,6 +253,12 @@ proc clean(transp: StreamTransport) {.inline.} = template toUnchecked*(a: untyped): untyped = cast[ptr UncheckedArray[byte]](a) +func getTransportFlags(server: StreamServer): set[TransportFlags] = + if ServerFlags.V4Mapped in server.flags: + {TransportFlags.V4Mapped} + else: + {} + when defined(windows): template zeroOvelappedOffset(t: untyped) = @@ -574,13 +613,15 @@ when defined(windows): break proc newStreamSocketTransport(sock: AsyncFD, bufsize: int, - child: StreamTransport): StreamTransport = + child: StreamTransport, + flags: set[TransportFlags]): StreamTransport = var transp: StreamTransport if not(isNil(child)): transp = child else: transp = StreamTransport(kind: TransportKind.Socket) transp.fd = sock + transp.flags = flags transp.rovl.data = CompletionData(cb: readStreamLoop, udata: cast[pointer](transp)) transp.wovl.data = CompletionData(cb: writeStreamLoop, @@ -617,25 +658,27 @@ when defined(windows): GC_ref(transp) transp - proc bindToDomain(handle: AsyncFD, domain: Domain): bool = - if domain == Domain.AF_INET6: + proc bindToDomain(handle: AsyncFD, + family: AddressFamily): Result[void, OSErrorCode] = + case family + of AddressFamily.IPv6: var saddr: Sockaddr_in6 saddr.sin6_family = type(saddr.sin6_family)(osdefs.AF_INET6) if osdefs.bindSocket(SocketHandle(handle), cast[ptr SockAddr](addr(saddr)), sizeof(saddr).SockLen) != 0'i32: - return false - true - elif domain == Domain.AF_INET: + return err(osLastError()) + ok() + of AddressFamily.IPv4: var saddr: Sockaddr_in saddr.sin_family = type(saddr.sin_family)(osdefs.AF_INET) if osdefs.bindSocket(SocketHandle(handle), cast[ptr SockAddr](addr(saddr)), sizeof(saddr).SockLen) != 0'i32: - return false - true + return err(osLastError()) + ok() else: - raiseAssert "Unsupported domain" + raiseAssert "Unsupported family" proc connect*(address: TransportAddress, bufferSize = DefaultStreamBufferSize, @@ -691,26 +734,36 @@ when defined(windows): retFuture.fail(getTransportOsError(error)) return retFuture - if localAddress != TransportAddress(): - if localAddress.family != address.family: - sock.closeSocket() - retFuture.fail(newException(TransportOsError, - "connect local address domain is not equal to target address domain")) - return retFuture + let transportFlags = + block: + # Add `V4Mapped` flag when `::` address is used and dualstack is + # set to enabled or auto. 
+ var res: set[TransportFlags] + if (localAddress.family == AddressFamily.IPv6) and + localAddress.isAnyLocal(): + if dualstack in {DualStackType.Enabled, DualStackType.Auto}: + res.incl(TransportFlags.V4Mapped) + res + + case localAddress.family + of AddressFamily.IPv4, AddressFamily.IPv6: var - localAddr: Sockaddr_storage - localAddrLen: SockLen - localAddress.toSAddr(localAddr, localAddrLen) + saddr: Sockaddr_storage + slen: SockLen + toSAddr(localAddress, saddr, slen) if bindSocket(SocketHandle(sock), - cast[ptr SockAddr](addr localAddr), localAddrLen) != 0: + cast[ptr SockAddr](addr saddr), slen) != 0: sock.closeSocket() retFuture.fail(getTransportOsError(osLastError())) return retFuture - elif not(bindToDomain(sock, raddress.getDomain())): - let err = wsaGetLastError() - sock.closeSocket() - retFuture.fail(getTransportOsError(err)) - return retFuture + of AddressFamily.Unix: + raiseAssert "Unsupported local address family" + of AddressFamily.None: + let res = bindToDomain(sock, raddress.family) + if res.isErr(): + sock.closeSocket() + retFuture.fail(getTransportOsError(res.error)) + return retFuture proc socketContinuation(udata: pointer) {.gcsafe.} = var ovl = cast[RefCustomOverlapped](udata) @@ -723,7 +776,8 @@ when defined(windows): sock.closeSocket() retFuture.fail(getTransportOsError(err)) else: - let transp = newStreamSocketTransport(sock, bufferSize, child) + let transp = newStreamSocketTransport(sock, bufferSize, child, + transportFlags) # Start tracking transport trackCounter(StreamTransportTrackerName) retFuture.complete(transp) @@ -949,10 +1003,12 @@ when defined(windows): let transp = server.init(server, server.asock) ntransp = newStreamSocketTransport(server.asock, server.bufferSize, - transp) + transp, + server.getTransportFlags()) else: ntransp = newStreamSocketTransport(server.asock, - server.bufferSize, nil) + server.bufferSize, nil, + server.getTransportFlags()) # Start tracking transport trackCounter(StreamTransportTrackerName) asyncSpawn server.function(server, ntransp) @@ -1090,10 +1146,12 @@ when defined(windows): let transp = server.init(server, server.asock) ntransp = newStreamSocketTransport(server.asock, server.bufferSize, - transp) + transp, + server.getTransportFlags()) else: ntransp = newStreamSocketTransport(server.asock, - server.bufferSize, nil) + server.bufferSize, nil, + server.getTransportFlags()) # Start tracking transport trackCounter(StreamTransportTrackerName) retFuture.complete(ntransp) @@ -1446,7 +1504,8 @@ else: break proc newStreamSocketTransport(sock: AsyncFD, bufsize: int, - child: StreamTransport): StreamTransport = + child: StreamTransport, + flags: set[TransportFlags]): StreamTransport = var transp: StreamTransport if not(isNil(child)): transp = child @@ -1454,6 +1513,7 @@ else: transp = StreamTransport(kind: TransportKind.Socket) transp.fd = sock + transp.flags = flags let size = max(bufsize, DefaultStreamBufferSize) transp.buffer = BipBuffer.init(size) transp.state = {ReadPaused, WritePaused} @@ -1535,21 +1595,30 @@ else: retFuture.fail(getTransportOsError(error)) return retFuture - if localAddress != TransportAddress(): - if localAddress.family != address.family: - sock.closeSocket() - retFuture.fail(newException(TransportOsError, - "connect local address domain is not equal to target address domain")) - return retFuture + let transportFlags = + block: + # Add `V4Mapped` flag when `::` address is used and dualstack is + # set to enabled or auto. 
+ var res: set[TransportFlags] + if (localAddress.family == AddressFamily.IPv6) and + localAddress.isAnyLocal(): + if dualstack != DualStackType.Disabled: + res.incl(TransportFlags.V4Mapped) + res + + case localAddress.family + of AddressFamily.IPv4, AddressFamily.IPv6, AddressFamily.Unix: var - localAddr: Sockaddr_storage - localAddrLen: SockLen - localAddress.toSAddr(localAddr, localAddrLen) + lsaddr: Sockaddr_storage + lslen: SockLen + toSAddr(localAddress, lsaddr, lslen) if bindSocket(SocketHandle(sock), - cast[ptr SockAddr](addr localAddr), localAddrLen) != 0: + cast[ptr SockAddr](addr lsaddr), lslen) != 0: sock.closeSocket() retFuture.fail(getTransportOsError(osLastError())) return retFuture + of AddressFamily.None: + discard proc continuation(udata: pointer) = if not(retFuture.finished()): @@ -1568,7 +1637,8 @@ else: retFuture.fail(getTransportOsError(OSErrorCode(err))) return - let transp = newStreamSocketTransport(sock, bufferSize, child) + let transp = newStreamSocketTransport(sock, bufferSize, child, + transportFlags) # Start tracking transport trackCounter(StreamTransportTrackerName) retFuture.complete(transp) @@ -1581,7 +1651,8 @@ else: let res = osdefs.connect(SocketHandle(sock), cast[ptr SockAddr](addr saddr), slen) if res == 0: - let transp = newStreamSocketTransport(sock, bufferSize, child) + let transp = newStreamSocketTransport(sock, bufferSize, child, + transportFlags) # Start tracking transport trackCounter(StreamTransportTrackerName) retFuture.complete(transp) @@ -1634,9 +1705,11 @@ else: let ntransp = if not(isNil(server.init)): let transp = server.init(server, sock) - newStreamSocketTransport(sock, server.bufferSize, transp) + newStreamSocketTransport(sock, server.bufferSize, transp, + server.getTransportFlags()) else: - newStreamSocketTransport(sock, server.bufferSize, nil) + newStreamSocketTransport(sock, server.bufferSize, nil, + server.getTransportFlags()) trackCounter(StreamTransportTrackerName) asyncSpawn server.function(server, ntransp) else: @@ -1724,9 +1797,11 @@ else: let ntransp = if not(isNil(server.init)): let transp = server.init(server, sock) - newStreamSocketTransport(sock, server.bufferSize, transp) + newStreamSocketTransport(sock, server.bufferSize, transp, + server.getTransportFlags()) else: - newStreamSocketTransport(sock, server.bufferSize, nil) + newStreamSocketTransport(sock, server.bufferSize, nil, + server.getTransportFlags()) # Start tracking transport trackCounter(StreamTransportTrackerName) retFuture.complete(ntransp) @@ -1879,166 +1954,196 @@ proc createStreamServer*(host: TransportAddress, ## ``child`` - existing object ``StreamServer``object to initialize, can be ## used to initalize ``StreamServer`` inherited objects. ## ``udata`` - user-defined pointer. - var - saddr: Sockaddr_storage - slen: SockLen - serverSocket: AsyncFD - localAddress: TransportAddress - - when defined(nimdoc): - discard - elif defined(windows): - # Windows - if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}: - serverSocket = + let (serverSocket, localAddress, serverFlags) = + when defined(windows): + # Windows + if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}: + var + saddr: Sockaddr_storage + slen: SockLen + laddress: TransportAddress + + let sockres = + if sock == asyncInvalidSocket: + # TODO (cheatfate): `valueOr` generates weird compile error. 
+ let res = createAsyncSocket2(host.getDomain(), SockType.SOCK_STREAM, + Protocol.IPPROTO_TCP) + if res.isErr(): + raiseTransportOsError(res.error()) + res.get() + else: + setDescriptorBlocking(SocketHandle(sock), false).isOkOr: + raiseTransportOsError(error) + register2(sock).isOkOr: + raiseTransportOsError(error) + sock + # SO_REUSEADDR + if ServerFlags.ReuseAddr in flags: + setSockOpt2(sockres, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(error) + # SO_REUSEPORT + if ServerFlags.ReusePort in flags: + setSockOpt2(sockres, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(error) + # TCP_NODELAY + if ServerFlags.TcpNoDelay in flags: + setSockOpt2(sockres, osdefs.IPPROTO_TCP, + osdefs.TCP_NODELAY, 1).isOkOr: + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(error) + # IPV6_V6ONLY. + if sock == asyncInvalidSocket: + setDualstack(sockres, host.family, dualstack).isOkOr: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(error) + else: + setDualstack(sockres, dualstack).isOkOr: + raiseTransportOsError(error) + + let flagres = + block: + var res = flags + if (host.family == AddressFamily.IPv6) and host.isAnyLocal(): + if dualstack in {DualStackType.Enabled, DualStackType.Auto}: + res.incl(ServerFlags.V4Mapped) + res + + host.toSAddr(saddr, slen) + + if bindSocket(SocketHandle(sockres), + cast[ptr SockAddr](addr saddr), slen) != 0: + let err = osLastError() + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(err) + + slen = SockLen(sizeof(saddr)) + + if getsockname(SocketHandle(sockres), cast[ptr SockAddr](addr saddr), + addr slen) != 0: + let err = osLastError() + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(err) + + fromSAddr(addr saddr, slen, laddress) + + if listen(SocketHandle(sockres), getBacklogSize(backlog)) != 0: + let err = osLastError() + if sock == asyncInvalidSocket: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(err) + + (sockres, laddress, flagres) + elif host.family == AddressFamily.Unix: + (AsyncFD(0), host, flags) + else: + raiseAssert "Incorrect host address family" + else: + # Posix + var + saddr: Sockaddr_storage + slen: SockLen + laddress: TransportAddress + + let sockres = if sock == asyncInvalidSocket: + let proto = if host.family == AddressFamily.Unix: + Protocol.IPPROTO_IP + else: + Protocol.IPPROTO_TCP # TODO (cheatfate): `valueOr` generates weird compile error. 
let res = createAsyncSocket2(host.getDomain(), SockType.SOCK_STREAM, - Protocol.IPPROTO_TCP) + proto) if res.isErr(): raiseTransportOsError(res.error()) res.get() else: - setDescriptorBlocking(SocketHandle(sock), false).isOkOr: + setDescriptorFlags(cint(sock), true, true).isOkOr: raiseTransportOsError(error) register2(sock).isOkOr: raiseTransportOsError(error) sock - # SO_REUSEADDR - if ServerFlags.ReuseAddr in flags: - setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: - if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(error) - # SO_REUSEPORT - if ServerFlags.ReusePort in flags: - setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: - if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(error) - # TCP_NODELAY - if ServerFlags.TcpNoDelay in flags: - setSockOpt2(serverSocket, osdefs.IPPROTO_TCP, - osdefs.TCP_NODELAY, 1).isOkOr: - if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(error) - # IPV6_V6ONLY. - if sock == asyncInvalidSocket: - setDualstack(serverSocket, host.family, dualstack).isOkOr: - discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(error) - else: - setDualstack(serverSocket, dualstack).isOkOr: - raiseTransportOsError(error) + + if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}: + # SO_REUSEADDR + if ServerFlags.ReuseAddr in flags: + setSockOpt2(sockres, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: + if sock == asyncInvalidSocket: + discard unregisterAndCloseFd(sockres) + raiseTransportOsError(error) + # SO_REUSEPORT + if ServerFlags.ReusePort in flags: + setSockOpt2(sockres, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: + if sock == asyncInvalidSocket: + discard unregisterAndCloseFd(sockres) + raiseTransportOsError(error) + # TCP_NODELAY + if ServerFlags.TcpNoDelay in flags: + setSockOpt2(sockres, osdefs.IPPROTO_TCP, + osdefs.TCP_NODELAY, 1).isOkOr: + if sock == asyncInvalidSocket: + discard unregisterAndCloseFd(sockres) + raiseTransportOsError(error) + # IPV6_V6ONLY + if sock == asyncInvalidSocket: + setDualstack(sockres, host.family, dualstack).isOkOr: + discard closeFd(SocketHandle(sockres)) + raiseTransportOsError(error) + else: + setDualstack(sockres, dualstack).isOkOr: + raiseTransportOsError(error) + + elif host.family in {AddressFamily.Unix}: + # We do not care about result here, because if file cannot be removed, + # `bindSocket` will return EADDRINUSE. 
+ discard osdefs.unlink(cast[cstring](baseAddr host.address_un)) + + let flagres = + block: + var res = flags + if (host.family == AddressFamily.IPv6) and host.isAnyLocal(): + if dualstack != DualStackType.Disabled: + res.incl(ServerFlags.V4Mapped) + res host.toSAddr(saddr, slen) - if bindSocket(SocketHandle(serverSocket), - cast[ptr SockAddr](addr saddr), slen) != 0: + + if osdefs.bindSocket(SocketHandle(sockres), + cast[ptr SockAddr](addr saddr), slen) != 0: let err = osLastError() if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) + discard unregisterAndCloseFd(sockres) raiseTransportOsError(err) + # Obtain real address slen = SockLen(sizeof(saddr)) - if getsockname(SocketHandle(serverSocket), cast[ptr SockAddr](addr saddr), + if getsockname(SocketHandle(sockres), cast[ptr SockAddr](addr saddr), addr slen) != 0: let err = osLastError() if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) + discard unregisterAndCloseFd(sockres) raiseTransportOsError(err) - fromSAddr(addr saddr, slen, localAddress) - if listen(SocketHandle(serverSocket), getBacklogSize(backlog)) != 0: + fromSAddr(addr saddr, slen, laddress) + + if listen(SocketHandle(sockres), getBacklogSize(backlog)) != 0: let err = osLastError() if sock == asyncInvalidSocket: - discard closeFd(SocketHandle(serverSocket)) + discard unregisterAndCloseFd(sockres) raiseTransportOsError(err) - elif host.family == AddressFamily.Unix: - serverSocket = AsyncFD(0) - else: - # Posix - serverSocket = - if sock == asyncInvalidSocket: - let proto = if host.family == AddressFamily.Unix: - Protocol.IPPROTO_IP - else: - Protocol.IPPROTO_TCP - # TODO (cheatfate): `valueOr` generates weird compile error. - let res = createAsyncSocket2(host.getDomain(), SockType.SOCK_STREAM, - proto) - if res.isErr(): - raiseTransportOsError(res.error()) - res.get() - else: - setDescriptorFlags(cint(sock), true, true).isOkOr: - raiseTransportOsError(error) - register2(sock).isOkOr: - raiseTransportOsError(error) - sock - - if host.family in {AddressFamily.IPv4, AddressFamily.IPv6}: - # SO_REUSEADDR - if ServerFlags.ReuseAddr in flags: - setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEADDR, 1).isOkOr: - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(error) - # SO_REUSEPORT - if ServerFlags.ReusePort in flags: - setSockOpt2(serverSocket, SOL_SOCKET, SO_REUSEPORT, 1).isOkOr: - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(error) - # TCP_NODELAY - if ServerFlags.TcpNoDelay in flags: - setSockOpt2(serverSocket, osdefs.IPPROTO_TCP, - osdefs.TCP_NODELAY, 1).isOkOr: - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(error) - # IPV6_V6ONLY - if sock == asyncInvalidSocket: - setDualstack(serverSocket, host.family, dualstack).isOkOr: - discard closeFd(SocketHandle(serverSocket)) - raiseTransportOsError(error) - else: - setDualstack(serverSocket, dualstack).isOkOr: - raiseTransportOsError(error) - - elif host.family in {AddressFamily.Unix}: - # We do not care about result here, because if file cannot be removed, - # `bindSocket` will return EADDRINUSE. 
- discard osdefs.unlink(cast[cstring](baseAddr host.address_un)) - - host.toSAddr(saddr, slen) - if osdefs.bindSocket(SocketHandle(serverSocket), - cast[ptr SockAddr](addr saddr), slen) != 0: - let err = osLastError() - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(err) - # Obtain real address - slen = SockLen(sizeof(saddr)) - if getsockname(SocketHandle(serverSocket), cast[ptr SockAddr](addr saddr), - addr slen) != 0: - let err = osLastError() - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(err) - fromSAddr(addr saddr, slen, localAddress) - - if listen(SocketHandle(serverSocket), getBacklogSize(backlog)) != 0: - let err = osLastError() - if sock == asyncInvalidSocket: - discard unregisterAndCloseFd(serverSocket) - raiseTransportOsError(err) + (sockres, laddress, flagres) var sres = if not(isNil(child)): child else: StreamServer() sres.sock = serverSocket - sres.flags = flags + sres.flags = serverFlags sres.function = cbproc sres.init = init sres.bufferSize = bufferSize @@ -2048,9 +2153,7 @@ proc createStreamServer*(host: TransportAddress, {FutureFlag.OwnCancelSchedule}) sres.udata = udata sres.dualstack = dualstack - if localAddress.family == AddressFamily.None: - sres.local = host - else: + if localAddress.family != AddressFamily.None: sres.local = localAddress when defined(windows): @@ -2115,6 +2218,52 @@ proc createStreamServer*(host: TransportAddress, createStreamServer(host, StreamCallback2(nil), flags, sock, backlog, bufferSize, child, init, cast[pointer](udata), dualstack) +proc createStreamServer*(port: Port, + host: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + sock: AsyncFD = asyncInvalidSocket, + backlog: int = DefaultBacklogSize, + bufferSize: int = DefaultStreamBufferSize, + child: StreamServer = nil, + init: TransportInitCallback = nil, + udata: pointer = nil, + dualstack = DualStackType.Auto): StreamServer {. + raises: [TransportOsError].} = + ## Create stream server which will be bound to: + ## 1. IPv6 address `::`, if IPv6 is available + ## 2. IPv4 address `0.0.0.0`, if IPv6 is not available. + let hostname = + if host.isSome(): + initTAddress(host.get(), port) + else: + getAutoAddress(port) + createStreamServer(hostname, StreamCallback2(nil), flags, sock, + backlog, bufferSize, child, init, cast[pointer](udata), + dualstack) + +proc createStreamServer*(cbproc: StreamCallback2, + port: Port, + host: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + sock: AsyncFD = asyncInvalidSocket, + backlog: int = DefaultBacklogSize, + bufferSize: int = DefaultStreamBufferSize, + child: StreamServer = nil, + init: TransportInitCallback = nil, + udata: pointer = nil, + dualstack = DualStackType.Auto): StreamServer {. + raises: [TransportOsError].} = + ## Create stream server which will be bound to: + ## 1. IPv6 address `::`, if IPv6 is available + ## 2. IPv4 address `0.0.0.0`, if IPv6 is not available. 
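+  ##
+  ## A minimal usage sketch (illustration only; ``processClient`` is an
+  ## assumed ``StreamCallback2`` handler, not defined by this patch):
+  ##
+  ##   let server = createStreamServer(processClient, Port(0))
+  ##   server.start()
+  ##   echo "listening on ", server.localAddress()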
+ let hostname = + if host.isSome(): + initTAddress(host.get(), port) + else: + getAutoAddress(port) + createStreamServer(hostname, cbproc, flags, sock, backlog, + bufferSize, child, init, cast[pointer](udata), dualstack) + proc createStreamServer*[T](host: TransportAddress, cbproc: StreamCallback2, flags: set[ServerFlags] = {}, @@ -2163,6 +2312,56 @@ proc createStreamServer*[T](host: TransportAddress, createStreamServer(host, StreamCallback2(nil), fflags, sock, backlog, bufferSize, child, init, cast[pointer](udata), dualstack) +proc createStreamServer*[T](cbproc: StreamCallback2, + port: Port, + host: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + udata: ref T, + sock: AsyncFD = asyncInvalidSocket, + backlog: int = DefaultBacklogSize, + bufferSize: int = DefaultStreamBufferSize, + child: StreamServer = nil, + init: TransportInitCallback = nil, + dualstack = DualStackType.Auto): StreamServer {. + raises: [TransportOsError].} = + ## Create stream server which will be bound to: + ## 1. IPv6 address `::`, if IPv6 is available + ## 2. IPv4 address `0.0.0.0`, if IPv6 is not available. + let fflags = flags + {GCUserData} + GC_ref(udata) + let hostname = + if host.isSome(): + initTAddress(host.get(), port) + else: + getAutoAddress(port) + createStreamServer(hostname, cbproc, fflags, sock, backlog, + bufferSize, child, init, cast[pointer](udata), dualstack) + +proc createStreamServer*[T](port: Port, + host: Opt[IpAddress] = Opt.none(IpAddress), + flags: set[ServerFlags] = {}, + udata: ref T, + sock: AsyncFD = asyncInvalidSocket, + backlog: int = DefaultBacklogSize, + bufferSize: int = DefaultStreamBufferSize, + child: StreamServer = nil, + init: TransportInitCallback = nil, + dualstack = DualStackType.Auto): StreamServer {. + raises: [TransportOsError].} = + ## Create stream server which will be bound to: + ## 1. IPv6 address `::`, if IPv6 is available + ## 2. IPv4 address `0.0.0.0`, if IPv6 is not available. + let fflags = flags + {GCUserData} + GC_ref(udata) + let hostname = + if host.isSome(): + initTAddress(host.get(), port) + else: + getAutoAddress(port) + createStreamServer(hostname, StreamCallback2(nil), fflags, sock, + backlog, bufferSize, child, init, cast[pointer](udata), + dualstack) + proc getUserData*[T](server: StreamServer): T {.inline.} = ## Obtain user data stored in ``server`` object. cast[T](server.udata) diff --git a/tests/testdatagram.nim b/tests/testdatagram.nim index 7b27c3431..38c10ac8e 100644 --- a/tests/testdatagram.nim +++ b/tests/testdatagram.nim @@ -32,6 +32,10 @@ suite "Datagram Transport test suite": m8 = "Bounded multiple clients with messages (" & $ClientsCount & " clients x " & $MessagesCount & " messages)" + type + DatagramSocketType {.pure.} = enum + Bound, Unbound + proc client1(transp: DatagramTransport, raddr: TransportAddress): Future[void] {.async: (raises: []).} = try: @@ -628,6 +632,243 @@ suite "Datagram Transport test suite": await allFutures(sdgram.closeWait(), cdgram.closeWait()) res == 1 + proc performAutoAddressTest(port: Port, + family: AddressFamily): Future[bool] {.async.} = + var + expectRequest1 = "AUTO REQUEST1" + expectRequest2 = "AUTO REQUEST2" + expectResponse = "AUTO RESPONSE" + mappedResponse = "MAPPED RESPONSE" + event = newAsyncEvent() + event2 = newAsyncEvent() + res = 0 + + proc process1(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {. 
+ async: (raises: []).} = + try: + var + bmsg = transp.getMessage() + smsg = string.fromBytes(bmsg) + if smsg == expectRequest1: + inc(res) + await noCancel transp.sendTo( + raddr, addr expectResponse[0], len(expectResponse)) + elif smsg == expectRequest2: + inc(res) + await noCancel transp.sendTo( + raddr, addr mappedResponse[0], len(mappedResponse)) + except TransportError as exc: + raiseAssert exc.msg + except CancelledError as exc: + raiseAssert exc.msg + + proc process2(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {. + async: (raises: []).} = + try: + var + bmsg = transp.getMessage() + smsg = string.fromBytes(bmsg) + if smsg == expectResponse: + inc(res) + event.fire() + except TransportError as exc: + raiseAssert exc.msg + except CancelledError as exc: + raiseAssert exc.msg + + proc process3(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {. + async: (raises: []).} = + try: + var + bmsg = transp.getMessage() + smsg = string.fromBytes(bmsg) + if smsg == mappedResponse: + inc(res) + event2.fire() + except TransportError as exc: + raiseAssert exc.msg + except CancelledError as exc: + raiseAssert exc.msg + + let sdgram = + block: + var res: DatagramTransport + var currentPort = port + for i in 0 ..< 10: + res = + try: + newDatagramTransport(process1, currentPort, + flags = {ServerFlags.ReusePort}) + except TransportOsError: + echo "Unable to create transport on port ", currentPort + currentPort = Port(uint16(currentPort) + 1'u16) + nil + if not(isNil(res)): + break + doAssert(not(isNil(res)), "Unable to create transport, giving up") + res + + var + address = + case family + of AddressFamily.IPv4: + initTAddress("127.0.0.1:0") + of AddressFamily.IPv6: + initTAddress("::1:0") + of AddressFamily.Unix, AddressFamily.None: + raiseAssert "Not allowed" + + let + cdgram = + case family + of AddressFamily.IPv4: + newDatagramTransport(process2, local = address) + of AddressFamily.IPv6: + newDatagramTransport6(process2, local = address) + of AddressFamily.Unix, AddressFamily.None: + raiseAssert "Not allowed" + + address.port = sdgram.localAddress().port + + try: + await noCancel cdgram.sendTo( + address, addr expectRequest1[0], len(expectRequest1)) + except TransportError: + discard + + if family == AddressFamily.IPv6: + var remote = initTAddress("127.0.0.1:0") + remote.port = sdgram.localAddress().port + let wtransp = + newDatagramTransport(process3, local = initTAddress("0.0.0.0:0")) + try: + await noCancel wtransp.sendTo( + remote, addr expectRequest2[0], len(expectRequest2)) + except TransportError as exc: + raiseAssert "Got transport error, reason = " & $exc.msg + + try: + await event2.wait().wait(1.seconds) + except CatchableError: + discard + + await wtransp.closeWait() + + try: + await event.wait().wait(1.seconds) + except CatchableError: + discard + + await allFutures(sdgram.closeWait(), cdgram.closeWait()) + + if family == AddressFamily.IPv4: + res == 2 + else: + res == 4 + + proc performAutoAddressTest2( + address1: Opt[IpAddress], + address2: Opt[IpAddress], + port: Port, + sendType: AddressFamily, + boundType: DatagramSocketType + ): Future[bool] {.async.} = + let + expectRequest = "TEST REQUEST" + expectResponse = "TEST RESPONSE" + event = newAsyncEvent() + var res = 0 + + proc process1(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {. 
+ async: (raises: []).} = + if raddr.family != sendType: + raiseAssert "Incorrect address family received [" & $raddr & + "], expected [" & $sendType & "]" + try: + let + bmsg = transp.getMessage() + smsg = string.fromBytes(bmsg) + if smsg == expectRequest: + inc(res) + await noCancel transp.sendTo( + raddr, unsafeAddr expectResponse[0], len(expectResponse)) + except TransportError as exc: + raiseAssert exc.msg + except CancelledError as exc: + raiseAssert exc.msg + + proc process2(transp: DatagramTransport, + raddr: TransportAddress): Future[void] {. + async: (raises: []).} = + if raddr.family != sendType: + raiseAssert "Incorrect address family received [" & $raddr & + "], expected [" & $sendType & "]" + try: + let + bmsg = transp.getMessage() + smsg = string.fromBytes(bmsg) + if smsg == expectResponse: + inc(res) + event.fire() + except TransportError as exc: + raiseAssert exc.msg + except CancelledError as exc: + raiseAssert exc.msg + + let + serverFlags = {ServerFlags.ReuseAddr} + server = newDatagramTransport(process1, flags = serverFlags, + local = address1, localPort = port) + serverAddr = server.localAddress() + serverPort = serverAddr.port + remoteAddress = + case sendType + of AddressFamily.IPv4: + var res = initTAddress("127.0.0.1:0") + res.port = serverPort + res + of AddressFamily.IPv6: + var res = initTAddress("[::1]:0") + res.port = serverPort + res + else: + raiseAssert "Incorrect sending type" + remoteIpAddress = Opt.some(remoteAddress.toIpAddress()) + client = + case boundType + of DatagramSocketType.Bound: + newDatagramTransport(process2, + localPort = Port(0), remotePort = serverPort, + local = address2, remote = remoteIpAddress) + of DatagramSocketType.Unbound: + newDatagramTransport(process2, + localPort = Port(0), remotePort = Port(0), + local = address2) + + try: + case boundType + of DatagramSocketType.Bound: + await noCancel client.send( + unsafeAddr expectRequest[0], len(expectRequest)) + of DatagramSocketType.Unbound: + await noCancel client.sendTo(remoteAddress, + unsafeAddr expectRequest[0], len(expectRequest)) + except TransportError as exc: + raiseAssert "Could not send datagram to remote peer, reason = " & $exc.msg + + try: + await event.wait().wait(1.seconds) + except CatchableError: + discard + + await allFutures(server.closeWait(), client.closeWait()) + + res == 2 + test "close(transport) test": check waitFor(testTransportClose()) == true test m1: @@ -730,3 +971,104 @@ suite "Datagram Transport test suite": DualStackType.Auto, initTAddress("[::1]:0"))) == true else: skip() + asyncTest "[IP] Auto-address constructor test (*:0)": + if isAvailable(AddressFamily.IPv6): + check: + (await performAutoAddressTest(Port(0), AddressFamily.IPv6)) == true + # If IPv6 is available newAutoDatagramTransport should bind to `::` - this + # means that we should be able to connect to it via IPV4_MAPPED address, + # but only when IPv4 is also available. + if isAvailable(AddressFamily.IPv4): + check: + (await performAutoAddressTest(Port(0), AddressFamily.IPv4)) == true + else: + # If IPv6 is not available newAutoDatagramTransport should bind to + # `0.0.0.0` - this means we should be able to connect to it via IPv4 + # address. 
+ if isAvailable(AddressFamily.IPv4): + check: + (await performAutoAddressTest(Port(0), AddressFamily.IPv4)) == true + asyncTest "[IP] Auto-address constructor test (*:30231)": + if isAvailable(AddressFamily.IPv6): + check: + (await performAutoAddressTest(Port(30231), AddressFamily.IPv6)) == true + # If IPv6 is available newAutoDatagramTransport should bind to `::` - this + # means that we should be able to connect to it via IPV4_MAPPED address, + # but only when IPv4 is also available. + if isAvailable(AddressFamily.IPv4): + check: + (await performAutoAddressTest(Port(30231), AddressFamily.IPv4)) == + true + else: + # If IPv6 is not available newAutoDatagramTransport should bind to + # `0.0.0.0` - this means we should be able to connect to it via IPv4 + # address. + if isAvailable(AddressFamily.IPv4): + check: + (await performAutoAddressTest(Port(30231), AddressFamily.IPv4)) == + true + + for socketType in DatagramSocketType: + for portNumber in [Port(0), Port(30231)]: + asyncTest "[IP] IPv6 mapping test (" & $socketType & + "/auto-auto:" & $int(portNumber) & ")": + if isAvailable(AddressFamily.IPv6): + let + address1 = Opt.none(IpAddress) + address2 = Opt.none(IpAddress) + + check: + (await performAutoAddressTest2( + address1, address2, portNumber, AddressFamily.IPv4, socketType)) + (await performAutoAddressTest2( + address1, address2, portNumber, AddressFamily.IPv6, socketType)) + else: + skip() + + asyncTest "[IP] IPv6 mapping test (" & $socketType & + "/auto-ipv6:" & $int(portNumber) & ")": + if isAvailable(AddressFamily.IPv6): + let + address1 = Opt.none(IpAddress) + address2 = Opt.some(initTAddress("[::1]:0").toIpAddress()) + check: + (await performAutoAddressTest2( + address1, address2, portNumber, AddressFamily.IPv6, socketType)) + else: + skip() + + asyncTest "[IP] IPv6 mapping test (" & $socketType & + "/auto-ipv4:" & $int(portNumber) & ")": + if isAvailable(AddressFamily.IPv6): + let + address1 = Opt.none(IpAddress) + address2 = Opt.some(initTAddress("127.0.0.1:0").toIpAddress()) + check: + (await performAutoAddressTest2(address1, address2, portNumber, + AddressFamily.IPv4, socketType)) + else: + skip() + + asyncTest "[IP] IPv6 mapping test (" & $socketType & + "/ipv6-auto:" & $int(portNumber) & ")": + if isAvailable(AddressFamily.IPv6): + let + address1 = Opt.some(initTAddress("[::1]:0").toIpAddress()) + address2 = Opt.none(IpAddress) + check: + (await performAutoAddressTest2(address1, address2, portNumber, + AddressFamily.IPv6, socketType)) + else: + skip() + + asyncTest "[IP] IPv6 mapping test (" & $socketType & + "/ipv4-auto:" & $int(portNumber) & ")": + if isAvailable(AddressFamily.IPv6): + let + address1 = Opt.some(initTAddress("127.0.0.1:0").toIpAddress()) + address2 = Opt.none(IpAddress) + check: + (await performAutoAddressTest2(address1, address2, portNumber, + AddressFamily.IPv4, socketType)) + else: + skip() diff --git a/tests/teststream.nim b/tests/teststream.nim index 340575c62..25278f4b4 100644 --- a/tests/teststream.nim +++ b/tests/teststream.nim @@ -1486,6 +1486,170 @@ suite "Stream Transport test suite": await server.closeWait() testResult + proc performAutoAddressTest(port: Port, + family: AddressFamily): Future[bool] {. 
+ async: (raises: []).} = + let server = + block: + var currentPort = port + var res: StreamServer + for i in 0 ..< 10: + res = + try: + createStreamServer(port, flags = {ServerFlags.ReuseAddr}) + except TransportOsError as exc: + echo "Unable to create server on port ", currentPort, + " with error: ", exc.msg + currentPort = Port(uint16(currentPort) + 1'u16) + nil + if not(isNil(res)): + break + doAssert(not(isNil(res)), "Unable to create server, giving up") + res + + var + address = + case family + of AddressFamily.IPv4: + try: + initTAddress("127.0.0.1:0") + except TransportAddressError as exc: + raiseAssert exc.msg + of AddressFamily.IPv6: + try: + initTAddress("::1:0") + except TransportAddressError as exc: + raiseAssert exc.msg + of AddressFamily.Unix, AddressFamily.None: + raiseAssert "Not allowed" + + address.port = server.localAddress().port + var acceptFut = server.accept() + let + clientTransp = + try: + let res = await connect(address).wait(2.seconds) + Opt.some(res) + except CatchableError: + Opt.none(StreamTransport) + serverTransp = + if clientTransp.isSome(): + let res = + try: + await noCancel acceptFut + except TransportError as exc: + raiseAssert exc.msg + Opt.some(res) + else: + Opt.none(StreamTransport) + + let testResult = clientTransp.isSome() and serverTransp.isSome() + var pending: seq[FutureBase] + if clientTransp.isSome(): + pending.add(closeWait(clientTransp.get())) + if serverTransp.isSome(): + pending.add(closeWait(serverTransp.get())) + else: + pending.add(cancelAndWait(acceptFut)) + await noCancel allFutures(pending) + try: + server.stop() + except TransportError as exc: + raiseAssert exc.msg + await server.closeWait() + testResult + + proc performAutoAddressTest2( + address1: Opt[IpAddress], + address2: Opt[IpAddress], + port: Port, + sendType: AddressFamily + ): Future[bool] {.async: (raises: []).} = + let + server = + block: + var + currentPort = port + res: StreamServer + for i in 0 ..< 10: + res = + try: + createStreamServer(port, host = address1, + flags = {ServerFlags.ReuseAddr}) + except TransportOsError as exc: + echo "Unable to create server on port ", currentPort, + " with error: ", exc.msg + currentPort = Port(uint16(currentPort) + 1'u16) + nil + if not(isNil(res)): + break + doAssert(not(isNil(res)), "Unable to create server, giving up") + res + serverAddr = server.localAddress() + serverPort = serverAddr.port + remoteAddress = + try: + case sendType + of AddressFamily.IPv4: + var res = initTAddress("127.0.0.1:0") + res.port = serverPort + res + of AddressFamily.IPv6: + var res = initTAddress("[::1]:0") + res.port = serverPort + res + else: + raiseAssert "Incorrect sending type" + except TransportAddressError as exc: + raiseAssert "Unable to initialize transport address, " & + "reason = " & exc.msg + acceptFut = server.accept() + + let + clientTransp = + try: + if address2.isSome(): + let + laddr = initTAddress(address2.get(), Port(0)) + res = await connect(remoteAddress, localAddress = laddr). 
+ wait(2.seconds) + Opt.some(res) + + else: + let res = await connect(remoteAddress).wait(2.seconds) + Opt.some(res) + except CatchableError: + Opt.none(StreamTransport) + serverTransp = + if clientTransp.isSome(): + let res = + try: + await noCancel acceptFut + except TransportError as exc: + raiseAssert exc.msg + Opt.some(res) + else: + Opt.none(StreamTransport) + testResult = + clientTransp.isSome() and serverTransp.isSome() and + (serverTransp.get().remoteAddress2().get().family == sendType) and + (clientTransp.get().remoteAddress2().get().family == sendType) + var pending: seq[FutureBase] + if clientTransp.isSome(): + pending.add(closeWait(clientTransp.get())) + if serverTransp.isSome(): + pending.add(closeWait(serverTransp.get())) + else: + pending.add(cancelAndWait(acceptFut)) + await noCancel allFutures(pending) + try: + server.stop() + except TransportError as exc: + raiseAssert exc.msg + await server.closeWait() + + testResult + markFD = getCurrentFD() for i in 0.. Date: Wed, 17 Apr 2024 17:27:14 +0300 Subject: [PATCH 132/146] Fix inability to change httpclient's internal buffer size. (#531) Add test. Address #529. --- chronos/apps/http/httpclient.nim | 66 +++++++++++++++++++++----------- tests/testhttpclient.nim | 60 +++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+), 23 deletions(-) diff --git a/chronos/apps/http/httpclient.nim b/chronos/apps/http/httpclient.nim index 414b1d3fd..3b4844d94 100644 --- a/chronos/apps/http/httpclient.nim +++ b/chronos/apps/http/httpclient.nim @@ -159,6 +159,7 @@ type redirectCount: int timestamp*: Moment duration*: Duration + headersBuffer: seq[byte] HttpClientRequestRef* = ref HttpClientRequest @@ -859,6 +860,7 @@ proc closeWait*(request: HttpClientRequestRef) {.async: (raises: []).} = await noCancel(allFutures(pending)) request.session = nil request.error = nil + request.headersBuffer.reset() request.state = HttpReqRespState.Closed untrackCounter(HttpClientRequestTrackerName) @@ -992,14 +994,14 @@ proc prepareResponse( proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {. async: (raises: [CancelledError, HttpError]).} = - var buffer: array[HttpMaxHeadersSize, byte] let timestamp = Moment.now() req.connection.setTimestamp(timestamp) let bytesRead = try: - await req.connection.reader.readUntil(addr buffer[0], - len(buffer), HeadersMark).wait( + await req.connection.reader.readUntil(addr req.headersBuffer[0], + len(req.headersBuffer), + HeadersMark).wait( req.session.headersTimeout) except AsyncTimeoutError: raiseHttpReadError("Reading response headers timed out") @@ -1007,23 +1009,25 @@ proc getResponse(req: HttpClientRequestRef): Future[HttpClientResponseRef] {. 
raiseHttpReadError( "Could not read response headers, reason: " & $exc.msg) - let response = prepareResponse(req, buffer.toOpenArray(0, bytesRead - 1)) - if response.isErr(): - raiseHttpProtocolError(response.error()) - let res = response.get() - res.setTimestamp(timestamp) - return res + let response = + prepareResponse(req, + req.headersBuffer.toOpenArray(0, bytesRead - 1)).valueOr: + raiseHttpProtocolError(error) + response.setTimestamp(timestamp) + response proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, meth: HttpMethod = MethodGet, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = []): HttpClientRequestRef = let res = HttpClientRequestRef( state: HttpReqRespState.Ready, session: session, meth: meth, version: version, flags: flags, headers: HttpTable.init(headers), - address: ha, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body + address: ha, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body, + headersBuffer: newSeq[byte](max(maxResponseHeadersSize, HttpMaxHeadersSize)) ) trackCounter(HttpClientRequestTrackerName) res @@ -1032,13 +1036,15 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, meth: HttpMethod = MethodGet, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = []): HttpResult[HttpClientRequestRef] = let address = ? session.getAddress(parseUri(url)) let res = HttpClientRequestRef( state: HttpReqRespState.Ready, session: session, meth: meth, version: version, flags: flags, headers: HttpTable.init(headers), - address: address, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body + address: address, bodyFlag: HttpClientBodyFlag.Custom, buffer: @body, + headersBuffer: newSeq[byte](max(maxResponseHeadersSize, HttpMaxHeadersSize)) ) trackCounter(HttpClientRequestTrackerName) ok(res) @@ -1046,48 +1052,58 @@ proc new*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [] ): HttpResult[HttpClientRequestRef] = - HttpClientRequestRef.new(session, url, MethodGet, version, flags, headers) + HttpClientRequestRef.new(session, url, MethodGet, version, flags, + maxResponseHeadersSize, headers) proc get*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [] ): HttpClientRequestRef = - HttpClientRequestRef.new(session, ha, MethodGet, version, flags, headers) + HttpClientRequestRef.new(session, ha, MethodGet, version, flags, + maxResponseHeadersSize, headers) proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = [] ): HttpResult[HttpClientRequestRef] = - HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers, - body) + HttpClientRequestRef.new(session, 
url, MethodPost, version, flags, + maxResponseHeadersSize, headers, body) proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, url: string, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[char] = []): HttpResult[HttpClientRequestRef] = - HttpClientRequestRef.new(session, url, MethodPost, version, flags, headers, + HttpClientRequestRef.new(session, url, MethodPost, version, flags, + maxResponseHeadersSize, headers, body.toOpenArrayByte(0, len(body) - 1)) proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[byte] = []): HttpClientRequestRef = - HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers, - body) + HttpClientRequestRef.new(session, ha, MethodPost, version, flags, + maxResponseHeadersSize, headers, body) proc post*(t: typedesc[HttpClientRequestRef], session: HttpSessionRef, ha: HttpAddress, version: HttpVersion = HttpVersion11, flags: set[HttpClientRequestFlag] = {}, + maxResponseHeadersSize: int = HttpMaxHeadersSize, headers: openArray[HttpHeaderTuple] = [], body: openArray[char] = []): HttpClientRequestRef = - HttpClientRequestRef.new(session, ha, MethodPost, version, flags, headers, + HttpClientRequestRef.new(session, ha, MethodPost, version, flags, + maxResponseHeadersSize, headers, body.toOpenArrayByte(0, len(body) - 1)) proc prepareRequest(request: HttpClientRequestRef): string = @@ -1454,8 +1470,10 @@ proc redirect*(request: HttpClientRequestRef, var res = request.headers res.set(HostHeader, ha.hostname) res - var res = HttpClientRequestRef.new(request.session, ha, request.meth, - request.version, request.flags, headers.toList(), request.buffer) + var res = + HttpClientRequestRef.new(request.session, ha, request.meth, + request.version, request.flags, headers = headers.toList(), + body = request.buffer) res.redirectCount = redirectCount ok(res) @@ -1478,8 +1496,10 @@ proc redirect*(request: HttpClientRequestRef, var res = request.headers res.set(HostHeader, address.hostname) res - var res = HttpClientRequestRef.new(request.session, address, request.meth, - request.version, request.flags, headers.toList(), request.buffer) + var res = + HttpClientRequestRef.new(request.session, address, request.meth, + request.version, request.flags, headers = headers.toList(), + body = request.buffer) res.redirectCount = redirectCount ok(res) diff --git a/tests/testhttpclient.nim b/tests/testhttpclient.nim index a468aaed9..e298d5ae6 100644 --- a/tests/testhttpclient.nim +++ b/tests/testhttpclient.nim @@ -1518,3 +1518,63 @@ suite "HTTP client testing suite": res.isErr() and res.error == HttpAddressErrorType.NameLookupFailed res.error.isRecoverableError() not(res.error.isCriticalError()) + + asyncTest "HTTPS response headers buffer size test": + const HeadersSize = HttpMaxHeadersSize + let expectValue = + string.fromBytes(createBigMessage("HEADERSTEST", HeadersSize)) + proc process(r: RequestFence): Future[HttpResponseRef] {. 
+ async: (raises: [CancelledError]).} = + if r.isOk(): + let request = r.get() + try: + case request.uri.path + of "/test": + let headers = HttpTable.init([("big-header", expectValue)]) + await request.respond(Http200, "ok", headers) + else: + await request.respond(Http404, "Page not found") + except HttpWriteError as exc: + defaultResponse(exc) + else: + defaultResponse() + + var server = createServer(initTAddress("127.0.0.1:0"), process, false) + server.start() + let + address = server.instance.localAddress() + ha = getAddress(address, HttpClientScheme.NonSecure, "/test") + session = HttpSessionRef.new() + let + req1 = HttpClientRequestRef.new(session, ha) + req2 = + HttpClientRequestRef.new(session, ha, + maxResponseHeadersSize = HttpMaxHeadersSize * 2) + res1 = + try: + let res {.used.} = await send(req1) + await closeWait(req1) + await closeWait(res) + false + except HttpReadError: + true + except HttpError: + await closeWait(req1) + false + except CancelledError: + await closeWait(req1) + false + + res2 = await send(req2) + + check: + res1 == true + res2.status == 200 + res2.headers.getString("big-header") == expectValue + + await req1.closeWait() + await req2.closeWait() + await res2.closeWait() + await session.closeWait() + await server.stop() + await server.closeWait() From bd7d84fbcb738ee06755aa9c0b0ebd94e93f3a62 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 17 Apr 2024 17:41:36 +0300 Subject: [PATCH 133/146] Fix AsyncStreamReader constructor declaration mistypes. (#533) --- chronos/streams/asyncstream.nim | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 301b0868d..0ff9f4b90 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -1082,6 +1082,22 @@ proc newAsyncStreamReader*(tsource: StreamTransport): AsyncStreamReader = res.init(tsource) res +proc newAsyncStreamReader*[T](rsource: AsyncStreamReader, + udata: ref T): AsyncStreamReader = + ## Create copy of AsyncStreamReader object ``rsource``. + ## + ## ``udata`` - user object which will be associated with new AsyncStreamReader + ## object. + var res = AsyncStreamReader() + res.init(rsource, udata) + res + +proc newAsyncStreamReader*(rsource: AsyncStreamReader): AsyncStreamReader = + ## Create copy of AsyncStreamReader object ``rsource``. + var res = AsyncStreamReader() + res.init(rsource) + res + proc newAsyncStreamWriter*[T](wsource: AsyncStreamWriter, loop: StreamWriterLoop, queueSize = AsyncStreamDefaultQueueSize, @@ -1147,22 +1163,6 @@ proc newAsyncStreamWriter*(wsource: AsyncStreamWriter): AsyncStreamWriter = res.init(wsource) res -proc newAsyncStreamReader*[T](rsource: AsyncStreamWriter, - udata: ref T): AsyncStreamWriter = - ## Create copy of AsyncStreamReader object ``rsource``. - ## - ## ``udata`` - user object which will be associated with new AsyncStreamReader - ## object. - var res = AsyncStreamReader() - res.init(rsource, udata) - res - -proc newAsyncStreamReader*(rsource: AsyncStreamReader): AsyncStreamReader = - ## Create copy of AsyncStreamReader object ``rsource``. - var res = AsyncStreamReader() - res.init(rsource) - res - proc getUserData*[T](rw: AsyncStreamRW): T {.inline.} = ## Obtain user data associated with AsyncStreamReader or AsyncStreamWriter ## object ``rw``. 
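For reference, a minimal sketch of how the corrected copy-constructor overloads are meant to be used (illustrative only, not part of the patch above; it assumes an already-connected `StreamTransport` is handed in, and `SessionData` is a made-up user type):

```nim
import chronos
import chronos/streams/asyncstream

type SessionData = object
  name: string   # made-up user data carried alongside the reader

proc wrapReaders(transp: StreamTransport) {.async.} =
  # Reader fed directly by the transport.
  let base = newAsyncStreamReader(transp)
  # The overloads fixed above: both accept an AsyncStreamReader and return
  # an AsyncStreamReader (they were previously mistyped to use
  # AsyncStreamWriter).
  let
    plain = newAsyncStreamReader(base)
    tagged = newAsyncStreamReader(base, (ref SessionData)(name: "client"))
  # The user object passed at construction time can be retrieved later.
  echo getUserData[ref SessionData](tagged).name
  await tagged.closeWait()
  await plain.closeWait()
  await base.closeWait()
```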
From 7a3eaffa4f4bde711510b0aef4d2b1f18abbb53c Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 18 Apr 2024 02:08:19 +0300 Subject: [PATCH 134/146] Fix English spelling for `readed` variable. (#534) --- chronos/streams/asyncstream.nim | 20 ++++++++++---------- chronos/transports/stream.nim | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/chronos/streams/asyncstream.nim b/chronos/streams/asyncstream.nim index 0ff9f4b90..bf6daa0e4 100644 --- a/chronos/streams/asyncstream.nim +++ b/chronos/streams/asyncstream.nim @@ -317,7 +317,7 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer, ## Read exactly ``nbytes`` bytes from read-only stream ``rstream`` and store ## it to ``pbytes``. ## - ## If EOF is received and ``nbytes`` is not yet readed, the procedure + ## If EOF is received and ``nbytes`` is not yet read, the procedure ## will raise ``AsyncStreamIncompleteError``. doAssert(not(isNil(pbytes)), "pbytes must not be nil") doAssert(nbytes >= 0, "nbytes must be non-negative integer") @@ -347,16 +347,16 @@ proc readExactly*(rstream: AsyncStreamReader, pbytes: pointer, if len(rstream.buffer.backend) == 0: if rstream.atEof(): raise newAsyncStreamIncompleteError() - var readed = 0 + var bytesRead = 0 for (region, rsize) in rstream.buffer.backend.regions(): let count = min(nbytes - index, rsize) - readed += count + bytesRead += count if count > 0: copyMem(addr pbuffer[index], region, count) index += count if index == nbytes: break - (consumed: readed, done: index == nbytes) + (consumed: bytesRead, done: index == nbytes) proc readOnce*(rstream: AsyncStreamReader, pbytes: pointer, nbytes: int): Future[int] {. @@ -547,11 +547,11 @@ proc read*(rstream: AsyncStreamReader): Future[seq[byte]] {. if rstream.atEof(): (0, true) else: - var readed = 0 + var bytesRead = 0 for (region, rsize) in rstream.buffer.backend.regions(): - readed += rsize + bytesRead += rsize res.add(region.toUnchecked().toOpenArray(0, rsize - 1)) - (readed, false) + (bytesRead, false) res proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {. @@ -581,12 +581,12 @@ proc read*(rstream: AsyncStreamReader, n: int): Future[seq[byte]] {. if rstream.atEof(): (0, true) else: - var readed = 0 + var bytesRead = 0 for (region, rsize) in rstream.buffer.backend.regions(): let count = min(rsize, n - len(res)) - readed += count + bytesRead += count res.add(region.toUnchecked().toOpenArray(0, count - 1)) - (readed, len(res) == n) + (bytesRead, len(res) == n) res proc consume*(rstream: AsyncStreamReader): Future[int] {. diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index f4ef1adf7..999254307 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -2579,7 +2579,7 @@ proc readExactly*(transp: StreamTransport, pbytes: pointer, ## ## If ``nbytes == 0`` this operation will return immediately. ## - ## If EOF is received and ``nbytes`` is not yet readed, the procedure + ## If EOF is received and ``nbytes`` is not yet read, the procedure ## will raise ``TransportIncompleteError``, potentially with some bytes ## already written. 
doAssert(not(isNil(pbytes)), "pbytes must not be nil") @@ -2595,16 +2595,16 @@ proc readExactly*(transp: StreamTransport, pbytes: pointer, if len(transp.buffer) == 0: if transp.atEof(): raise newException(TransportIncompleteError, "Data incomplete!") - var readed = 0 + var bytesRead = 0 for (region, rsize) in transp.buffer.regions(): let count = min(nbytes - index, rsize) - readed += count + bytesRead += count if count > 0: copyMem(addr pbuffer[index], region, count) index += count if index == nbytes: break - (consumed: readed, done: index == nbytes) + (consumed: bytesRead, done: index == nbytes) proc readOnce*(transp: StreamTransport, pbytes: pointer, nbytes: int): Future[int] {. @@ -2736,11 +2736,11 @@ proc read*(transp: StreamTransport): Future[seq[byte]] {. if transp.atEof(): (0, true) else: - var readed = 0 + var bytesRead = 0 for (region, rsize) in transp.buffer.regions(): - readed += rsize + bytesRead += rsize res.add(region.toUnchecked().toOpenArray(0, rsize - 1)) - (readed, false) + (bytesRead, false) res proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {. @@ -2756,12 +2756,12 @@ proc read*(transp: StreamTransport, n: int): Future[seq[byte]] {. if transp.atEof(): (0, true) else: - var readed = 0 + var bytesRead = 0 for (region, rsize) in transp.buffer.regions(): let count = min(rsize, n - len(res)) - readed += count + bytesRead += count res.add(region.toUnchecked().toOpenArray(0, count - 1)) - (readed, len(res) == n) + (bytesRead, len(res) == n) res proc consume*(transp: StreamTransport): Future[int] {. From d184a92227e8b9ccaa10e8f1b7547caf81770225 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Fri, 19 Apr 2024 16:43:34 +0300 Subject: [PATCH 135/146] Fix rare cancellation race issue on timeout for wait/withTimeout. (#536) Add tests. --- chronos/internal/asyncfutures.nim | 36 +++++--- tests/testfut.nim | 138 ++++++++++++++++++++++++++++++ 2 files changed, 163 insertions(+), 11 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 7f93b0e15..c3396bfbe 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1466,18 +1466,25 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. timer: TimerCallback timeouted = false - template completeFuture(fut: untyped): untyped = + template completeFuture(fut: untyped, timeout: bool): untyped = if fut.failed() or fut.completed(): retFuture.complete(true) else: - retFuture.cancelAndSchedule() + if timeout: + retFuture.complete(false) + else: + retFuture.cancelAndSchedule() # TODO: raises annotation shouldn't be needed, but likely similar issue as # https://github.com/nim-lang/Nim/issues/17369 proc continuation(udata: pointer) {.gcsafe, raises: [].} = if not(retFuture.finished()): if timeouted: - retFuture.complete(false) + # We should not unconditionally complete result future with `false`. + # Initiated by timeout handler cancellation could fail, in this case + # we could get `fut` in complete or in failed state, so we should + # complete result future with `true` instead of `false` here. + fut.completeFuture(timeouted) return if not(fut.finished()): # Timer exceeded first, we going to cancel `fut` and wait until it @@ -1488,7 +1495,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. # Future `fut` completed/failed/cancelled first. 
if not(isNil(timer)): clearTimer(timer) - fut.completeFuture() + fut.completeFuture(false) timer = nil # TODO: raises annotation shouldn't be needed, but likely similar issue as @@ -1499,7 +1506,7 @@ proc withTimeout*[T](fut: Future[T], timeout: Duration): Future[bool] {. clearTimer(timer) fut.cancelSoon() else: - fut.completeFuture() + fut.completeFuture(false) timer = nil if fut.finished(): @@ -1528,11 +1535,14 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = timer: TimerCallback timeouted = false - template completeFuture(fut: untyped): untyped = + template completeFuture(fut: untyped, timeout: bool): untyped = if fut.failed(): retFuture.fail(fut.error(), warn = false) elif fut.cancelled(): - retFuture.cancelAndSchedule() + if timeout: + retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) + else: + retFuture.cancelAndSchedule() else: when type(fut).T is void: retFuture.complete() @@ -1542,7 +1552,11 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = proc continuation(udata: pointer) {.raises: [].} = if not(retFuture.finished()): if timeouted: - retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) + # We should not unconditionally fail `retFuture` with + # `AsyncTimeoutError`. Initiated by timeout handler cancellation + # could fail, in this case we could get `fut` in complete or in failed + # state, so we should return error/value instead of `AsyncTimeoutError`. + fut.completeFuture(timeouted) return if not(fut.finished()): # Timer exceeded first. @@ -1552,7 +1566,7 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = # Future `fut` completed/failed/cancelled first. if not(isNil(timer)): clearTimer(timer) - fut.completeFuture() + fut.completeFuture(false) timer = nil var cancellation: proc(udata: pointer) {.gcsafe, raises: [].} @@ -1562,12 +1576,12 @@ proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = clearTimer(timer) fut.cancelSoon() else: - fut.completeFuture() + fut.completeFuture(false) timer = nil if fut.finished(): - fut.completeFuture() + fut.completeFuture(false) else: if timeout.isZero(): retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) diff --git a/tests/testfut.nim b/tests/testfut.nim index 1cf0aed5f..46e9c2a00 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -2177,3 +2177,141 @@ suite "Future[T] behavior test suite": check: not compiles(Future[void].Raising([42])) not compiles(Future[void].Raising(42)) + + asyncTest "Timeout/cancellation race wait() test": + proc raceTest(T: typedesc, itype: int) {.async.} = + let monitorFuture = newFuture[T]("monitor", + {FutureFlag.OwnCancelSchedule}) + + proc raceProc0(future: Future[T]): Future[T] {.async.} = + await future + proc raceProc1(future: Future[T]): Future[T] {.async.} = + await raceProc0(future) + proc raceProc2(future: Future[T]): Future[T] {.async.} = + await raceProc1(future) + + proc activation(udata: pointer) {.gcsafe.} = + if itype == 0: + when T is void: + monitorFuture.complete() + elif T is int: + monitorFuture.complete(100) + elif itype == 1: + monitorFuture.fail(newException(ValueError, "test")) + else: + monitorFuture.cancelAndSchedule() + + monitorFuture.cancelCallback = activation + let + testFut = raceProc2(monitorFuture) + waitFut = wait(testFut, 10.milliseconds) + + when T is void: + let waitRes = + try: + await waitFut + if itype == 0: + true + else: + false + except CancelledError: + false + except CatchableError: + if 
itype != 0: + true + else: + false + check waitRes == true + elif T is int: + let waitRes = + try: + let res = await waitFut + if itype == 0: + (true, res) + else: + (false, -1) + except CancelledError: + (false, -1) + except CatchableError: + if itype != 0: + (true, 0) + else: + (false, -1) + if itype == 0: + check: + waitRes[0] == true + waitRes[1] == 100 + else: + check: + waitRes[0] == true + + await raceTest(void, 0) + await raceTest(void, 1) + await raceTest(void, 2) + await raceTest(int, 0) + await raceTest(int, 1) + await raceTest(int, 2) + + asyncTest "Timeout/cancellation race withTimeout() test": + proc raceTest(T: typedesc, itype: int) {.async.} = + let monitorFuture = newFuture[T]("monitor", + {FutureFlag.OwnCancelSchedule}) + + proc raceProc0(future: Future[T]): Future[T] {.async.} = + await future + proc raceProc1(future: Future[T]): Future[T] {.async.} = + await raceProc0(future) + proc raceProc2(future: Future[T]): Future[T] {.async.} = + await raceProc1(future) + + proc activation(udata: pointer) {.gcsafe.} = + if itype == 0: + when T is void: + monitorFuture.complete() + elif T is int: + monitorFuture.complete(100) + elif itype == 1: + monitorFuture.fail(newException(ValueError, "test")) + else: + monitorFuture.cancelAndSchedule() + + monitorFuture.cancelCallback = activation + let + testFut = raceProc2(monitorFuture) + waitFut = withTimeout(testFut, 10.milliseconds) + + when T is void: + let waitRes = + try: + await waitFut + except CancelledError: + false + except CatchableError: + false + if itype == 0: + check waitRes == true + elif itype == 1: + check waitRes == true + else: + check waitRes == false + elif T is int: + let waitRes = + try: + await waitFut + except CancelledError: + false + except CatchableError: + false + if itype == 0: + check waitRes == true + elif itype == 1: + check waitRes == true + else: + check waitRes == false + + await raceTest(void, 0) + await raceTest(void, 1) + await raceTest(void, 2) + await raceTest(int, 0) + await raceTest(int, 1) + await raceTest(int, 2) From 0f0ed1d654aa2f2bdd792eb9ab55b227156fa544 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sat, 20 Apr 2024 03:49:07 +0300 Subject: [PATCH 136/146] Add wait(deadline future) implementation. (#535) * Add waitUntil(deadline) implementation. * Add one more test. * Fix rare race condition and tests for it. * Rename waitUntil() to wait(). --- chronos/internal/asyncfutures.nim | 94 ++++++- tests/testfut.nim | 397 +++++++++++++++++++++++++++++- 2 files changed, 481 insertions(+), 10 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index c3396bfbe..2b92e744a 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1529,6 +1529,60 @@ proc withTimeout*[T](fut: Future[T], timeout: int): Future[bool] {. inline, deprecated: "Use withTimeout(Future[T], Duration)".} = withTimeout(fut, timeout.milliseconds()) +proc waitUntilImpl[F: SomeFuture](fut: F, retFuture: auto, + deadline: auto): auto = + var timeouted = false + + template completeFuture(fut: untyped, timeout: bool): untyped = + if fut.failed(): + retFuture.fail(fut.error(), warn = false) + elif fut.cancelled(): + if timeout: + # Its possible that `future` could be cancelled in some other place. In + # such case we can't detect if it was our cancellation due to timeout, + # or some other cancellation. 
+ retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) + else: + retFuture.cancelAndSchedule() + else: + when type(fut).T is void: + retFuture.complete() + else: + retFuture.complete(fut.value) + + proc continuation(udata: pointer) {.raises: [].} = + if not(retFuture.finished()): + if timeouted: + # When timeout is exceeded and we cancelled future via cancelSoon(), + # its possible that future at this moment already has value + # and/or error. + fut.completeFuture(timeouted) + return + if not(fut.finished()): + timeouted = true + fut.cancelSoon() + else: + fut.completeFuture(false) + + var cancellation: proc(udata: pointer) {.gcsafe, raises: [].} + cancellation = proc(udata: pointer) {.gcsafe, raises: [].} = + deadline.removeCallback(continuation) + if not(fut.finished()): + fut.cancelSoon() + else: + fut.completeFuture(false) + + if fut.finished(): + fut.completeFuture(false) + else: + if deadline.finished(): + retFuture.fail(newException(AsyncTimeoutError, "Timeout exceeded!")) + else: + retFuture.cancelCallback = cancellation + fut.addCallback(continuation) + deadline.addCallback(continuation) + retFuture + proc waitImpl[F: SomeFuture](fut: F, retFuture: auto, timeout: Duration): auto = var moment: Moment @@ -1606,7 +1660,8 @@ proc wait*[T](fut: Future[T], timeout = InfiniteDuration): Future[T] = ## TODO: In case when ``fut`` got cancelled, what result Future[T] ## should return, because it can't be cancelled too. var - retFuture = newFuture[T]("chronos.wait()", {FutureFlag.OwnCancelSchedule}) + retFuture = newFuture[T]("chronos.wait(duration)", + {FutureFlag.OwnCancelSchedule}) # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture` # manually at proper time. @@ -1621,6 +1676,28 @@ proc wait*[T](fut: Future[T], timeout = -1): Future[T] {. else: wait(fut, timeout.milliseconds()) +proc wait*[T](fut: Future[T], deadline: SomeFuture): Future[T] = + ## Returns a future which will complete once future ``fut`` completes + ## or if ``deadline`` future completes. + ## + ## If `deadline` future completes before future `fut` - + ## `AsyncTimeoutError` exception will be raised. + ## + ## Note: `deadline` future will not be cancelled and/or failed. + ## + ## Note: While `waitUntil(future)` operation is pending, please avoid any + ## attempts to cancel future `fut`. If it happens `waitUntil()` could + ## introduce undefined behavior - it could raise`CancelledError` or + ## `AsyncTimeoutError`. + ## + ## If you need to cancel `future` - cancel `waitUntil(future)` instead. + var + retFuture = newFuture[T]("chronos.wait(future)", + {FutureFlag.OwnCancelSchedule}) + # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture` + # manually at proper time. + waitUntilImpl(fut, retFuture, deadline) + proc join*(future: FutureBase): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Returns a future which will complete once future ``future`` completes. @@ -1783,8 +1860,21 @@ proc wait*(fut: InternalRaisesFuture, timeout = InfiniteDuration): auto = InternalRaisesFutureRaises = E.prepend(CancelledError, AsyncTimeoutError) let - retFuture = newFuture[T]("chronos.wait()", {OwnCancelSchedule}) + retFuture = newFuture[T]("chronos.wait(duration)", {OwnCancelSchedule}) # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture` # manually at proper time. 
waitImpl(fut, retFuture, timeout) + +proc wait*(fut: InternalRaisesFuture, deadline: InternalRaisesFuture): auto = + type + T = type(fut).T + E = type(fut).E + InternalRaisesFutureRaises = E.prepend(CancelledError, AsyncTimeoutError) + + let + retFuture = newFuture[T]("chronos.wait(future)", {OwnCancelSchedule}) + # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture` + # manually at proper time. + + waitUntilImpl(fut, retFuture, deadline) diff --git a/tests/testfut.nim b/tests/testfut.nim index 46e9c2a00..973743911 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -83,7 +83,7 @@ suite "Future[T] behavior test suite": fut.finished testResult == "1245" - asyncTest "wait[T]() test": + asyncTest "wait(duration) test": block: ## Test for not immediately completed future and timeout = -1 let res = @@ -146,6 +146,183 @@ suite "Future[T] behavior test suite": false check res + asyncTest "wait(future) test": + block: + ## Test for not immediately completed future and deadline which is not + ## going to be finished + let + deadline = newFuture[void]() + future1 = testFuture1() + let res = + try: + discard await wait(future1, deadline) + true + except CatchableError: + false + check: + deadline.finished() == false + future1.finished() == true + res == true + + await deadline.cancelAndWait() + + check deadline.finished() == true + block: + ## Test for immediately completed future and timeout = -1 + let + deadline = newFuture[void]() + future2 = testFuture2() + let res = + try: + discard await wait(future2, deadline) + true + except CatchableError: + false + check: + deadline.finished() == false + future2.finished() == true + res + + await deadline.cancelAndWait() + + check deadline.finished() == true + block: + ## Test for not immediately completed future and timeout = 0 + let + deadline = newFuture[void]() + future1 = testFuture1() + deadline.complete() + let res = + try: + discard await wait(future1, deadline) + false + except AsyncTimeoutError: + true + except CatchableError: + false + check: + future1.finished() == false + deadline.finished() == true + res + + block: + ## Test for immediately completed future and timeout = 0 + let + deadline = newFuture[void]() + future2 = testFuture2() + deadline.complete() + let (res1, res2) = + try: + let res = await wait(future2, deadline) + (true, res) + except CatchableError: + (false, -1) + check: + future2.finished() == true + deadline.finished() == true + res1 == true + res2 == 1 + + block: + ## Test for future which cannot be completed in timeout period + let + deadline = sleepAsync(50.milliseconds) + future100 = testFuture100() + let res = + try: + discard await wait(future100, deadline) + false + except AsyncTimeoutError: + true + except CatchableError: + false + check: + deadline.finished() == true + res + await future100.cancelAndWait() + check: + future100.finished() == true + + block: + ## Test for future which will be completed before timeout exceeded. 
+ let + deadline = sleepAsync(500.milliseconds) + future100 = testFuture100() + let (res1, res2) = + try: + let res = await wait(future100, deadline) + (true, res) + except CatchableError: + (false, -1) + check: + future100.finished() == true + deadline.finished() == false + res1 == true + res2 == 0 + await deadline.cancelAndWait() + check: + deadline.finished() == true + + asyncTest "wait(future) cancellation behavior test": + proc deepTest3(future: Future[void]) {.async.} = + await future + + proc deepTest2(future: Future[void]) {.async.} = + await deepTest3(future) + + proc deepTest1(future: Future[void]) {.async.} = + await deepTest2(future) + + let + + deadlineFuture = newFuture[void]() + + block: + # Cancellation should affect `testFuture` because it is in pending state. + let monitorFuture = newFuture[void]() + var testFuture = deepTest1(monitorFuture) + let waitFut = wait(testFuture, deadlineFuture) + await cancelAndWait(waitFut) + check: + monitorFuture.cancelled() == true + testFuture.cancelled() == true + waitFut.cancelled() == true + deadlineFuture.finished() == false + + block: + # Cancellation should not affect `testFuture` because it is completed. + let monitorFuture = newFuture[void]() + var testFuture = deepTest1(monitorFuture) + let waitFut = wait(testFuture, deadlineFuture) + monitorFuture.complete() + await cancelAndWait(waitFut) + check: + monitorFuture.completed() == true + monitorFuture.cancelled() == false + testFuture.completed() == true + waitFut.completed() == true + deadlineFuture.finished() == false + + block: + # Cancellation should not affect `testFuture` because it is failed. + let monitorFuture = newFuture[void]() + var testFuture = deepTest1(monitorFuture) + let waitFut = wait(testFuture, deadlineFuture) + monitorFuture.fail(newException(ValueError, "TEST")) + await cancelAndWait(waitFut) + check: + monitorFuture.failed() == true + monitorFuture.cancelled() == false + testFuture.failed() == true + testFuture.cancelled() == false + waitFut.failed() == true + testFuture.cancelled() == false + deadlineFuture.finished() == false + + await cancelAndWait(deadlineFuture) + + check deadlineFuture.finished() == true + asyncTest "Discarded result Future[T] test": var completedFutures = 0 @@ -1082,7 +1259,7 @@ suite "Future[T] behavior test suite": completed == 0 cancelled == 1 - asyncTest "Cancellation wait() test": + asyncTest "Cancellation wait(duration) test": var neverFlag1, neverFlag2, neverFlag3: bool var waitProc1, waitProc2: bool proc neverEndingProc(): Future[void] = @@ -1143,7 +1320,39 @@ suite "Future[T] behavior test suite": fut.state == FutureState.Completed neverFlag1 and neverFlag2 and neverFlag3 and waitProc1 and waitProc2 - asyncTest "Cancellation race test": + asyncTest "Cancellation wait(future) test": + var neverFlag1, neverFlag2, neverFlag3: bool + var waitProc1, waitProc2: bool + proc neverEndingProc(): Future[void] = + var res = newFuture[void]() + proc continuation(udata: pointer) {.gcsafe.} = + neverFlag2 = true + proc cancellation(udata: pointer) {.gcsafe.} = + neverFlag3 = true + res.addCallback(continuation) + res.cancelCallback = cancellation + result = res + neverFlag1 = true + + proc waitProc() {.async.} = + let deadline = sleepAsync(100.milliseconds) + try: + await wait(neverEndingProc(), deadline) + except CancelledError: + waitProc1 = true + except CatchableError: + doAssert(false) + finally: + await cancelAndWait(deadline) + waitProc2 = true + + var fut = waitProc() + await cancelAndWait(fut) + check: + fut.state == 
FutureState.Completed + neverFlag1 and neverFlag2 and neverFlag3 and waitProc1 and waitProc2 + + asyncTest "Cancellation race() test": var someFut = newFuture[void]() proc raceProc(): Future[void] {.async.} = @@ -1298,7 +1507,7 @@ suite "Future[T] behavior test suite": false check res - asyncTest "wait(fut) should wait cancellation test": + asyncTest "wait(future) should wait cancellation test": proc futureNeverEnds(): Future[void] = newFuture[void]("neverending.future") @@ -1322,6 +1531,29 @@ suite "Future[T] behavior test suite": check res + asyncTest "wait(future) should wait cancellation test": + proc futureNeverEnds(): Future[void] = + newFuture[void]("neverending.future") + + proc futureOneLevelMore() {.async.} = + await futureNeverEnds() + + var fut = futureOneLevelMore() + let res = + try: + await wait(fut, sleepAsync(100.milliseconds)) + false + except AsyncTimeoutError: + # Because `fut` is never-ending Future[T], `wait` should raise + # `AsyncTimeoutError`, but only after `fut` is cancelled. + if fut.cancelled(): + true + else: + false + except CatchableError: + false + check res + test "race(zero) test": var tseq = newSeq[FutureBase]() var fut1 = race(tseq) @@ -1563,7 +1795,7 @@ suite "Future[T] behavior test suite": v1_u == 0'u v2_u + 1'u == 0'u - asyncTest "wait() cancellation undefined behavior test #1": + asyncTest "wait(duration) cancellation undefined behavior test #1": proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. async.} = await fooFut @@ -1586,7 +1818,7 @@ suite "Future[T] behavior test suite": discard someFut.tryCancel() await someFut - asyncTest "wait() cancellation undefined behavior test #2": + asyncTest "wait(duration) cancellation undefined behavior test #2": proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. async.} = await fooFut @@ -1613,7 +1845,7 @@ suite "Future[T] behavior test suite": discard someFut.tryCancel() await someFut - asyncTest "wait() should allow cancellation test (depends on race())": + asyncTest "wait(duration) should allow cancellation test (depends on race())": proc testFoo(): Future[bool] {.async.} = let resFut = sleepAsync(2.seconds).wait(3.seconds) @@ -1699,6 +1931,78 @@ suite "Future[T] behavior test suite": check (await testFoo()) == true + asyncTest "wait(future) cancellation undefined behavior test #1": + proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. + async.} = + await fooFut + return TestFooConnection() + + proc testFoo(fooFut: Future[void]) {.async.} = + let deadline = sleepAsync(10.seconds) + let connection = + try: + let res = await testInnerFoo(fooFut).wait(deadline) + Result[TestFooConnection, int].ok(res) + except CancelledError: + Result[TestFooConnection, int].err(0) + except CatchableError: + Result[TestFooConnection, int].err(1) + finally: + await deadline.cancelAndWait() + + check connection.isOk() + + var future = newFuture[void]("last.child.future") + var someFut = testFoo(future) + future.complete() + discard someFut.tryCancel() + await someFut + + asyncTest "wait(future) cancellation undefined behavior test #2": + proc testInnerFoo(fooFut: Future[void]): Future[TestFooConnection] {. + async.} = + await fooFut + return TestFooConnection() + + proc testMiddleFoo(fooFut: Future[void]): Future[TestFooConnection] {. 
+ async.} = + await testInnerFoo(fooFut) + + proc testFoo(fooFut: Future[void]) {.async.} = + let deadline = sleepAsync(10.seconds) + let connection = + try: + let res = await testMiddleFoo(fooFut).wait(deadline) + Result[TestFooConnection, int].ok(res) + except CancelledError: + Result[TestFooConnection, int].err(0) + except CatchableError: + Result[TestFooConnection, int].err(1) + finally: + await deadline.cancelAndWait() + check connection.isOk() + + var future = newFuture[void]("last.child.future") + var someFut = testFoo(future) + future.complete() + discard someFut.tryCancel() + await someFut + + asyncTest "wait(future) should allow cancellation test (depends on race())": + proc testFoo(): Future[bool] {.async.} = + let + deadline = sleepAsync(3.seconds) + resFut = sleepAsync(2.seconds).wait(deadline) + timeFut = sleepAsync(1.seconds) + cancelFut = cancelAndWait(resFut) + discard await race(cancelFut, timeFut) + await deadline.cancelAndWait() + if cancelFut.finished(): + return (resFut.cancelled() and cancelFut.completed()) + false + + check (await testFoo()) == true + asyncTest "Cancellation behavior test": proc testInnerFoo(fooFut: Future[void]) {.async.} = await fooFut @@ -2178,7 +2482,7 @@ suite "Future[T] behavior test suite": not compiles(Future[void].Raising([42])) not compiles(Future[void].Raising(42)) - asyncTest "Timeout/cancellation race wait() test": + asyncTest "Timeout/cancellation race wait(duration) test": proc raceTest(T: typedesc, itype: int) {.async.} = let monitorFuture = newFuture[T]("monitor", {FutureFlag.OwnCancelSchedule}) @@ -2252,6 +2556,83 @@ suite "Future[T] behavior test suite": await raceTest(int, 1) await raceTest(int, 2) + asyncTest "Timeout/cancellation race wait(future) test": + proc raceTest(T: typedesc, itype: int) {.async.} = + let monitorFuture = newFuture[T]() + + proc raceProc0(future: Future[T]): Future[T] {.async.} = + await future + proc raceProc1(future: Future[T]): Future[T] {.async.} = + await raceProc0(future) + proc raceProc2(future: Future[T]): Future[T] {.async.} = + await raceProc1(future) + + proc continuation(udata: pointer) {.gcsafe.} = + if itype == 0: + when T is void: + monitorFuture.complete() + elif T is int: + monitorFuture.complete(100) + elif itype == 1: + monitorFuture.fail(newException(ValueError, "test")) + else: + monitorFuture.cancelAndSchedule() + + let deadlineFuture = newFuture[void]() + deadlineFuture.addCallback continuation + + let + testFut = raceProc2(monitorFuture) + waitFut = wait(testFut, deadlineFuture) + + deadlineFuture.complete() + + when T is void: + let waitRes = + try: + await waitFut + if itype == 0: + true + else: + false + except CancelledError: + false + except CatchableError: + if itype != 0: + true + else: + false + check waitRes == true + elif T is int: + let waitRes = + try: + let res = await waitFut + if itype == 0: + (true, res) + else: + (false, -1) + except CancelledError: + (false, -1) + except CatchableError: + if itype != 0: + (true, 0) + else: + (false, -1) + if itype == 0: + check: + waitRes[0] == true + waitRes[1] == 100 + else: + check: + waitRes[0] == true + + await raceTest(void, 0) + await raceTest(void, 1) + await raceTest(void, 2) + await raceTest(int, 0) + await raceTest(int, 1) + await raceTest(int, 2) + asyncTest "Timeout/cancellation race withTimeout() test": proc raceTest(T: typedesc, itype: int) {.async.} = let monitorFuture = newFuture[T]("monitor", From bb96f02ae877e04230dac85c040e66656b7d2ef0 Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Wed, 24 Apr 2024 
03:16:23 +0300 Subject: [PATCH 137/146] Fix `wait(future)` declaration signature. (#537) --- chronos/internal/asyncfutures.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 2b92e744a..ad0b847bc 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1866,7 +1866,7 @@ proc wait*(fut: InternalRaisesFuture, timeout = InfiniteDuration): auto = waitImpl(fut, retFuture, timeout) -proc wait*(fut: InternalRaisesFuture, deadline: InternalRaisesFuture): auto = +proc wait*(fut: InternalRaisesFuture, deadline: SomeFuture): auto = type T = type(fut).T E = type(fut).E From 72f560f049efa42fffb50aff2015782f6f17825e Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Thu, 25 Apr 2024 19:08:53 +0300 Subject: [PATCH 138/146] Fix RangeError defect being happened using android toolchain. (#538) * Fix RangeError defect being happened using android toolchain. * Set proper type for `Tnfds`. * Update comment. --- chronos/ioselects/ioselectors_poll.nim | 5 ++++- chronos/osdefs.nim | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/chronos/ioselects/ioselectors_poll.nim b/chronos/ioselects/ioselectors_poll.nim index 25cc03518..51f21bbfd 100644 --- a/chronos/ioselects/ioselectors_poll.nim +++ b/chronos/ioselects/ioselectors_poll.nim @@ -220,7 +220,10 @@ proc selectInto2*[T](s: Selector[T], timeout: int, verifySelectParams(timeout, -1, int(high(cint))) let - maxEventsCount = min(len(s.pollfds), len(readyKeys)) + maxEventsCount = culong(min(len(s.pollfds), len(readyKeys))) + # Without `culong` conversion, this code could fail with RangeError + # defect on explicit Tnfds(integer) conversion (probably related to + # combination of nim+clang (android toolchain)). eventsCount = if maxEventsCount > 0: let res = handleEintr(poll(addr(s.pollfds[0]), Tnfds(maxEventsCount), diff --git a/chronos/osdefs.nim b/chronos/osdefs.nim index 40a6365ad..303a6110b 100644 --- a/chronos/osdefs.nim +++ b/chronos/osdefs.nim @@ -965,7 +965,7 @@ elif defined(macos) or defined(macosx): events*: cshort revents*: cshort - Tnfds* {.importc: "nfds_t", header: "".} = cuint + Tnfds* {.importc: "nfds_t", header: "".} = culong const POLLIN* = 0x0001 From 52b02b9977d0b06e0b235861b0c8b06fdc7294be Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Sat, 4 May 2024 11:52:42 +0200 Subject: [PATCH 139/146] remove unnecessary impl overloads (#539) --- chronos/internal/asyncfutures.nim | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index ad0b847bc..1898685a3 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -76,22 +76,11 @@ template Finished*(T: type FutureState): FutureState {. 
deprecated: "Use FutureState.Completed instead".} = FutureState.Completed -proc newFutureImpl[T](loc: ptr SrcLoc): Future[T] = - let fut = Future[T]() - internalInitFutureBase(fut, loc, FutureState.Pending, {}) - fut - proc newFutureImpl[T](loc: ptr SrcLoc, flags: FutureFlags): Future[T] = let fut = Future[T]() internalInitFutureBase(fut, loc, FutureState.Pending, flags) fut -proc newInternalRaisesFutureImpl[T, E]( - loc: ptr SrcLoc): InternalRaisesFuture[T, E] = - let fut = InternalRaisesFuture[T, E]() - internalInitFutureBase(fut, loc, FutureState.Pending, {}) - fut - proc newInternalRaisesFutureImpl[T, E]( loc: ptr SrcLoc, flags: FutureFlags): InternalRaisesFuture[T, E] = let fut = InternalRaisesFuture[T, E]() @@ -125,7 +114,7 @@ template newInternalRaisesFuture*[T, E](fromProc: static[string] = ""): auto = ## ## Specifying ``fromProc``, which is a string specifying the name of the proc ## that this future belongs to, is a good habit as it helps with debugging. - newInternalRaisesFutureImpl[T, E](getSrcLocation(fromProc)) + newInternalRaisesFutureImpl[T, E](getSrcLocation(fromProc), {}) template newFutureSeq*[A, B](fromProc: static[string] = ""): FutureSeq[A, B] {.deprecated.} = ## Create a new future which can hold/preserve GC sequence until future will @@ -1697,7 +1686,7 @@ proc wait*[T](fut: Future[T], deadline: SomeFuture): Future[T] = # We set `OwnCancelSchedule` flag, because we going to cancel `retFuture` # manually at proper time. waitUntilImpl(fut, retFuture, deadline) - + proc join*(future: FutureBase): Future[void] {. async: (raw: true, raises: [CancelledError]).} = ## Returns a future which will complete once future ``future`` completes. From 1ff81c60eaaff6867fef81680273f3d0f4b5d18b Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 6 May 2024 10:56:48 +0200 Subject: [PATCH 140/146] avoid warning in noCancel with non-raising future (#540) --- chronos/internal/asyncfutures.nim | 19 +++++++++++------- tests/testmacro.nim | 32 ++++++++++++++++++++++++++++++- 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/chronos/internal/asyncfutures.nim b/chronos/internal/asyncfutures.nim index 1898685a3..6c8f2bddb 100644 --- a/chronos/internal/asyncfutures.nim +++ b/chronos/internal/asyncfutures.nim @@ -1031,19 +1031,24 @@ proc noCancel*[F: SomeFuture](future: F): auto = # async: (raw: true, raises: as let retFuture = newFuture[F.T]("chronos.noCancel(T)", {FutureFlag.OwnCancelSchedule}) template completeFuture() = + const canFail = when declared(InternalRaisesFutureRaises): + InternalRaisesFutureRaises isnot void + else: + true + if future.completed(): when F.T is void: retFuture.complete() else: retFuture.complete(future.value) - elif future.failed(): - when F is Future: - retFuture.fail(future.error, warn = false) - when declared(InternalRaisesFutureRaises): - when InternalRaisesFutureRaises isnot void: - retFuture.fail(future.error, warn = false) else: - raiseAssert("Unexpected future state [" & $future.state & "]") + when canFail: # Avoid calling `failed` on non-failing raises futures + if future.failed(): + retFuture.fail(future.error, warn = false) + else: + raiseAssert("Unexpected future state [" & $future.state & "]") + else: + raiseAssert("Unexpected future state [" & $future.state & "]") proc continuation(udata: pointer) {.gcsafe.} = completeFuture() diff --git a/tests/testmacro.nim b/tests/testmacro.nim index d646303a3..335e2eef8 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -519,7 +519,7 @@ suite "Exceptions tracking": noraises() - test "Nocancel 
errors":
+  test "Nocancel errors with raises":
     proc testit {.async: (raises: [ValueError, CancelledError]).} =
       await sleepAsync(5.milliseconds)
       raise (ref ValueError)()
@@ -535,6 +535,36 @@ suite "Exceptions tracking":
 
     noraises()
 
+  test "Nocancel with no errors":
+    proc testit {.async: (raises: [CancelledError]).} =
+      await sleepAsync(5.milliseconds)
+
+    proc test {.async: (raises: []).} =
+      await noCancel testit()
+
+    proc noraises() {.raises: [].} =
+      let f = test()
+      waitFor(f.cancelAndWait())
+      waitFor(f)
+
+    noraises()
+
+  test "Nocancel errors without raises":
+    proc testit {.async.} =
+      await sleepAsync(5.milliseconds)
+      raise (ref ValueError)()
+
+    proc test {.async.} =
+      await noCancel testit()
+
+    proc noraises() =
+      expect(ValueError):
+        let f = test()
+        waitFor(f.cancelAndWait())
+        waitFor(f)
+
+    noraises()
+
   test "Defect on wrong exception type at runtime":
     {.push warning[User]: off}
     let f = InternalRaisesFuture[void, (ValueError,)]()

From 8a306763cec8105fa83574b56734b0f66823f844 Mon Sep 17 00:00:00 2001
From: Jacek Sieka
Date: Tue, 26 Mar 2024 20:08:21 +0100
Subject: [PATCH 141/146] docs for `join` and `noCancel`

---
 docs/src/concepts.md | 86 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 83 insertions(+), 3 deletions(-)

diff --git a/docs/src/concepts.md b/docs/src/concepts.md
index 0469b8be4..72a5db3ae 100644
--- a/docs/src/concepts.md
+++ b/docs/src/concepts.md
@@ -4,6 +4,9 @@ Async/await is a programming model that relies on cooperative multitasking to
 coordinate the concurrent execution of procedures, using event notifications
 from the operating system or other treads to resume execution.
 
+Code execution happens in a loop that alternates between making progress on
+tasks and handling events.
+
 
 ## The dispatcher
 
@@ -118,7 +121,8 @@ The `CancelledError` will now travel up the stack like any other exception.
 It can be caught for instance to free some resources and is then typically
 re-raised for the whole chain operations to get cancelled.
 
-Alternatively, the cancellation request can be translated to a regular outcome of the operation - for example, a `read` operation might return an empty result.
+Alternatively, the cancellation request can be translated to a regular outcome
+of the operation - for example, a `read` operation might return an empty result.
 
 Cancelling an already-finished `Future` has no effect, as the following example
 of downloading two web pages concurrently shows:
@@ -127,8 +131,84 @@ of downloading two web pages concurrently shows:
 {{#include ../examples/twogets.nim}}
 ```
 
+### Ownership
+
+When calling a procedure that returns a `Future`, ownership of that `Future` is
+shared between the callee that created it and the caller that waits for it to be
+finished.
+
+The `Future` can be thought of as a single-item channel between a producer and a
+consumer. The producer creates the `Future` and is responsible for completing or
+failing it while the caller waits for completion and may `cancel` it.
+
+Although it is technically possible, callers must not `complete` or `fail`
+futures and callees or other intermediate observers must not `cancel` them as
+this may lead to panics and shutdown (ie if the future is completed twice or a
+cancellation is not handled by the original caller).
+
+### `noCancel`
+
+Certain operations must not be cancelled for semantic reasons. Common scenarios
+include `closeWait` that releases a resource irrevocably and composed
+operations whose individual steps should be performed together or not at all.
+ +In such cases, the `noCancel` modifier to `await` can be used to temporarily +disable cancellation propagation, allowing the operation to complete even if +the caller initiates a cancellation request: + +```nim +proc deepSleep(dur: Duration) {.async.} = + # `noCancel` prevents any cancellation request by the caller of `deepSleep` + # from reaching `sleepAsync` - even if `deepSleep` is cancelled, its future + # will not complete until the sleep finishes. + await noCancel sleepAsync(dur) + +let future = deepSleep(10.minutes) + +# This will take ~10 minutes even if we try to cancel the call to `deepSleep`! +await cancelAndWait(future) +``` + +### `join` + +The `join` modifier to `await` allows cancelling an `async` procedure without +propagating the cancellation to the awaited operation. This is useful when +`await`:ing a `Future` for monitoring purposes, ie when a procedure is not the +owner of the future that's being `await`:ed. + +One situation where this happens is when implementing the "observer" pattern, +where a helper monitors an operation it did not initiate: + +```nim +var tick: Future[void] +proc ticker() {.async.} = + while true: + tick = sleepAsync(1.second) + await tick + echo "tick!" + +proc tocker() {.async.} = + # This operation does not own or implement the operation behind `tick`, + # so it should not cancel it when `tocker` is cancelled + await join tick + echo "tock!" + +let + fut = ticker() # `ticker` is now looping and most likely waiting for `tick` + fut2 = tocker() # both `ticker` and `tocker` are waiting for `tick` + +# We don't want `tocker` to cancel a future that was created in `ticker` +waitFor fut2.cancelAndWait() + +waitFor fut # keeps printing `tick!` every second. +``` + ## Compile-time configuration -`chronos` contains several compile-time [configuration options](./chronos/config.nim) enabling stricter compile-time checks and debugging helpers whose runtime cost may be significant. +`chronos` contains several compile-time +[configuration options](./chronos/config.nim) enabling stricter compile-time +checks and debugging helpers whose runtime cost may be significant. -Strictness options generally will become default in future chronos releases and allow adapting existing code without changing the new version - see the [`config.nim`](./chronos/config.nim) module for more information. +Strictness options generally will become default in future chronos releases and +allow adapting existing code without changing the new version - see the +[`config.nim`](./chronos/config.nim) module for more information. From 1b9d9253e89445d585d0fff39cc0d19254fdfd0d Mon Sep 17 00:00:00 2001 From: Eugene Kabanov Date: Sun, 2 Jun 2024 18:05:22 +0300 Subject: [PATCH 142/146] Fix GCC-14 [-Wincompatible-pointer-types] issues. (#546) * Fix class assignment. * One more fix. * Bump bearssl version. 
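For context, the fix below wraps the `addr ... vtable` arguments in explicit
conversions (`X509ClassPointerConst`, `SslSessionCacheClassPointerConst`) so the
generated C passes exactly the pointer type the BearSSL API expects. A minimal,
self-contained sketch of the same idiom follows; `Vtable`, `VtablePtr` and
`setClass` are invented stand-ins, not the BearSSL types:

```nim
type
  Vtable = object
    classId: int
  VtablePtr = ptr Vtable   # stands in for e.g. X509ClassPointerConst

proc setClass(slot: var VtablePtr, class: VtablePtr) =
  # Stands in for sslEngineSetX509/sslServerSetCache: it simply stores the
  # pointer it is given.
  slot = class

var
  vtable = Vtable(classId: 42)
  slot: VtablePtr

# Converting `addr vtable` explicitly to the parameter's pointer type makes
# the intended type visible at the call site; the diff below applies the
# same idiom with the BearSSL class-pointer types.
setClass(slot, VtablePtr(addr vtable))
echo slot.classId   # prints 42
```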
--- chronos.nimble | 2 +- chronos/streams/tlsstream.nim | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/chronos.nimble b/chronos.nimble index ba92ac657..490a0861c 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -10,7 +10,7 @@ skipDirs = @["tests"] requires "nim >= 1.6.16", "results", "stew", - "bearssl", + "bearssl >= 0.2.3", "httputils", "unittest2" diff --git a/chronos/streams/tlsstream.nim b/chronos/streams/tlsstream.nim index 9d90ab718..0e3430e61 100644 --- a/chronos/streams/tlsstream.nim +++ b/chronos/streams/tlsstream.nim @@ -511,7 +511,8 @@ proc newTLSClientAsyncStream*( if TLSFlags.NoVerifyHost in flags: sslClientInitFull(res.ccontext, addr res.x509, nil, 0) x509NoanchorInit(res.xwc, addr res.x509.vtable) - sslEngineSetX509(res.ccontext.eng, addr res.xwc.vtable) + sslEngineSetX509(res.ccontext.eng, + X509ClassPointerConst(addr res.xwc.vtable)) else: when trustAnchors is TrustAnchorStore: res.trustAnchors = trustAnchors @@ -611,7 +612,8 @@ proc newTLSServerAsyncStream*(rsource: AsyncStreamReader, uint16(maxVersion)) if not isNil(cache): - sslServerSetCache(res.scontext, addr cache.context.vtable) + sslServerSetCache( + res.scontext, SslSessionCacheClassPointerConst(addr cache.context.vtable)) if TLSFlags.EnforceServerPref in flags: sslEngineAddFlags(res.scontext.eng, OPT_ENFORCE_SERVER_PREFERENCES) From c44406594ff4375649f35f48f79dd6a0963bdf3c Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 7 Jun 2024 12:05:15 +0200 Subject: [PATCH 143/146] fix results import --- tests/testfut.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testfut.nim b/tests/testfut.nim index 973743911..8d9fa5832 100644 --- a/tests/testfut.nim +++ b/tests/testfut.nim @@ -6,7 +6,7 @@ # Apache License, version 2.0, (LICENSE-APACHEv2) # MIT license (LICENSE-MIT) import unittest2 -import stew/results +import results import ../chronos, ../chronos/unittest2/asynctests {.used.} From 7630f394718ebcdb8577e36faacd78cb7a0b7dd6 Mon Sep 17 00:00:00 2001 From: Giuliano Mega Date: Mon, 10 Jun 2024 05:18:42 -0300 Subject: [PATCH 144/146] Fixes compilation issues in v3 compatibility mode (`-d:chronosHandleException`) (#545) * add missing calls to await * add test run in v3 compatibility * fix semantics for chronosHandleException so it does not override local raises/handleException annotations * distinguish between explicit override and default setting; fix test * re-enable wrongly disabled check * make implementation simpler/clearer * update docs * reflow long line * word swap --- chronos.nimble | 8 +++++++ chronos/internal/asyncmacro.nim | 8 ++++++- chronos/transports/datagram.nim | 2 +- chronos/transports/stream.nim | 2 +- docs/src/error_handling.md | 39 ++++++++++++++++++++++++--------- tests/testmacro.nim | 15 +++++++++++++ 6 files changed, 61 insertions(+), 13 deletions(-) diff --git a/chronos.nimble b/chronos.nimble index 490a0861c..e8334ceb9 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -60,6 +60,14 @@ task test, "Run all tests": run args & " --mm:refc", "tests/testall" run args, "tests/testall" +task test_v3_compat, "Run all tests in v3 compatibility mode": + for args in testArguments: + if (NimMajor, NimMinor) > (1, 6): + # First run tests with `refc` memory manager. 
+ run args & " --mm:refc -d:chronosHandleException", "tests/testall" + + run args & " -d:chronosHandleException", "tests/testall" + task test_libbacktrace, "test with libbacktrace": if platform != "x86": let allArgs = @[ diff --git a/chronos/internal/asyncmacro.nim b/chronos/internal/asyncmacro.nim index 4ece9f08b..e416e1e8b 100644 --- a/chronos/internal/asyncmacro.nim +++ b/chronos/internal/asyncmacro.nim @@ -219,12 +219,14 @@ proc decodeParams(params: NimNode): AsyncParams = var raw = false raises: NimNode = nil - handleException = chronosHandleException + handleException = false + hasLocalAnnotations = false for param in params: param.expectKind(nnkExprColonExpr) if param[0].eqIdent("raises"): + hasLocalAnnotations = true param[1].expectKind(nnkBracket) if param[1].len == 0: raises = makeNoRaises() @@ -236,10 +238,14 @@ proc decodeParams(params: NimNode): AsyncParams = # boolVal doesn't work in untyped macros it seems.. raw = param[1].eqIdent("true") elif param[0].eqIdent("handleException"): + hasLocalAnnotations = true handleException = param[1].eqIdent("true") else: warning("Unrecognised async parameter: " & repr(param[0]), param) + if not hasLocalAnnotations: + handleException = chronosHandleException + (raw, raises, handleException) proc isEmpty(n: NimNode): bool {.compileTime.} = diff --git a/chronos/transports/datagram.nim b/chronos/transports/datagram.nim index fdb406ba4..1423d7617 100644 --- a/chronos/transports/datagram.nim +++ b/chronos/transports/datagram.nim @@ -720,7 +720,7 @@ proc newDatagramTransportCommon(cbproc: UnsafeDatagramCallback, proc wrap(transp: DatagramTransport, remote: TransportAddress) {.async: (raises: []).} = try: - cbproc(transp, remote) + await cbproc(transp, remote) except CatchableError as exc: raiseAssert "Unexpected exception from stream server cbproc: " & exc.msg diff --git a/chronos/transports/stream.nim b/chronos/transports/stream.nim index 999254307..391ff0a76 100644 --- a/chronos/transports/stream.nim +++ b/chronos/transports/stream.nim @@ -2197,7 +2197,7 @@ proc createStreamServer*(host: TransportAddress, proc wrap(server: StreamServer, client: StreamTransport) {.async: (raises: []).} = try: - cbproc(server, client) + await cbproc(server, client) except CatchableError as exc: raiseAssert "Unexpected exception from stream server cbproc: " & exc.msg diff --git a/docs/src/error_handling.md b/docs/src/error_handling.md index 54c1236f3..2b03dc2ae 100644 --- a/docs/src/error_handling.md +++ b/docs/src/error_handling.md @@ -110,7 +110,7 @@ sometimes lead to compile errors around forward declarations, methods and closures as Nim conservatively asssumes that any `Exception` might be raised from those. -Make sure to excplicitly annotate these with `{.raises.}`: +Make sure to explicitly annotate these with `{.raises.}`: ```nim # Forward declarations need to explicitly include a raises list: @@ -124,11 +124,12 @@ proc myfunction() = let closure: MyClosure = myfunction ``` +## Compatibility modes -For compatibility, `async` functions can be instructed to handle `Exception` as -well, specifying `handleException: true`. `Exception` that is not a `Defect` and -not a `CatchableError` will then be caught and remapped to -`AsyncExceptionError`: +**Individual functions.** For compatibility, `async` functions can be instructed +to handle `Exception` as well, specifying `handleException: true`. 
Any +`Exception` that is not a `Defect` and not a `CatchableError` will then be +caught and remapped to `AsyncExceptionError`: ```nim proc raiseException() {.async: (handleException: true, raises: [AsyncExceptionError]).} = @@ -136,14 +137,32 @@ proc raiseException() {.async: (handleException: true, raises: [AsyncExceptionEr proc callRaiseException() {.async: (raises: []).} = try: - raiseException() + await raiseException() except AsyncExceptionError as exc: # The original Exception is available from the `parent` field echo exc.parent.msg ``` -This mode can be enabled globally with `-d:chronosHandleException` as a help -when porting code to `chronos` but should generally be avoided as global -configuration settings may interfere with libraries that use `chronos` leading -to unexpected behavior. +**Global flag.** This mode can be enabled globally with +`-d:chronosHandleException` as a help when porting code to `chronos`. The +behavior in this case will be that: +1. old-style functions annotated with plain `async` will behave as if they had + been annotated with `async: (handleException: true)`. + + This is functionally equivalent to + `async: (handleException: true, raises: [CatchableError])` and will, as + before, remap any `Exception` that is not `Defect` into + `AsyncExceptionError`, while also allowing any `CatchableError` (including + `AsyncExceptionError`) to get through without compilation errors. + +2. New-style functions with `async: (raises: [...])` annotations or their own + `handleException` annotations will not be affected. + +The rationale here is to allow one to incrementally introduce exception +annotations and get compiler feedback while not requiring that every bit of +legacy code is updated at once. + +This should be used sparingly and with care, however, as global configuration +settings may interfere with libraries that use `chronos` leading to unexpected +behavior. diff --git a/tests/testmacro.nim b/tests/testmacro.nim index 335e2eef8..ba1f69109 100644 --- a/tests/testmacro.nim +++ b/tests/testmacro.nim @@ -8,6 +8,7 @@ import std/[macros, strutils] import unittest2 import ../chronos +import ../chronos/config {.used.} @@ -586,6 +587,20 @@ suite "Exceptions tracking": waitFor(callCatchAll()) + test "Global handleException does not override local annotations": + when chronosHandleException: + proc unnanotated() {.async.} = raise (ref CatchableError)() + + checkNotCompiles: + proc annotated() {.async: (raises: [ValueError]).} = + raise (ref CatchableError)() + + checkNotCompiles: + proc noHandleException() {.async: (handleException: false).} = + raise (ref Exception)() + else: + skip() + test "Results compatibility": proc returnOk(): Future[Result[int, string]] {.async: (raises: []).} = ok(42) From 4ad38079dec8407c396ebaaf6ba60e5e94e3fce5 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 20 Jun 2024 09:52:23 +0200 Subject: [PATCH 145/146] pretty-printer for `Duration` (#547) --- chronos/timer.nim | 79 ++++++++++++++++++++-------------------------- tests/testtime.nim | 3 ++ 2 files changed, 37 insertions(+), 45 deletions(-) diff --git a/chronos/timer.nim b/chronos/timer.nim index 29af20e79..1aabd6474 100644 --- a/chronos/timer.nim +++ b/chronos/timer.nim @@ -370,53 +370,42 @@ template add(a: var string, b: Base10Buf[uint64]) = for index in 0 ..< b.len: a.add(char(b.data[index])) -func `$`*(a: Duration): string {.inline.} = - ## Returns string representation of Duration ``a`` as nanoseconds value. 
- var res = "" - var v = a.value - - if v >= Week.value: - res.add(Base10.toBytes(uint64(v div Week.value))) - res.add('w') - v = v mod Week.value - if v == 0: return res - if v >= Day.value: - res.add(Base10.toBytes(uint64(v div Day.value))) - res.add('d') - v = v mod Day.value - if v == 0: return res - if v >= Hour.value: - res.add(Base10.toBytes(uint64(v div Hour.value))) - res.add('h') - v = v mod Hour.value - if v == 0: return res - if v >= Minute.value: - res.add(Base10.toBytes(uint64(v div Minute.value))) - res.add('m') - v = v mod Minute.value - if v == 0: return res - if v >= Second.value: - res.add(Base10.toBytes(uint64(v div Second.value))) - res.add('s') - v = v mod Second.value - if v == 0: return res - if v >= Millisecond.value: - res.add(Base10.toBytes(uint64(v div Millisecond.value))) - res.add('m') - res.add('s') - v = v mod Millisecond.value - if v == 0: return res - if v >= Microsecond.value: - res.add(Base10.toBytes(uint64(v div Microsecond.value))) - res.add('u') - res.add('s') - v = v mod Microsecond.value - if v == 0: return res - res.add(Base10.toBytes(uint64(v div Nanosecond.value))) - res.add('n') - res.add('s') +func toString*(a: timer.Duration, parts = int.high): string = + ## Returns a pretty string representation of Duration ``a`` - the + ## number of parts returned can be limited thus truncating the output to + ## an approximation that grows more precise as the duration becomes smaller + var + res = newStringOfCap(32) + v = a.nanoseconds() + parts = parts + + template f(n: string, T: Duration) = + if parts <= 0: + return res + + if v >= T.nanoseconds(): + res.add(Base10.toBytes(uint64(v div T.nanoseconds()))) + res.add(n) + v = v mod T.nanoseconds() + dec parts + if v == 0: + return res + + f("w", Week) + f("d", Day) + f("h", Hour) + f("m", Minute) + f("s", Second) + f("ms", Millisecond) + f("us", Microsecond) + f("ns", Nanosecond) + res +func `$`*(a: Duration): string {.inline.} = + ## Returns string representation of Duration ``a``. + a.toString() + func `$`*(a: Moment): string {.inline.} = ## Returns string representation of Moment ``a`` as nanoseconds value. 
var res = "" diff --git a/tests/testtime.nim b/tests/testtime.nim index 03c2318d3..118a602f6 100644 --- a/tests/testtime.nim +++ b/tests/testtime.nim @@ -89,6 +89,9 @@ suite "Asynchronous timers & steps test suite": $nanoseconds(1_000_000_900) == "1s900ns" $nanoseconds(1_800_700_000) == "1s800ms700us" $nanoseconds(1_800_000_600) == "1s800ms600ns" + nanoseconds(1_800_000_600).toString(0) == "" + nanoseconds(1_800_000_600).toString(1) == "1s" + nanoseconds(1_800_000_600).toString(2) == "1s800ms" test "Asynchronous steps test": var fut1 = stepsAsync(1) From 13d28a5b710c414be17bfe36ca25bf34771875cc Mon Sep 17 00:00:00 2001 From: Miran Date: Wed, 3 Jul 2024 12:57:58 +0200 Subject: [PATCH 146/146] update ci.yml and be more explicit in .nimble (#549) --- .github/workflows/ci.yml | 22 +++++++++++++++------- .github/workflows/doc.yml | 4 ++-- chronos.nimble | 12 ++++++------ 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cab855580..81d6ccadb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,23 +22,29 @@ jobs: cpu: i386 - os: macos cpu: amd64 + - os: macos + cpu: arm64 - os: windows cpu: amd64 - #- os: windows - #cpu: i386 branch: [version-1-6, version-2-0, devel] include: - target: os: linux - builder: ubuntu-20.04 + builder: ubuntu-latest + shell: bash + - target: + os: macos + cpu: amd64 + builder: macos-13 shell: bash - target: os: macos - builder: macos-12 + cpu: arm64 + builder: macos-latest shell: bash - target: os: windows - builder: windows-2019 + builder: windows-latest shell: msys2 {0} defaults: @@ -50,7 +56,7 @@ jobs: continue-on-error: ${{ matrix.branch == 'devel' }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Enable debug verbosity if: runner.debug == '1' @@ -102,7 +108,7 @@ jobs: - name: Restore Nim DLLs dependencies (Windows) from cache if: runner.os == 'Windows' id: windows-dlls-cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: external/dlls-${{ matrix.target.cpu }} key: 'dlls-${{ matrix.target.cpu }}' @@ -126,6 +132,8 @@ jobs: run: | if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then PLATFORM=x64 + elif [[ '${{ matrix.target.cpu }}' == 'arm64' ]]; then + PLATFORM=arm64 else PLATFORM=x86 fi diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml index 5d4022c88..5fc0d524e 100644 --- a/.github/workflows/doc.yml +++ b/.github/workflows/doc.yml @@ -15,7 +15,7 @@ jobs: continue-on-error: true steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: true - uses: actions-rs/install@v0.1 @@ -41,7 +41,7 @@ jobs: - uses: jiro4989/setup-nim-action@v1 with: - nim-version: '1.6.16' + nim-version: '1.6.20' - name: Generate doc run: | diff --git a/chronos.nimble b/chronos.nimble index e8334ceb9..a6ae7491c 100644 --- a/chronos.nimble +++ b/chronos.nimble @@ -55,10 +55,10 @@ task examples, "Build examples": task test, "Run all tests": for args in testArguments: + # First run tests with `refc` memory manager. + run args & " --mm:refc", "tests/testall" if (NimMajor, NimMinor) > (1, 6): - # First run tests with `refc` memory manager. - run args & " --mm:refc", "tests/testall" - run args, "tests/testall" + run args & " --mm:orc", "tests/testall" task test_v3_compat, "Run all tests in v3 compatibility mode": for args in testArguments: @@ -75,10 +75,10 @@ task test_libbacktrace, "test with libbacktrace": ] for args in allArgs: + # First run tests with `refc` memory manager. 
+ run args & " --mm:refc", "tests/testall" if (NimMajor, NimMinor) > (1, 6): - # First run tests with `refc` memory manager. - run args & " --mm:refc", "tests/testall" - run args, "tests/testall" + run args & " --mm:orc", "tests/testall" task docs, "Generate API documentation": exec "mdbook build docs"
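The user-facing pieces of the patches above can be exercised as follows. These are minimal sketches, not part of any patch: the proc names are illustrative, and they assume a project that depends on this chronos revision and imports `chronos`, `chronos/config` and `chronos/timer` under those paths.

With the `handleException` changes, the global `-d:chronosHandleException` switch only relaxes plain `{.async.}` procs; explicitly annotated procs keep strict compile-time checking either way:

```nim
import chronos
import chronos/config   # exposes the `chronosHandleException` compile-time const

when chronosHandleException:
  # Plain `async` behaves like `async: (handleException: true)`: a stray
  # `Exception` is remapped to `AsyncExceptionError` at runtime instead of
  # being rejected at compile time.
  proc legacyProc() {.async.} =
    raise (ref Exception)(msg: "legacy code keeps compiling")

# An explicit annotation is never overridden by the global flag, so this proc
# is still checked against its `raises` list.
proc strictProc() {.async: (raises: [ValueError]).} =
  raise newException(ValueError, "checked at compile time")
```

For the `Duration` pretty-printer from #547, `$` renders every non-zero component, while `toString` takes a part limit for a truncated approximation (the expected strings below match the assertions in `tests/testtime.nim` above):

```nim
import chronos/timer

# Full rendering: all non-zero components are emitted.
doAssert $nanoseconds(1_800_000_600) == "1s800ms600ns"

# Limited rendering: only the first `parts` components are kept.
doAssert nanoseconds(1_800_000_600).toString(1) == "1s"
doAssert nanoseconds(1_800_000_600).toString(2) == "1s800ms"
```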