Diffstat (limited to 'doc')
-rw-r--r--   doc/CMakeLists.txt                      44
-rw-r--r--   doc/curve25519-sha256@libssh.org.txt    12
-rwxr-xr-x   doc/doc_coverage.sh                     52
-rw-r--r--   doc/forwarding.dox                      10
-rw-r--r--   doc/guided_tour.dox                      4
-rw-r--r--   doc/introduction.dox                     4
-rw-r--r--   doc/mainpage.dox                        16
-rw-r--r--   doc/pkcs11.dox                          30
-rw-r--r--   doc/sftp.dox                           114
-rw-r--r--   doc/sftp_aio.dox                       705
-rw-r--r--   doc/shell.dox                           46
11 files changed, 923 insertions, 114 deletions
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 259424b4..c8b5a7e5 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -14,11 +14,13 @@ if (DOXYGEN_FOUND)
set(DOXYGEN_OPTIMIZE_OUTPUT_FOR_C YES)
set(DOXYGEN_MARKDOWN_SUPPORT YES)
set(DOXYGEN_FULL_PATH_NAMES NO)
+ set(DOXYGEN_GENERATE_TAGFILE "tags.xml")
set(DOXYGEN_PREDEFINED DOXYGEN
WITH_SERVER
WITH_SFTP
- PRINTF_ATTRIBUTE(x,y))
+ PRINTF_ATTRIBUTE\(x,y\))
+ set(DOXYGEN_DOT_GRAPH_MAX_NODES 100)
set(DOXYGEN_EXCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/that_style)
set(DOXYGEN_HTML_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/that_style/header.html)
@@ -34,6 +36,44 @@ if (DOXYGEN_FOUND)
${CMAKE_CURRENT_SOURCE_DIR}/that_style/img/folderclosed.svg
${CMAKE_CURRENT_SOURCE_DIR}/that_style/img/folderopen.svg
${CMAKE_CURRENT_SOURCE_DIR}/that_style/js/striped_bg.js)
+ set(DOXYGEN_EXCLUDE_PATTERNS */src/external/* fe25519.h ge25519.h sc25519.h
+ blf.h)
+ set(DOXYGEN_EXCLUDE_SYMBOLS_STRUCTS chacha20_poly1305_keysched,dh_ctx,dh_keypair,error_struct,
+ packet_struct,pem_get_password_struct,ssh_tokens_st,
+ sftp_attributes_struct,sftp_client_message_struct,
+ sftp_dir_struct,sftp_ext_struct,sftp_file_struct,sftp_message_struct,
+ sftp_packet_struct,sftp_request_queue_struct,sftp_session_struct,
+ sftp_status_message_struct,ssh_agent_state_struct,
+ ssh_agent_struct,ssh_auth_auto_state_struct,ssh_auth_request,
+ ssh_bind_config_keyword_table_s,ssh_bind_config_match_keyword_table_s,
+ ssh_bind_struct,ssh_buffer_struct,ssh_channel_callbacks_struct,
+ ssh_channel_read_termination_struct,ssh_channel_request,
+ ssh_channel_request_open,ssh_channel_struct,ssh_cipher_struct,
+ ssh_common_struct,ssh_config_keyword_table_s,
+ ssh_config_match_keyword_table_s,ssh_connector_struct,
+ ssh_counter_struct,ssh_crypto_struct,ssh_event_fd_wrapper,
+ ssh_event_struct,ssh_global_request,ssh_gssapi_struct,ssh_hmac_struct,
+ ssh_iterator,ssh_kbdint_struct,ssh_kex_struct,ssh_key_struct,
+ ssh_knownhosts_entry,ssh_list,ssh_mac_ctx_struct,ssh_message_struct,
+ ssh_packet_callbacks_struct,ssh_packet_header,ssh_poll_ctx_struct,
+ ssh_poll_handle_struct,ssh_pollfd_struct,ssh_private_key_struct,
+ ssh_public_key_struct,ssh_scp_struct,ssh_service_request,
+ ssh_session_struct,ssh_signature_struct,ssh_socket_struct,
+ ssh_string_struct,ssh_threads_callbacks_struct,ssh_timestamp,)
+ set(DOXYGEN_EXCLUDE_SYMBOLS_MACRO SSH_FXP*,SSH_SOCKET*,SERVERBANNER,SOCKOPT_TYPE_ARG4,SSH_FILEXFER*,
+ SSH_FXF*,SSH_S_*,SFTP_*,NSS_BUFLEN_PASSWD,CLOCK,MAX_LINE_SIZE,
+ PKCS11_URI,KNOWNHOSTS_MAXTYPES,)
+ set(DOXYGEN_EXCLUDE_SYMBOLS_TYPEDEFS sftp_attributes,sftp_client_message,sftp_dir,sftp_ext,sftp_file,
+ sftp_message,sftp_packet,sftp_request_queue,sftp_session,
+ sftp_status_message,sftp_statvfs_t,poll_fn,ssh_callback_int,
+ ssh_callback_data,ssh_callback_int_int,ssh_message_callback,
+ ssh_channel_callback_int,ssh_channel_callback_data,ssh_callbacks,
+ ssh_gssapi_select_oid_callback,ssh_gssapi_accept_sec_ctx_callback,
+ ssh_gssapi_verify_mic_callback,ssh_server_callbacks,ssh_socket_callbacks,
+ ssh_packet_callbacks,ssh_channel_callbacks,ssh_bind,ssh_bind_callbacks,)
+ set(DOXYGEN_EXCLUDE_SYMBOLS ${DOXYGEN_EXCLUDE_SYMBOLS_STRUCTS}
+ ${DOXYGEN_EXCLUDE_SYMBOLS_MACRO}
+ ${DOXYGEN_EXCLUDE_SYMBOLS_TYPEDEFS})
# This updates the Doxyfile if we do changes here
set(_doxyfile_template "${CMAKE_BINARY_DIR}/CMakeDoxyfile.in")
@@ -44,6 +84,8 @@ if (DOXYGEN_FOUND)
${CMAKE_SOURCE_DIR}/include/libssh
${CMAKE_SOURCE_DIR}/src
${CMAKE_CURRENT_SOURCE_DIR})
+
+ add_custom_target(docs_coverage COMMAND ${CMAKE_SOURCE_DIR}/doc/doc_coverage.sh ${CMAKE_BINARY_DIR})
endif() # DOXYGEN_FOUND
endif() # CMAKE_VERSION
diff --git a/doc/curve25519-sha256@libssh.org.txt b/doc/curve25519-sha256@libssh.org.txt
index 75541902..04d88575 100644
--- a/doc/curve25519-sha256@libssh.org.txt
+++ b/doc/curve25519-sha256@libssh.org.txt
@@ -3,13 +3,13 @@ curve25519-sha256@libssh.org.txt Aris Adamantiadis <aris@badcode.be>
1. Introduction
-This document describes the key exchange methode curve25519-sha256@libssh.org
+This document describes the key exchange method curve25519-sha256@libssh.org
for SSH version 2 protocol. It is provided as an alternative to the existing
key exchange mechanisms based on either Diffie-Hellman or Elliptic Curve Diffie-
Hellman [RFC5656].
The reason is the following : During summer of 2013, revelations from ex-
consultant at NSA Edward Snowden gave proof that NSA willingly inserts backdoors
-into softwares, hardware components and published standards. While it is still
+into software, hardware components and published standards. While it is still
believed that the mathematics behind ECC cryptography are still sound and solid,
some people (including Bruce Schneier [SCHNEIER]), showed their lack of confidence
in NIST-published curves such as nistp256, nistp384, nistp521, for which constant
@@ -42,8 +42,8 @@ The following is an overview of the key exchange process:
Client Server
------ ------
Generate ephemeral key pair.
-SSH_MSG_KEX_ECDH_INIT -------->
- Verify that client public key
+SSH_MSG_KEX_ECDH_INIT -------->
+ Verify that client public key
length is 32 bytes.
Generate ephemeral key pair.
Compute shared secret.
@@ -55,7 +55,7 @@ Compute shared secret.
Generate exchange hash.
Verify server's signature.
-* Optional but strongly recommanded as this protects against MITM attacks.
+* Optional but strongly recommended as this protects against MITM attacks.
This is implemented using the same messages as described in RFC5656 chapter 4
@@ -109,7 +109,7 @@ This number is calculated using the following procedure:
side's public key and the local private key scalar.
The whole 32 bytes of the number X are then converted into a big integer k.
- This conversion follows the network byte order. This step differs from
+ This conversion follows the network byte order. This step differs from
RFC5656.
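
As a non-normative illustration, this conversion can be done with OpenSSL's
BN_bin2bn(), which already interprets its input in network byte order
(big endian):

    #include <openssl/bn.h>

    /* X is the 32-byte output of the curve25519 scalar multiplication */
    BIGNUM *shared_secret_to_bignum(const unsigned char X[32])
    {
        /* BN_bin2bn() treats the buffer as a big-endian integer,
           which is exactly the conversion required here */
        return BN_bin2bn(X, 32, NULL);
    }
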
[RFC5656] https://tools.ietf.org/html/rfc5656
diff --git a/doc/doc_coverage.sh b/doc/doc_coverage.sh
new file mode 100755
index 00000000..2f653275
--- /dev/null
+++ b/doc/doc_coverage.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+################################################################################
+# doc_coverage.sh #
+# Script to detect overall documentation coverage of libssh. The script uses #
+# doxygen to generate the documentation then parses its output. #
+# #
+# maintainer: Norbert Pocs <npocs@redhat.com> #
+################################################################################
+BUILD_DIR="$1"
+DOXYFILE_PATH="$BUILD_DIR/doc/Doxyfile.docs"
+INDEX_XML_PATH="$BUILD_DIR/doc/xml/index.xml"
+# filters
+F_EXCLUDE_FILES=' wrapper.h legacy.h crypto.h priv.h chacha.h curve25519.h '
+F_UNDOC_FUNC='(function).*is not documented'
+F_FUNC='kind="function"'
+F_HEADERS='libssh_8h_|group__libssh__'
+F_CUT_BEFORE='.*<name>'
+F_CUT_AFTER='<\/name><\/member>'
+# Doxygen options
+O_QUIET='QUIET=YES'
+O_GEN_XML='GENERATE_XML=YES'
+
+# check if build dir given
+if [ $# -eq 0 ]; then
+ echo "Please provide the build directory e.g.: ./build"
+ exit 255
+fi
+
+# modify doxyfile to our needs:
+# QUIET - less output
+# GENERATE_XML - xml needed to inspect all the functions
+# (note: the options are needed to be on separate lines)
+# We want to exclude irrelevant files
+MOD_DOXYFILE=$(cat "$DOXYFILE_PATH"; echo "$O_QUIET"; echo "$O_GEN_XML")
+MOD_DOXYFILE=$(printf '%s\n' "$MOD_DOXYFILE" | sed -E "s|^(EXCLUDE_PATTERNS[[:space:]]*=).*|\1 $F_EXCLUDE_FILES|")
+
+# call doxygen to get the warning messages
+# and also generate the xml for inspection
+DOXY_WARNINGS=$(echo "$MOD_DOXYFILE" | doxygen - 2>&1)
+
+# get the number of undocumented functions
+UNDOC_FUNC=$(echo "$DOXY_WARNINGS" | grep -cE "$F_UNDOC_FUNC")
+
+# filter out the lines consisting of functions of our interest
+FUNC_LINES=$(grep "$F_FUNC" "$INDEX_XML_PATH" | grep -E "$F_HEADERS")
+# cut the irrelevant information and leave just the function names
+ALL_FUNC=$(echo "$FUNC_LINES" | sed -e "s/$F_CUT_BEFORE//g" -e "s/$F_CUT_AFTER//")
+# remove duplicates and get the number of functions
+ALL_FUNC=$(echo "$ALL_FUNC" | sort - | uniq | wc -l)
+
+# percentage of the documented functions
+awk "BEGIN {printf \"Documentation coverage is %.2f%\n\", 100 - (${UNDOC_FUNC}/${ALL_FUNC}*100)}"
diff --git a/doc/forwarding.dox b/doc/forwarding.dox
index ca3b94f8..2b202b4d 100644
--- a/doc/forwarding.dox
+++ b/doc/forwarding.dox
@@ -165,6 +165,8 @@ int web_server(ssh_session session)
char buffer[256];
int nbytes, nwritten;
int port = 0;
+ char *peer_address = NULL;
+ int peer_port = 0;
char *helloworld = ""
"HTTP/1.1 200 OK\n"
"Content-Type: text/html\n"
@@ -187,7 +189,8 @@ int web_server(ssh_session session)
return rc;
}
- channel = ssh_channel_accept_forward(session, 60000, &port);
+ channel = ssh_channel_open_forward_port(session, 60000, &port,
+ &peer_address, &peer_port);
if (channel == NULL)
{
fprintf(stderr, "Error waiting for incoming connection: %s\n",
@@ -204,6 +207,7 @@ int web_server(ssh_session session)
ssh_get_error(session));
ssh_channel_send_eof(channel);
ssh_channel_free(channel);
+ ssh_string_free_char(peer_address);
return SSH_ERROR;
}
if (strncmp(buffer, "GET /", 5)) continue;
@@ -216,13 +220,15 @@ int web_server(ssh_session session)
ssh_get_error(session));
ssh_channel_send_eof(channel);
ssh_channel_free(channel);
+ ssh_string_free_char(peer_address);
return SSH_ERROR;
}
- printf("Sent answer\n");
+ printf("Sent answer to %s:%d\n", peer_address, peer_port);
}
ssh_channel_send_eof(channel);
ssh_channel_free(channel);
+ ssh_string_free_char(peer_address);
return SSH_OK;
}
@endcode
diff --git a/doc/guided_tour.dox b/doc/guided_tour.dox
index 69576f18..904a739e 100644
--- a/doc/guided_tour.dox
+++ b/doc/guided_tour.dox
@@ -5,7 +5,7 @@
A SSH session goes through the following steps:
- Before connecting to the server, you can set up if you wish one or other
- server public key authentication, i.e. DSA or RSA. You can choose
+ server public key authentication, i.e. RSA, ED25519 or ECDSA. You can choose
cryptographic algorithms you trust and compression algorithms if any. You
must of course set up the hostname.
@@ -15,7 +15,7 @@ A SSH session goes through the following steps:
file.
- The client must authenticate: the classical ways are password, or
- public keys (from dsa and rsa key-pairs generated by openssh).
+ public keys (from ecdsa, ed25519 and rsa key-pairs generated by openssh).
If a SSH agent is running, it is possible to use it.
- Now that the user has been authenticated, you must open one or several
diff --git a/doc/introduction.dox b/doc/introduction.dox
index f2f3d3dd..8d2aa1d5 100644
--- a/doc/introduction.dox
+++ b/doc/introduction.dox
@@ -44,6 +44,10 @@ Table of contents:
@subpage libssh_tutor_threads
+@subpage libssh_tutor_pkcs11
+
+@subpage libssh_tutor_sftp_aio
+
@subpage libssh_tutor_todo
*/
diff --git a/doc/mainpage.dox b/doc/mainpage.dox
index e40d8a15..04197603 100644
--- a/doc/mainpage.dox
+++ b/doc/mainpage.dox
@@ -20,7 +20,7 @@ the interesting functions as you go.
The libssh library provides:
- <strong>Key Exchange Methods</strong>: <i>curve25519-sha256, curve25519-sha256@libssh.org, ecdh-sha2-nistp256, ecdh-sha2-nistp384, ecdh-sha2-nistp521</i>, diffie-hellman-group1-sha1, diffie-hellman-group14-sha1
- - <strong>Public Key Algorithms</strong>: ssh-ed25519, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, ecdsa-sha2-nistp521, ssh-rsa, rsa-sha2-512, rsa-sha2-256,ssh-dss
+ - <strong>Public Key Algorithms</strong>: ssh-ed25519, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, ecdsa-sha2-nistp521, ssh-rsa, rsa-sha2-512, rsa-sha2-256
- <strong>Ciphers</strong>: <i>aes256-ctr, aes192-ctr, aes128-ctr</i>, aes256-cbc (rijndael-cbc@lysator.liu.se), aes192-cbc, aes128-cbc, 3des-cbc, blowfish-cbc
- <strong>Compression Schemes</strong>: zlib, <i>zlib@openssh.com</i>, none
- <strong>MAC hashes</strong>: hmac-sha1, hmac-sha2-256, hmac-sha2-512, hmac-md5
@@ -38,7 +38,7 @@ The libssh library provides:
@section main-additional-features Additional Features
- Client <b>and</b> server support
- - SSHv2 and SSHv1 protocol support
+ - SSHv2 protocol support
- Supports <a href="https://test.libssh.org/" target="_blank">Linux, UNIX, BSD, Solaris, OS/2 and Windows</a>
- Automated test cases with nightly <a href="https://test.libssh.org/" target="_blank">tests</a>
- Event model based on poll(2), or a poll(2)-emulation.
@@ -149,7 +149,7 @@ The libssh Team
@subsection main-rfc-secsh Secure Shell (SSH)
-The following RFC documents described SSH-2 protcol as an Internet standard.
+The following RFC documents describe the SSH-2 protocol as an Internet standard.
- <a href="https://tools.ietf.org/html/rfc4250" target="_blank">RFC 4250</a>,
The Secure Shell (SSH) Protocol Assigned Numbers
@@ -213,15 +213,15 @@ It was later modified and expanded by the following RFCs.
Use of RSA Keys with SHA-256 and SHA-512 in the Secure Shell (SSH) Protocol
- <a href="https://tools.ietf.org/html/rfc8709" target="_blank">RFC 8709</a>,
Ed25519 and Ed448 Public Key Algorithms for the Secure Shell (SSH) Protocol
+ - <a href="https://tools.ietf.org/html/rfc8709" target="_blank">RFC 8731</a>,
+ Secure Shell (SSH) Key Exchange Method Using Curve25519 and Curve448
+ - <a href="https://tools.ietf.org/html/rfc9142" target="_blank">RFC 9142</a>,
+ Key Exchange (KEX) Method Updates and Recommendations for Secure Shell (SSH)
There are also drafts that are being currently developed and followed.
- - <a href="https://tools.ietf.org/html/draft-ietf-curdle-ssh-kex-sha2-10" target="_blank">draft-ietf-curdle-ssh-kex-sha2-10</a>
- Key Exchange (KEX) Method Updates and Recommendations for Secure Shell (SSH)
- - <a href="https://tools.ietf.org/html/draft-miller-ssh-agent-03" target="_blank">draft-miller-ssh-agent-03</a>
+ - <a href="https://tools.ietf.org/html/draft-miller-ssh-agent-03" target="_blank">draft-miller-ssh-agent-08</a>
SSH Agent Protocol
- - <a href="https://tools.ietf.org/html/draft-ietf-curdle-ssh-curves-12" target="_blank">draft-ietf-curdle-ssh-curves-12</a>
- Secure Shell (SSH) Key Exchange Method using Curve25519 and Curve448
Interesting cryptography documents:
diff --git a/doc/pkcs11.dox b/doc/pkcs11.dox
index 0bdfc6dc..c2732a81 100644
--- a/doc/pkcs11.dox
+++ b/doc/pkcs11.dox
@@ -9,11 +9,11 @@ objects stored on the tokens can be uniquely identified is called PKCS #11 URI
(Uniform Resource Identifier) and is defined in RFC 7512
(https://tools.ietf.org/html/rfc7512).
-Pre-requisites:
+# Pre-requisites (OpenSSL < 3.0):
-OpenSSL defines an abstract layer called the "engine" to achieve cryptographic
-acceleration. The engine_pkcs11 module acts like an interface between the PKCS #11
-modules and the OpenSSL engine.
+OpenSSL 1.x defines an abstract layer called the "engine" to achieve
+cryptographic acceleration. The engine_pkcs11 module acts like an interface
+between the PKCS #11 modules and the OpenSSL application.
To build and use libssh with PKCS #11 support:
1. Enable the cmake option: $ cmake -DWITH_PKCS11_URI=ON
@@ -21,6 +21,20 @@ To build and use libssh with PKCS #11 support:
3. Install and configure engine_pkcs11 (https://github.com/OpenSC/libp11).
4. Plug in a working smart card or configure softhsm (https://www.opendnssec.org/softhsm).
+# Pre-requisites (OpenSSL 3.0.8+)
+
+OpenSSL 3.0 deprecates the use of low-level engines in favor of high-level
+"providers", which supply alternative implementations of cryptographic
+operations or acceleration.
+
+To build and use libssh with PKCS #11 support using OpenSSL providers:
+1. Install and configure pkcs11 provider (https://github.com/latchset/pkcs11-provider).
+2. Enable the cmake options: $ cmake -DWITH_PKCS11_URI=ON -DWITH_PKCS11_PROVIDER=ON
+3. Build with OpenSSL.
+4. Plug in a working smart card or configure softhsm (https://www.opendnssec.org/softhsm).
+
+# New API functions
+
The functions ssh_pki_import_pubkey_file() and ssh_pki_import_privkey_file() that
import the public and private keys from files respectively are now modified to support
PKCS #11 URIs. These functions automatically detect if the provided filename is a file path
@@ -31,7 +45,7 @@ corresponding to the PKCS #11 URI are loaded from the PKCS #11 device.
If you wish to authenticate using public keys on your own, follow the steps mentioned under
"Authentication with public keys" in Chapter 2 - A deeper insight into authentication.
-The function pki_uri_import() is used to populate the public/private ssh_key from the
+The function pki_uri_import() is used to populate the public/private ssh_key from the
engine with PKCS #11 URIs as the look up.
Here is a minimalistic example of public key authentication using PKCS #11 URIs:
@@ -64,4 +78,10 @@ We recommend the users to provide a specific PKCS #11 URI so that it matches onl
If the engine discovers multiple slots that could potentially contain the private keys referenced
by the provided PKCS #11 URI, the engine will not try to authenticate.
+For testing, the SoftHSM PKCS#11 library is used. However, it has some issues
+with OpenSSL initialization/cleanup when used with OpenSSL 3.0, so we use it
+indirectly through p11-kit remoting as described in the following article:
+
+https://p11-glue.github.io/p11-glue/p11-kit/manual/remoting.html
+
*/
diff --git a/doc/sftp.dox b/doc/sftp.dox
index 1f99cfdf..4c176a4b 100644
--- a/doc/sftp.dox
+++ b/doc/sftp.dox
@@ -139,7 +139,7 @@ Unlike its equivalent in the SCP subsystem, this function does NOT change the
current directory to the newly created subdirectory.
-@subsection sftp_write Copying a file to the remote computer
+@subsection sftp_write Writing to a file on the remote computer
You handle the contents of a remote file just like you would do with a
local file: you open the file in a given mode, move the file pointer in it,
@@ -203,16 +203,14 @@ int sftp_helloworld(ssh_session session, sftp_session sftp)
@subsection sftp_read Reading a file from the remote computer
-The nice thing with reading a file over the network through SFTP is that it
-can be done both in a synchronous way or an asynchronous way. If you read the file
-asynchronously, your program can do something else while it waits for the
-results to come.
-
-Synchronous read is done with sftp_read().
+A synchronous read from a remote file is done using sftp_read(). This
+section describes how to download a remote file using sftp_read(). The
+next section discusses synchronous and asynchronous read/write operations
+using the libssh sftp API in more detail.
Files are normally transferred in chunks. A good chunk size is 16 KB. The following
example transfers the remote file "/etc/profile" in 16 KB chunks. For each chunk we
-request, sftp_read blocks till the data has been received:
+request, sftp_read() blocks till the data has been received:
@code
// Good chunk size
@@ -273,87 +271,39 @@ int sftp_read_sync(ssh_session session, sftp_session sftp)
}
@endcode
-Asynchronous read is done in two steps, first sftp_async_read_begin(), which
-returns a "request handle", and then sftp_async_read(), which uses that request handle.
-If the file has been opened in nonblocking mode, then sftp_async_read()
-might return SSH_AGAIN, which means that the request hasn't completed yet
-and that the function should be called again later on. Otherwise,
-sftp_async_read() waits for the data to come. To open a file in nonblocking mode,
-call sftp_file_set_nonblocking() right after you opened it. Default is blocking mode.
+@subsection sftp_aio Performing an asynchronous read/write on a file on the remote computer
-The example below reads a very big file in asynchronous, nonblocking, mode. Each
-time the data is not ready yet, a counter is incremented.
+sftp_read() performs a "synchronous" read operation on a remote file.
+This means that sftp_read() will first request the server to read some
+data from the remote file and then would wait until the server response
+containing data to read (or an error) arrives at the client side.
-@code
-// Good chunk size
-#define MAX_XFER_BUF_SIZE 16384
+sftp_write() performs a "synchronous" write operation on a remote file.
+This means that sftp_write() will first request the server to write some
+data to the remote file and then would wait until the server response
+containing information about the status of the write operation arrives at the
+client side.
-int sftp_read_async(ssh_session session, sftp_session sftp)
-{
- int access_type;
- sftp_file file;
- char buffer[MAX_XFER_BUF_SIZE];
- int async_request;
- int nbytes;
- long counter;
- int rc;
+If your client program wants to do something other than waiting for the
+response after requesting a read/write, the synchronous sftp_read() and
+sftp_write() can't be used. In such a case the "asynchronous" sftp aio API
+should be used.
- access_type = O_RDONLY;
- file = sftp_open(sftp, "some_very_big_file",
- access_type, 0);
- if (file == NULL) {
- fprintf(stderr, "Can't open file for reading: %s\n",
- ssh_get_error(session));
- return SSH_ERROR;
- }
- sftp_file_set_nonblocking(file);
-
- async_request = sftp_async_read_begin(file, sizeof(buffer));
- counter = 0L;
- usleep(10000);
- if (async_request >= 0) {
- nbytes = sftp_async_read(file, buffer, sizeof(buffer),
- async_request);
- } else {
- nbytes = -1;
- }
+Please go through @ref libssh_tutor_sftp_aio for a detailed description
+of the sftp aio API.
- while (nbytes > 0 || nbytes == SSH_AGAIN) {
- if (nbytes > 0) {
- write(1, buffer, nbytes);
- async_request = sftp_async_read_begin(file, sizeof(buffer));
- } else {
- counter++;
- }
- usleep(10000);
+The sftp aio API provides two categories of functions:
+ - sftp_aio_begin_*() : For requesting a read/write from the server.
+ - sftp_aio_wait_*() : For waiting for the response of a previously
+ issued read/write request from the server.
- if (async_request >= 0) {
- nbytes = sftp_async_read(file, buffer, sizeof(buffer),
- async_request);
- } else {
- nbytes = -1;
- }
- }
-
- if (nbytes < 0) {
- fprintf(stderr, "Error while reading file: %s\n",
- ssh_get_error(session));
- sftp_close(file);
- return SSH_ERROR;
- }
-
- printf("The counter has reached value: %ld\n", counter);
-
- rc = sftp_close(file);
- if (rc != SSH_OK) {
- fprintf(stderr, "Can't close the read file: %s\n",
- ssh_get_error(session));
- return rc;
- }
+Hence, the client program can call sftp_aio_begin_*() to request a read/write
+and then can perform any number of operations (other than waiting) before
+calling sftp_aio_wait_*() to wait for the response of the previously issued
+request.
- return SSH_OK;
-}
-@endcode
+We refer to read/write operations performed in this manner as "asynchronous"
+read/write operations on a remote file.
@subsection sftp_ls Listing the contents of a directory
diff --git a/doc/sftp_aio.dox b/doc/sftp_aio.dox
new file mode 100644
index 00000000..9c26f5e1
--- /dev/null
+++ b/doc/sftp_aio.dox
@@ -0,0 +1,705 @@
+/**
+
+@page libssh_tutor_sftp_aio Chapter 10: The SFTP asynchronous I/O
+
+@section sftp_aio_api The SFTP asynchronous I/O
+
+NOTE: Please read @ref libssh_tutor_sftp before reading this page. The
+synchronous sftp_read() and sftp_write() have been described there.
+
+SFTP AIO stands for "SFTP Asynchronous Input/Output". This API contains
+functions which perform async read/write operations on remote files.
+
+File transfers performed using the asynchronous sftp aio API can be
+significantly faster than the file transfers performed using the synchronous
+sftp read/write API (see sftp_read() and sftp_write()).
+
+The sftp aio API functions are divided into two categories:
+ - sftp_aio_begin_*() [see sftp_aio_begin_read(), sftp_aio_begin_write()]:
+ These functions send a request for an i/o operation to the server and
+ provide the caller an sftp aio handle corresponding to the sent request.
+
+ - sftp_aio_wait_*() [see sftp_aio_wait_read(), sftp_aio_wait_write()]:
+ These functions wait for the server response corresponding to a previously
+ issued request, namely the request corresponding to the sftp aio handle
+ supplied by the caller to these functions.
+
+Conceptually, you can think of the sftp aio handle as a request identifier.
+
+Technically, the sftp_aio_begin_*() functions dynamically allocate memory to
+store information about the i/o request they send and provide the caller a
+handle to this memory; we call this handle an sftp aio handle.
+
+The sftp_aio_wait_*() functions use the information stored in that memory
+(referenced by the caller-supplied sftp aio handle) to identify a request,
+and then they wait for that request's response. These functions also release
+the memory referenced by the caller-supplied sftp aio handle (except when
+they return SSH_AGAIN).
+
+sftp_aio_free() can also be used to release the memory handled by an sftp aio
+handle but unlike the sftp_aio_wait_*() functions, it doesn't wait for a
+response. This should be used to release the memory corresponding to an sftp
+aio handle when some failure occurs. An example has been provided at the
+end of this page to show the usage of sftp_aio_free().
+
+To begin with, this tutorial will provide basic examples that describe the
+usage of the sftp aio API to perform a single read/write operation.
+
+The later sections describe the usage of the sftp aio API to obtain faster file
+transfers as compared to the transfers performed using the synchronous sftp
+read/write API.
+
+On encountering an error, the sftp aio API functions set the sftp and ssh
+errors just like any other libssh sftp API function. These errors can be
+obtained using sftp_get_error(), ssh_get_error() and ssh_get_error_code().
+The code examples provided on this page ignore error handling for the sake of
+brevity.
+
+@subsection sftp_aio_read Using the sftp aio API for reading (a basic example)
+
+For performing an async read operation on an sftp file (see sftp_open()),
+the first step is to call sftp_aio_begin_read() to send a read request to the
+server. The caller is provided an sftp aio handle corresponding to the sent
+read request.
+
+The second step is to pass a pointer to this aio handle to
+sftp_aio_wait_read(); this function waits for the server response, which
+indicates the success/failure of the read request. On success, the response
+indicates EOF or contains the data read from the sftp file.
+
+The following code example shows how a read operation can be performed
+on an sftp file using the sftp aio API.
+
+@code
+ssize_t read_chunk(sftp_file file, void *buf, size_t to_read)
+{
+ ssize_t bytes_requested, bytes_read;
+
+ // Variable to store an sftp aio handle
+ sftp_aio aio = NULL;
+
+ // Send a read request to the sftp server
+ bytes_requested = sftp_aio_begin_read(file, to_read, &aio);
+ if (bytes_requested == SSH_ERROR) {
+ // handle error
+ }
+
+ // Here its possible that (bytes_requested < to_read) as specified in
+ // the function documentation of sftp_aio_begin_read()
+
+ // Wait for the response of the read request corresponding to the
+ // sftp aio handle stored in the aio variable.
+ bytes_read = sftp_aio_wait_read(&aio, buf, to_read);
+ if (bytes_read == SSH_ERROR) {
+ // handle error
+ }
+
+ return bytes_read;
+}
+@endcode
+
+@subsection sftp_aio_write Using the sftp aio API for writing (a basic example)
+
+For performing an async write operation on an sftp file (see sftp_open()),
+the first step is to call sftp_aio_begin_write() to send a write request to
+the server. The caller is provided an sftp aio handle corresponding to the
+sent write request.
+
+The second step is to pass a pointer to this aio handle to
+sftp_aio_wait_write(); this function waits for the server response, which
+indicates the success/failure of the write request.
+
+The following code example shows how a write operation can be performed on an
+sftp file using the sftp aio API.
+
+@code
+ssize_t write_chunk(sftp_file file, void *buf, size_t to_write)
+{
+ ssize_t bytes_requested, bytes_written;
+
+ // Variable to store an sftp aio handle
+ sftp_aio aio = NULL;
+
+ // Send a write request to the sftp server
+ bytes_requested = sftp_aio_begin_write(file, buf, to_write, &aio);
+ if (bytes_requested == SSH_ERROR) {
+ // handle error
+ }
+
+ // Here its possible that (bytes_requested < to_write) as specified in
+ // the function documentation of sftp_aio_begin_write()
+
+ // Wait for the response of the write request corresponding to
+ // the sftp aio handle stored in the aio variable.
+ bytes_written = sftp_aio_wait_write(&aio);
+ if (bytes_written == SSH_ERROR) {
+ // handle error
+ }
+
+ return bytes_written;
+}
+@endcode
+
+@subsection sftp_aio_actual_use Using the sftp aio API to speed up a transfer
+
+The above examples were provided to introduce the sftp aio API.
+This is not how the sftp aio API is intended to be used, because the
+above usage offers no advantage over the synchronous sftp read/write API,
+which does the same thing, i.e. issues a request and then immediately waits
+for its response.
+
+The facility that the sftp aio API provides is that the user can do
+anything between issuing a request and getting the corresponding response.
+Any number of operations can be performed after calling sftp_aio_begin_*()
+[which issues a request] and before calling sftp_aio_wait_*() [which waits
+for a response].
+
+The code can leverage this feature by calling sftp_aio_begin_*() multiple times
+to issue multiple requests before calling sftp_aio_wait_*() to wait for the
+response of an earlier issued request. This approach will keep a certain number
+of requests outstanding at the client side.
+
+After issuing those requests, while the client code does something else (for
+example waiting for an outstanding request's response, processing an obtained
+response, issuing another request or any other operation the client wants
+to perform), at the same time:
+
+ - Some of those outstanding requests may be travelling over the
+ network towards the server.
+
+ - Some of the outstanding requests may have reached the server and may
+ be queued for processing at the server side.
+
+ - Some of the outstanding requests may have been processed and the
+ corresponding responses may be travelling over the network towards the
+ client.
+
+ - Some of the responses corresponding to the outstanding requests may
+ have already reached the client side.
+
+Clearly in this case, operations that the client performs and operations
+involved in transfer/processing of an outstanding request can occur in
+parallel. Also, operations involved in transfer/processing of two or more
+outstanding requests may also occur in parallel (for example when one request
+travels to the server, another request's response may be incoming towards the
+client). Such kind of parallelism makes the overall transfer faster as compared
+to a transfer performed using the synchronous sftp read/write API.
+
+When the synchronous sftp read/write API is used to perform a transfer,
+a strict sequence is followed:
+
+ - The client issues a single read/write request.
+ - Then waits for its response.
+ - On obtaining the response, the client processes it.
+ - After the processing ends, the client issues the next read/write request.
+
+A file transfer performed in this manner would be slower than the case where
+multiple read/write requests are kept outstanding at the client side, because
+here, at any given time, operations related to transfer/processing of only one
+request/response pair occur. This is in contrast to the multiple outstanding
+requests scenario where operations related to transfer/processing of multiple
+request/response pairs may occur at the same time.
+
+Although it's true that keeping multiple requests outstanding can speed up a
+transfer, those outstanding requests come at a cost of increased memory
+consumption both at the client side and the server side. Hence care must be
+taken to use a reasonable limit for the number of requests kept outstanding.
+
+The further sections provide code examples to show how uploads/downloads
+can be performed using the sftp aio API and the concept of outstanding requests
+discussed in this section. In those code examples, error handling has been
+ignored and at some places pseudo code has been used for the sake of brevity.
+
+The complete code for performing uploads/downloads using the sftp aio API,
+can be found at https://gitlab.com/libssh/libssh-mirror/-/tree/master.
+
+ - libssh benchmarks for uploads performed using the sftp aio API [See
+ tests/benchmarks/bench_sftp.c]
+ - libssh benchmarks for downloads performed using the sftp aio API. [See
+ tests/benchmarks/bench_sftp.c]
+ - libssh sftp ft API code for performing a local to remote transfer (upload).
+ [See src/sftp_ft.c]
+ - libssh sftp ft API code for performing a remote to local transfer
+ (download). [See src/sftp_ft.c]
+
+@subsection sftp_aio_cap Capping applied by the sftp aio API
+
+Before the code examples for uploads and downloads, it's important
+to know about the capping applied by the sftp aio API.
+
+sftp_aio_begin_read() caps the number of bytes the caller can request
+to read from the remote file. That cap is the value of the max_read_length
+field of the sftp_limits_t returned by sftp_limits(). Say that cap is LIM
+and the caller passes x as the number of bytes to read to
+sftp_aio_begin_read(), then (assuming no error occurs):
+
+ - if x <= LIM, then sftp_aio_begin_read() will request the server
+ to read x bytes from the remote file, and will return x.
+
+ - if x > LIM, then sftp_aio_begin_read() will request the server
+ to read LIM bytes from the remote file and will return LIM.
+
+Hence, to request the server to read x bytes (> LIM), the caller has to
+call sftp_aio_begin_read() multiple times, typically in a loop, breaking
+out of the loop when the sum of the return values of the multiple
+sftp_aio_begin_read() calls becomes equal to x.
+
+For the sake of simplicity, the code example for download in the upcoming
+section would always ask sftp_aio_begin_read() to read x <= LIM bytes,
+so that its return value is guaranteed to be x, unless an error occurs.
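+
+Purely as an illustration of the general case, the following sketch keeps
+calling sftp_aio_begin_read() (and, for simplicity, waits for each request
+immediately) until x bytes have been read or EOF is reached; the write case
+is symmetric. The helper name read_all is hypothetical:
+
+@code
+ssize_t read_all(sftp_file file, char *buf, size_t x)
+{
+    size_t total = 0;
+    ssize_t requested, n;
+    sftp_aio aio = NULL;
+
+    while (total < x) {
+        // The cap may make requested smaller than (x - total)
+        requested = sftp_aio_begin_read(file, x - total, &aio);
+        if (requested == SSH_ERROR) {
+            return SSH_ERROR;
+        }
+
+        n = sftp_aio_wait_read(&aio, buf + total, x - total);
+        if (n == SSH_ERROR) {
+            return SSH_ERROR;
+        }
+
+        if (n == 0) {
+            break; // EOF reached before x bytes could be read
+        }
+
+        total += (size_t)n;
+    }
+
+    return (ssize_t)total;
+}
+@endcode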
+
+Similarly, sftp_aio_begin_write() caps the number of bytes the caller
+can request to write to the remote file. That cap is the value of
+max_write_length field of the sftp_limits_t returned by sftp_limits().
+Say that cap is LIM and the caller passes x as the number of bytes to
+write to sftp_aio_begin_write(), then (assuming no error occurs):
+
+ - if x <= LIM, then sftp_aio_begin_write() will request the server
+ to write x bytes to the remote file, and will return x.
+
+ - if x > LIM, then sftp_aio_begin_write() will request the server
+ to write LIM bytes to the remote file and will return LIM.
+
+Hence, to request the server to write x bytes (> LIM), the caller has to
+call sftp_aio_begin_write() multiple times, typically in a loop, breaking
+out of the loop when the sum of the return values of the multiple
+sftp_aio_begin_write() calls becomes equal to x.
+
+For the sake of simplicity, the code example for upload in the upcoming
+section would always ask sftp_aio_begin_write() to write x <= LIM bytes,
+so that its return value is guaranteed to be x, unless an error occurs.
+
+@subsection sftp_aio_download_example Performing a download using the sftp aio API
+
+Terminology used in the following code snippets:
+
+ - sftp : The sftp_session opened using sftp_new() and initialised using
+ sftp_init()
+
+ - file : The sftp file handle of the remote file to download data
+ from. (See sftp_open())
+
+ - file_size : the size of the sftp file to download. This size can be obtained
+ by statting the remote file to download (e.g by using sftp_stat())
+
+ - We will need to maintain a queue which will be used to store the sftp aio
+ handles corresponding to the outstanding requests.
+
+First, we issue the read requests while ensuring that their count
+doesn't exceed a particular limit decided by us, and that the number of bytes
+requested doesn't exceed the size of the file to download.
+
+@code
+sftp_aio aio = NULL;
+
+// Chunk size to use for the transfer
+size_t chunk_size;
+
+// For the limits structure that would be used
+// by the code to set the chunk size
+sftp_limits_t lim = NULL;
+
+// Max number of requests to keep outstanding at a time
+size_t in_flight_requests = 5;
+
+// Number of bytes for which requests have been sent
+size_t total_bytes_requested = 0;
+
+// Number of bytes which have been downloaded
+size_t bytes_downloaded = 0;
+
+// Buffer to use for the download
+char *buffer = NULL;
+
+// Helper variables
+size_t to_read;
+ssize_t bytes_requested;
+
+// Get the sftp limits
+lim = sftp_limits(sftp);
+if (lim == NULL) {
+ // handle error
+}
+
+// Set the chunk size for download = the max limit for reading
+// The reason for this has been given in the "Capping applied by
+// the sftp aio API" section (Its to make the code simpler)
+//
+// Assigning a size_t type variable a uint64_t type value here,
+// theoretically could cause an overflow, but practically
+// max_read_length would never exceed SIZE_MAX so it's okay.
+chunk_size = lim->max_read_length;
+
+buffer = malloc(chunk_size);
+if (buffer == NULL) {
+ // handle error
+}
+
+... // Code to open the remote file (to download) using sftp_open().
+... // Code to stat the remote file's file size.
+... // Code to open the local file in which downloaded data is to be stored.
+... // Code to initialize the queue which will be used to store sftp aio
+ // handles.
+
+for (i = 0;
+ i < in_flight_requests && total_bytes_requested < file_size;
+ ++i) {
+ to_read = file_size - total_bytes_requested;
+ if (to_read > chunk_size) {
+ to_read = chunk_size;
+ }
+
+ // Issue a read request
+ bytes_requested = sftp_aio_begin_read(file, to_read, &aio);
+ if (bytes_requested == SSH_ERROR) {
+ // handle error
+ }
+
+ if ((size_t)bytes_requested < to_read) {
+ // Should not happen for this code, as the to_read is <=
+ // max limit for reading (chunk size), so there is no reason
+ // for sftp_aio_begin_read() to return a lesser value.
+ }
+
+ total_bytes_requested += (size_t)bytes_requested;
+
+ // Pseudo code
+ ENQUEUE aio in the queue;
+}
+
+@endcode
+
+At this point, at most in_flight_requests requests may be
+outstanding. Now we wait for the response corresponding to the earliest
+issued outstanding request.
+
+On getting that response, we issue another read request if there are
+still some bytes in the sftp file (to download) for which we haven't sent the
+read request. (This happens when total_bytes_requested < file_size)
+
+This issuing of another read request (under a condition) is done to
+keep the number of outstanding requests equal to the value of the
+in_flight_requests variable.
+
+This process has to be repeated for every remaining outstanding request.
+
+@code
+while (the queue is not empty) {
+ // Pseudo code
+ aio = DEQUEUE an sftp aio handle from the queue of sftp aio handles;
+
+ // Wait for the response of the request corresponding to the aio
+ bytes_read = sftp_aio_wait_read(&aio, buffer, chunk_size);
+ if (bytes_read == SSH_ERROR) {
+ //handle error
+ }
+
+ bytes_downloaded += bytes_read;
+ if (bytes_read != chunk_size && bytes_downloaded != file_size) {
+ // A short read encountered on the remote file before reaching EOF,
+ // short read before reaching EOF should never happen for the sftp aio
+ // API which respects the max limit for reading. This probably
+ // indicates a bad server.
+ }
+
+ // Pseudo code
+ WRITE bytes_read bytes from the buffer into the local file
+ in which downloaded data is to be stored ;
+
+ if (total_bytes_requested == file_size) {
+ // no need to issue more read requests
+ continue;
+ }
+
+ // else issue a read request
+ to_read = file_size - total_bytes_requested;
+ if (to_read > chunk_size) {
+ to_read = chunk_size;
+ }
+
+ bytes_requested = sftp_aio_begin_read(file, to_read, &aio);
+ if (bytes_requested == SSH_ERROR) {
+ // handle error
+ }
+
+ if ((size_t)bytes_requested < to_read) {
+ // Should not happen for this code, as the to_read is <=
+ // max limit for reading (chunk size), so there is no reason
+ // for sftp_aio_begin_read() to return a lesser value.
+ }
+
+ total_bytes_requested += bytes_requested;
+
+ // Pseudo code
+ ENQUEUE aio in the queue;
+}
+
+free(buffer);
+sftp_limits_free(lim);
+
+... // Code to destroy the queue which was used to store the sftp aio
+ // handles.
+@endcode
+
+After exiting the "while (the queue is not empty)" loop, the download
+is complete (assuming no errors occurred).
+
+@subsection sftp_aio_upload_example Performing an upload using the sftp aio API
+
+Terminology used in the following code snippets:
+
+ - sftp : The sftp_session opened using sftp_new() and initialised using
+ sftp_init()
+
+ - file : The sftp file handle of the remote file in which uploaded data
+ is to be stored. (See sftp_open())
+
+ - file_size : The size of the local file to upload. This size can be
+ obtained by statting the local file to upload (e.g by using stat())
+
+ - We will need to maintain a queue which will be used to store the sftp aio
+ handles corresponding to the outstanding requests.
+
+First, we issue the write requests while ensuring that their count
+doesn't exceed a particular limit decided by us, and that the number of bytes
+requested to write doesn't exceed the size of the file to upload.
+
+@code
+sftp_aio aio = NULL;
+
+// The chunk size to use for the transfer
+size_t chunk_size;
+
+// For the limits structure that would be used by
+// the code to set the chunk size
+sftp_limits_t lim = NULL;
+
+// Max number of requests to keep outstanding at a time
+size_t in_flight_requests = 5;
+
+// Total number of bytes for which write requests have been sent
+size_t total_bytes_requested = 0;
+
+// Buffer to use for the upload
+char *buffer = NULL;
+
+// Helper variables
+size_t to_write;
+ssize_t bytes_requested;
+
+// Get the sftp limits
+lim = sftp_limits(sftp);
+if (lim == NULL) {
+ // handle error
+}
+
+// Set the chunk size for upload = the max limit for writing.
+// The reason for this has been given in the "Capping applied by
+// the sftp aio API" section (Its to make the code simpler)
+//
+// Assigning a size_t type variable a uint64_t type value here,
+// theoretically could cause an overflow, but practically
+// max_write_length would never exceed SIZE_MAX so it's okay.
+chunk_size = lim->max_write_length;
+
+buffer = malloc(chunk_size);
+if (buffer == NULL) {
+ // handle error
+}
+
+... // Code to open the local file (to upload) [e.g using open(), fopen()].
+... // Code to stat the local file's file size [e.g using stat()].
+... // Code to open the remote file in which uploaded data will be stored [see
+ // sftp_open()].
+... // Code to initialize the queue which will be used to store sftp aio
+ // handles.
+
+for (i = 0;
+ i < in_flight_requests && total_bytes_requested < file_size;
+ ++i) {
+ to_write = file_size - total_bytes_requested;
+ if (to_write > chunk_size) {
+ to_write = chunk_size;
+ }
+
+ // Pseudo code
+ READ to_write bytes from the local file (to upload) into the buffer;
+
+ bytes_requested = sftp_aio_begin_write(file, buffer, to_write, &aio);
+ if (bytes_requested == SSH_ERROR) {
+ // handle error
+ }
+
+ if ((size_t)bytes_requested < to_write) {
+ // Should not happen for this code, as the to_write is <=
+ // max limit for writing (chunk size), so there is no reason
+ // for sftp_aio_begin_write() to return a lesser value.
+ }
+
+ total_bytes_requested += (size_t)bytes_requested;
+
+ // Pseudo code
+ ENQUEUE aio in the queue;
+}
+
+@endcode
+
+At this point, at most in_flight_requests requests may be
+outstanding. Now we wait for the response corresponding to the earliest
+issued outstanding request.
+
+On getting that response, we issue another write request if there are
+still some bytes in the local file (to upload) for which we haven't sent
+the write request. (This happens when total_bytes_requested < file_size)
+
+This issuing of another write request (under a condition) is done to
+keep the number of outstanding requests equal to the value of the
+in_flight_requests variable.
+
+This process has to be repeated for every remaining outstanding request.
+
+@code
+while (the queue is not empty) {
+ // Pseudo code
+ aio = DEQUEUE an sftp aio handle from the queue of sftp aio handles;
+
+ // Wait for the response of the request corresponding to the aio
+ bytes_written = sftp_aio_wait_write(&aio);
+ if (bytes_written == SSH_ERROR) {
+ // handle error
+ }
+
+ // sftp_aio_wait_write() won't report a short write, so no need
+ // to check for a short write here.
+
+ if (total_bytes_requested == file_size) {
+ // no need to issue more write requests
+ continue;
+ }
+
+ // else issue a write request
+ to_write = file_size - total_bytes_requested;
+ if (to_write > chunk_size) {
+ to_write = chunk_size;
+ }
+
+ // Pseudo code
+ READ to_write bytes from the local file (to upload) into a buffer;
+
+ bytes_requested = sftp_aio_begin_write(file, buffer, to_write, &aio);
+ if (bytes_requested == SSH_ERROR) {
+ // handle error
+ }
+
+ if ((size_t)bytes_requested < to_write) {
+ // Should not happen for this code, as the to_write is <=
+ // max limit for writing (chunk size), so there is no reason
+ // for sftp_aio_begin_write() to return a lesser value.
+ }
+
+ total_bytes_requested += (size_t)bytes_requested;
+
+ // Pseudo code
+ ENQUEUE aio in the queue;
+}
+
+free(buffer);
+
+... // Code to destroy the queue which was used to store the sftp aio
+ // handles.
+@endcode
+
+After exiting the "while (the queue is not empty)" loop, the upload
+is complete (assuming no errors occurred).
+
+@subsection sftp_aio_free Example showing the usage of sftp_aio_free()
+
+The purpose of sftp_aio_free() was discussed at the beginning of this page;
+the following code example shows how it can be used during cleanup.
+
+@code
+void print_sftp_error(sftp_session sftp)
+{
+ if (sftp == NULL) {
+ return;
+ }
+
+ fprintf(stderr, "sftp error : %d\n", sftp_get_error(sftp));
+ fprintf(stderr, "ssh error : %s\n", ssh_get_error(sftp->session));
+}
+
+// Returns 0 on success, -1 on error
+int write_strings(sftp_file file)
+{
+ const char * strings[] = {
+ "This is the first string",
+ "This is the second string",
+ "This is the third string",
+ "This is the fourth string"
+ };
+
+ size_t string_count = sizeof(strings) / sizeof(strings[0]);
+ size_t i;
+
+ sftp_session sftp = NULL;
+ sftp_aio aio = NULL;
+
+ int rc;
+
+ if (file == NULL) {
+ return -1;
+ }
+
+ ... // Code to initialize the queue which will be used to store sftp aio
+ // handles
+
+ sftp = file->sftp;
+ for (i = 0; i < string_count; ++i) {
+ rc = sftp_aio_begin_write(file,
+ strings[i],
+ strlen(strings[i]),
+ &aio);
+ if (rc == SSH_ERROR) {
+ print_sftp_error(sftp);
+ goto err;
+ }
+
+ // Pseudo code
+ ENQUEUE aio in the queue of sftp aio handles
+ }
+
+ for (i = 0; i < string_count; ++i) {
+ // Pseudo code
+ aio = DEQUEUE an sftp aio handle from the queue of sftp aio handles;
+
+ rc = sftp_aio_wait_write(&aio);
+ if (rc == SSH_ERROR) {
+ print_sftp_error(sftp);
+ goto err;
+ }
+ }
+
+
+ ... // Code to destroy the queue in which sftp aio handles were
+ // stored
+
+ return 0;
+
+err:
+
+ while (queue is not empty) {
+ // Pseudo code
+ aio = DEQUEUE an sftp aio handle from the queue of sftp aio handles;
+
+ sftp_aio_free(aio);
+ }
+
+ ... // Code to destroy the queue in which sftp aio handles were
+ // stored.
+
+ return -1;
+}
+
+@endcode
+
+*/
diff --git a/doc/shell.dox b/doc/shell.dox
index 0693bbcc..f51c489c 100644
--- a/doc/shell.dox
+++ b/doc/shell.dox
@@ -65,8 +65,17 @@ to as a "pty", for "pseudo-teletype". The remote processes won't see the
difference with a real text-oriented terminal.
If needed, you request the pty with the function ssh_channel_request_pty().
-Then you define its dimensions (number of rows and columns)
-with ssh_channel_change_pty_size().
+If you want to define its dimensions (number of rows and columns),
+call ssh_channel_request_pty_size() instead. It's also possible to change
+the dimensions after creating the pty with ssh_channel_change_pty_size().
+
+These two functions configure the pty using the same terminal modes that
+stdin has. If stdin isn't a TTY, they use default modes that configure
+the pty in canonical mode, e.g. preserving CR and LF characters.
+If you want to change the terminal modes used by the pty (e.g. to change
+CRLF handling), use ssh_channel_request_pty_size_modes(). This function
+accepts an additional "modes" buffer that is expected to contain encoded
+terminal modes according to RFC 4254 section 8.
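+
+As an illustration of that encoding (the exact prototype of
+ssh_channel_request_pty_size_modes() should be checked against libssh.h; the
+call below assumes it takes the terminal type, the dimensions, and the
+encoded modes buffer together with its length), the following sketch requests
+a pty whose ONLCR mode (opcode 72 in RFC 4254 section 8) is disabled, so the
+server does not translate LF into CRLF on output:
+
+@code
+// Encoded terminal modes: pairs of (opcode byte, uint32 value in network
+// byte order), terminated by TTY_OP_END (0).
+unsigned char modes[] = {
+    72, 0x00, 0x00, 0x00, 0x00,  /* ONLCR = 0 */
+    0                            /* TTY_OP_END */
+};
+
+rc = ssh_channel_request_pty_size_modes(channel, "xterm", 80, 24,
+                                        modes, sizeof(modes));
+if (rc != SSH_OK) return rc;
+@endcode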
Be your session interactive or not, the next step is to request a
shell with ssh_channel_request_shell().
@@ -320,18 +329,36 @@ int interactive_shell_session(ssh_session session, ssh_channel channel)
If your remote application is graphical, you can forward the X11 protocol to
your local computer.
-To do that, you first declare that you accept X11 connections with
-ssh_channel_accept_x11(). Then you create the forwarding tunnel for
-the X11 protocol with ssh_channel_request_x11().
+To do that, you first declare a callback to manage channel_open_request_x11_function.
+Then you create the forwarding tunnel for the X11 protocol with ssh_channel_request_x11().
The following code performs channel initialization and shell session
opening, and handles a parallel X11 connection:
@code
+#include <libssh/callbacks.h>
+
+ssh_channel x11channel = NULL;
+
+ssh_channel x11_open_request_callback(ssh_session session, const char *shost, int sport, void *userdata)
+{
+ x11channel = ssh_channel_new(session);
+ return x11channel;
+}
+
int interactive_shell_session(ssh_channel channel)
{
int rc;
- ssh_channel x11channel;
+    ssh_session session = ssh_channel_get_session(channel);
+
+ struct ssh_callbacks_struct cb =
+ {
+ .channel_open_request_x11_function = x11_open_request_callback,
+ .userdata = NULL
+ };
+
+ ssh_callbacks_init(&cb);
+ rc = ssh_set_callbacks(session, &cb);
+ if (rc != SSH_OK) return rc;
rc = ssh_channel_request_pty(channel);
if (rc != SSH_OK) return rc;
@@ -350,12 +377,15 @@ int interactive_shell_session(ssh_channel channel)
}
@endcode
-Don't forget to set the $DISPLAY environment variable on the remote
+Don't forget to check the $DISPLAY environment variable on the remote
side, or the remote applications won't try using the X11 tunnel:
@code
-$ export DISPLAY=:0
+$ echo $DISPLAY
+localhost:10.0
$ xclock &
@endcode
+See an implementation example at https://gitlab.com/libssh/libssh-mirror/-/tree/master/examples/ssh_X11_client.c for details.
+
*/