summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFrank Ch. Eigler <fche@redhat.com>2019-11-04 16:33:09 -0500
committerMark Wielaard <mark@klomp.org>2019-11-22 23:26:21 +0100
commit3d7a08d5d37c3e96151d1be6e6e6eb0556c079c9 (patch)
treee0e521b088ff051993abb47e77f30414cf485729
parente27e30cae0468903473641efe3853c12d9294ac3 (diff)
debuginfod 3/3: client interruptability
For interactive clients such as gdb, interruptibility is important for usability during longer downloads. This patchset adds a download-progress callback function to the debuginfod client library, with which a caller app can interrupt a download as well as be notified of its quantitative progress.
-rw-r--r--debuginfod/ChangeLog15
-rw-r--r--debuginfod/debuginfod-client.c89
-rw-r--r--debuginfod/debuginfod-find.c33
-rw-r--r--debuginfod/debuginfod.cxx236
-rw-r--r--debuginfod/debuginfod.h17
-rw-r--r--debuginfod/libdebuginfod.map1
-rw-r--r--doc/Makefile.am2
-rw-r--r--doc/debuginfod-find.113
-rw-r--r--doc/debuginfod_find_debuginfo.355
-rw-r--r--doc/debuginfod_set_progressfn.31
-rw-r--r--tests/ChangeLog4
-rwxr-xr-xtests/run-debuginfod-find.sh8
12 files changed, 326 insertions, 148 deletions
diff --git a/debuginfod/ChangeLog b/debuginfod/ChangeLog
index b5679a2f..34713746 100644
--- a/debuginfod/ChangeLog
+++ b/debuginfod/ChangeLog
@@ -1,3 +1,18 @@
+2019-11-04 Frank Ch. Eigler <fche@redhat.com>
+
+	* debuginfod-client.c (debuginfod_set_progressfn): New function
+ for progress/interrupt callback.
+ (debuginfod_clean_cache, debuginfod_query_server): Call it.
+	* debuginfod.h: Declare it.
+ * debuginfod_set_progressfn.3, *_find_debuginfo.3: Document it.
+ * Makefile.am: Install it.
+ * libdebuginfod.map: Export it all under ELFUTILS_0.178 symversion.
+
+ * debuginfod-find.c: Add -v option to activate progress cb.
+ * debuginfod-find.1: Document it.
+ * debuginfod.cxx: Add $DEBUGINFOD_TEST_WEBAPI_SLEEP env var
+ to insert sleep in webapi callbacks, to help manual testing.
+
2019-10-28 Frank Ch. Eigler <fche@redhat.com>
* debuginfod.cxx: New file: debuginfod server.
diff --git a/debuginfod/debuginfod-client.c b/debuginfod/debuginfod-client.c
index 1aa9edac..64dd608a 100644
--- a/debuginfod/debuginfod-client.c
+++ b/debuginfod/debuginfod-client.c
@@ -99,6 +99,9 @@ static const char url_delim_char = ' ';
static const char *server_timeout_envvar = DEBUGINFOD_TIMEOUT_ENV_VAR;
static int server_timeout = 5;
+/* Progress/interrupt callback function. */
+static debuginfod_progressfn_t progressfn;
+
/* Data associated with a particular CURL easy handle. Passed to
the write callback. */
struct handle_data
@@ -229,8 +232,14 @@ debuginfod_clean_cache(char *cache_path, char *interval_path, char *max_unused_p
return -errno;
FTSENT *f;
+ long files = 0;
while ((f = fts_read(fts)) != NULL)
{
+ files++;
+ if (progressfn) /* inform/check progress callback */
+ if ((*progressfn) (files, 0))
+ break;
+
switch (f->fts_info)
{
case FTS_F:
@@ -263,13 +272,14 @@ debuginfod_clean_cache(char *cache_path, char *interval_path, char *max_unused_p
/* Query each of the server URLs found in $DEBUGINFOD_URLS for the file
with the specified build-id, type (debuginfo, executable or source)
and filename. filename may be NULL. If found, return a file
- descriptor for the target, otherwise return an error code. */
+ descriptor for the target, otherwise return an error code.
+*/
static int
debuginfod_query_server (const unsigned char *build_id,
- int build_id_len,
- const char *type,
- const char *filename,
- char **path)
+ int build_id_len,
+ const char *type,
+ const char *filename,
+ char **path)
{
char *urls_envvar;
char *server_urls;
@@ -486,10 +496,55 @@ debuginfod_query_server (const unsigned char *build_id,
/* Query servers in parallel. */
int still_running;
+ long loops = 0;
do
{
CURLMcode curl_res;
+ if (progressfn) /* inform/check progress callback */
+ {
+ loops ++;
+ long pa = loops; /* default params for progress callback */
+ long pb = 0;
+ if (target_handle) /* we've committed to a server; report its download progress */
+ {
+#ifdef CURLINFO_SIZE_DOWNLOAD_T
+ curl_off_t dl;
+ curl_res = curl_easy_getinfo(target_handle,
+ CURLINFO_SIZE_DOWNLOAD_T,
+ &dl);
+ if (curl_res == 0 && dl >= 0)
+ pa = (dl > LONG_MAX ? LONG_MAX : (long)dl);
+#else
+ double dl;
+ curl_res = curl_easy_getinfo(target_handle,
+ CURLINFO_SIZE_DOWNLOAD,
+ &dl);
+ if (curl_res == 0)
+ pa = (dl > LONG_MAX ? LONG_MAX : (long)dl);
+#endif
+
+#ifdef CURLINFO_CONTENT_LENGTH_DOWNLOAD_T
+ curl_off_t cl;
+ curl_res = curl_easy_getinfo(target_handle,
+ CURLINFO_CONTENT_LENGTH_DOWNLOAD_T,
+ &cl);
+ if (curl_res == 0 && cl >= 0)
+ pb = (cl > LONG_MAX ? LONG_MAX : (long)cl);
+#else
+ double cl;
+ curl_res = curl_easy_getinfo(target_handle,
+ CURLINFO_CONTENT_LENGTH_DOWNLOAD,
+ &cl);
+ if (curl_res == 0)
+ pb = (cl > LONG_MAX ? LONG_MAX : (long)cl);
+#endif
+ }
+
+ if ((*progressfn) (pa, pb))
+ break;
+ }
+
/* Wait 1 second, the minimum DEBUGINFOD_TIMEOUT. */
curl_multi_wait(curlm, NULL, 0, 1000, NULL);
@@ -623,33 +678,39 @@ debuginfod_query_server (const unsigned char *build_id,
/* See debuginfod.h */
int
debuginfod_find_debuginfo (const unsigned char *build_id, int build_id_len,
- char **path)
+ char **path)
{
return debuginfod_query_server(build_id, build_id_len,
- "debuginfo", NULL, path);
+ "debuginfo", NULL, path);
}
/* See debuginfod.h */
int
debuginfod_find_executable(const unsigned char *build_id, int build_id_len,
- char **path)
+ char **path)
{
return debuginfod_query_server(build_id, build_id_len,
- "executable", NULL, path);
+ "executable", NULL, path);
}
/* See debuginfod.h */
-int debuginfod_find_source(const unsigned char *build_id,
- int build_id_len,
- const char *filename,
- char **path)
+int debuginfod_find_source(const unsigned char *build_id, int build_id_len,
+ const char *filename, char **path)
{
return debuginfod_query_server(build_id, build_id_len,
- "source", filename, path);
+ "source", filename, path);
}
+debuginfod_progressfn_t
+debuginfod_set_progressfn(debuginfod_progressfn_t fn)
+{
+ debuginfod_progressfn_t it = progressfn;
+ progressfn = fn;
+ return it;
+}
+
/* NB: these are thread-unsafe. */
__attribute__((constructor)) attribute_hidden void libdebuginfod_ctor(void)
diff --git a/debuginfod/debuginfod-find.c b/debuginfod/debuginfod-find.c
index ff47b807..4c1a94c6 100644
--- a/debuginfod/debuginfod-find.c
+++ b/debuginfod/debuginfod-find.c
@@ -42,10 +42,39 @@ static const char args_doc[] = N_("debuginfo BUILDID\n"
"executable BUILDID\n"
"source BUILDID /FILENAME");
+/* Definitions of arguments for argp functions. */
+static const struct argp_option options[] =
+ {
+ { "verbose", 'v', NULL, 0, "Increase verbosity.", 0 },
+ { NULL, 0, NULL, 0, NULL, 0 }
+ };
+
+
+
+int progressfn(long a, long b)
+{
+ fprintf (stderr, "Progress %ld / %ld\n", a, b);
+ return 0;
+}
+
+
+static error_t parse_opt (int key, char *arg, struct argp_state *state)
+{
+ (void) arg;
+ (void) state;
+ switch (key)
+ {
+ case 'v': debuginfod_set_progressfn (& progressfn); break;
+ default: return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
+
+
/* Data structure to communicate with argp functions. */
static struct argp argp =
{
- NULL, NULL, args_doc, doc, NULL, NULL, NULL
+ options, parse_opt, args_doc, doc, NULL, NULL, NULL
};
@@ -61,7 +90,7 @@ main(int argc, char** argv)
argp_help (&argp, stderr, ARGP_HELP_USAGE, argv[0]);
return 1;
}
-
+
int rc;
char *cache_name;
diff --git a/debuginfod/debuginfod.cxx b/debuginfod/debuginfod.cxx
index a87ec4d0..359f9a24 100644
--- a/debuginfod/debuginfod.cxx
+++ b/debuginfod/debuginfod.cxx
@@ -121,7 +121,7 @@ static const char DEBUGINFOD_SQLITE_DDL[] =
"pragma wal_checkpoint = truncate;\n" // clean out any preexisting wal file
"pragma journal_size_limit = 0;\n" // limit steady state file (between grooming, which also =truncate's)
"pragma auto_vacuum = incremental;\n" // https://sqlite.org/pragma.html
- "pragma busy_timeout = 1000;\n" // https://sqlite.org/pragma.html
+ "pragma busy_timeout = 1000;\n" // https://sqlite.org/pragma.html
// NB: all these are overridable with -D option
// Normalization table for interning file names
@@ -232,7 +232,7 @@ static const char DEBUGINFOD_SQLITE_DDL[] =
"union all select 'file s',count(*) from " BUILDIDS "_f_s\n"
"union all select 'rpm d/e',count(*) from " BUILDIDS "_r_de\n"
"union all select 'rpm sref',count(*) from " BUILDIDS "_r_sref\n"
- "union all select 'rpm sdef',count(*) from " BUILDIDS "_r_sdef\n"
+ "union all select 'rpm sdef',count(*) from " BUILDIDS "_r_sdef\n"
"union all select 'buildids',count(*) from " BUILDIDS "_buildids\n"
"union all select 'filenames',count(*) from " BUILDIDS "_files\n"
"union all select 'files scanned (#)',count(*) from " BUILDIDS "_file_mtime_scanned\n"
@@ -241,9 +241,9 @@ static const char DEBUGINFOD_SQLITE_DDL[] =
"union all select 'index db size (mb)',page_count*page_size/1024/1024 as size FROM pragma_page_count(), pragma_page_size()\n"
#endif
";\n"
-
+
// schema change history & garbage collection
-//
+//
// XXX: we could have migration queries here to bring prior-schema
// data over instead of just dropping it.
//
@@ -277,14 +277,14 @@ static const char DEBUGINFOD_SQLITE_DDL[] =
"drop table if exists buildids5_files;\n"
"drop table if exists buildids5_buildids;\n"
"drop table if exists buildids5_bolo;\n"
- "drop table if exists buildids5_rfolo;\n"
+ "drop table if exists buildids5_rfolo;\n"
"drop view if exists buildids5;\n"
// buildids4: introduce rpmfile RFOLO
"drop table if exists buildids4_norm;\n"
"drop table if exists buildids4_files;\n"
"drop table if exists buildids4_buildids;\n"
"drop table if exists buildids4_bolo;\n"
- "drop table if exists buildids4_rfolo;\n"
+ "drop table if exists buildids4_rfolo;\n"
"drop view if exists buildids4;\n"
// buildids3*: split out srcfile BOLO
"drop table if exists buildids3_norm;\n"
@@ -295,7 +295,7 @@ static const char DEBUGINFOD_SQLITE_DDL[] =
// buildids2: normalized buildid and filenames into interning tables;
"drop table if exists buildids2_norm;\n"
"drop table if exists buildids2_files;\n"
- "drop table if exists buildids2_buildids;\n"
+ "drop table if exists buildids2_buildids;\n"
"drop view if exists buildids2;\n"
// buildids1: made buildid and artifacttype NULLable, to represent cached-negative
// lookups from sources, e.g. files or rpms that contain no buildid-indexable content
@@ -324,7 +324,7 @@ static const struct argp_option options[] =
{ "scan-file-dir", 'F', NULL, 0, "Enable ELF/DWARF file scanning threads.", 0 },
{ "scan-rpm-dir", 'R', NULL, 0, "Enable RPM scanning threads.", 0 },
// "source-oci-imageregistry" ...
-
+
{ NULL, 0, NULL, 0, "Options:", 2 },
{ "rescan-time", 't', "SECONDS", 0, "Number of seconds to wait between rescans, 0=disable.", 0 },
{ "groom-time", 'g', "SECONDS", 0, "Number of seconds to wait between database grooming, 0=disable.", 0 },
@@ -336,7 +336,7 @@ static const struct argp_option options[] =
{ "database", 'd', "FILE", 0, "Path to sqlite database.", 0 },
{ "ddl", 'D', "SQL", 0, "Apply extra sqlite ddl/pragma to connection.", 0 },
{ "verbose", 'v', NULL, 0, "Increase verbosity.", 0 },
-
+
{ NULL, 0, NULL, 0, NULL, 0 }
};
@@ -373,6 +373,8 @@ static bool scan_rpms = false;
static vector<string> extra_ddl;
static regex_t file_include_regex;
static regex_t file_exclude_regex;
+static int test_webapi_sleep; /* testing only */
+
/* Handle program arguments. */
static error_t
@@ -408,13 +410,13 @@ parse_opt (int key, char *arg,
regfree (&file_include_regex);
rc = regcomp (&file_include_regex, arg, REG_EXTENDED|REG_NOSUB);
if (rc != 0)
- argp_failure(state, 1, EINVAL, "regular expession");
+ argp_failure(state, 1, EINVAL, "regular expession");
break;
case 'X':
regfree (&file_exclude_regex);
rc = regcomp (&file_exclude_regex, arg, REG_EXTENDED|REG_NOSUB);
if (rc != 0)
- argp_failure(state, 1, EINVAL, "regular expession");
+ argp_failure(state, 1, EINVAL, "regular expession");
break;
case ARGP_KEY_ARG:
source_paths.insert(string(arg));
@@ -440,9 +442,9 @@ struct reportable_exception
reportable_exception(int c, const string& m): code(c), message(m) {}
reportable_exception(const string& m): code(503), message(m) {}
reportable_exception(): code(503), message() {}
-
+
void report(ostream& o) const; // defined under obatched() class below
-
+
int mhd_send_response(MHD_Connection* c) const {
MHD_Response* r = MHD_create_response_from_buffer (message.size(),
(void*) message.c_str(),
@@ -586,7 +588,7 @@ private:
const string nickname;
const string sql;
sqlite3_stmt *pp;
-
+
sqlite_ps(const sqlite_ps&); // make uncopyable
sqlite_ps& operator=(const sqlite_ps &); // make unassignable
@@ -604,7 +606,7 @@ public:
sqlite3_reset(this->pp);
return *this;
}
-
+
sqlite_ps& bind(int parameter, const string& str)
{
if (verbose > 4)
@@ -635,7 +637,7 @@ public:
return *this;
}
-
+
void step_ok_done() {
int rc = sqlite3_step (this->pp);
if (verbose > 4)
@@ -645,7 +647,7 @@ public:
(void) sqlite3_reset (this->pp);
}
-
+
int step() {
int rc = sqlite3_step (this->pp);
if (verbose > 4)
@@ -653,7 +655,7 @@ public:
return rc;
}
-
+
~sqlite_ps () { sqlite3_finalize (this->pp); }
operator sqlite3_stmt* () { return this->pp; }
@@ -669,7 +671,7 @@ struct defer_dtor
{
public:
typedef Ignore (*dtor_fn) (Payload);
-
+
private:
Payload p;
dtor_fn fn;
@@ -735,7 +737,7 @@ add_mhd_last_modified (struct MHD_Response *resp, time_t mtime)
if (rc > 0 && rc < sizeof (datebuf))
(void) MHD_add_response_header (resp, "Last-Modified", datebuf);
}
-
+
(void) MHD_add_response_header (resp, "Cache-Control", "public");
}
@@ -754,11 +756,11 @@ handle_buildid_f_match (int64_t b_mtime,
// if still missing, a periodic groom pass will delete this buildid record
return 0;
}
-
+
// NB: use manual close(2) in error case instead of defer_dtor, because
// in the normal case, we want to hand the fd over to libmicrohttpd for
// file transfer.
-
+
struct stat s;
int rc = fstat(fd, &s);
if (rc < 0)
@@ -776,7 +778,7 @@ handle_buildid_f_match (int64_t b_mtime,
close(fd);
return 0;
}
-
+
struct MHD_Response* r = MHD_create_response_from_fd ((uint64_t) s.st_size, fd);
if (r == 0)
{
@@ -806,7 +808,7 @@ shell_escape(const string& str)
for (auto&& x : str)
{
if (! isalnum(x) && x != '/')
- y += "\\";
+ y += "\\";
y += x;
}
return y;
@@ -830,7 +832,7 @@ handle_buildid_r_match (int64_t b_mtime,
obatched(clog) << "mtime mismatch for " << b_source0 << endl;
return 0;
}
-
+
string popen_cmd = string("rpm2cpio " + shell_escape(b_source0));
FILE* fp = popen (popen_cmd.c_str(), "r"); // "e" O_CLOEXEC?
if (fp == NULL)
@@ -849,7 +851,7 @@ handle_buildid_r_match (int64_t b_mtime,
rc = archive_read_support_filter_all(a);
if (rc != ARCHIVE_OK)
throw archive_exception(a, "cannot select all filters");
-
+
rc = archive_read_open_FILE (a, fp);
if (rc != ARCHIVE_OK)
throw archive_exception(a, "cannot open archive from rpm2cpio pipe");
@@ -863,7 +865,7 @@ handle_buildid_r_match (int64_t b_mtime,
if (! S_ISREG(archive_entry_mode (e))) // skip non-files completely
continue;
-
+
string fn = archive_entry_pathname (e);
if (fn != string(".")+b_source1)
continue;
@@ -874,7 +876,7 @@ handle_buildid_r_match (int64_t b_mtime,
if (fd < 0)
throw libc_exception (errno, "cannot create temporary file");
unlink (tmppath); // unlink now so OS will release the file as soon as we close the fd
-
+
rc = archive_read_data_into_fd (a, fd);
if (rc != ARCHIVE_OK)
{
@@ -923,6 +925,15 @@ handle_buildid_match (int64_t b_mtime,
}
+static int
+debuginfod_find_progress (long a, long b)
+{
+ if (verbose > 4)
+ obatched(clog) << "federated debuginfod progress=" << a << "/" << b << endl;
+
+ return interrupted;
+}
+
static struct MHD_Response* handle_buildid (const string& buildid /* unsafe */,
const string& artifacttype /* unsafe */,
@@ -939,7 +950,7 @@ static struct MHD_Response* handle_buildid (const string& buildid /* unsafe */,
if (atype_code == "S" && suffix == "")
throw reportable_exception("invalid source suffix");
-
+
// validate buildid
if ((buildid.size() < 2) || // not empty
(buildid.size() % 2) || // even number
@@ -978,7 +989,7 @@ static struct MHD_Response* handle_buildid (const string& buildid /* unsafe */,
pp->bind(2, suffix);
}
unique_ptr<sqlite_ps> ps_closer(pp); // release pp if exception or return
-
+
// consume all the rows
while (1)
{
@@ -986,7 +997,7 @@ static struct MHD_Response* handle_buildid (const string& buildid /* unsafe */,
if (rc == SQLITE_DONE) break;
if (rc != SQLITE_ROW)
throw sqlite_exception(rc, "step");
-
+
int64_t b_mtime = sqlite3_column_int64 (*pp, 0);
string b_stype = string((const char*) sqlite3_column_text (*pp, 1) ?: ""); /* by DDL may not be NULL */
string b_source0 = string((const char*) sqlite3_column_text (*pp, 2) ?: ""); /* may be NULL */
@@ -1005,6 +1016,7 @@ static struct MHD_Response* handle_buildid (const string& buildid /* unsafe */,
// We couldn't find it in the database. Last ditch effort
// is to defer to other debuginfo servers.
+
int fd = -1;
if (artifacttype == "debuginfo")
fd = debuginfod_find_debuginfo ((const unsigned char*) buildid.c_str(), 0,
@@ -1036,7 +1048,7 @@ static struct MHD_Response* handle_buildid (const string& buildid /* unsafe */,
}
else if (fd != -ENOSYS) // no DEBUGINFOD_URLS configured
throw libc_exception(-fd, "upstream debuginfod query failed");
-
+
throw reportable_exception(MHD_HTTP_NOT_FOUND, "not found");
}
@@ -1067,10 +1079,13 @@ handler_cb (void * /*cls*/,
{
struct MHD_Response *r = NULL;
string url_copy = url;
-
+
if (verbose)
obatched(clog) << conninfo(connection) << " " << method << " " << url << endl;
+ if (test_webapi_sleep)
+ sleep (test_webapi_sleep);
+
try
{
if (string(method) != "GET")
@@ -1079,13 +1094,13 @@ handler_cb (void * /*cls*/,
/* Start decoding the URL. */
size_t slash1 = url_copy.find('/', 1);
string url1 = url_copy.substr(0, slash1); // ok even if slash1 not found
-
+
if (slash1 != string::npos && url1 == "/buildid")
{
size_t slash2 = url_copy.find('/', slash1+1);
if (slash2 == string::npos)
throw reportable_exception("/buildid/ webapi error, need buildid");
-
+
string buildid = url_copy.substr(slash1+1, slash2-slash1-1);
size_t slash3 = url_copy.find('/', slash2+1);
@@ -1100,17 +1115,17 @@ handler_cb (void * /*cls*/,
artifacttype = url_copy.substr(slash2+1, slash3-slash2-1);
suffix = url_copy.substr(slash3); // include the slash in the suffix
}
-
+
r = handle_buildid(buildid, artifacttype, suffix, 0); // NB: don't care about result-fd
}
else if (url1 == "/metrics")
r = handle_metrics();
else
throw reportable_exception("webapi error, unrecognized /operation");
-
+
if (r == 0)
throw reportable_exception("internal error, missing response");
-
+
int rc = MHD_queue_response (connection, MHD_HTTP_OK, r);
MHD_destroy_response (r);
return rc;
@@ -1136,9 +1151,9 @@ dwarf_extract_source_paths (Elf *elf, set<string>& debug_sourcefiles)
Dwarf* altdbg = NULL;
int altdbg_fd = -1;
-
+
// DWZ handling: if we have an unsatisfied debug-alt-link, add an
- // empty string into the outgoing sourcefiles set, so the caller
+ // empty string into the outgoing sourcefiles set, so the caller
// should know that our data is incomplete.
const char *alt_name_p;
const void *alt_build_id; // elfutils-owned memory
@@ -1172,7 +1187,7 @@ dwarf_extract_source_paths (Elf *elf, set<string>& debug_sourcefiles)
{
// swallow exceptions
}
-
+
// NB: this is not actually recursive! This invokes the web-query
// path, which cannot get back into the scan code paths.
if (r)
@@ -1191,7 +1206,7 @@ dwarf_extract_source_paths (Elf *elf, set<string>& debug_sourcefiles)
// NB: dwarf_setalt(alt) inappropriate - already done!
// NB: altdbg will stay 0 so nothing tries to redundantly dealloc.
}
-
+
if (alt)
{
if (verbose > 3)
@@ -1204,7 +1219,7 @@ dwarf_extract_source_paths (Elf *elf, set<string>& debug_sourcefiles)
obatched(clog) << "Unresolved altdebug buildid=" << buildid << endl;
}
}
-
+
Dwarf_Off offset = 0;
Dwarf_Off old_offset;
size_t hsize;
@@ -1235,7 +1250,7 @@ dwarf_extract_source_paths (Elf *elf, set<string>& debug_sourcefiles)
comp_dir = dirs[0];
if (comp_dir == NULL)
comp_dir = "";
-
+
if (verbose > 3)
obatched(clog) << "searching for sources for cu=" << cuname << " comp_dir=" << comp_dir
<< " #files=" << nfiles << " #dirs=" << ndirs << endl;
@@ -1257,7 +1272,7 @@ dwarf_extract_source_paths (Elf *elf, set<string>& debug_sourcefiles)
if (string(hat) == "<built-in>") // gcc intrinsics, don't bother record
continue;
-
+
string waldo;
if (hat[0] == '/') // absolute
waldo = (string (hat));
@@ -1268,7 +1283,7 @@ dwarf_extract_source_paths (Elf *elf, set<string>& debug_sourcefiles)
obatched(clog) << "skipping hat=" << hat << " due to empty comp_dir" << endl;
continue;
}
-
+
// NB: this is the 'waldo' that a dbginfo client will have
// to supply for us to give them the file The comp_dir
// prefixing is a definite complication. Otherwise we'd
@@ -1279,7 +1294,7 @@ dwarf_extract_source_paths (Elf *elf, set<string>& debug_sourcefiles)
if (verbose > 4)
obatched(clog) << waldo
<< (debug_sourcefiles.find(waldo)==debug_sourcefiles.end() ? " new" : " dup") << endl;
-
+
debug_sourcefiles.insert (waldo);
}
}
@@ -1299,7 +1314,7 @@ elf_classify (int fd, bool &executable_p, bool &debuginfo_p, string &buildid, se
Elf *elf = elf_begin (fd, ELF_C_READ_MMAP_PRIVATE, NULL);
if (elf == NULL)
return;
-
+
try // catch our types of errors and clean up the Elf* object
{
if (elf_kind (elf) != ELF_K_ELF)
@@ -1316,7 +1331,7 @@ elf_classify (int fd, bool &executable_p, bool &debuginfo_p, string &buildid, se
return;
}
auto elf_type = ehdr->e_type;
-
+
const void *build_id; // elfutils-owned memory
ssize_t sz = dwelf_elf_gnu_build_id (elf, & build_id);
if (sz <= 0)
@@ -1326,7 +1341,7 @@ elf_classify (int fd, bool &executable_p, bool &debuginfo_p, string &buildid, se
elf_end (elf);
return;
}
-
+
// build_id is a raw byte array; convert to hexadecimal *lowercase*
unsigned char* build_id_bytes = (unsigned char*) build_id;
for (ssize_t idx=0; idx<sz; idx++)
@@ -1373,7 +1388,7 @@ elf_classify (int fd, bool &executable_p, bool &debuginfo_p, string &buildid, se
int rc = elf_getshdrstrndx (elf, &shstrndx);
if (rc < 0)
throw elfutils_exception(rc, "getshdrstrndx");
-
+
Elf_Scn *scn = NULL;
while (true)
{
@@ -1417,10 +1432,10 @@ static void
scan_source_file_path (const string& dir)
{
obatched(clog) << "fts/file traversing " << dir << endl;
-
+
struct timeval tv_start, tv_end;
gettimeofday (&tv_start, NULL);
-
+
sqlite_ps ps_upsert_buildids (db, "file-buildids-intern", "insert or ignore into " BUILDIDS "_buildids VALUES (NULL, ?);");
sqlite_ps ps_upsert_files (db, "file-files-intern", "insert or ignore into " BUILDIDS "_files VALUES (NULL, ?);");
sqlite_ps ps_upsert_de (db, "file-de-upsert",
@@ -1442,11 +1457,11 @@ scan_source_file_path (const string& dir)
"insert or ignore into " BUILDIDS "_file_mtime_scanned (sourcetype, file, mtime, size)"
"values ('F', (select id from " BUILDIDS "_files where name = ?), ?, ?);");
-
+
char * const dirs[] = { (char*) dir.c_str(), NULL };
unsigned fts_scanned=0, fts_regex=0, fts_cached=0, fts_debuginfo=0, fts_executable=0, fts_sourcefiles=0;
-
+
FTS *fts = fts_open (dirs,
FTS_PHYSICAL /* don't follow symlinks */
| FTS_XDEV /* don't cross devices/mountpoints */
@@ -1462,7 +1477,7 @@ scan_source_file_path (const string& dir)
while ((f = fts_read (fts)) != NULL)
{
semaphore_borrower handle_one_file (scan_concurrency_sem);
-
+
fts_scanned ++;
if (interrupted)
break;
@@ -1491,7 +1506,7 @@ scan_source_file_path (const string& dir)
fts_regex ++;
continue;
}
-
+
switch (f->fts_info)
{
case FTS_D:
@@ -1521,7 +1536,7 @@ scan_source_file_path (const string& dir)
bool executable_p = false, debuginfo_p = false; // E and/or D
string buildid;
set<string> sourcefiles;
-
+
int fd = open (rps.c_str(), O_RDONLY);
try
{
@@ -1530,16 +1545,17 @@ scan_source_file_path (const string& dir)
else
throw libc_exception(errno, string("open ") + rps);
}
-
+
// NB: we catch exceptions here too, so that we can
// cache the corrupt-elf case (!executable_p &&
// !debuginfo_p) just below, just as if we had an
// EPERM error from open(2).
+
catch (const reportable_exception& e)
{
e.report(clog);
}
-
+
if (fd >= 0)
close (fd);
@@ -1548,7 +1564,7 @@ scan_source_file_path (const string& dir)
.reset()
.bind(1, rps)
.step_ok_done();
-
+
if (buildid == "")
{
// no point storing an elf file without buildid
@@ -1579,11 +1595,11 @@ scan_source_file_path (const string& dir)
.bind(5, f->fts_statp->st_mtime)
.step_ok_done();
}
-
+
if (sourcefiles.size() && buildid != "")
{
fts_sourcefiles += sourcefiles.size();
-
+
for (auto&& dwarfsrc : sourcefiles)
{
char *srp = realpath(dwarfsrc.c_str(), NULL);
@@ -1597,7 +1613,7 @@ scan_source_file_path (const string& dir)
rc = stat(srps.c_str(), &sfs);
if (rc != 0)
continue;
-
+
if (verbose > 2)
obatched(clog) << "recorded buildid=" << buildid << " file=" << srps
<< " mtime=" << sfs.st_mtime
@@ -1630,7 +1646,7 @@ scan_source_file_path (const string& dir)
.bind(2, f->fts_statp->st_mtime)
.bind(3, f->fts_statp->st_size)
.step_ok_done();
-
+
if (verbose > 2)
obatched(clog) << "recorded buildid=" << buildid << " file=" << rps
<< " mtime=" << f->fts_statp->st_mtime << " atype="
@@ -1664,7 +1680,7 @@ scan_source_file_path (const string& dir)
gettimeofday (&tv_end, NULL);
double deltas = (tv_end.tv_sec - tv_start.tv_sec) + (tv_end.tv_usec - tv_start.tv_usec)*0.000001;
-
+
obatched(clog) << "fts/file traversed " << dir << " in " << deltas << "s, scanned=" << fts_scanned
<< ", regex-skipped=" << fts_regex
<< ", cached=" << fts_cached << ", debuginfo=" << fts_debuginfo
@@ -1688,7 +1704,7 @@ thread_main_scan_source_file_path (void* arg)
else if (sigusr1 != forced_rescan_count)
{
forced_rescan_count = sigusr1;
- scan_source_file_path (dir);
+ scan_source_file_path (dir);
}
}
catch (const sqlite_exception& e)
@@ -1700,7 +1716,7 @@ thread_main_scan_source_file_path (void* arg)
if (rescan_s)
rescan_timer %= rescan_s;
}
-
+
return 0;
}
@@ -1737,14 +1753,14 @@ rpm_classify (const string& rps, sqlite_ps& ps_upsert_buildids, sqlite_ps& ps_up
rc = archive_read_support_filter_all(a);
if (rc != ARCHIVE_OK)
throw archive_exception(a, "cannot select all filters");
-
+
rc = archive_read_open_FILE (a, fp);
if (rc != ARCHIVE_OK)
throw archive_exception(a, "cannot open archive from rpm2cpio pipe");
if (verbose > 3)
obatched(clog) << "rpm2cpio|libarchive scanning " << rps << endl;
-
+
while(1) // parse cpio archive entries
{
try
@@ -1756,11 +1772,11 @@ rpm_classify (const string& rps, sqlite_ps& ps_upsert_buildids, sqlite_ps& ps_up
if (! S_ISREG(archive_entry_mode (e))) // skip non-files completely
continue;
-
+
string fn = archive_entry_pathname (e);
if (fn.size() > 1 && fn[0] == '.')
fn = fn.substr(1); // trim off the leading '.'
-
+
if (verbose > 3)
obatched(clog) << "rpm2cpio|libarchive checking " << fn << endl;
@@ -1776,7 +1792,7 @@ rpm_classify (const string& rps, sqlite_ps& ps_upsert_buildids, sqlite_ps& ps_up
throw libc_exception (errno, "cannot create temporary file");
unlink (tmppath); // unlink now so OS will release the file as soon as we close the fd
defer_dtor<int,int> minifd_closer (fd, close);
-
+
rc = archive_read_data_into_fd (a, fd);
if (rc != ARCHIVE_OK)
throw archive_exception(a, "cannot extract file");
@@ -1800,7 +1816,7 @@ rpm_classify (const string& rps, sqlite_ps& ps_upsert_buildids, sqlite_ps& ps_up
.reset()
.bind(1, fn)
.step_ok_done();
-
+
if (sourcefiles.size() > 0) // sref records needed
{
// NB: we intern each source file once. Once raw, as it
@@ -1818,7 +1834,7 @@ rpm_classify (const string& rps, sqlite_ps& ps_upsert_buildids, sqlite_ps& ps_up
fts_sref_complete_p = false;
continue;
}
-
+
ps_upsert_files
.reset()
.bind(1, s)
@@ -1840,7 +1856,7 @@ rpm_classify (const string& rps, sqlite_ps& ps_upsert_buildids, sqlite_ps& ps_up
fts_debuginfo ++;
if (executable_p || debuginfo_p)
- {
+ {
ps_upsert_de
.reset()
.bind(1, buildid)
@@ -1861,14 +1877,14 @@ rpm_classify (const string& rps, sqlite_ps& ps_upsert_buildids, sqlite_ps& ps_up
.bind(3, fn)
.step_ok_done();
}
-
+
if ((verbose > 2) && (executable_p || debuginfo_p))
obatched(clog) << "recorded buildid=" << buildid << " rpm=" << rps << " file=" << fn
<< " mtime=" << mtime << " atype="
<< (executable_p ? "E" : "")
<< (debuginfo_p ? "D" : "")
<< " sourcefiles=" << sourcefiles.size() << endl;
-
+
}
catch (const reportable_exception& e)
{
@@ -1884,7 +1900,7 @@ static void
scan_source_rpm_path (const string& dir)
{
obatched(clog) << "fts/rpm traversing " << dir << endl;
-
+
sqlite_ps ps_upsert_buildids (db, "rpm-buildid-intern", "insert or ignore into " BUILDIDS "_buildids VALUES (NULL, ?);");
sqlite_ps ps_upsert_files (db, "rpm-file-intern", "insert or ignore into " BUILDIDS "_files VALUES (NULL, ?);");
sqlite_ps ps_upsert_de (db, "rpm-de-insert",
@@ -1913,7 +1929,7 @@ scan_source_rpm_path (const string& dir)
gettimeofday (&tv_start, NULL);
unsigned fts_scanned=0, fts_regex=0, fts_cached=0, fts_debuginfo=0;
unsigned fts_executable=0, fts_rpm = 0, fts_sref=0, fts_sdef=0;
-
+
FTS *fts = fts_open (dirs,
FTS_PHYSICAL /* don't follow symlinks */
| FTS_XDEV /* don't cross devices/mountpoints */
@@ -1976,18 +1992,18 @@ scan_source_rpm_path (const string& dir)
rps.substr(rps.size()-suffix.size()) != suffix)
continue;
fts_rpm ++;
-
+
/* See if we know of it already. */
int rc = ps_query
.reset()
.bind(1, rps)
.bind(2, f->fts_statp->st_mtime)
.step();
- ps_query.reset();
+ ps_query.reset();
if (rc == SQLITE_ROW) // i.e., a result, as opposed to DONE (no results)
// no need to recheck a file/version we already know
// specifically, no need to parse this rpm again, since we already have
- // it as a D or E or S record,
+ // it as a D or E or S record,
// (so is stored with buildid=NULL)
{
fts_cached ++;
@@ -1999,7 +2015,7 @@ scan_source_rpm_path (const string& dir)
.reset()
.bind(1, rps)
.step_ok_done();
-
+
// extract the rpm contents via popen("rpm2cpio") | libarchive | loop-of-elf_classify()
unsigned my_fts_executable = 0, my_fts_debuginfo = 0, my_fts_sref = 0, my_fts_sdef = 0;
bool my_fts_sref_complete_p = true;
@@ -2067,7 +2083,7 @@ scan_source_rpm_path (const string& dir)
gettimeofday (&tv_end, NULL);
double deltas = (tv_end.tv_sec - tv_start.tv_sec) + (tv_end.tv_usec - tv_start.tv_usec)*0.000001;
-
+
obatched(clog) << "fts/rpm traversed " << dir << " in " << deltas << "s, scanned=" << fts_scanned
<< ", regex-skipped=" << fts_regex
<< ", rpm=" << fts_rpm << ", cached=" << fts_cached << ", debuginfo=" << fts_debuginfo
@@ -2093,7 +2109,7 @@ thread_main_scan_source_rpm_path (void* arg)
else if (sigusr1 != forced_rescan_count)
{
forced_rescan_count = sigusr1;
- scan_source_rpm_path (dir);
+ scan_source_rpm_path (dir);
}
}
catch (const sqlite_exception& e)
@@ -2139,10 +2155,10 @@ database_stats_report()
void groom()
{
obatched(clog) << "grooming database" << endl;
-
+
struct timeval tv_start, tv_end;
gettimeofday (&tv_start, NULL);
-
+
// scan for files that have disappeared
sqlite_ps files (db, "check old files", "select s.mtime, s.file, f.name from "
BUILDIDS "_file_mtime_scanned s, " BUILDIDS "_files f "
@@ -2157,7 +2173,7 @@ void groom()
int rc = files.step();
if (rc != SQLITE_ROW)
break;
-
+
int64_t mtime = sqlite3_column_int64 (files, 0);
int64_t fileid = sqlite3_column_int64 (files, 1);
const char* filename = ((const char*) sqlite3_column_text (files, 2) ?: "");
@@ -2181,7 +2197,7 @@ void groom()
"where not exists (select 1 from " BUILDIDS "_f_de d where " BUILDIDS "_buildids.id = d.buildid) "
"and not exists (select 1 from " BUILDIDS "_r_de d where " BUILDIDS "_buildids.id = d.buildid)");
buildids_del.reset().step_ok_done();
-
+
// NB: "vacuum" is too heavy for even daily runs: it rewrites the entire db, so is done as maxigroom -G
sqlite_ps g1 (db, "incremental vacuum", "pragma incremental_vacuum");
g1.reset().step_ok_done();
@@ -2191,7 +2207,7 @@ void groom()
g3.reset().step_ok_done();
database_stats_report();
-
+
gettimeofday (&tv_end, NULL);
double deltas = (tv_end.tv_sec - tv_start.tv_sec) + (tv_end.tv_usec - tv_start.tv_usec)*0.000001;
@@ -2240,7 +2256,7 @@ signal_handler (int /* sig */)
if (db)
sqlite3_interrupt (db);
-
+
// NB: don't do anything else in here
}
@@ -2298,7 +2314,7 @@ main (int argc, char *argv[])
/* Tell the library which version we are expecting. */
elf_version (EV_CURRENT);
-
+
/* Set computed default values. */
db_path = string(getenv("HOME") ?: "/") + string("/.debuginfod.sqlite"); /* XDG? */
int rc = regcomp (& file_include_regex, ".*", REG_EXTENDED|REG_NOSUB); // match everything
@@ -2307,7 +2323,11 @@ main (int argc, char *argv[])
rc = regcomp (& file_exclude_regex, "^$", REG_EXTENDED|REG_NOSUB); // match nothing
if (rc != 0)
error (EXIT_FAILURE, 0, "regcomp failure: %d", rc);
-
+
+ const char* test_webapi_sleep_str = getenv("DEBUGINFOD_TEST_WEBAPI_SLEEP");
+ if (test_webapi_sleep_str)
+ test_webapi_sleep = atoi (test_webapi_sleep_str);
+
/* Parse and process arguments. */
int remaining;
argp_program_version_hook = print_version; // this works
@@ -2325,10 +2345,10 @@ main (int argc, char *argv[])
(void) signal (SIGTERM, signal_handler); // systemd
(void) signal (SIGUSR1, sigusr1_handler); // end-user
(void) signal (SIGUSR2, sigusr2_handler); // end-user
-
+
// do this before any threads start
scan_concurrency_sem = new semaphore(concurrency);
-
+
/* Get database ready. */
rc = sqlite3_open_v2 (db_path.c_str(), &db, (SQLITE_OPEN_READWRITE
|SQLITE_OPEN_CREATE
@@ -2355,7 +2375,7 @@ main (int argc, char *argv[])
if (rc != SQLITE_OK)
error (EXIT_FAILURE, 0,
"cannot create sharedprefix( function: %s", sqlite3_errmsg(db));
-
+
if (verbose > 3)
obatched(clog) << "ddl: " << DEBUGINFOD_SQLITE_DDL << endl;
rc = sqlite3_exec (db, DEBUGINFOD_SQLITE_DDL, NULL, NULL, NULL);
@@ -2365,6 +2385,8 @@ main (int argc, char *argv[])
"cannot run database schema ddl: %s", sqlite3_errmsg(db));
}
+ (void) debuginfod_set_progressfn (& debuginfod_find_progress);
+
// Start httpd server threads. Separate pool for IPv4 and IPv6, in
// case the host only has one protocol stack.
MHD_Daemon *d4 = MHD_start_daemon (MHD_USE_THREAD_PER_CONNECTION
@@ -2411,7 +2433,7 @@ main (int argc, char *argv[])
extra_ddl.push_back("create index if not exists " BUILDIDS "_r_sref_arc on " BUILDIDS "_r_sref(artifactsrc);");
extra_ddl.push_back("delete from " BUILDIDS "_r_sdef where not exists (select 1 from " BUILDIDS "_r_sref b where " BUILDIDS "_r_sdef.content = b.artifactsrc);");
extra_ddl.push_back("drop index if exists " BUILDIDS "_r_sref_arc;");
-
+
// NB: we don't maxigroom the _files interning table. It'd require a temp index on all the
// tables that have file foreign-keys, which is a lot.
@@ -2422,7 +2444,7 @@ main (int argc, char *argv[])
extra_ddl.push_back("vacuum;");
extra_ddl.push_back("pragma journal_mode=wal;");
}
-
+
// run extra -D sql if given
for (auto&& i: extra_ddl)
{
@@ -2433,15 +2455,15 @@ main (int argc, char *argv[])
error (0, 0,
"warning: cannot run database extra ddl %s: %s", i.c_str(), sqlite3_errmsg(db));
}
-
+
if (maxigroom)
obatched(clog) << "maxigroomed database" << endl;
-
+
obatched(clog) << "search concurrency " << concurrency << endl;
obatched(clog) << "rescan time " << rescan_s << endl;
obatched(clog) << "groom time " << groom_s << endl;
-
+
vector<pthread_t> source_file_scanner_threads;
vector<pthread_t> source_rpm_scanner_threads;
pthread_t groom_thread;
@@ -2470,29 +2492,29 @@ main (int argc, char *argv[])
source_rpm_scanner_threads.push_back(pt);
}
-
+
const char* du = getenv(DEBUGINFOD_URLS_ENV_VAR);
if (du && du[0] != '\0') // set to non-empty string?
obatched(clog) << "upstream debuginfod servers: " << du << endl;
-
+
/* Trivial main loop! */
while (! interrupted)
pause ();
if (verbose)
obatched(clog) << "stopping" << endl;
-
+
/* Stop all the web service threads. */
if (d4) MHD_stop_daemon (d4);
if (d6) MHD_stop_daemon (d6);
-
+
/* Join any source scanning threads. */
for (auto&& it : source_file_scanner_threads)
pthread_join (it, NULL);
for (auto&& it : source_rpm_scanner_threads)
pthread_join (it, NULL);
pthread_join (groom_thread, NULL);
-
+
/* With all threads known dead, we can clean up the global resources. */
delete scan_concurrency_sem;
rc = sqlite3_exec (db, DEBUGINFOD_SQLITE_CLEANUP_DDL, NULL, NULL, NULL);
diff --git a/debuginfod/debuginfod.h b/debuginfod/debuginfod.h
index d2bb0c94..0620f02a 100644
--- a/debuginfod/debuginfod.h
+++ b/debuginfod/debuginfod.h
@@ -49,17 +49,20 @@ extern "C" {
Caller must free() it later. */
int debuginfod_find_debuginfo (const unsigned char *build_id,
- int build_id_len,
- char **path);
-
-int debuginfod_find_executable (const unsigned char *build_id,
int build_id_len,
char **path);
+int debuginfod_find_executable (const unsigned char *build_id,
+ int build_id_len,
+ char **path);
+
int debuginfod_find_source (const unsigned char *build_id,
- int build_id_len,
- const char *filename,
- char **path);
+ int build_id_len,
+ const char *filename,
+ char **path);
+
+typedef int (*debuginfod_progressfn_t)(long a, long b);
+debuginfod_progressfn_t debuginfod_set_progressfn(debuginfod_progressfn_t fn);
#ifdef __cplusplus
}
diff --git a/debuginfod/libdebuginfod.map b/debuginfod/libdebuginfod.map
index 4d3daf32..b322cba6 100644
--- a/debuginfod/libdebuginfod.map
+++ b/debuginfod/libdebuginfod.map
@@ -4,4 +4,5 @@ ELFUTILS_0.178 {
debuginfod_find_debuginfo;
debuginfod_find_executable;
debuginfod_find_source;
+ debuginfod_set_progressfn;
} ELFUTILS_0;
diff --git a/doc/Makefile.am b/doc/Makefile.am
index 60e942cb..b5db01ff 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -24,7 +24,7 @@ notrans_dist_man1_MANS=
if DEBUGINFOD
notrans_dist_man8_MANS += debuginfod.8
-notrans_dist_man3_MANS += debuginfod_find_debuginfo.3 debuginfod_find_source.3 debuginfod_find_executable.3
+notrans_dist_man3_MANS += debuginfod_find_debuginfo.3 debuginfod_find_source.3 debuginfod_find_executable.3 debuginfod_set_progressfn.3
notrans_dist_man1_MANS += debuginfod-find.1
endif
diff --git a/doc/debuginfod-find.1 b/doc/debuginfod-find.1
index ab2f4d17..a759ecba 100644
--- a/doc/debuginfod-find.1
+++ b/doc/debuginfod-find.1
@@ -18,11 +18,11 @@
debuginfod-find \- request debuginfo-related data
.SH SYNOPSIS
-.B debuginfod-find debuginfo \fIBUILDID\fP
+.B debuginfod-find [\fIOPTION\fP]... debuginfo \fIBUILDID\fP
-.B debuginfod-find executable \fIBUILDID\fP
+.B debuginfod-find [\fIOPTION\fP]... executable \fIBUILDID\fP
-.B debuginfod-find source \fIBUILDID\fP \fI/FILENAME\fP
+.B debuginfod-find [\fIOPTION\fP]... source \fIBUILDID\fP \fI/FILENAME\fP
.SH DESCRIPTION
\fBdebuginfod-find\fP queries one or more \fBdebuginfod\fP servers for
@@ -91,6 +91,13 @@ l l.
\../bar/foo.c AT_comp_dir=/zoo/ source BUILDID /zoo//../bar/foo.c
.TE
+.SH "OPTIONS"
+
+.TP
+.B "\-v"
+Increase verbosity, including printing frequent download-progress messages.
+
+
.SH "SECURITY"
debuginfod-find \fBdoes not\fP include any particular security
diff --git a/doc/debuginfod_find_debuginfo.3 b/doc/debuginfod_find_debuginfo.3
index e0923d8d..d8d9236e 100644
--- a/doc/debuginfod_find_debuginfo.3
+++ b/doc/debuginfod_find_debuginfo.3
@@ -13,7 +13,7 @@
.RE
..
-.TH DEBUGINFOD_FIND_DEBUGINFO 3
+.TH DEBUGINFOD_FIND_* 3
.SH NAME
debuginfod_find_debuginfo \- request debuginfo from debuginfod
@@ -21,9 +21,13 @@ debuginfod_find_debuginfo \- request debuginfo from debuginfod
.nf
.B #include <elfutils/debuginfod.h>
.PP
-.BI "debuginfod_find_debuginfo(const unsigned char *" build_id ", int " build_id_len ", char ** " path ");"
-.BI "debuginfod_find_executable(const unsigned char *" build_id ", int " build_id_len ", char ** " path ");"
-.BI "debuginfod_find_source(const unsigned char *" build_id ", int " build_id_len ", const char *" filename ", char ** " path ");"
+.BI "int debuginfod_find_debuginfo(const unsigned char *" build_id ", int " build_id_len ", char ** " path ");"
+.BI "int debuginfod_find_executable(const unsigned char *" build_id ", int " build_id_len ", char ** " path ");"
+.BI "int debuginfod_find_source(const unsigned char *" build_id ", int " build_id_len ", const char *" filename ", char ** " path ");"
+.BI "typedef int (*debuginfod_progressfn_t)(long a, long b);"
+.BI "debuginfod_progressfn_t debuginfod_set_progressfn(debuginfod_progressfn_t " progressfn ");"
+
+Link with \fB-ldebuginfod\fP.
.SH DESCRIPTION
.BR debuginfod_find_debuginfo (),
@@ -57,9 +61,41 @@ debuginfod needs to see too.
If \fIpath\fP is not NULL and the query is successful, \fIpath\fP is set
to the path of the file in the cache. The caller must \fBfree\fP() this value.
-The URLs in \fB$DEBUGINFOD_URLS\fP are queried in parallel. As soon as a
-debuginfod server begins transfering the target file all of the connections
-to the other servers are closed.
+The URLs in \fB$DEBUGINFOD_URLS\fP may be queried in parallel. As soon
+as a debuginfod server begins transferring the target file all of the
+connections to the other servers are closed.
+
+These functions are MT-safe.
+
+.SH "RETURN VALUE"
+If a find family function is successful, the resulting file is saved
+to the client cache and a file descriptor to that file is returned.
+The caller needs to \fBclose\fP() this descriptor. Otherwise, a
+negative error code is returned.
+
+.SH "PROGRESS CALLBACK"
+
+As the \fBdebuginfod_find_*\fP() functions may block for seconds or longer, a progress
+callback function is called periodically, if configured with
+.BR debuginfod_set_progressfn ().
+This function sets a new progress callback function (or NULL) and
+returns the previously set function (or NULL). This function may be
+MT-unsafe.
+
+The given callback function is called from the context of each thread
+that is invoking any of the other lookup functions. It is given two
+numeric parameters that, if thought of as a numerator \fIa\fP and
+denominator \fIb\fP, together represent a completion fraction
+\fIa/b\fP. The denominator may be zero initially, until a quantity
+such as an exact download size becomes known.
+
+The progress callback function is also the supported way to
+\fIinterrupt\fP the download operation. (The library does \fInot\fP
+modify or trigger signals.) The progress callback must return 0 to
+continue the work, or any other value to stop work as soon as
+possible. Consequently, the \fBdebuginfod_find_*\fP() function will
+likely return with an error, but might still succeed.
+
.SH "CACHE"
If the query is successful, the \fBdebuginfod_find_*\fP() functions save
@@ -108,11 +144,6 @@ This environment variable governs the location of the cache where
downloaded files are kept. It is cleaned periodically as this
program is reexecuted. The default is $HOME/.debuginfod_client_cache.
-.SH "RETURN VALUE"
-If the query is successful, these functions save the target file
-to the client cache and return a file descriptor to that file.
-Otherwise an error code is returned.
-
.SH "ERRORS"
The following list is not comprehensive. Error codes may also
originate from calls to various C Library functions.
diff --git a/doc/debuginfod_set_progressfn.3 b/doc/debuginfod_set_progressfn.3
new file mode 100644
index 00000000..16279936
--- /dev/null
+++ b/doc/debuginfod_set_progressfn.3
@@ -0,0 +1 @@
+.so man3/debuginfod_find_debuginfo.3
diff --git a/tests/ChangeLog b/tests/ChangeLog
index a5e57282..42dee5c2 100644
--- a/tests/ChangeLog
+++ b/tests/ChangeLog
@@ -1,3 +1,7 @@
+2019-11-04 Frank Ch. Eigler <fche@redhat.com>
+
+ * run-debuginfod-find.sh: Test debuginfod-find -v progress mode.
+
2019-10-28 Aaron Merey <amerey@redhat.com>
Frank Ch. Eigler <fche@redhat.com>
diff --git a/tests/run-debuginfod-find.sh b/tests/run-debuginfod-find.sh
index 145c704a..eb32def6 100755
--- a/tests/run-debuginfod-find.sh
+++ b/tests/run-debuginfod-find.sh
@@ -42,7 +42,8 @@ ldpath=`testrun sh -c 'echo $LD_LIBRARY_PATH'`
mkdir F R
# not tempfiles F R - they are directories which we clean up manually
-env DEBUGINFOD_TEST_WEBAPI_SLEEP=3 LD_LIBRARY_PATH=$ldpath DEBUGINFOD_URLS= ${abs_builddir}/../debuginfod/debuginfod -F -R -vvvv -d $DB -p $PORT1 -t0 -g0 R F &
+env DEBUGINFOD_TEST_WEBAPI_SLEEP=3 LD_LIBRARY_PATH=$ldpath DEBUGINFOD_URLS= ${abs_builddir}/../debuginfod/debuginfod -F -R -vvvv -d $DB \
+-p $PORT1 -t0 -g0 R F &
PID1=$!
sleep 3
export DEBUGINFOD_URLS=http://localhost:$PORT1/ # or without trailing /
@@ -110,8 +111,11 @@ kill -USR1 $PID1
sleep 3
# Rerun same tests for the prog2 binary
-filename=`testrun ${abs_top_builddir}/debuginfod/debuginfod-find debuginfo $BUILDID2`
+filename=`testrun ${abs_top_builddir}/debuginfod/debuginfod-find -v debuginfo $BUILDID2 2>vlog`
cmp $filename F/prog2
+cat vlog
+grep -q Progress vlog
+tempfiles vlog
filename=`testrun ${abs_top_builddir}/debuginfod/debuginfod-find executable $BUILDID2`
cmp $filename F/prog2
filename=`testrun ${abs_top_builddir}/debuginfod/debuginfod-find source $BUILDID2 ${PWD}/prog2.c`