// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2015 Red Hat
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#include "common/debug.h"
#include "mds/mdstypes.h"
#include "mds/CInode.h"
#include "mds/MDCache.h"

#include "PurgeQueue.h"

#include <string.h>

#define dout_context cct
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix _prefix(_dout, rank) << __func__ << ": "
static ostream& _prefix(std::ostream *_dout, mds_rank_t rank) {
  return *_dout << "mds." << rank << ".purge_queue ";
}

void PurgeItem::encode(bufferlist &bl) const
{
  ENCODE_START(1, 1, bl);
  ::encode((uint8_t)action, bl);
  ::encode(ino, bl);
  ::encode(size, bl);
  ::encode(layout, bl, CEPH_FEATURE_FS_FILE_LAYOUT_V2);
  ::encode(old_pools, bl);
  ::encode(snapc, bl);
  ::encode(fragtree, bl);
  ENCODE_FINISH(bl);
}

void PurgeItem::decode(bufferlist::iterator &p)
{
  DECODE_START(1, p);
  ::decode((uint8_t&)action, p);
  ::decode(ino, p);
  ::decode(size, p);
  ::decode(layout, p);
  ::decode(old_pools, p);
  ::decode(snapc, p);
  ::decode(fragtree, p);
  DECODE_FINISH(p);
}

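// Note on compatibility: ENCODE_START(1, 1, bl) stamps struct version 1 /
// compat version 1. A hedged sketch of how a new field would usually be
// added (not present in this version of the code): bump the version,
// append the field, and decode it conditionally on struct_v, e.g.
//
//   ENCODE_START(2, 1, bl);
//   // ...existing fields...
//   ::encode(new_field, bl);      // hypothetical field
//   ENCODE_FINISH(bl);
//
//   DECODE_START(2, p);
//   // ...existing fields...
//   if (struct_v >= 2)
//     ::decode(new_field, p);
//   DECODE_FINISH(p);
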
// TODO: if Objecter has any slow requests, take that as a hint and
// slow down our rate of purging (keep accepting pushes though)
PurgeQueue::PurgeQueue(
      CephContext *cct_,
      mds_rank_t rank_,
      const int64_t metadata_pool_,
      Objecter *objecter_,
      Context *on_error_)
  :
    cct(cct_),
    rank(rank_),
    lock("PurgeQueue"),
    metadata_pool(metadata_pool_),
    finisher(cct, "PurgeQueue", "PQ_Finisher"),
    timer(cct, lock),
    filer(objecter_, &finisher),
    objecter(objecter_),
    journaler("pq", MDS_INO_PURGE_QUEUE + rank, metadata_pool,
              CEPH_FS_ONDISK_MAGIC, objecter_, nullptr, 0,
              &finisher),
    on_error(on_error_),
    ops_in_flight(0),
    max_purge_ops(0),
    drain_initial(0),
    draining(false),
    delayed_flush(nullptr),
    recovered(false)
{
  assert(cct != nullptr);
  assert(on_error != nullptr);
  assert(objecter != nullptr);
  journaler.set_write_error_handler(on_error);
}

PurgeQueue::~PurgeQueue()
{
  if (logger) {
    g_ceph_context->get_perfcounters_collection()->remove(logger.get());
  }
  delete on_error;
}

void PurgeQueue::create_logger()
{
  PerfCountersBuilder pcb(g_ceph_context, "purge_queue", l_pq_first, l_pq_last);

  pcb.add_u64_counter(l_pq_executed, "pq_executed", "Purge queue tasks executed",
                      "purg", PerfCountersBuilder::PRIO_INTERESTING);

  pcb.set_prio_default(PerfCountersBuilder::PRIO_USEFUL);
  pcb.add_u64(l_pq_executing_ops, "pq_executing_ops", "Purge queue ops in flight");
  pcb.add_u64(l_pq_executing, "pq_executing", "Purge queue tasks in flight");

  logger.reset(pcb.create_perf_counters());
  g_ceph_context->get_perfcounters_collection()->add(logger.get());
}

void PurgeQueue::init()
{
  Mutex::Locker l(lock);

  assert(logger != nullptr);

  finisher.start();
  timer.init();
}

void PurgeQueue::activate()
{
  Mutex::Locker l(lock);

  if (readonly) {
    dout(10) << "skipping activate: PurgeQueue is readonly" << dendl;
    return;
  }

  if (journaler.get_read_pos() == journaler.get_write_pos())
    return;

  if (in_flight.empty()) {
    dout(4) << "start work (by drain)" << dendl;
    finisher.queue(new FunctionContext([this](int r) {
      Mutex::Locker l(lock);
      _consume();
    }));
  }
}

void PurgeQueue::shutdown()
{
  Mutex::Locker l(lock);

  journaler.shutdown();
  timer.shutdown();
  finisher.stop();
}

void PurgeQueue::open(Context *completion)
{
  dout(4) << "opening" << dendl;

  Mutex::Locker l(lock);

  if (completion)
    waiting_for_recovery.push_back(completion);

  journaler.recover(new FunctionContext([this](int r){
    if (r == -ENOENT) {
      dout(1) << "Purge Queue not found, assuming this is an upgrade and "
                 "creating it." << dendl;
      create(nullptr);
    } else if (r == 0) {
      Mutex::Locker l(lock);
      dout(4) << "open complete" << dendl;

      // Journaler only guarantees that entries before the head's write_pos
      // have been fully flushed. Before appending new entries, we need to
      // find and drop any partially written entry.
      if (journaler.last_committed.write_pos < journaler.get_write_pos()) {
        dout(4) << "recovering write_pos" << dendl;
        journaler.set_read_pos(journaler.last_committed.write_pos);
        _recover();
        return;
      }

      journaler.set_writeable();
      recovered = true;
      finish_contexts(g_ceph_context, waiting_for_recovery);
    } else {
      derr << "Error " << r << " loading Journaler" << dendl;
      _go_readonly(r);
    }
  }));
}

void PurgeQueue::wait_for_recovery(Context* c)
{
  Mutex::Locker l(lock);
  if (recovered) {
    c->complete(0);
  } else if (readonly) {
    dout(10) << "cannot wait for recovery: PurgeQueue is readonly" << dendl;
    c->complete(-EROFS);
  } else {
    waiting_for_recovery.push_back(c);
  }
}

void PurgeQueue::_recover()
{
  assert(lock.is_locked_by_me());

  // Journaler::is_readable() adjusts write_pos if a partial entry is
  // encountered
  while (1) {
    if (!journaler.is_readable() &&
        !journaler.get_error() &&
        journaler.get_read_pos() < journaler.get_write_pos()) {
      journaler.wait_for_readable(new FunctionContext([this](int r) {
        Mutex::Locker l(lock);
        _recover();
      }));
      return;
    }

    if (journaler.get_error()) {
      int r = journaler.get_error();
      derr << "Error " << r << " recovering write_pos" << dendl;
      _go_readonly(r);
      return;
    }

    if (journaler.get_read_pos() == journaler.get_write_pos()) {
      dout(4) << "write_pos recovered" << dendl;
      // restore original read_pos
      journaler.set_read_pos(journaler.last_committed.expire_pos);
      journaler.set_writeable();
      recovered = true;
      finish_contexts(g_ceph_context, waiting_for_recovery);
      return;
    }

    bufferlist bl;
    bool readable = journaler.try_read_entry(bl);
    assert(readable);  // we checked earlier
  }
}

void PurgeQueue::create(Context *fin)
{
  dout(4) << "creating" << dendl;
  Mutex::Locker l(lock);

  if (fin)
    waiting_for_recovery.push_back(fin);

  file_layout_t layout = file_layout_t::get_default();
  layout.pool_id = metadata_pool;
  journaler.set_writeable();
  journaler.create(&layout, JOURNAL_FORMAT_RESILIENT);
  journaler.write_head(new FunctionContext([this](int r) {
    Mutex::Locker l(lock);
    if (r) {
      _go_readonly(r);
    } else {
      recovered = true;
      finish_contexts(g_ceph_context, waiting_for_recovery);
    }
  }));
}

/**
 * The `completion` context will always be called back via a Finisher
 */
void PurgeQueue::push(const PurgeItem &pi, Context *completion)
{
  dout(4) << "pushing inode 0x" << std::hex << pi.ino << std::dec << dendl;
  Mutex::Locker l(lock);

  if (readonly) {
    dout(10) << "cannot push inode: PurgeQueue is readonly" << dendl;
    completion->complete(-EROFS);
    return;
  }

  // Callers should have waited for open() before using us
  assert(!journaler.is_readonly());

  bufferlist bl;

  ::encode(pi, bl);
  journaler.append_entry(bl);
  journaler.wait_for_flush(completion);

  // Maybe go ahead and do something with it right away
  bool could_consume = _consume();
  if (!could_consume) {
    // Usually it is not necessary to explicitly flush here, because the
    // reader will get flushes generated inside Journaler::is_readable.
    // However, if we remain in a _can_consume()==false state for a long
    // period then we should flush in order to allow MDCache to drop its
    // strays rather than having them wait for the purge queue to progress.
    if (!delayed_flush) {
      delayed_flush = new FunctionContext([this](int r){
        delayed_flush = nullptr;
        journaler.flush();
      });

      timer.add_event_after(
          g_conf->mds_purge_queue_busy_flush_period,
          delayed_flush);
    }
  }
}

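// Usage sketch (illustrative only, not part of this file): after an
// unlink commits, a caller would typically fill a PurgeItem from the
// inode and push it, getting its completion called back via the
// Finisher once the entry is durable. `in`, `realm_snap_context` and
// `gather` below are assumptions, not names from this codebase.
//
//   PurgeItem item;
//   item.action = PurgeItem::PURGE_FILE;
//   item.ino = in->ino();
//   item.size = in->inode.size;
//   item.layout = in->inode.layout;
//   item.snapc = realm_snap_context;          // hypothetical
//   purge_queue.push(item, gather.new_sub());
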
uint32_t PurgeQueue::_calculate_ops(const PurgeItem &item) const
{
  uint32_t ops_required = 0;
  if (item.action == PurgeItem::PURGE_DIR) {
    // Directory, count dirfrags to be deleted
    std::list<frag_t> ls;
    if (!item.fragtree.is_leaf(frag_t())) {
      item.fragtree.get_leaves(ls);
    }
    // One for the root, plus any leaves
    ops_required = 1 + ls.size();
  } else {
    // File, work out concurrent Filer::purge deletes
    const uint64_t num = (item.size > 0) ?
      Striper::get_num_objects(item.layout, item.size) : 1;

    ops_required = MIN(num, g_conf->filer_max_purge_ops);

    // Account for removing (or zeroing) backtrace
    ops_required += 1;

    // Account for deletions for old pools
    if (item.action != PurgeItem::TRUNCATE_FILE) {
      ops_required += item.old_pools.size();
    }
  }

  return ops_required;
}

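// Worked example (assumed values, not from this file): purging a 40 MB
// file striped over a 4 MB object_size spans 10 objects, so with
// filer_max_purge_ops >= 10 this costs 10 (data) + 1 (backtrace) = 11
// ops, plus one more per old pool. A dirfrag tree split into 4 leaves
// costs 1 (root) + 4 (leaves) = 5 ops.
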
bool PurgeQueue::_can_consume()
{
  if (readonly) {
    dout(10) << "can't consume: PurgeQueue is readonly" << dendl;
    return false;
  }

  dout(20) << ops_in_flight << "/" << max_purge_ops << " ops, "
           << in_flight.size() << "/" << g_conf->mds_max_purge_files
           << " files" << dendl;

  if (in_flight.size() == 0 && cct->_conf->mds_max_purge_files > 0) {
    // Always permit consumption if nothing is in flight, so that the ops
    // limit can never be so low as to forbid all progress (unless the
    // administrator has deliberately paused purging by setting the max
    // purge files to zero).
    return true;
  }

  if (ops_in_flight >= max_purge_ops) {
    dout(20) << "Throttling on op limit " << ops_in_flight << "/"
             << max_purge_ops << dendl;
    return false;
  }

  if (in_flight.size() >= cct->_conf->mds_max_purge_files) {
    dout(20) << "Throttling on item limit " << in_flight.size()
             << "/" << cct->_conf->mds_max_purge_files << dendl;
    return false;
  } else {
    return true;
  }
}

void PurgeQueue::_go_readonly(int r)
{
  if (readonly) return;
  dout(1) << "going readonly because internal IO failed: " << strerror(-r) << dendl;
  readonly = true;
  on_error->complete(r);
  on_error = nullptr;
  journaler.set_readonly();
  finish_contexts(g_ceph_context, waiting_for_recovery, r);
}

bool PurgeQueue::_consume()
{
  assert(lock.is_locked_by_me());

  bool could_consume = false;
  while (_can_consume()) {

    if (delayed_flush) {
      // We are now going to read from the journal, so any proactive
      // flush is no longer necessary. This is not functionally necessary
      // but it can avoid generating extra fragmented flush IOs.
      timer.cancel_event(delayed_flush);
      delayed_flush = nullptr;
    }

    if (int r = journaler.get_error()) {
      derr << "Error " << r << " reading from the journal" << dendl;
      _go_readonly(r);
      return could_consume;
    }

    if (!journaler.is_readable()) {
      dout(10) << " not readable right now" << dendl;
      // Because we are the writer and the reader of the journal
      // via the same Journaler instance, we never need to reread_head
      if (!journaler.have_waiter()) {
        journaler.wait_for_readable(new FunctionContext([this](int r) {
          Mutex::Locker l(lock);
          if (r == 0) {
            _consume();
          } else if (r != -EAGAIN) {
            _go_readonly(r);
          }
        }));
      }

      return could_consume;
    }

    could_consume = true;
    // The journaler is readable: consume an entry
    bufferlist bl;
    bool readable = journaler.try_read_entry(bl);
    assert(readable);  // we checked earlier

    dout(20) << " decoding entry" << dendl;
    PurgeItem item;
    bufferlist::iterator q = bl.begin();
    try {
      ::decode(item, q);
    } catch (const buffer::error &err) {
      derr << "Decode error at read_pos=0x" << std::hex
           << journaler.get_read_pos() << dendl;
      _go_readonly(-EIO);
      // do not execute a partially decoded item
      return could_consume;
    }
    dout(20) << " executing item (0x" << std::hex << item.ino
             << std::dec << ")" << dendl;
    _execute_item(item, journaler.get_read_pos());
  }

  dout(10) << " cannot consume right now" << dendl;

  return could_consume;
}

void PurgeQueue::_execute_item(
    const PurgeItem &item,
    uint64_t expire_to)
{
  assert(lock.is_locked_by_me());

  in_flight[expire_to] = item;
  logger->set(l_pq_executing, in_flight.size());
  auto ops = _calculate_ops(item);
  ops_in_flight += ops;
  logger->set(l_pq_executing_ops, ops_in_flight);

  SnapContext nullsnapc;

  C_GatherBuilder gather(cct);
  if (item.action == PurgeItem::PURGE_FILE) {
    if (item.size > 0) {
      uint64_t num = Striper::get_num_objects(item.layout, item.size);
      dout(10) << " 0~" << item.size << " objects 0~" << num
               << " snapc " << item.snapc << " on " << item.ino << dendl;
      filer.purge_range(item.ino, &item.layout, item.snapc,
                        0, num, ceph::real_clock::now(), 0,
                        gather.new_sub());
    }

    // remove the backtrace object if it was not purged
    object_t oid = CInode::get_object_name(item.ino, frag_t(), "");
    if (!gather.has_subs() || !item.layout.pool_ns.empty()) {
      object_locator_t oloc(item.layout.pool_id);
      dout(10) << " remove backtrace object " << oid
               << " pool " << oloc.pool << " snapc " << item.snapc << dendl;
      objecter->remove(oid, oloc, item.snapc,
                       ceph::real_clock::now(), 0,
                       gather.new_sub());
    }

    // remove old backtrace objects
    for (const auto &p : item.old_pools) {
      object_locator_t oloc(p);
      dout(10) << " remove backtrace object " << oid
               << " old pool " << p << " snapc " << item.snapc << dendl;
      objecter->remove(oid, oloc, item.snapc,
                       ceph::real_clock::now(), 0,
                       gather.new_sub());
    }
  } else if (item.action == PurgeItem::PURGE_DIR) {
    object_locator_t oloc(metadata_pool);
    std::list<frag_t> frags;
    if (!item.fragtree.is_leaf(frag_t()))
      item.fragtree.get_leaves(frags);
    frags.push_back(frag_t());
    for (const auto &frag : frags) {
      object_t oid = CInode::get_object_name(item.ino, frag, "");
      dout(10) << " remove dirfrag " << oid << dendl;
      objecter->remove(oid, oloc, nullsnapc,
                       ceph::real_clock::now(),
                       0, gather.new_sub());
    }
  } else if (item.action == PurgeItem::TRUNCATE_FILE) {
    const uint64_t num = Striper::get_num_objects(item.layout, item.size);
    dout(10) << " 0~" << item.size << " objects 0~" << num
             << " snapc " << item.snapc << " on " << item.ino << dendl;

    // keep backtrace object
    if (num > 1) {
      filer.purge_range(item.ino, &item.layout, item.snapc,
                        1, num - 1, ceph::real_clock::now(),
                        0, gather.new_sub());
    }
    filer.zero(item.ino, &item.layout, item.snapc,
               0, item.layout.object_size,
               ceph::real_clock::now(),
               0, true, gather.new_sub());
  } else {
    derr << "Invalid item (action=" << item.action << ") in purge queue, "
            "dropping it" << dendl;
    ops_in_flight -= ops;
    logger->set(l_pq_executing_ops, ops_in_flight);
    in_flight.erase(expire_to);
    logger->set(l_pq_executing, in_flight.size());
    return;
  }
  assert(gather.has_subs());

  gather.set_finisher(new C_OnFinisher(
      new FunctionContext([this, expire_to](int r){
        Mutex::Locker l(lock);
        _execute_item_complete(expire_to);

        _consume();

        // Have we gone idle? If so, do an extra write_head now instead of
        // waiting for next flush after journaler_write_head_interval.
        // Also do this periodically even if not idle, so that the persisted
        // expire_pos doesn't fall too far behind our progress when consuming
        // a very long queue.
        if (in_flight.empty() || journaler.write_head_needed()) {
          journaler.write_head(nullptr);
        }
      }), &finisher));

  gather.activate();
}

void PurgeQueue::_execute_item_complete(
    uint64_t expire_to)
{
  assert(lock.is_locked_by_me());
  dout(10) << "complete at 0x" << std::hex << expire_to << std::dec << dendl;
  assert(in_flight.count(expire_to) == 1);

  auto iter = in_flight.find(expire_to);
  assert(iter != in_flight.end());
  if (iter == in_flight.begin()) {
    // This was the lowest journal position in flight, so we can now
    // safely expire the journal up to here.
    dout(10) << "expiring to 0x" << std::hex << expire_to << std::dec << dendl;
    journaler.set_expire_pos(expire_to);
  } else {
    // This is completely fine, we're not supposed to purge files in
    // order when doing them in parallel.
    dout(10) << "non-sequential completion, not expiring anything" << dendl;
  }

  ops_in_flight -= _calculate_ops(iter->second);
  logger->set(l_pq_executing_ops, ops_in_flight);

  dout(10) << "completed item for ino 0x" << std::hex << iter->second.ino
           << std::dec << dendl;

  in_flight.erase(iter);
  logger->set(l_pq_executing, in_flight.size());
  dout(10) << "in_flight.size() now " << in_flight.size() << dendl;

  logger->inc(l_pq_executed);
}

void PurgeQueue::update_op_limit(const MDSMap &mds_map)
{
  Mutex::Locker l(lock);

  if (readonly) {
    dout(10) << "skipping; PurgeQueue is readonly" << dendl;
    return;
  }

  uint64_t pg_count = 0;
  objecter->with_osdmap([&](const OSDMap& o) {
    // Number of PGs across all data pools
    const std::vector<int64_t> &data_pools = mds_map.get_data_pools();
    for (const auto dp : data_pools) {
      if (o.get_pg_pool(dp) == NULL) {
        // It is possible that we have an older OSDMap than MDSMap,
        // because we don't start watching every OSDMap until after
        // MDSRank is initialized
        dout(4) << " data pool " << dp << " not found in OSDMap" << dendl;
        continue;
      }
      pg_count += o.get_pg_num(dp);
    }
  });

  // Work out a limit based on n_pgs / n_mdss, multiplied by the user's
  // preference for how many ops per PG
  max_purge_ops = uint64_t(((double)pg_count / (double)mds_map.get_max_mds()) *
                           cct->_conf->mds_max_purge_ops_per_pg);

  // The user may also specify a hard limit; apply it if set.
  if (cct->_conf->mds_max_purge_ops) {
    max_purge_ops = MIN(max_purge_ops, cct->_conf->mds_max_purge_ops);
  }
}

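// Worked example (assumed values, not from this file): with 512 PGs
// across the data pools, max_mds = 2, and mds_max_purge_ops_per_pg = 0.5,
// the computed limit is (512 / 2) * 0.5 = 128 concurrent purge ops; an
// mds_max_purge_ops setting of 64 would then cap it at 64.
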
void PurgeQueue::handle_conf_change(const struct md_config_t *conf,
                                    const std::set <std::string> &changed,
                                    const MDSMap &mds_map)
{
  if (changed.count("mds_max_purge_ops")
      || changed.count("mds_max_purge_ops_per_pg")) {
    update_op_limit(mds_map);
  } else if (changed.count("mds_max_purge_files")) {
    Mutex::Locker l(lock);

    if (in_flight.empty()) {
      // We might have gone from zero to a finite limit, so
      // might need to kick off consume.
      dout(4) << "maybe start work again (max_purge_files="
              << conf->mds_max_purge_files << ")" << dendl;
      finisher.queue(new FunctionContext([this](int r){
        Mutex::Locker l(lock);
        _consume();
      }));
    }
  }
}

bool PurgeQueue::drain(
    uint64_t *progress,
    uint64_t *progress_total,
    size_t *in_flight_count
    )
{
  Mutex::Locker l(lock);

  if (readonly) {
    dout(10) << "skipping drain; PurgeQueue is readonly" << dendl;
    return true;
  }

  assert(progress != nullptr);
  assert(progress_total != nullptr);
  assert(in_flight_count != nullptr);

  const bool done = in_flight.empty() && (
      journaler.get_read_pos() == journaler.get_write_pos());
  if (done) {
    return true;
  }

  const uint64_t bytes_remaining = journaler.get_write_pos()
                                   - journaler.get_read_pos();

  if (!draining) {
    // Start of draining: remember how much there was outstanding at
    // this point so that we can give a progress percentage later
    draining = true;

    // Lift the op throttle, as this daemon now has nothing to do but
    // drain the purge queue, so do it as fast as we can.
    max_purge_ops = 0xffff;
  }

  drain_initial = max(bytes_remaining, drain_initial);

  *progress = drain_initial - bytes_remaining;
  *progress_total = drain_initial;
  *in_flight_count = in_flight.size();

  return false;
}

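// Usage sketch (illustrative only, not part of this file): a shutting-down
// caller would typically poll drain() until it reports completion, e.g.
//
//   uint64_t progress = 0, total = 0;
//   size_t in_flight = 0;
//   if (purge_queue.drain(&progress, &total, &in_flight)) {
//     // queue fully drained; safe to continue stopping
//   } else {
//     dout(7) << "draining purge queue: " << progress << "/" << total
//             << " (" << in_flight << " in flight)" << dendl;
//   }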