// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2015 Red Hat
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#include "common/debug.h"
#include "mds/mdstypes.h"
#include "mds/CInode.h"
#include "mds/MDCache.h"

#include "PurgeQueue.h"

#include <string.h>

#define dout_context cct
#define dout_subsys ceph_subsys_mds
#undef dout_prefix
#define dout_prefix _prefix(_dout, rank) << __func__ << ": "
static std::ostream& _prefix(std::ostream *_dout, mds_rank_t rank) {
  return *_dout << "mds." << rank << ".purge_queue ";
}

const std::map<std::string, PurgeItem::Action> PurgeItem::actions = {
  {"NONE", PurgeItem::NONE},
  {"PURGE_FILE", PurgeItem::PURGE_FILE},
  {"TRUNCATE_FILE", PurgeItem::TRUNCATE_FILE},
  {"PURGE_DIR", PurgeItem::PURGE_DIR}
};

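// Wire-format note (inferred from the code below, not from separate docs):
// PurgeItem uses the usual versioned ENCODE_START/DECODE_START scheme;
// version 2 appended `stamp`, which decode() reads only when struct_v >= 2.
// encode() then appends pad_size bytes of 0xff (pad_size is defined in the
// header), presumably reserving space so future fields don't change the
// entry's on-journal size; DECODE_FINISH skips any bytes the decoder does
// not consume, so the padding (and any future fields) are tolerated.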
void PurgeItem::encode(bufferlist &bl) const
{
  ENCODE_START(2, 1, bl);
  encode((uint8_t)action, bl);
  encode(ino, bl);
  encode(size, bl);
  encode(layout, bl, CEPH_FEATURE_FS_FILE_LAYOUT_V2);
  encode(old_pools, bl);
  encode(snapc, bl);
  encode(fragtree, bl);
  encode(stamp, bl);
  static const uint8_t pad = 0xff;
  for (unsigned int i = 0; i < pad_size; i++) {
    encode(pad, bl);
  }
  ENCODE_FINISH(bl);
}

void PurgeItem::decode(bufferlist::const_iterator &p)
{
  DECODE_START(2, p);
  decode((uint8_t&)action, p);
  decode(ino, p);
  decode(size, p);
  decode(layout, p);
  decode(old_pools, p);
  decode(snapc, p);
  decode(fragtree, p);
  if (struct_v >= 2) {
    decode(stamp, p);
  }
  DECODE_FINISH(p);
}

// TODO: if Objecter has any slow requests, take that as a hint and
// slow down our rate of purging (keep accepting pushes though)
PurgeQueue::PurgeQueue(
      CephContext *cct_,
      mds_rank_t rank_,
      const int64_t metadata_pool_,
      Objecter *objecter_,
      Context *on_error_)
  :
    cct(cct_),
    rank(rank_),
    lock("PurgeQueue"),
    metadata_pool(metadata_pool_),
    finisher(cct, "PurgeQueue", "PQ_Finisher"),
    timer(cct, lock),
    filer(objecter_, &finisher),
    objecter(objecter_),
    journaler("pq", MDS_INO_PURGE_QUEUE + rank, metadata_pool,
              CEPH_FS_ONDISK_MAGIC, objecter_, nullptr, 0,
              &finisher),
    on_error(on_error_),
    ops_in_flight(0),
    max_purge_ops(0),
    drain_initial(0),
    draining(false),
    delayed_flush(nullptr),
    recovered(false)
{
  ceph_assert(cct != nullptr);
  ceph_assert(on_error != nullptr);
  ceph_assert(objecter != nullptr);
  journaler.set_write_error_handler(on_error);
}

PurgeQueue::~PurgeQueue()
{
  if (logger) {
    g_ceph_context->get_perfcounters_collection()->remove(logger.get());
  }
  delete on_error;
}

void PurgeQueue::create_logger()
{
  PerfCountersBuilder pcb(g_ceph_context, "purge_queue", l_pq_first, l_pq_last);

  pcb.add_u64_counter(l_pq_executed, "pq_executed", "Purge queue tasks executed",
                      "purg", PerfCountersBuilder::PRIO_INTERESTING);

  pcb.set_prio_default(PerfCountersBuilder::PRIO_USEFUL);
  pcb.add_u64(l_pq_executing_ops, "pq_executing_ops", "Purge queue ops in flight");
  pcb.add_u64(l_pq_executing, "pq_executing", "Purge queue tasks in flight");

  logger.reset(pcb.create_perf_counters());
  g_ceph_context->get_perfcounters_collection()->add(logger.get());
}

void PurgeQueue::init()
{
  std::lock_guard l(lock);

  ceph_assert(logger != nullptr);

  finisher.start();
  timer.init();
}

void PurgeQueue::activate()
{
  std::lock_guard l(lock);

  if (readonly) {
    dout(10) << "skipping activate: PurgeQueue is readonly" << dendl;
    return;
  }

  if (journaler.get_read_pos() == journaler.get_write_pos())
    return;

  if (in_flight.empty()) {
    dout(4) << "start work (by drain)" << dendl;
    finisher.queue(new FunctionContext([this](int r) {
      std::lock_guard l(lock);
      _consume();
    }));
  }
}

void PurgeQueue::shutdown()
{
  std::lock_guard l(lock);

  journaler.shutdown();
  timer.shutdown();
  finisher.stop();
}

void PurgeQueue::open(Context *completion)
{
  dout(4) << "opening" << dendl;

  std::lock_guard l(lock);

  if (completion)
    waiting_for_recovery.push_back(completion);

  journaler.recover(new FunctionContext([this](int r){
    if (r == -ENOENT) {
      dout(1) << "Purge Queue not found, assuming this is an upgrade and "
                 "creating it." << dendl;
      create(nullptr);
    } else if (r == 0) {
      std::lock_guard l(lock);
      dout(4) << "open complete" << dendl;

      // Journaler only guarantees entries before head write_pos have been
      // fully flushed. Before appending new entries, we need to find and
      // drop any partially written entry.
      if (journaler.last_committed.write_pos < journaler.get_write_pos()) {
        dout(4) << "recovering write_pos" << dendl;
        journaler.set_read_pos(journaler.last_committed.write_pos);
        _recover();
        return;
      }

      journaler.set_writeable();
      recovered = true;
      finish_contexts(g_ceph_context, waiting_for_recovery);
    } else {
      derr << "Error " << r << " loading Journaler" << dendl;
      _go_readonly(r);
    }
  }));
}

void PurgeQueue::wait_for_recovery(Context* c)
{
  std::lock_guard l(lock);
  if (recovered) {
    c->complete(0);
  } else if (readonly) {
    dout(10) << "cannot wait for recovery: PurgeQueue is readonly" << dendl;
    c->complete(-EROFS);
  } else {
    waiting_for_recovery.push_back(c);
  }
}

void PurgeQueue::_recover()
{
  ceph_assert(lock.is_locked_by_me());

  // Journaler::is_readable() adjusts write_pos if partial entry is encountered
  while (1) {
    if (!journaler.is_readable() &&
        !journaler.get_error() &&
        journaler.get_read_pos() < journaler.get_write_pos()) {
      journaler.wait_for_readable(new FunctionContext([this](int r) {
        std::lock_guard l(lock);
        _recover();
      }));
      return;
    }

    if (journaler.get_error()) {
      int r = journaler.get_error();
      derr << "Error " << r << " recovering write_pos" << dendl;
      _go_readonly(r);
      return;
    }

    if (journaler.get_read_pos() == journaler.get_write_pos()) {
      dout(4) << "write_pos recovered" << dendl;
      // restore original read_pos
      journaler.set_read_pos(journaler.last_committed.expire_pos);
      journaler.set_writeable();
      recovered = true;
      finish_contexts(g_ceph_context, waiting_for_recovery);
      return;
    }

    bufferlist bl;
    bool readable = journaler.try_read_entry(bl);
    ceph_assert(readable); // we checked earlier
  }
}

void PurgeQueue::create(Context *fin)
{
  dout(4) << "creating" << dendl;
  std::lock_guard l(lock);

  if (fin)
    waiting_for_recovery.push_back(fin);

  file_layout_t layout = file_layout_t::get_default();
  layout.pool_id = metadata_pool;
  journaler.set_writeable();
  journaler.create(&layout, JOURNAL_FORMAT_RESILIENT);
  journaler.write_head(new FunctionContext([this](int r) {
    std::lock_guard l(lock);
    if (r) {
      _go_readonly(r);
    } else {
      recovered = true;
      finish_contexts(g_ceph_context, waiting_for_recovery);
    }
  }));
}

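// Illustrative producer sketch for push() below (hypothetical code, not
// part of this file): a caller such as the MDS stray-handling machinery
// fills in a PurgeItem and hands it over together with a completion that
// fires once the entry is safely journaled, roughly:
//
//   PurgeItem item;
//   item.action = PurgeItem::PURGE_FILE;
//   item.ino = ino;
//   item.size = size;
//   item.layout = layout;
//   item.snapc = snapc;
//   item.stamp = ceph_clock_now();
//   purge_queue.push(item, new FunctionContext([](int r) {
//         // entry is durable in the queue; safe to drop the stray dentry
//       }));
//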
/**
 * The `completion` context will always be called back via a Finisher
 */
void PurgeQueue::push(const PurgeItem &pi, Context *completion)
{
  dout(4) << "pushing inode " << pi.ino << dendl;
  std::lock_guard l(lock);

  if (readonly) {
    dout(10) << "cannot push inode: PurgeQueue is readonly" << dendl;
    completion->complete(-EROFS);
    return;
  }

  // Callers should have waited for open() before using us
  ceph_assert(!journaler.is_readonly());

  bufferlist bl;

  encode(pi, bl);
  journaler.append_entry(bl);
  journaler.wait_for_flush(completion);

  // Maybe go ahead and do something with it right away
  bool could_consume = _consume();
  if (!could_consume) {
    // Usually, it is not necessary to explicitly flush here, because the reader
    // will get flushes generated inside Journaler::is_readable. However,
    // if we remain in a _can_consume()==false state for a long period then
    // we should flush in order to allow MDCache to drop its strays rather
    // than having them wait for purgequeue to progress.
    if (!delayed_flush) {
      delayed_flush = new FunctionContext([this](int r){
            delayed_flush = nullptr;
            journaler.flush();
          });

      timer.add_event_after(
          g_conf()->mds_purge_queue_busy_flush_period,
          delayed_flush);
    }
  }
}

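// Cost-model sketch (worked example with illustrative numbers): with a
// default 4 MiB object_size layout, a 40 MiB PURGE_FILE item spans
// Striper::get_num_objects() == 10 objects; if filer_max_purge_ops were 8,
// that counts as min(10, 8) = 8 ops, plus 1 for the backtrace object, plus
// one per old pool the file previously wrote to. A PURGE_DIR item instead
// costs one op for the root dirfrag plus one per fragtree leaf.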
uint32_t PurgeQueue::_calculate_ops(const PurgeItem &item) const
{
  uint32_t ops_required = 0;
  if (item.action == PurgeItem::PURGE_DIR) {
    // Directory, count dirfrags to be deleted
    frag_vec_t leaves;
    if (!item.fragtree.is_leaf(frag_t())) {
      item.fragtree.get_leaves(leaves);
    }
    // One for the root, plus any leaves
    ops_required = 1 + leaves.size();
  } else {
    // File, work out concurrent Filer::purge deletes
    const uint64_t num = (item.size > 0) ?
        Striper::get_num_objects(item.layout, item.size) : 1;

    ops_required = std::min(num, g_conf()->filer_max_purge_ops);

    // Account for removing (or zeroing) backtrace
    ops_required += 1;

    // Account for deletions for old pools
    if (item.action != PurgeItem::TRUNCATE_FILE) {
      ops_required += item.old_pools.size();
    }
  }

  return ops_required;
}

bool PurgeQueue::_can_consume()
{
  if (readonly) {
    dout(10) << "can't consume: PurgeQueue is readonly" << dendl;
    return false;
  }

  dout(20) << ops_in_flight << "/" << max_purge_ops << " ops, "
           << in_flight.size() << "/" << g_conf()->mds_max_purge_files
           << " files" << dendl;

  if (in_flight.size() == 0 && cct->_conf->mds_max_purge_files > 0) {
    // Always permit consumption if nothing is in flight, so that the ops
    // limit can never be so low as to forbid all progress (unless
    // administrator has deliberately paused purging by setting max
    // purge files to zero).
    return true;
  }

  if (ops_in_flight >= max_purge_ops) {
    dout(20) << "Throttling on op limit " << ops_in_flight << "/"
             << max_purge_ops << dendl;
    return false;
  }

  if (in_flight.size() >= cct->_conf->mds_max_purge_files) {
    dout(20) << "Throttling on item limit " << in_flight.size()
             << "/" << cct->_conf->mds_max_purge_files << dendl;
    return false;
  } else {
    return true;
  }
}

void PurgeQueue::_go_readonly(int r)
{
  if (readonly) return;
  dout(1) << "going readonly because internal IO failed: " << strerror(-r) << dendl;
  readonly = true;
  on_error->complete(r);
  on_error = nullptr;
  journaler.set_readonly();
  finish_contexts(g_ceph_context, waiting_for_recovery, r);
}

bool PurgeQueue::_consume()
{
  ceph_assert(lock.is_locked_by_me());

  bool could_consume = false;
  while (_can_consume()) {

    if (delayed_flush) {
      // We are now going to read from the journal, so any proactive
      // flush is no longer necessary. This is not functionally necessary
      // but it can avoid generating extra fragmented flush IOs.
      timer.cancel_event(delayed_flush);
      delayed_flush = nullptr;
    }

    if (int r = journaler.get_error()) {
      derr << "Error " << r << " from journaler" << dendl;
      _go_readonly(r);
      return could_consume;
    }

    if (!journaler.is_readable()) {
      dout(10) << " not readable right now" << dendl;
      // Because we are the writer and the reader of the journal
      // via the same Journaler instance, we never need to reread_head
      if (!journaler.have_waiter()) {
        journaler.wait_for_readable(new FunctionContext([this](int r) {
          std::lock_guard l(lock);
          if (r == 0) {
            _consume();
          } else if (r != -EAGAIN) {
            _go_readonly(r);
          }
        }));
      }

      return could_consume;
    }

    could_consume = true;
    // The journaler is readable: consume an entry
    bufferlist bl;
    bool readable = journaler.try_read_entry(bl);
    ceph_assert(readable); // we checked earlier

    dout(20) << " decoding entry" << dendl;
    PurgeItem item;
    auto q = bl.cbegin();
    try {
      decode(item, q);
    } catch (const buffer::error &err) {
      derr << "Decode error at read_pos=0x" << std::hex
           << journaler.get_read_pos() << dendl;
      _go_readonly(-EIO);
    }
    dout(20) << " executing item (" << item.ino << ")" << dendl;
    _execute_item(item, journaler.get_read_pos());
  }

  dout(10) << " cannot consume right now" << dendl;

  return could_consume;
}

void PurgeQueue::_execute_item(
    const PurgeItem &item,
    uint64_t expire_to)
{
  ceph_assert(lock.is_locked_by_me());

  in_flight[expire_to] = item;
  logger->set(l_pq_executing, in_flight.size());
  auto ops = _calculate_ops(item);
  ops_in_flight += ops;
  logger->set(l_pq_executing_ops, ops_in_flight);

  SnapContext nullsnapc;

  C_GatherBuilder gather(cct);
  if (item.action == PurgeItem::PURGE_FILE) {
    if (item.size > 0) {
      uint64_t num = Striper::get_num_objects(item.layout, item.size);
      dout(10) << " 0~" << item.size << " objects 0~" << num
               << " snapc " << item.snapc << " on " << item.ino << dendl;
      filer.purge_range(item.ino, &item.layout, item.snapc,
                        0, num, ceph::real_clock::now(), 0,
                        gather.new_sub());
    }

    // remove the backtrace object if it was not purged
    object_t oid = CInode::get_object_name(item.ino, frag_t(), "");
    if (!gather.has_subs() || !item.layout.pool_ns.empty()) {
      object_locator_t oloc(item.layout.pool_id);
      dout(10) << " remove backtrace object " << oid
               << " pool " << oloc.pool << " snapc " << item.snapc << dendl;
      objecter->remove(oid, oloc, item.snapc,
                       ceph::real_clock::now(), 0,
                       gather.new_sub());
    }

    // remove old backtrace objects
    for (const auto &p : item.old_pools) {
      object_locator_t oloc(p);
      dout(10) << " remove backtrace object " << oid
               << " old pool " << p << " snapc " << item.snapc << dendl;
      objecter->remove(oid, oloc, item.snapc,
                       ceph::real_clock::now(), 0,
                       gather.new_sub());
    }
  } else if (item.action == PurgeItem::PURGE_DIR) {
    object_locator_t oloc(metadata_pool);
    frag_vec_t leaves;
    if (!item.fragtree.is_leaf(frag_t()))
      item.fragtree.get_leaves(leaves);
    leaves.push_back(frag_t());
    for (const auto &leaf : leaves) {
      object_t oid = CInode::get_object_name(item.ino, leaf, "");
      dout(10) << " remove dirfrag " << oid << dendl;
      objecter->remove(oid, oloc, nullsnapc,
                       ceph::real_clock::now(),
                       0, gather.new_sub());
    }
  } else if (item.action == PurgeItem::TRUNCATE_FILE) {
    const uint64_t num = Striper::get_num_objects(item.layout, item.size);
    dout(10) << " 0~" << item.size << " objects 0~" << num
             << " snapc " << item.snapc << " on " << item.ino << dendl;

    // keep backtrace object
    if (num > 1) {
      filer.purge_range(item.ino, &item.layout, item.snapc,
                        1, num - 1, ceph::real_clock::now(),
                        0, gather.new_sub());
    }
    filer.zero(item.ino, &item.layout, item.snapc,
               0, item.layout.object_size,
               ceph::real_clock::now(),
               0, true, gather.new_sub());
  } else {
    derr << "Invalid item (action=" << item.action << ") in purge queue, "
            "dropping it" << dendl;
    ops_in_flight -= ops;
    logger->set(l_pq_executing_ops, ops_in_flight);
    in_flight.erase(expire_to);
    logger->set(l_pq_executing, in_flight.size());
    return;
  }
  ceph_assert(gather.has_subs());

  gather.set_finisher(new C_OnFinisher(
      new FunctionContext([this, expire_to](int r){
        std::lock_guard l(lock);
        _execute_item_complete(expire_to);

        _consume();

        // Have we gone idle? If so, do an extra write_head now instead of
        // waiting for next flush after journaler_write_head_interval.
        // Also do this periodically even if not idle, so that the persisted
        // expire_pos doesn't fall too far behind our progress when consuming
        // a very long queue.
        if (in_flight.empty() || journaler.write_head_needed()) {
          journaler.write_head(nullptr);
        }
      }), &finisher));

  gather.activate();
}

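// Expiry-order sketch: purges complete out of order, but expire_pos may
// only advance past entries whose purge has finished. Worked example with
// illustrative positions {0x100, 0x200, 0x300} in flight: completing 0x300
// first merely records it in pending_expire; completing 0x100 next expires
// to 0x100 (0x300 stays blocked behind in-flight 0x200); completing 0x200
// last drains pending_expire and expires all the way to 0x300.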
void PurgeQueue::_execute_item_complete(
    uint64_t expire_to)
{
  ceph_assert(lock.is_locked_by_me());
  dout(10) << "complete at 0x" << std::hex << expire_to << std::dec << dendl;
  ceph_assert(in_flight.count(expire_to) == 1);

  auto iter = in_flight.find(expire_to);
  ceph_assert(iter != in_flight.end());
  if (iter == in_flight.begin()) {
    uint64_t pos = expire_to;
    if (!pending_expire.empty()) {
      auto n = iter;
      ++n;
      if (n == in_flight.end()) {
        pos = *pending_expire.rbegin();
        pending_expire.clear();
      } else {
        auto p = pending_expire.begin();
        do {
          if (*p >= n->first)
            break;
          pos = *p;
          pending_expire.erase(p++);
        } while (p != pending_expire.end());
      }
    }
    dout(10) << "expiring to 0x" << std::hex << pos << std::dec << dendl;
    journaler.set_expire_pos(pos);
  } else {
    // This is completely fine, we're not supposed to purge files in
    // order when doing them in parallel.
    dout(10) << "non-sequential completion, not expiring anything" << dendl;
    pending_expire.insert(expire_to);
  }

  ops_in_flight -= _calculate_ops(iter->second);
  logger->set(l_pq_executing_ops, ops_in_flight);

  dout(10) << "completed item for ino " << iter->second.ino << dendl;

  in_flight.erase(iter);
  logger->set(l_pq_executing, in_flight.size());
  dout(10) << "in_flight.size() now " << in_flight.size() << dendl;

  logger->inc(l_pq_executed);
}

void PurgeQueue::update_op_limit(const MDSMap &mds_map)
{
  std::lock_guard l(lock);

  if (readonly) {
    dout(10) << "skipping; PurgeQueue is readonly" << dendl;
    return;
  }

  uint64_t pg_count = 0;
  objecter->with_osdmap([&](const OSDMap& o) {
    // Number of PGs across all data pools
    const std::vector<int64_t> &data_pools = mds_map.get_data_pools();
    for (const auto dp : data_pools) {
      if (o.get_pg_pool(dp) == NULL) {
        // It is possible that we have an older OSDMap than MDSMap,
        // because we don't start watching every OSDMap until after
        // MDSRank is initialized
        dout(4) << " data pool " << dp << " not found in OSDMap" << dendl;
        continue;
      }
      pg_count += o.get_pg_num(dp);
    }
  });

  // Work out a limit based on n_pgs / n_mdss, multiplied by the user's
  // preference for how many ops per PG
  max_purge_ops = uint64_t(((double)pg_count / (double)mds_map.get_max_mds()) *
                           cct->_conf->mds_max_purge_ops_per_pg);
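  // Worked example (illustrative numbers): 512 PGs across the data pools
  // with max_mds=2 and mds_max_purge_ops_per_pg=0.5 yields
  // (512 / 2) * 0.5 = 128 concurrent purge ops for this rank.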

  // User may also specify a hard limit, apply this if so.
  if (cct->_conf->mds_max_purge_ops) {
    max_purge_ops = std::min(max_purge_ops, cct->_conf->mds_max_purge_ops);
  }
}

void PurgeQueue::handle_conf_change(const ConfigProxy& conf,
                                    const std::set <std::string> &changed,
                                    const MDSMap &mds_map)
{
  if (changed.count("mds_max_purge_ops")
      || changed.count("mds_max_purge_ops_per_pg")) {
    update_op_limit(mds_map);
  } else if (changed.count("mds_max_purge_files")) {
    std::lock_guard l(lock);
    if (in_flight.empty()) {
      // We might have gone from zero to a finite limit, so
      // might need to kick off consume.
      dout(4) << "maybe start work again (max_purge_files="
              << conf->mds_max_purge_files << ")" << dendl;
      finisher.queue(new FunctionContext([this](int r){
        std::lock_guard l(lock);
        _consume();
      }));
    }
  }
}

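// Drain-progress sketch: progress is reported in journal bytes against the
// largest backlog seen since draining began. Worked example (illustrative):
// if drain_initial settles at 10 MiB and bytes_remaining is now 2.5 MiB,
// callers see *progress = 7.5 MiB of *progress_total = 10 MiB, i.e. 75%.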
bool PurgeQueue::drain(
  uint64_t *progress,
  uint64_t *progress_total,
  size_t *in_flight_count
  )
{
  std::lock_guard l(lock);

  if (readonly) {
    dout(10) << "skipping drain; PurgeQueue is readonly" << dendl;
    return true;
  }

  ceph_assert(progress != nullptr);
  ceph_assert(progress_total != nullptr);
  ceph_assert(in_flight_count != nullptr);

  const bool done = in_flight.empty() && (
      journaler.get_read_pos() == journaler.get_write_pos());
  if (done) {
    return true;
  }

  const uint64_t bytes_remaining = journaler.get_write_pos()
                                   - journaler.get_read_pos();

  if (!draining) {
    // Start of draining: remember how much there was outstanding at
    // this point so that we can give a progress percentage later
    draining = true;

    // Lift the op throttle as this daemon now has nothing to do but
    // drain the purge queue, so do it as fast as we can.
    max_purge_ops = 0xffff;
  }

  drain_initial = std::max(bytes_remaining, drain_initial);

  *progress = drain_initial - bytes_remaining;
  *progress_total = drain_initial;
  *in_flight_count = in_flight.size();

  return false;
}

std::string_view PurgeItem::get_type_str() const
{
  switch (action) {
  case PurgeItem::NONE: return "NONE";
  case PurgeItem::PURGE_FILE: return "PURGE_FILE";
  case PurgeItem::PURGE_DIR: return "PURGE_DIR";
  case PurgeItem::TRUNCATE_FILE: return "TRUNCATE_FILE";
  default:
    return "UNKNOWN";
  }
}