1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab ft=cpp
9 #include <boost/optional.hpp>
12 #include <liboath/oath.h>
15 #include "auth/Crypto.h"
16 #include "compressor/Compressor.h"
18 #include "common/armor.h"
19 #include "common/ceph_json.h"
20 #include "common/config.h"
21 #include "common/ceph_argparse.h"
22 #include "common/Formatter.h"
23 #include "common/errno.h"
24 #include "common/safe_io.h"
26 #include "include/util.h"
28 #include "cls/rgw/cls_rgw_types.h"
29 #include "cls/rgw/cls_rgw_client.h"
31 #include "global/global_init.h"
33 #include "include/utime.h"
34 #include "include/str_list.h"
37 #include "rgw_bucket.h"
39 #include "rgw_rados.h"
41 #include "rgw_acl_s3.h"
42 #include "rgw_datalog.h"
45 #include "rgw_formats.h"
46 #include "rgw_usage.h"
47 #include "rgw_orphan.h"
49 #include "rgw_trim_bilog.h"
50 #include "rgw_trim_datalog.h"
51 #include "rgw_trim_mdlog.h"
52 #include "rgw_data_sync.h"
53 #include "rgw_rest_conn.h"
54 #include "rgw_realm_watcher.h"
56 #include "rgw_reshard.h"
57 #include "rgw_http_client_curl.h"
59 #include "rgw_pubsub.h"
60 #include "rgw_bucket_sync.h"
61 #include "rgw_sync_checkpoint.h"
64 #include "services/svc_sync_modules.h"
65 #include "services/svc_cls.h"
66 #include "services/svc_bilog_rados.h"
67 #include "services/svc_mdlog.h"
68 #include "services/svc_meta_be_otp.h"
69 #include "services/svc_zone.h"
71 #define dout_context g_ceph_context
72 #define dout_subsys ceph_subsys_rgw
74 #define SECRET_KEY_LEN 40
75 #define PUBLIC_ID_LEN 20
79 static rgw::sal::Store
* store
= NULL
;
81 static const DoutPrefixProvider
* dpp() {
82 struct GlobalPrefix
: public DoutPrefixProvider
{
83 CephContext
*get_cct() const override
{ return dout_context
; }
84 unsigned get_subsys() const override
{ return dout_subsys
; }
85 std::ostream
& gen_prefix(std::ostream
& out
) const override
{ return out
; }
87 static GlobalPrefix global_dpp
;
91 #define CHECK_TRUE(x, msg, err) \
94 cerr << msg << std::endl; \
99 #define CHECK_SUCCESS(x, msg) \
103 cerr << msg << ": " << cpp_strerror(-_x_val) << std::endl; \
110 cout
<< "usage: radosgw-admin <cmd> [options...]" << std::endl
;
111 cout
<< "commands:\n";
112 cout
<< " user create create a new user\n" ;
113 cout
<< " user modify modify user\n";
114 cout
<< " user info get user info\n";
115 cout
<< " user rename rename user\n";
116 cout
<< " user rm remove user\n";
117 cout
<< " user suspend suspend a user\n";
118 cout
<< " user enable re-enable user after suspension\n";
119 cout
<< " user check check user info\n";
120 cout
<< " user stats show user stats as accounted by quota subsystem\n";
121 cout
<< " user list list users\n";
122 cout
<< " caps add add user capabilities\n";
123 cout
<< " caps rm remove user capabilities\n";
124 cout
<< " subuser create create a new subuser\n" ;
125 cout
<< " subuser modify modify subuser\n";
126 cout
<< " subuser rm remove subuser\n";
127 cout
<< " key create create access key\n";
128 cout
<< " key rm remove access key\n";
129 cout
<< " bucket list list buckets (specify --allow-unordered for\n";
130 cout
<< " faster, unsorted listing)\n";
131 cout
<< " bucket limit check show bucket sharding stats\n";
132 cout
<< " bucket link link bucket to specified user\n";
133 cout
<< " bucket unlink unlink bucket from specified user\n";
134 cout
<< " bucket stats returns bucket statistics\n";
135 cout
<< " bucket rm remove bucket\n";
136 cout
<< " bucket check check bucket index\n";
137 cout
<< " bucket chown link bucket to specified user and update its object ACLs\n";
138 cout
<< " bucket reshard reshard bucket\n";
139 cout
<< " bucket rewrite rewrite all objects in the specified bucket\n";
140 cout
<< " bucket sync checkpoint poll a bucket's sync status until it catches up to its remote\n";
141 cout
<< " bucket sync disable disable bucket sync\n";
142 cout
<< " bucket sync enable enable bucket sync\n";
143 cout
<< " bucket radoslist list rados objects backing bucket's objects\n";
144 cout
<< " bi get retrieve bucket index object entries\n";
145 cout
<< " bi put store bucket index object entries\n";
146 cout
<< " bi list list raw bucket index entries\n";
147 cout
<< " bi purge purge bucket index entries\n";
148 cout
<< " object rm remove object\n";
149 cout
<< " object put put object\n";
150 cout
<< " object stat stat an object for its metadata\n";
151 cout
<< " object unlink unlink object from bucket index\n";
152 cout
<< " object rewrite rewrite the specified object\n";
153 cout
<< " objects expire run expired objects cleanup\n";
154 cout
<< " objects expire-stale list list stale expired objects (caused by reshard)\n";
155 cout
<< " objects expire-stale rm remove stale expired objects\n";
156 cout
<< " period rm remove a period\n";
157 cout
<< " period get get period info\n";
158 cout
<< " period get-current get current period info\n";
159 cout
<< " period pull pull a period\n";
160 cout
<< " period push push a period\n";
161 cout
<< " period list list all periods\n";
162 cout
<< " period update update the staging period\n";
163 cout
<< " period commit commit the staging period\n";
164 cout
<< " quota set set quota params\n";
165 cout
<< " quota enable enable quota\n";
166 cout
<< " quota disable disable quota\n";
167 cout
<< " ratelimit get get ratelimit params\n";
168 cout
<< " ratelimit set set ratelimit params\n";
169 cout
<< " ratelimit enable enable ratelimit\n";
170 cout
<< " ratelimit disable disable ratelimit\n";
171 cout
<< " global quota get view global quota params\n";
172 cout
<< " global quota set set global quota params\n";
173 cout
<< " global quota enable enable a global quota\n";
174 cout
<< " global quota disable disable a global quota\n";
175 cout
<< " global ratelimit get view global ratelimit params\n";
176 cout
<< " global ratelimit set set global ratelimit params\n";
177 cout
<< " global ratelimit enable enable a ratelimit quota\n";
178 cout
<< " global ratelimit disable disable a ratelimit quota\n";
179 cout
<< " realm create create a new realm\n";
180 cout
<< " realm rm remove a realm\n";
181 cout
<< " realm get show realm info\n";
182 cout
<< " realm get-default get default realm name\n";
183 cout
<< " realm list list realms\n";
184 cout
<< " realm list-periods list all realm periods\n";
185 cout
<< " realm rename rename a realm\n";
186 cout
<< " realm set set realm info (requires infile)\n";
187 cout
<< " realm default set realm as default\n";
188 cout
<< " realm pull pull a realm and its current period\n";
189 cout
<< " zonegroup add add a zone to a zonegroup\n";
190 cout
<< " zonegroup create create a new zone group info\n";
191 cout
<< " zonegroup default set default zone group\n";
192 cout
<< " zonegroup delete delete a zone group info\n";
193 cout
<< " zonegroup get show zone group info\n";
194 cout
<< " zonegroup modify modify an existing zonegroup\n";
195 cout
<< " zonegroup set set zone group info (requires infile)\n";
196 cout
<< " zonegroup rm remove a zone from a zonegroup\n";
197 cout
<< " zonegroup rename rename a zone group\n";
198 cout
<< " zonegroup list list all zone groups set on this cluster\n";
199 cout
<< " zonegroup placement list list zonegroup's placement targets\n";
200 cout
<< " zonegroup placement get get a placement target of a specific zonegroup\n";
201 cout
<< " zonegroup placement add add a placement target id to a zonegroup\n";
202 cout
<< " zonegroup placement modify modify a placement target of a specific zonegroup\n";
203 cout
<< " zonegroup placement rm remove a placement target from a zonegroup\n";
204 cout
<< " zonegroup placement default set a zonegroup's default placement target\n";
205 cout
<< " zone create create a new zone\n";
206 cout
<< " zone rm remove a zone\n";
207 cout
<< " zone get show zone cluster params\n";
208 cout
<< " zone modify modify an existing zone\n";
209 cout
<< " zone set set zone cluster params (requires infile)\n";
210 cout
<< " zone list list all zones set on this cluster\n";
211 cout
<< " zone rename rename a zone\n";
212 cout
<< " zone placement list list zone's placement targets\n";
213 cout
<< " zone placement get get a zone placement target\n";
214 cout
<< " zone placement add add a zone placement target\n";
215 cout
<< " zone placement modify modify a zone placement target\n";
216 cout
<< " zone placement rm remove a zone placement target\n";
217 cout
<< " metadata sync status get metadata sync status\n";
218 cout
<< " metadata sync init init metadata sync\n";
219 cout
<< " metadata sync run run metadata sync\n";
220 cout
<< " data sync status get data sync status of the specified source zone\n";
221 cout
<< " data sync init init data sync for the specified source zone\n";
222 cout
<< " data sync run run data sync for the specified source zone\n";
223 cout
<< " pool add add an existing pool for data placement\n";
224 cout
<< " pool rm remove an existing pool from data placement set\n";
225 cout
<< " pools list list placement active set\n";
226 cout
<< " policy read bucket/object policy\n";
227 cout
<< " log list list log objects\n";
228 cout
<< " log show dump a log from specific object or (bucket + date\n";
229 cout
<< " + bucket-id)\n";
230 cout
<< " (NOTE: required to specify formatting of date\n";
231 cout
<< " to \"YYYY-MM-DD-hh\")\n";
232 cout
<< " log rm remove log object\n";
233 cout
<< " usage show show usage (by user, by bucket, date range)\n";
234 cout
<< " usage trim trim usage (by user, by bucket, date range)\n";
235 cout
<< " usage clear reset all the usage stats for the cluster\n";
236 cout
<< " gc list dump expired garbage collection objects (specify\n";
237 cout
<< " --include-all to list all entries, including unexpired)\n";
238 cout
<< " gc process manually process garbage (specify\n";
239 cout
<< " --include-all to process all entries, including unexpired)\n";
240 cout
<< " lc list list all bucket lifecycle progress\n";
241 cout
<< " lc get get a lifecycle bucket configuration\n";
242 cout
<< " lc process manually process lifecycle\n";
243 cout
<< " lc reshard fix fix LC for a resharded bucket\n";
244 cout
<< " metadata get get metadata info\n";
245 cout
<< " metadata put put metadata info\n";
246 cout
<< " metadata rm remove metadata info\n";
247 cout
<< " metadata list list metadata info\n";
248 cout
<< " mdlog list list metadata log\n";
249 cout
<< " mdlog autotrim auto trim metadata log\n";
250 cout
<< " mdlog trim trim metadata log (use marker)\n";
251 cout
<< " mdlog status read metadata log status\n";
252 cout
<< " bilog list list bucket index log\n";
253 cout
<< " bilog trim trim bucket index log (use start-marker, end-marker)\n";
254 cout
<< " bilog status read bucket index log status\n";
255 cout
<< " bilog autotrim auto trim bucket index log\n";
256 cout
<< " datalog list list data log\n";
257 cout
<< " datalog trim trim data log\n";
258 cout
<< " datalog status read data log status\n";
259 cout
<< " datalog type change datalog type to --log_type={fifo,omap}\n";
260 cout
<< " orphans find deprecated -- init and run search for leaked rados objects (use job-id, pool)\n";
261 cout
<< " orphans finish deprecated -- clean up search for leaked rados objects\n";
262 cout
<< " orphans list-jobs deprecated -- list the current job-ids for orphans search\n";
263 cout
<< " * the three 'orphans' sub-commands are now deprecated; consider using the `rgw-orphan-list` tool\n";
264 cout
<< " role create create a AWS role for use with STS\n";
265 cout
<< " role delete remove a role\n";
266 cout
<< " role get get a role\n";
267 cout
<< " role list list roles with specified path prefix\n";
268 cout
<< " role modify modify the assume role policy of an existing role\n";
269 cout
<< " role-policy put add/update permission policy to role\n";
270 cout
<< " role-policy list list policies attached to a role\n";
271 cout
<< " role-policy get get the specified inline policy document embedded with the given role\n";
272 cout
<< " role-policy delete remove policy attached to a role\n";
273 cout
<< " reshard add schedule a resharding of a bucket\n";
274 cout
<< " reshard list list all bucket resharding or scheduled to be resharded\n";
275 cout
<< " reshard status read bucket resharding status\n";
276 cout
<< " reshard process process of scheduled reshard jobs\n";
277 cout
<< " reshard cancel cancel resharding a bucket\n";
278 cout
<< " reshard stale-instances list list stale-instances from bucket resharding\n";
279 cout
<< " reshard stale-instances rm cleanup stale-instances from bucket resharding\n";
280 cout
<< " sync error list list sync error\n";
281 cout
<< " sync error trim trim sync error\n";
282 cout
<< " mfa create create a new MFA TOTP token\n";
283 cout
<< " mfa list list MFA TOTP tokens\n";
284 cout
<< " mfa get show MFA TOTP token\n";
285 cout
<< " mfa remove delete MFA TOTP token\n";
286 cout
<< " mfa check check MFA TOTP token\n";
287 cout
<< " mfa resync re-sync MFA TOTP token\n";
288 cout
<< " topic list list bucket notifications/pubsub topics\n";
289 cout
<< " topic get get a bucket notifications/pubsub topic\n";
290 cout
<< " topic rm remove a bucket notifications/pubsub topic\n";
291 cout
<< " subscription get get a pubsub subscription definition\n";
292 cout
<< " subscription rm remove a pubsub subscription\n";
293 cout
<< " subscription pull show events in a pubsub subscription\n";
294 cout
<< " subscription ack ack (remove) an events in a pubsub subscription\n";
295 cout
<< " script put upload a lua script to a context\n";
296 cout
<< " script get get the lua script of a context\n";
297 cout
<< " script rm remove the lua scripts of a context\n";
298 cout
<< " script-package add add a lua package to the scripts allowlist\n";
299 cout
<< " script-package rm remove a lua package from the scripts allowlist\n";
300 cout
<< " script-package list get the lua packages allowlist\n";
301 cout
<< "options:\n";
302 cout
<< " --tenant=<tenant> tenant name\n";
303 cout
<< " --user_ns=<namespace> namespace of user (oidc in case of users authenticated with oidc provider)\n";
304 cout
<< " --uid=<id> user id\n";
305 cout
<< " --new-uid=<id> new user id\n";
306 cout
<< " --subuser=<name> subuser name\n";
307 cout
<< " --access-key=<key> S3 access key\n";
308 cout
<< " --email=<email> user's email address\n";
309 cout
<< " --secret/--secret-key=<key>\n";
310 cout
<< " specify secret key\n";
311 cout
<< " --gen-access-key generate random access key (for S3)\n";
312 cout
<< " --gen-secret generate random secret key\n";
313 cout
<< " --key-type=<type> key type, options are: swift, s3\n";
314 cout
<< " --temp-url-key[-2]=<key> temp url key\n";
315 cout
<< " --access=<access> Set access permissions for sub-user, should be one\n";
316 cout
<< " of read, write, readwrite, full\n";
317 cout
<< " --display-name=<name> user's display name\n";
318 cout
<< " --max-buckets max number of buckets for a user\n";
319 cout
<< " --admin set the admin flag on the user\n";
320 cout
<< " --system set the system flag on the user\n";
321 cout
<< " --op-mask set the op mask on the user\n";
322 cout
<< " --bucket=<bucket> Specify the bucket name. Also used by the quota command.\n";
323 cout
<< " --pool=<pool> Specify the pool name. Also used to scan for leaked rados objects.\n";
324 cout
<< " --object=<object> object name\n";
325 cout
<< " --object-version=<version> object version\n";
326 cout
<< " --date=<date> date in the format yyyy-mm-dd\n";
327 cout
<< " --start-date=<date> start date in the format yyyy-mm-dd\n";
328 cout
<< " --end-date=<date> end date in the format yyyy-mm-dd\n";
329 cout
<< " --bucket-id=<bucket-id> bucket id\n";
330 cout
<< " --bucket-new-name=<bucket>\n";
331 cout
<< " for bucket link: optional new name\n";
332 cout
<< " --shard-id=<shard-id> optional for: \n";
333 cout
<< " mdlog list\n";
334 cout
<< " data sync status\n";
335 cout
<< " required for: \n";
336 cout
<< " mdlog trim\n";
337 cout
<< " --max-entries=<entries> max entries for listing operations\n";
338 cout
<< " --metadata-key=<key> key to retrieve metadata from with metadata get\n";
339 cout
<< " --remote=<remote> zone or zonegroup id of remote gateway\n";
340 cout
<< " --period=<id> period id\n";
341 cout
<< " --url=<url> url for pushing/pulling period/realm\n";
342 cout
<< " --epoch=<number> period epoch\n";
343 cout
<< " --commit commit the period during 'period update'\n";
344 cout
<< " --staging get staging period info\n";
345 cout
<< " --master set as master\n";
346 cout
<< " --master-zone=<id> master zone id\n";
347 cout
<< " --rgw-realm=<name> realm name\n";
348 cout
<< " --realm-id=<id> realm id\n";
349 cout
<< " --realm-new-name=<name> realm new name\n";
350 cout
<< " --rgw-zonegroup=<name> zonegroup name\n";
351 cout
<< " --zonegroup-id=<id> zonegroup id\n";
352 cout
<< " --zonegroup-new-name=<name>\n";
353 cout
<< " zonegroup new name\n";
354 cout
<< " --rgw-zone=<name> name of zone in which radosgw is running\n";
355 cout
<< " --zone-id=<id> zone id\n";
356 cout
<< " --zone-new-name=<name> zone new name\n";
357 cout
<< " --source-zone specify the source zone (for data sync)\n";
358 cout
<< " --default set entity (realm, zonegroup, zone) as default\n";
359 cout
<< " --read-only set zone as read-only (when adding to zonegroup)\n";
360 cout
<< " --redirect-zone specify zone id to redirect when response is 404 (not found)\n";
361 cout
<< " --placement-id placement id for zonegroup placement commands\n";
362 cout
<< " --storage-class storage class for zonegroup placement commands\n";
363 cout
<< " --tags=<list> list of tags for zonegroup placement add and modify commands\n";
364 cout
<< " --tags-add=<list> list of tags to add for zonegroup placement modify command\n";
365 cout
<< " --tags-rm=<list> list of tags to remove for zonegroup placement modify command\n";
366 cout
<< " --endpoints=<list> zone endpoints\n";
367 cout
<< " --index-pool=<pool> placement target index pool\n";
368 cout
<< " --data-pool=<pool> placement target data pool\n";
369 cout
<< " --data-extra-pool=<pool> placement target data extra (non-ec) pool\n";
370 cout
<< " --placement-index-type=<type>\n";
371 cout
<< " placement target index type (normal, indexless, or #id)\n";
372 cout
<< " --compression=<type> placement target compression type (plugin name or empty/none)\n";
373 cout
<< " --tier-type=<type> zone tier type\n";
374 cout
<< " --tier-config=<k>=<v>[,...]\n";
375 cout
<< " set zone tier config keys, values\n";
376 cout
<< " --tier-config-rm=<k>[,...]\n";
377 cout
<< " unset zone tier config keys\n";
378 cout
<< " --sync-from-all[=false] set/reset whether zone syncs from all zonegroup peers\n";
379 cout
<< " --sync-from=[zone-name][,...]\n";
380 cout
<< " set list of zones to sync from\n";
381 cout
<< " --sync-from-rm=[zone-name][,...]\n";
382 cout
<< " remove zones from list of zones to sync from\n";
383 cout
<< " --bucket-index-max-shards override a zone/zonegroup's default bucket index shard count\n";
384 cout
<< " --fix besides checking bucket index, will also fix it\n";
385 cout
<< " --check-objects bucket check: rebuilds bucket index according to\n";
386 cout
<< " actual objects state\n";
387 cout
<< " --format=<format> specify output format for certain operations: xml,\n";
389 cout
<< " --purge-data when specified, user removal will also purge all the\n";
390 cout
<< " user data\n";
391 cout
<< " --purge-keys when specified, subuser removal will also purge all the\n";
392 cout
<< " subuser keys\n";
393 cout
<< " --purge-objects remove a bucket's objects before deleting it\n";
394 cout
<< " (NOTE: required to delete a non-empty bucket)\n";
395 cout
<< " --sync-stats option to 'user stats', update user stats with current\n";
396 cout
<< " stats reported by user's buckets indexes\n";
397 cout
<< " --reset-stats option to 'user stats', reset stats in accordance with user buckets\n";
398 cout
<< " --show-config show configuration\n";
399 cout
<< " --show-log-entries=<flag> enable/disable dump of log entries on log show\n";
400 cout
<< " --show-log-sum=<flag> enable/disable dump of log summation on log show\n";
401 cout
<< " --skip-zero-entries log show only dumps entries that don't have zero value\n";
402 cout
<< " in one of the numeric field\n";
403 cout
<< " --infile=<file> specify a file to read in when setting data\n";
404 cout
<< " --categories=<list> comma separated list of categories, used in usage show\n";
405 cout
<< " --caps=<caps> list of caps (e.g., \"usage=read, write; user=read\")\n";
406 cout
<< " --op-mask=<op-mask> permission of user's operations (e.g., \"read, write, delete, *\")\n";
407 cout
<< " --yes-i-really-mean-it required for certain operations\n";
408 cout
<< " --warnings-only when specified with bucket limit check, list\n";
409 cout
<< " only buckets nearing or over the current max\n";
410 cout
<< " objects per shard value\n";
411 cout
<< " --bypass-gc when specified with bucket deletion, triggers\n";
412 cout
<< " object deletions by not involving GC\n";
413 cout
<< " --inconsistent-index when specified with bucket deletion and bypass-gc set to true,\n";
414 cout
<< " ignores bucket index consistency\n";
415 cout
<< " --min-rewrite-size min object size for bucket rewrite (default 4M)\n";
416 cout
<< " --max-rewrite-size max object size for bucket rewrite (default ULLONG_MAX)\n";
417 cout
<< " --min-rewrite-stripe-size min stripe size for object rewrite (default 0)\n";
418 cout
<< " --trim-delay-ms time interval in msec to limit the frequency of sync error log entries trimming operations,\n";
419 cout
<< " the trimming process will sleep the specified msec for every 1000 entries trimmed\n";
420 cout
<< " --max-concurrent-ios maximum concurrent ios for bucket operations (default: 32)\n";
422 cout
<< "<date> := \"YYYY-MM-DD[ hh:mm:ss]\"\n";
423 cout
<< "\nQuota options:\n";
424 cout
<< " --max-objects specify max objects (negative value to disable)\n";
425 cout
<< " --max-size specify max size (in B/K/M/G/T, negative value to disable)\n";
426 cout
<< " --quota-scope scope of quota (bucket, user)\n";
427 cout
<< "\nRate limiting options:\n";
428 cout
<< " --max-read-ops specify max requests per minute for READ ops per RGW (GET and HEAD request methods), 0 means unlimited\n";
429 cout
<< " --max-read-bytes specify max bytes per minute for READ ops per RGW (GET and HEAD request methods), 0 means unlimited\n";
430 cout
<< " --max-write-ops specify max requests per minute for WRITE ops per RGW (Not GET or HEAD request methods), 0 means unlimited\n";
431 cout
<< " --max-write-bytes specify max bytes per minute for WRITE ops per RGW (Not GET or HEAD request methods), 0 means unlimited\n";
432 cout
<< " --ratelimit-scope scope of rate limiting: bucket, user, anonymous\n";
433 cout
<< " anonymous can be configured only with global rate limit\n";
434 cout
<< "\nOrphans search options:\n";
435 cout
<< " --num-shards num of shards to use for keeping the temporary scan info\n";
436 cout
<< " --orphan-stale-secs num of seconds to wait before declaring an object to be an orphan (default: 86400)\n";
437 cout
<< " --job-id set the job id (for orphans find)\n";
438 cout
<< " --detail detailed mode, log and stat head objects as well\n";
439 cout
<< "\nOrphans list-jobs options:\n";
440 cout
<< " --extra-info provide extra info in job list\n";
441 cout
<< "\nRole options:\n";
442 cout
<< " --role-name name of the role to create\n";
443 cout
<< " --path path to the role\n";
444 cout
<< " --assume-role-policy-doc the trust relationship policy document that grants an entity permission to assume the role\n";
445 cout
<< " --policy-name name of the policy document\n";
446 cout
<< " --policy-doc permission policy document\n";
447 cout
<< " --path-prefix path prefix for filtering roles\n";
448 cout
<< "\nMFA options:\n";
449 cout
<< " --totp-serial a string that represents the ID of a TOTP token\n";
450 cout
<< " --totp-seed the secret seed that is used to calculate the TOTP\n";
451 cout
<< " --totp-seconds the time resolution that is being used for TOTP generation\n";
452 cout
<< " --totp-window the number of TOTP tokens that are checked before and after the current token when validating token\n";
453 cout
<< " --totp-pin the valid value of a TOTP token at a certain time\n";
454 cout
<< "\nBucket notifications/pubsub options:\n";
455 cout
<< " --topic bucket notifications/pubsub topic name\n";
456 cout
<< " --subscription pubsub subscription name\n";
457 cout
<< " --event-id event id in a pubsub subscription\n";
458 cout
<< "\nScript options:\n";
459 cout
<< " --context context in which the script runs. one of: preRequest, postRequest\n";
460 cout
<< " --package name of the lua package that should be added/removed to/from the allowlist\n";
461 cout
<< " --allow-compilation package is allowed to compile C code as part of its installation\n";
462 cout
<< "\nradoslist options:\n";
463 cout
<< " --rgw-obj-fs the field separator that will separate the rados\n";
464 cout
<< " object name from the rgw object name;\n";
465 cout
<< " additionally rados objects for incomplete\n";
466 cout
<< " multipart uploads will not be output\n";
468 generic_client_usage();
479 using Aliases
= std::vector
<std::set
<string
> >;
480 using Commands
= std::vector
<Def
>;
484 map
<string
, Node
> next
;
485 set
<string
> expected
; /* separate un-normalized list */
490 map
<string
, string
> alias_map
;
492 string
normalize_alias(const string
& s
) const {
493 auto iter
= alias_map
.find(s
);
494 if (iter
== alias_map
.end()) {
500 void init_alias_map(Aliases
& aliases
) {
501 for (auto& alias_set
: aliases
) {
502 std::optional
<string
> first
;
504 for (auto& alias
: alias_set
) {
508 alias_map
[alias
] = *first
;
514 bool gen_next_expected(Node
*node
, vector
<string
> *expected
, bool ret
) {
515 for (auto& next_cmd
: node
->expected
) {
516 expected
->push_back(next_cmd
);
526 SimpleCmd(std::optional
<Commands
> cmds
,
527 std::optional
<Aliases
> aliases
) {
529 add_aliases(*aliases
);
537 void add_aliases(Aliases
& aliases
) {
538 init_alias_map(aliases
);
541 void add_commands(std::vector
<Def
>& cmds
) {
542 for (auto& cmd
: cmds
) {
543 vector
<string
> words
;
544 get_str_vec(cmd
.cmd
, " ", words
);
546 auto node
= &cmd_root
;
547 for (auto& word
: words
) {
548 auto norm
= normalize_alias(word
);
551 node
->expected
.insert(word
);
553 node
= &node
->next
[norm
];
555 if (norm
== "[*]") { /* optional param at the end */
556 parent
->next
["*"] = *node
; /* can be also looked up by '*' */
557 parent
->opt
= cmd
.opt
;
565 template <class Container
>
566 bool find_command(Container
& args
,
568 vector
<string
> *extra_args
,
570 vector
<string
> *expected
) {
571 auto node
= &cmd_root
;
573 std::optional
<std::any
> found_opt
;
575 for (auto& arg
: args
) {
576 string norm
= normalize_alias(arg
);
577 auto iter
= node
->next
.find(norm
);
578 if (iter
== node
->next
.end()) {
579 iter
= node
->next
.find("*");
580 if (iter
== node
->next
.end()) {
581 *error
= string("ERROR: Unrecognized argument: '") + arg
+ "'";
582 return gen_next_expected(node
, expected
, false);
584 extra_args
->push_back(arg
);
586 found_opt
= node
->opt
;
589 node
= &(iter
->second
);
592 *opt_cmd
= found_opt
.value_or(node
->opt
);
594 if (!opt_cmd
->has_value()) {
595 *error
="ERROR: Unknown command";
596 return gen_next_expected(node
, expected
, false);
604 namespace rgw_admin
{
629 BUCKET_SYNC_CHECKPOINT
,
658 OBJECTS_EXPIRE_STALE_LIST
,
659 OBJECTS_EXPIRE_STALE_RM
,
692 ZONEGROUP_PLACEMENT_ADD
,
693 ZONEGROUP_PLACEMENT_MODIFY
,
694 ZONEGROUP_PLACEMENT_RM
,
695 ZONEGROUP_PLACEMENT_LIST
,
696 ZONEGROUP_PLACEMENT_GET
,
697 ZONEGROUP_PLACEMENT_DEFAULT
,
707 ZONE_PLACEMENT_MODIFY
,
717 METADATA_SYNC_STATUS
,
731 SYNC_GROUP_FLOW_CREATE
,
732 SYNC_GROUP_FLOW_REMOVE
,
733 SYNC_GROUP_PIPE_CREATE
,
734 SYNC_GROUP_PIPE_MODIFY
,
735 SYNC_GROUP_PIPE_REMOVE
,
771 GLOBAL_QUOTA_DISABLE
,
772 GLOBAL_RATELIMIT_GET
,
773 GLOBAL_RATELIMIT_SET
,
774 GLOBAL_RATELIMIT_ENABLE
,
775 GLOBAL_RATELIMIT_DISABLE
,
798 RESHARD_STALE_INSTANCES_LIST
,
799 RESHARD_STALE_INSTANCES_DELETE
,
801 // TODO add "subscription list" command
818 using namespace rgw_admin
;
820 static SimpleCmd::Commands all_cmds
= {
821 { "user create", OPT::USER_CREATE
},
822 { "user info", OPT::USER_INFO
},
823 { "user modify", OPT::USER_MODIFY
},
824 { "user rename", OPT::USER_RENAME
},
825 { "user rm", OPT::USER_RM
},
826 { "user suspend", OPT::USER_SUSPEND
},
827 { "user enable", OPT::USER_ENABLE
},
828 { "user check", OPT::USER_CHECK
},
829 { "user stats", OPT::USER_STATS
},
830 { "user list", OPT::USER_LIST
},
831 { "subuser create", OPT::SUBUSER_CREATE
},
832 { "subuser modify", OPT::SUBUSER_MODIFY
},
833 { "subuser rm", OPT::SUBUSER_RM
},
834 { "key create", OPT::KEY_CREATE
},
835 { "key rm", OPT::KEY_RM
},
836 { "buckets list", OPT::BUCKETS_LIST
},
837 { "bucket list", OPT::BUCKETS_LIST
},
838 { "bucket limit check", OPT::BUCKET_LIMIT_CHECK
},
839 { "bucket link", OPT::BUCKET_LINK
},
840 { "bucket unlink", OPT::BUCKET_UNLINK
},
841 { "bucket stats", OPT::BUCKET_STATS
},
842 { "bucket check", OPT::BUCKET_CHECK
},
843 { "bucket sync checkpoint", OPT::BUCKET_SYNC_CHECKPOINT
},
844 { "bucket sync info", OPT::BUCKET_SYNC_INFO
},
845 { "bucket sync status", OPT::BUCKET_SYNC_STATUS
},
846 { "bucket sync markers", OPT::BUCKET_SYNC_MARKERS
},
847 { "bucket sync init", OPT::BUCKET_SYNC_INIT
},
848 { "bucket sync run", OPT::BUCKET_SYNC_RUN
},
849 { "bucket sync disable", OPT::BUCKET_SYNC_DISABLE
},
850 { "bucket sync enable", OPT::BUCKET_SYNC_ENABLE
},
851 { "bucket rm", OPT::BUCKET_RM
},
852 { "bucket rewrite", OPT::BUCKET_REWRITE
},
853 { "bucket reshard", OPT::BUCKET_RESHARD
},
854 { "bucket chown", OPT::BUCKET_CHOWN
},
855 { "bucket radoslist", OPT::BUCKET_RADOS_LIST
},
856 { "bucket rados list", OPT::BUCKET_RADOS_LIST
},
857 { "policy", OPT::POLICY
},
858 { "pool add", OPT::POOL_ADD
},
859 { "pool rm", OPT::POOL_RM
},
860 { "pool list", OPT::POOLS_LIST
},
861 { "pools list", OPT::POOLS_LIST
},
862 { "log list", OPT::LOG_LIST
},
863 { "log show", OPT::LOG_SHOW
},
864 { "log rm", OPT::LOG_RM
},
865 { "usage show", OPT::USAGE_SHOW
},
866 { "usage trim", OPT::USAGE_TRIM
},
867 { "usage clear", OPT::USAGE_CLEAR
},
868 { "object put", OPT::OBJECT_PUT
},
869 { "object rm", OPT::OBJECT_RM
},
870 { "object unlink", OPT::OBJECT_UNLINK
},
871 { "object stat", OPT::OBJECT_STAT
},
872 { "object rewrite", OPT::OBJECT_REWRITE
},
873 { "objects expire", OPT::OBJECTS_EXPIRE
},
874 { "objects expire-stale list", OPT::OBJECTS_EXPIRE_STALE_LIST
},
875 { "objects expire-stale rm", OPT::OBJECTS_EXPIRE_STALE_RM
},
876 { "bi get", OPT::BI_GET
},
877 { "bi put", OPT::BI_PUT
},
878 { "bi list", OPT::BI_LIST
},
879 { "bi purge", OPT::BI_PURGE
},
880 { "olh get", OPT::OLH_GET
},
881 { "olh readlog", OPT::OLH_READLOG
},
882 { "quota set", OPT::QUOTA_SET
},
883 { "quota enable", OPT::QUOTA_ENABLE
},
884 { "quota disable", OPT::QUOTA_DISABLE
},
885 { "ratelimit get", OPT::RATELIMIT_GET
},
886 { "ratelimit set", OPT::RATELIMIT_SET
},
887 { "ratelimit enable", OPT::RATELIMIT_ENABLE
},
888 { "ratelimit disable", OPT::RATELIMIT_DISABLE
},
889 { "gc list", OPT::GC_LIST
},
890 { "gc process", OPT::GC_PROCESS
},
891 { "lc list", OPT::LC_LIST
},
892 { "lc get", OPT::LC_GET
},
893 { "lc process", OPT::LC_PROCESS
},
894 { "lc reshard fix", OPT::LC_RESHARD_FIX
},
895 { "orphans find", OPT::ORPHANS_FIND
},
896 { "orphans finish", OPT::ORPHANS_FINISH
},
897 { "orphans list jobs", OPT::ORPHANS_LIST_JOBS
},
898 { "orphans list-jobs", OPT::ORPHANS_LIST_JOBS
},
899 { "zonegroup add", OPT::ZONEGROUP_ADD
},
900 { "zonegroup create", OPT::ZONEGROUP_CREATE
},
901 { "zonegroup default", OPT::ZONEGROUP_DEFAULT
},
902 { "zonegroup delete", OPT::ZONEGROUP_DELETE
},
903 { "zonegroup get", OPT::ZONEGROUP_GET
},
904 { "zonegroup modify", OPT::ZONEGROUP_MODIFY
},
905 { "zonegroup set", OPT::ZONEGROUP_SET
},
906 { "zonegroup list", OPT::ZONEGROUP_LIST
},
907 { "zonegroups list", OPT::ZONEGROUP_LIST
},
908 { "zonegroup remove", OPT::ZONEGROUP_REMOVE
},
909 { "zonegroup remove zone", OPT::ZONEGROUP_REMOVE
},
910 { "zonegroup rename", OPT::ZONEGROUP_RENAME
},
911 { "zonegroup placement add", OPT::ZONEGROUP_PLACEMENT_ADD
},
912 { "zonegroup placement modify", OPT::ZONEGROUP_PLACEMENT_MODIFY
},
913 { "zonegroup placement rm", OPT::ZONEGROUP_PLACEMENT_RM
},
914 { "zonegroup placement list", OPT::ZONEGROUP_PLACEMENT_LIST
},
915 { "zonegroup placement get", OPT::ZONEGROUP_PLACEMENT_GET
},
916 { "zonegroup placement default", OPT::ZONEGROUP_PLACEMENT_DEFAULT
},
917 { "zone create", OPT::ZONE_CREATE
},
918 { "zone delete", OPT::ZONE_DELETE
},
919 { "zone get", OPT::ZONE_GET
},
920 { "zone modify", OPT::ZONE_MODIFY
},
921 { "zone set", OPT::ZONE_SET
},
922 { "zone list", OPT::ZONE_LIST
},
923 { "zones list", OPT::ZONE_LIST
},
924 { "zone rename", OPT::ZONE_RENAME
},
925 { "zone default", OPT::ZONE_DEFAULT
},
926 { "zone placement add", OPT::ZONE_PLACEMENT_ADD
},
927 { "zone placement modify", OPT::ZONE_PLACEMENT_MODIFY
},
928 { "zone placement rm", OPT::ZONE_PLACEMENT_RM
},
929 { "zone placement list", OPT::ZONE_PLACEMENT_LIST
},
930 { "zone placement get", OPT::ZONE_PLACEMENT_GET
},
931 { "caps add", OPT::CAPS_ADD
},
932 { "caps rm", OPT::CAPS_RM
},
933 { "metadata get [*]", OPT::METADATA_GET
},
934 { "metadata put [*]", OPT::METADATA_PUT
},
935 { "metadata rm [*]", OPT::METADATA_RM
},
936 { "metadata list [*]", OPT::METADATA_LIST
},
937 { "metadata sync status", OPT::METADATA_SYNC_STATUS
},
938 { "metadata sync init", OPT::METADATA_SYNC_INIT
},
939 { "metadata sync run", OPT::METADATA_SYNC_RUN
},
940 { "mdlog list", OPT::MDLOG_LIST
},
941 { "mdlog autotrim", OPT::MDLOG_AUTOTRIM
},
942 { "mdlog trim", OPT::MDLOG_TRIM
},
943 { "mdlog fetch", OPT::MDLOG_FETCH
},
944 { "mdlog status", OPT::MDLOG_STATUS
},
945 { "sync error list", OPT::SYNC_ERROR_LIST
},
946 { "sync error trim", OPT::SYNC_ERROR_TRIM
},
947 { "sync policy get", OPT::SYNC_POLICY_GET
},
948 { "sync group create", OPT::SYNC_GROUP_CREATE
},
949 { "sync group modify", OPT::SYNC_GROUP_MODIFY
},
950 { "sync group get", OPT::SYNC_GROUP_GET
},
951 { "sync group remove", OPT::SYNC_GROUP_REMOVE
},
952 { "sync group flow create", OPT::SYNC_GROUP_FLOW_CREATE
},
953 { "sync group flow remove", OPT::SYNC_GROUP_FLOW_REMOVE
},
954 { "sync group pipe create", OPT::SYNC_GROUP_PIPE_CREATE
},
955 { "sync group pipe modify", OPT::SYNC_GROUP_PIPE_MODIFY
},
956 { "sync group pipe remove", OPT::SYNC_GROUP_PIPE_REMOVE
},
957 { "bilog list", OPT::BILOG_LIST
},
958 { "bilog trim", OPT::BILOG_TRIM
},
959 { "bilog status", OPT::BILOG_STATUS
},
960 { "bilog autotrim", OPT::BILOG_AUTOTRIM
},
961 { "data sync status", OPT::DATA_SYNC_STATUS
},
962 { "data sync init", OPT::DATA_SYNC_INIT
},
963 { "data sync run", OPT::DATA_SYNC_RUN
},
964 { "datalog list", OPT::DATALOG_LIST
},
965 { "datalog status", OPT::DATALOG_STATUS
},
966 { "datalog autotrim", OPT::DATALOG_AUTOTRIM
},
967 { "datalog trim", OPT::DATALOG_TRIM
},
968 { "datalog type", OPT::DATALOG_TYPE
},
969 { "datalog prune", OPT::DATALOG_PRUNE
},
970 { "realm create", OPT::REALM_CREATE
},
971 { "realm rm", OPT::REALM_DELETE
},
972 { "realm get", OPT::REALM_GET
},
973 { "realm get default", OPT::REALM_GET_DEFAULT
},
974 { "realm get-default", OPT::REALM_GET_DEFAULT
},
975 { "realm list", OPT::REALM_LIST
},
976 { "realm list periods", OPT::REALM_LIST_PERIODS
},
977 { "realm list-periods", OPT::REALM_LIST_PERIODS
},
978 { "realm rename", OPT::REALM_RENAME
},
979 { "realm set", OPT::REALM_SET
},
980 { "realm default", OPT::REALM_DEFAULT
},
981 { "realm pull", OPT::REALM_PULL
},
982 { "period delete", OPT::PERIOD_DELETE
},
983 { "period get", OPT::PERIOD_GET
},
984 { "period get-current", OPT::PERIOD_GET_CURRENT
},
985 { "period get current", OPT::PERIOD_GET_CURRENT
},
986 { "period pull", OPT::PERIOD_PULL
},
987 { "period push", OPT::PERIOD_PUSH
},
988 { "period list", OPT::PERIOD_LIST
},
989 { "period update", OPT::PERIOD_UPDATE
},
990 { "period commit", OPT::PERIOD_COMMIT
},
991 { "global quota get", OPT::GLOBAL_QUOTA_GET
},
992 { "global quota set", OPT::GLOBAL_QUOTA_SET
},
993 { "global quota enable", OPT::GLOBAL_QUOTA_ENABLE
},
994 { "global quota disable", OPT::GLOBAL_QUOTA_DISABLE
},
995 { "global ratelimit get", OPT::GLOBAL_RATELIMIT_GET
},
996 { "global ratelimit set", OPT::GLOBAL_RATELIMIT_SET
},
997 { "global ratelimit enable", OPT::GLOBAL_RATELIMIT_ENABLE
},
998 { "global ratelimit disable", OPT::GLOBAL_RATELIMIT_DISABLE
},
999 { "sync info", OPT::SYNC_INFO
},
1000 { "sync status", OPT::SYNC_STATUS
},
1001 { "role create", OPT::ROLE_CREATE
},
1002 { "role delete", OPT::ROLE_DELETE
},
1003 { "role get", OPT::ROLE_GET
},
1004 { "role modify", OPT::ROLE_MODIFY
},
1005 { "role list", OPT::ROLE_LIST
},
1006 { "role policy put", OPT::ROLE_POLICY_PUT
},
1007 { "role-policy put", OPT::ROLE_POLICY_PUT
},
1008 { "role policy list", OPT::ROLE_POLICY_LIST
},
1009 { "role-policy list", OPT::ROLE_POLICY_LIST
},
1010 { "role policy get", OPT::ROLE_POLICY_GET
},
1011 { "role-policy get", OPT::ROLE_POLICY_GET
},
1012 { "role policy delete", OPT::ROLE_POLICY_DELETE
},
1013 { "role-policy delete", OPT::ROLE_POLICY_DELETE
},
1014 { "reshard bucket", OPT::BUCKET_RESHARD
},
1015 { "reshard add", OPT::RESHARD_ADD
},
1016 { "reshard list", OPT::RESHARD_LIST
},
1017 { "reshard status", OPT::RESHARD_STATUS
},
1018 { "reshard process", OPT::RESHARD_PROCESS
},
1019 { "reshard cancel", OPT::RESHARD_CANCEL
},
1020 { "mfa create", OPT::MFA_CREATE
},
1021 { "mfa remove", OPT::MFA_REMOVE
},
1022 { "mfa get", OPT::MFA_GET
},
1023 { "mfa list", OPT::MFA_LIST
},
1024 { "mfa check", OPT::MFA_CHECK
},
1025 { "mfa resync", OPT::MFA_RESYNC
},
1026 { "reshard stale-instances list", OPT::RESHARD_STALE_INSTANCES_LIST
},
1027 { "reshard stale list", OPT::RESHARD_STALE_INSTANCES_LIST
},
1028 { "reshard stale-instances delete", OPT::RESHARD_STALE_INSTANCES_DELETE
},
1029 { "reshard stale delete", OPT::RESHARD_STALE_INSTANCES_DELETE
},
1030 { "topic list", OPT::PUBSUB_TOPICS_LIST
},
1031 { "topic get", OPT::PUBSUB_TOPIC_GET
},
1032 { "topic rm", OPT::PUBSUB_TOPIC_RM
},
1033 { "subscription get", OPT::PUBSUB_SUB_GET
},
1034 { "subscription rm", OPT::PUBSUB_SUB_RM
},
1035 { "subscription pull", OPT::PUBSUB_SUB_PULL
},
1036 { "subscription ack", OPT::PUBSUB_EVENT_RM
},
1037 { "script put", OPT::SCRIPT_PUT
},
1038 { "script get", OPT::SCRIPT_GET
},
1039 { "script rm", OPT::SCRIPT_RM
},
1040 { "script-package add", OPT::SCRIPT_PACKAGE_ADD
},
1041 { "script-package rm", OPT::SCRIPT_PACKAGE_RM
},
1042 { "script-package list", OPT::SCRIPT_PACKAGE_LIST
},
// Alias table for the SimpleCmd command parser: maps alternate command
// words to their canonical spelling (e.g. "delete" is accepted for "del").
// NOTE(review): this chunk is truncated -- further alias entries and the
// closing brace of the initializer are not visible here.
1045 static SimpleCmd::Aliases cmd_aliases
= {
1046 { "delete", "del" },
1053 BIIndexType
get_bi_index_type(const string
& type_str
) {
1054 if (type_str
== "plain")
1055 return BIIndexType::Plain
;
1056 if (type_str
== "instance")
1057 return BIIndexType::Instance
;
1058 if (type_str
== "olh")
1059 return BIIndexType::OLH
;
1061 return BIIndexType::Invalid
;
1064 log_type
get_log_type(const string
& type_str
) {
1065 if (strcasecmp(type_str
.c_str(), "fifo") == 0)
1066 return log_type::fifo
;
1067 if (strcasecmp(type_str
.c_str(), "omap") == 0)
1068 return log_type::omap
;
1070 return static_cast<log_type
>(0xff);
// Decode one raw bucket-index entry from 'bl' and print it as JSON under
// the key "entry". Plain and Instance entries decode as
// rgw_bucket_dir_entry; OLH entries decode as rgw_bucket_olh_entry.
// NOTE(review): this region is line-mangled; the switch's braces, break
// statements and default case are not visible in this chunk.
1073 void dump_bi_entry(bufferlist
& bl
, BIIndexType index_type
, Formatter
*formatter
)
1075 auto iter
= bl
.cbegin();
1076 switch (index_type
) {
1077 case BIIndexType::Plain
:
1078 case BIIndexType::Instance
:
1080 rgw_bucket_dir_entry entry
;
1081 decode(entry
, iter
);
1082 encode_json("entry", entry
, formatter
);
1085 case BIIndexType::OLH
:
1087 rgw_bucket_olh_entry entry
;
1088 decode(entry
, iter
);
1089 encode_json("entry", entry
, formatter
);
1098 static void show_user_info(RGWUserInfo
& info
, Formatter
*formatter
)
1100 encode_json("user_info", info
, formatter
);
1101 formatter
->flush(cout
);
1105 static void show_perm_policy(string perm_policy
, Formatter
* formatter
)
1107 formatter
->open_object_section("role");
1108 formatter
->dump_string("Permission policy", perm_policy
);
1109 formatter
->close_section();
1110 formatter
->flush(cout
);
1113 static void show_policy_names(std::vector
<string
> policy_names
, Formatter
* formatter
)
1115 formatter
->open_array_section("PolicyNames");
1116 for (const auto& it
: policy_names
) {
1117 formatter
->dump_string("policyname", it
);
1119 formatter
->close_section();
1120 formatter
->flush(cout
);
1123 static void show_role_info(rgw::sal::RGWRole
* role
, Formatter
* formatter
)
1125 formatter
->open_object_section("role");
1126 role
->dump(formatter
);
1127 formatter
->close_section();
1128 formatter
->flush(cout
);
1131 static void show_roles_info(vector
<std::unique_ptr
<rgw::sal::RGWRole
>>& roles
, Formatter
* formatter
)
1133 formatter
->open_array_section("Roles");
1134 for (const auto& it
: roles
) {
1135 formatter
->open_object_section("role");
1136 it
->dump(formatter
);
1137 formatter
->close_section();
1139 formatter
->close_section();
1140 formatter
->flush(cout
);
1143 static void show_reshard_status(
1144 const list
<cls_rgw_bucket_instance_entry
>& status
, Formatter
*formatter
)
1146 formatter
->open_array_section("status");
1147 for (const auto& entry
: status
) {
1148 formatter
->open_object_section("entry");
1149 formatter
->dump_string("reshard_status", to_string(entry
.reshard_status
));
1150 formatter
->dump_string("new_bucket_instance_id",
1151 entry
.new_bucket_instance_id
);
1152 formatter
->dump_int("num_shards", entry
.num_shards
);
1153 formatter
->close_section();
1155 formatter
->close_section();
1156 formatter
->flush(cout
);
1159 class StoreDestructor
{
1160 rgw::sal::Store
* store
;
1162 explicit StoreDestructor(rgw::sal::RadosStore
* _s
) : store(_s
) {}
1163 ~StoreDestructor() {
1164 StoreManager::close_storage(store
);
1165 rgw_http_client_cleanup();
1169 static int init_bucket(rgw::sal::User
* user
, const rgw_bucket
& b
,
1170 std::unique_ptr
<rgw::sal::Bucket
>* bucket
)
1172 return store
->get_bucket(dpp(), nullptr, b
, bucket
, null_yield
);
1175 static int init_bucket(rgw::sal::User
* user
,
1176 const string
& tenant_name
,
1177 const string
& bucket_name
,
1178 const string
& bucket_id
,
1179 std::unique_ptr
<rgw::sal::Bucket
>* bucket
)
1181 rgw_bucket b
{tenant_name
, bucket_name
, bucket_id
};
1182 return init_bucket(user
, b
, bucket
);
// Read the contents of 'infile' into bufferlist 'bl'. When a file name is
// given the file is opened with open(O_RDONLY) and consumed in
// READ_CHUNK-sized safe_read() calls; error messages go to cerr.
// NOTE(review): this region is line-mangled and several statements
// (stdin fallback, error returns, loop structure, close of the fd) are
// not visible in this chunk -- comments describe only the visible code.
1185 static int read_input(const string
& infile
, bufferlist
& bl
)
1188 if (infile
.size()) {
1189 fd
= open(infile
.c_str(), O_RDONLY
);
1192 cerr
<< "error reading input file " << infile
<< std::endl
;
1197 #define READ_CHUNK 8196
1202 char buf
[READ_CHUNK
];
1204 r
= safe_read(fd
, buf
, READ_CHUNK
);
1207 cerr
<< "error while reading input" << std::endl
;
1215 if (infile
.size()) {
// Template helper: read a JSON document from 'infile' via read_input(),
// parse it, and decode it into 't' with decode_json_obj(). Parse and
// decode failures are reported on stdout/stderr.
// NOTE(review): the template header, the JSONParser declaration, the try
// block opening and the return statements are not visible in this
// line-mangled chunk.
1222 static int read_decode_json(const string
& infile
, T
& t
)
1225 int ret
= read_input(infile
, bl
);
1227 cerr
<< "ERROR: failed to read input: " << cpp_strerror(-ret
) << std::endl
;
1231 if (!p
.parse(bl
.c_str(), bl
.length())) {
1232 cout
<< "failed to parse JSON" << std::endl
;
1237 decode_json_obj(t
, &p
);
1238 } catch (const JSONDecoder::err
& e
) {
1239 cout
<< "failed to decode JSON input: " << e
.what() << std::endl
;
// Two-parameter variant of read_decode_json: same read/parse flow, but
// decoding is done through the target's own member t.decode_json(&p, k),
// passing the extra context object 'k' (used e.g. for OTP/MFA decode).
// NOTE(review): the JSONParser declaration, try-block opening and return
// statements are not visible in this line-mangled chunk.
1245 template <class T
, class K
>
1246 static int read_decode_json(const string
& infile
, T
& t
, K
*k
)
1249 int ret
= read_input(infile
, bl
);
1251 cerr
<< "ERROR: failed to read input: " << cpp_strerror(-ret
) << std::endl
;
1255 if (!p
.parse(bl
.c_str(), bl
.length())) {
1256 cout
<< "failed to parse JSON" << std::endl
;
1261 t
.decode_json(&p
, k
);
1262 } catch (const JSONDecoder::err
& e
) {
1263 cout
<< "failed to decode JSON input: " << e
.what() << std::endl
;
// Template helper: decode a value of type T from 'bl' and, on success,
// print it as JSON under 'field_name'. Decode errors of type
// buffer::error are caught.
// NOTE(review): the template header, the declaration of 't', the try
// opening and the return statements are not visible in this chunk.
1270 static bool decode_dump(const char *field_name
, bufferlist
& bl
, Formatter
*f
)
1274 auto iter
= bl
.cbegin();
1278 } catch (buffer::error
& err
) {
1282 encode_json(field_name
, t
, f
);
// Print the raw contents of 'bl' as a string field named 'field_name'.
// Passing val.c_str() (rather than the std::string) truncates at the
// first NUL, hiding encoded null-termination bytes -- see the inline
// comment carried over from the original.
// NOTE(review): the function's return statement is not visible here.
1287 static bool dump_string(const char *field_name
, bufferlist
& bl
, Formatter
*f
)
1289 string val
= bl
.to_str();
1290 f
->dump_string(field_name
, val
.c_str() /* hide encoded null termination chars */);
// Apply a ratelimit admin command to 'ratelimit'. ENABLE/DISABLE toggle
// the 'enabled' flag; SET copies each of the four limits that was both
// supplied (have_* flag) and non-negative. Returns whether any value was
// actually configured, so the caller can reject a SET with no arguments.
// NOTE(review): the switch statement's opening, braces, breaks and
// default case are not visible in this line-mangled chunk.
1295 bool set_ratelimit_info(RGWRateLimitInfo
& ratelimit
, OPT opt_cmd
, int64_t max_read_ops
, int64_t max_write_ops
,
1296 int64_t max_read_bytes
, int64_t max_write_bytes
,
1297 bool have_max_read_ops
, bool have_max_write_ops
,
1298 bool have_max_read_bytes
, bool have_max_write_bytes
)
1300 bool ratelimit_configured
= true;
1302 case OPT::RATELIMIT_ENABLE
:
1303 case OPT::GLOBAL_RATELIMIT_ENABLE
:
1304 ratelimit
.enabled
= true;
1307 case OPT::RATELIMIT_SET
:
1308 case OPT::GLOBAL_RATELIMIT_SET
:
1309 ratelimit_configured
= false;
1310 if (have_max_read_ops
) {
1311 if (max_read_ops
>= 0) {
1312 ratelimit
.max_read_ops
= max_read_ops
;
1313 ratelimit_configured
= true;
1316 if (have_max_write_ops
) {
1317 if (max_write_ops
>= 0) {
1318 ratelimit
.max_write_ops
= max_write_ops
;
1319 ratelimit_configured
= true;
1322 if (have_max_read_bytes
) {
1323 if (max_read_bytes
>= 0) {
1324 ratelimit
.max_read_bytes
= max_read_bytes
;
1325 ratelimit_configured
= true;
1328 if (have_max_write_bytes
) {
1329 if (max_write_bytes
>= 0) {
1330 ratelimit
.max_write_bytes
= max_write_bytes
;
1331 ratelimit_configured
= true;
1335 case OPT::RATELIMIT_DISABLE
:
1336 case OPT::GLOBAL_RATELIMIT_DISABLE
:
1337 ratelimit
.enabled
= false;
1342 return ratelimit_configured
;
// Apply a quota admin command to 'quota'. ENABLE sets the flag and then
// deliberately falls through into SET (see original comment); SET maps a
// negative max_objects/max_size to -1 (unlimited) and rounds max_size up
// to whole KiB via rgw_rounded_kb(); DISABLE clears the flag.
// NOTE(review): the switch statement's opening, braces, breaks and else
// branches are not visible in this line-mangled chunk.
1345 void set_quota_info(RGWQuotaInfo
& quota
, OPT opt_cmd
, int64_t max_size
, int64_t max_objects
,
1346 bool have_max_size
, bool have_max_objects
)
1349 case OPT::QUOTA_ENABLE
:
1350 case OPT::GLOBAL_QUOTA_ENABLE
:
1351 quota
.enabled
= true;
1353 // falling through on purpose
1355 case OPT::QUOTA_SET
:
1356 case OPT::GLOBAL_QUOTA_SET
:
1357 if (have_max_objects
) {
1358 if (max_objects
< 0) {
1359 quota
.max_objects
= -1;
1361 quota
.max_objects
= max_objects
;
1364 if (have_max_size
) {
1366 quota
.max_size
= -1;
1368 quota
.max_size
= rgw_rounded_kb(max_size
) * 1024;
1372 case OPT::QUOTA_DISABLE
:
1373 case OPT::GLOBAL_QUOTA_DISABLE
:
1374 quota
.enabled
= false;
// Apply a quota command to a single bucket: load the bucket by
// tenant/name, update its info.quota via set_quota_info(), and persist
// it with put_info(). Errors are reported on cerr.
// NOTE(review): the error-check branches and return statements between
// the visible lines are not present in this line-mangled chunk.
1381 int set_bucket_quota(rgw::sal::Store
* store
, OPT opt_cmd
,
1382 const string
& tenant_name
, const string
& bucket_name
,
1383 int64_t max_size
, int64_t max_objects
,
1384 bool have_max_size
, bool have_max_objects
)
1386 std::unique_ptr
<rgw::sal::Bucket
> bucket
;
1387 int r
= store
->get_bucket(dpp(), nullptr, tenant_name
, bucket_name
, &bucket
, null_yield
);
1389 cerr
<< "could not get bucket info for bucket=" << bucket_name
<< ": " << cpp_strerror(-r
) << std::endl
;
1393 set_quota_info(bucket
->get_info().quota
, opt_cmd
, max_size
, max_objects
, have_max_size
, have_max_objects
);
1395 r
= bucket
->put_info(dpp(), false, real_time());
1397 cerr
<< "ERROR: failed writing bucket instance info: " << cpp_strerror(-r
) << std::endl
;
// Apply a ratelimit command to a single bucket: load the bucket, decode
// any existing RGW_ATTR_RATELIMIT attribute into ratelimit_info, update
// it via set_ratelimit_info() (rejecting a SET with no values), then
// re-encode and persist it with merge_and_store_attrs().
// NOTE(review): error-check branches, returns and some try-block
// structure are not visible in this line-mangled chunk.
1403 int set_bucket_ratelimit(rgw::sal::Store
* store
, OPT opt_cmd
,
1404 const string
& tenant_name
, const string
& bucket_name
,
1405 int64_t max_read_ops
, int64_t max_write_ops
,
1406 int64_t max_read_bytes
, int64_t max_write_bytes
,
1407 bool have_max_read_ops
, bool have_max_write_ops
,
1408 bool have_max_read_bytes
, bool have_max_write_bytes
)
1410 std::unique_ptr
<rgw::sal::Bucket
> bucket
;
1411 int r
= store
->get_bucket(dpp(), nullptr, tenant_name
, bucket_name
, &bucket
, null_yield
);
1413 cerr
<< "could not get bucket info for bucket=" << bucket_name
<< ": " << cpp_strerror(-r
) << std::endl
;
1416 RGWRateLimitInfo ratelimit_info
;
1417 auto iter
= bucket
->get_attrs().find(RGW_ATTR_RATELIMIT
);
1418 if(iter
!= bucket
->get_attrs().end()) {
1420 bufferlist
& bl
= iter
->second
;
1421 auto biter
= bl
.cbegin();
1422 decode(ratelimit_info
, biter
);
1423 } catch (buffer::error
& err
) {
1424 ldpp_dout(dpp(), 0) << "ERROR: failed to decode rate limit" << dendl
;
1428 bool ratelimit_configured
= set_ratelimit_info(ratelimit_info
, opt_cmd
, max_read_ops
, max_write_ops
,
1429 max_read_bytes
, max_write_bytes
,
1430 have_max_read_ops
, have_max_write_ops
,
1431 have_max_read_bytes
, have_max_write_bytes
);
1432 if (!ratelimit_configured
) {
1433 ldpp_dout(dpp(), 0) << "ERROR: no rate limit values have been specified" << dendl
;
1437 ratelimit_info
.encode(bl
);
1438 rgw::sal::Attrs attr
;
1439 attr
[RGW_ATTR_RATELIMIT
] = bl
;
1440 r
= bucket
->merge_and_store_attrs(dpp(), attr
, null_yield
);
1442 cerr
<< "ERROR: failed writing bucket instance info: " << cpp_strerror(-r
) << std::endl
;
// Apply a ratelimit command to a user: load the user, decode any
// existing RGW_ATTR_RATELIMIT attribute, update it via
// set_ratelimit_info() (rejecting a SET with no values), then re-encode
// and persist it with merge_and_store_attrs(). Mirrors
// set_bucket_ratelimit() above but operates on the user's attrs.
// NOTE(review): error-check branches and returns are not visible in
// this line-mangled chunk.
1448 int set_user_ratelimit(OPT opt_cmd
, std::unique_ptr
<rgw::sal::User
>& user
,
1449 int64_t max_read_ops
, int64_t max_write_ops
,
1450 int64_t max_read_bytes
, int64_t max_write_bytes
,
1451 bool have_max_read_ops
, bool have_max_write_ops
,
1452 bool have_max_read_bytes
, bool have_max_write_bytes
)
1454 RGWRateLimitInfo ratelimit_info
;
1455 user
->load_user(dpp(), null_yield
);
1456 auto iter
= user
->get_attrs().find(RGW_ATTR_RATELIMIT
);
1457 if(iter
!= user
->get_attrs().end()) {
1459 bufferlist
& bl
= iter
->second
;
1460 auto biter
= bl
.cbegin();
1461 decode(ratelimit_info
, biter
);
1462 } catch (buffer::error
& err
) {
1463 ldpp_dout(dpp(), 0) << "ERROR: failed to decode rate limit" << dendl
;
1467 bool ratelimit_configured
= set_ratelimit_info(ratelimit_info
, opt_cmd
, max_read_ops
, max_write_ops
,
1468 max_read_bytes
, max_write_bytes
,
1469 have_max_read_ops
, have_max_write_ops
,
1470 have_max_read_bytes
, have_max_write_bytes
);
1471 if (!ratelimit_configured
) {
1472 ldpp_dout(dpp(), 0) << "ERROR: no rate limit values have been specified" << dendl
;
1476 ratelimit_info
.encode(bl
);
1477 rgw::sal::Attrs attr
;
1478 attr
[RGW_ATTR_RATELIMIT
] = bl
;
1479 int r
= user
->merge_and_store_attrs(dpp(), attr
, null_yield
);
1481 cerr
<< "ERROR: failed writing user instance info: " << cpp_strerror(-r
) << std::endl
;
// Print a user's ratelimit settings: load the user, decode the
// RGW_ATTR_RATELIMIT attribute if present (otherwise a default
// RGWRateLimitInfo is shown), and emit it as JSON under
// "user_ratelimit" on stdout.
// NOTE(review): error branches and the return statement are not visible
// in this line-mangled chunk.
1487 int show_user_ratelimit(std::unique_ptr
<rgw::sal::User
>& user
, Formatter
*formatter
)
1489 RGWRateLimitInfo ratelimit_info
;
1490 user
->load_user(dpp(), null_yield
);
1491 auto iter
= user
->get_attrs().find(RGW_ATTR_RATELIMIT
);
1492 if(iter
!= user
->get_attrs().end()) {
1494 bufferlist
& bl
= iter
->second
;
1495 auto biter
= bl
.cbegin();
1496 decode(ratelimit_info
, biter
);
1497 } catch (buffer::error
& err
) {
1498 ldpp_dout(dpp(), 0) << "ERROR: failed to decode rate limit" << dendl
;
1502 formatter
->open_object_section("user_ratelimit");
1503 encode_json("user_ratelimit", ratelimit_info
, formatter
);
1504 formatter
->close_section();
1505 formatter
->flush(cout
);
// Print a bucket's ratelimit settings: load the bucket by tenant/name,
// decode the RGW_ATTR_RATELIMIT attribute if present, and emit it as
// JSON under "bucket_ratelimit" on stdout.
// NOTE(review): error branches and the return statement are not visible
// in this line-mangled chunk.
1510 int show_bucket_ratelimit(rgw::sal::Store
* store
, const string
& tenant_name
,
1511 const string
& bucket_name
, Formatter
*formatter
)
1513 std::unique_ptr
<rgw::sal::Bucket
> bucket
;
1514 int r
= store
->get_bucket(dpp(), nullptr, tenant_name
, bucket_name
, &bucket
, null_yield
);
1516 cerr
<< "could not get bucket info for bucket=" << bucket_name
<< ": " << cpp_strerror(-r
) << std::endl
;
1519 RGWRateLimitInfo ratelimit_info
;
1520 auto iter
= bucket
->get_attrs().find(RGW_ATTR_RATELIMIT
);
1521 if (iter
!= bucket
->get_attrs().end()) {
1523 bufferlist
& bl
= iter
->second
;
1524 auto biter
= bl
.cbegin();
1525 decode(ratelimit_info
, biter
);
1526 } catch (buffer::error
& err
) {
1527 ldpp_dout(dpp(), 0) << "ERROR: failed to decode rate limit" << dendl
;
1531 formatter
->open_object_section("bucket_ratelimit");
1532 encode_json("bucket_ratelimit", ratelimit_info
, formatter
);
1533 formatter
->close_section();
1534 formatter
->flush(cout
);
// Apply a quota command to a user's per-bucket quota: update
// user_info.bucket_quota via set_quota_info(), record it in op_state,
// and persist through RGWUser::modify(). Errors are reported on cerr.
// NOTE(review): the declaration of 'err' and the error-check/return
// lines are not visible in this line-mangled chunk.
1538 int set_user_bucket_quota(OPT opt_cmd
, RGWUser
& user
, RGWUserAdminOpState
& op_state
, int64_t max_size
, int64_t max_objects
,
1539 bool have_max_size
, bool have_max_objects
)
1541 RGWUserInfo
& user_info
= op_state
.get_user_info();
1543 set_quota_info(user_info
.bucket_quota
, opt_cmd
, max_size
, max_objects
, have_max_size
, have_max_objects
);
1545 op_state
.set_bucket_quota(user_info
.bucket_quota
);
1548 int r
= user
.modify(dpp(), op_state
, null_yield
, &err
);
1550 cerr
<< "ERROR: failed updating user info: " << cpp_strerror(-r
) << ": " << err
<< std::endl
;
// Apply a quota command to a user's overall quota: update
// user_info.user_quota via set_quota_info(), record it in op_state, and
// persist through RGWUser::modify(). Parallel to set_user_bucket_quota()
// but targets user_quota instead of bucket_quota.
// NOTE(review): the declaration of 'err' and the error-check/return
// lines are not visible in this line-mangled chunk.
1556 int set_user_quota(OPT opt_cmd
, RGWUser
& user
, RGWUserAdminOpState
& op_state
, int64_t max_size
, int64_t max_objects
,
1557 bool have_max_size
, bool have_max_objects
)
1559 RGWUserInfo
& user_info
= op_state
.get_user_info();
1561 set_quota_info(user_info
.user_quota
, opt_cmd
, max_size
, max_objects
, have_max_size
, have_max_objects
);
1563 op_state
.set_user_quota(user_info
.user_quota
);
1566 int r
= user
.modify(dpp(), op_state
, null_yield
, &err
);
1568 cerr
<< "ERROR: failed updating user info: " << cpp_strerror(-r
) << ": " << err
<< std::endl
;
// Decide whether an object needs rewriting for "object rewrite":
// fetch the object's attrs; if there is no RGW_ATTR_MANIFEST the whole
// object size is compared against min_stripe_size; otherwise the decoded
// manifest's explicit parts are scanned and *need_rewrite is set when
// any part's size reaches min_stripe_size.
// NOTE(review): error-check branches, early returns, try-block opening
// and closing braces are not visible in this line-mangled chunk.
1574 int check_min_obj_stripe_size(rgw::sal::Store
* store
, rgw::sal::Object
* obj
, uint64_t min_stripe_size
, bool *need_rewrite
)
1576 RGWObjectCtx
obj_ctx(store
);
1577 int ret
= obj
->get_obj_attrs(&obj_ctx
, null_yield
, dpp());
1579 ldpp_dout(dpp(), -1) << "ERROR: failed to stat object, returned error: " << cpp_strerror(-ret
) << dendl
;
1583 map
<string
, bufferlist
>::iterator iter
;
1584 iter
= obj
->get_attrs().find(RGW_ATTR_MANIFEST
);
1585 if (iter
== obj
->get_attrs().end()) {
1586 *need_rewrite
= (obj
->get_obj_size() >= min_stripe_size
);
1590 RGWObjManifest manifest
;
1593 bufferlist
& bl
= iter
->second
;
1594 auto biter
= bl
.cbegin();
1595 decode(manifest
, biter
);
1596 } catch (buffer::error
& err
) {
1597 ldpp_dout(dpp(), 0) << "ERROR: failed to decode manifest" << dendl
;
1601 map
<uint64_t, RGWObjManifestPart
>& objs
= manifest
.get_explicit_objs();
1602 map
<uint64_t, RGWObjManifestPart
>::iterator oiter
;
1603 for (oiter
= objs
.begin(); oiter
!= objs
.end(); ++oiter
) {
1604 RGWObjManifestPart
& part
= oiter
->second
;
1606 if (part
.size
>= min_stripe_size
) {
1607 *need_rewrite
= true;
1611 *need_rewrite
= false;
// Check (and optionally repair) the head-object locator of an object
// whose name begins with '_': dump the key/oid/locator, probe the head
// with a ReadOp -- ENOENT means the locator is wrong -- and when 'fix'
// is set call fix_head_obj_locator() (optionally removing the bad copy
// when 'remove_bad' is set). Results are emitted through Formatter 'f'.
// NOTE(review): several statements (string declarations, section closes,
// error returns) are not visible in this line-mangled chunk.
1617 int check_obj_locator_underscore(rgw::sal::Object
* obj
, bool fix
, bool remove_bad
, Formatter
*f
) {
1618 f
->open_object_section("object");
1619 f
->open_object_section("key");
1620 f
->dump_string("type", "head");
1621 f
->dump_string("name", obj
->get_name());
1622 f
->dump_string("instance", obj
->get_instance());
1628 get_obj_bucket_and_oid_loc(obj
->get_obj(), oid
, locator
);
1630 f
->dump_string("oid", oid
);
1631 f
->dump_string("locator", locator
);
1633 RGWObjectCtx
obj_ctx(store
);
1634 std::unique_ptr
<rgw::sal::Object::ReadOp
> read_op
= obj
->get_read_op(&obj_ctx
);
1636 int ret
= read_op
->prepare(null_yield
, dpp());
1637 bool needs_fixing
= (ret
== -ENOENT
);
1639 f
->dump_bool("needs_fixing", needs_fixing
);
1641 string status
= (needs_fixing
? "needs_fixing" : "ok");
1643 if ((needs_fixing
|| remove_bad
) && fix
) {
1644 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->fix_head_obj_locator(dpp(), obj
->get_bucket()->get_info(), needs_fixing
, remove_bad
, obj
->get_key());
1646 cerr
<< "ERROR: fix_head_object_locator() returned ret=" << ret
<< std::endl
;
1653 f
->dump_string("status", status
);
// Check (and optionally repair) the tail-object locators of a key via
// RGWRados::fix_tail_obj_locator(); the status string reflects whether
// fixing is still needed (needs_fixing && !fix). Results are emitted
// through Formatter 'f' as a "tail"-typed object entry.
// NOTE(review): the declarations of 'needs_fixing' and 'status', section
// closes and return statements are not visible in this chunk.
1660 int check_obj_tail_locator_underscore(RGWBucketInfo
& bucket_info
, rgw_obj_key
& key
, bool fix
, Formatter
*f
) {
1661 f
->open_object_section("object");
1662 f
->open_object_section("key");
1663 f
->dump_string("type", "tail");
1664 f
->dump_string("name", key
.name
);
1665 f
->dump_string("instance", key
.instance
);
1671 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->fix_tail_obj_locator(dpp(), bucket_info
, key
, fix
, &needs_fixing
, null_yield
);
1673 cerr
<< "ERROR: fix_tail_object_locator_underscore() returned ret=" << ret
<< std::endl
;
1676 status
= (needs_fixing
&& !fix
? "needs_fixing" : "ok");
1679 f
->dump_bool("needs_fixing", needs_fixing
);
1680 f
->dump_string("status", status
);
// Driver for "bucket check --check-head-obj-locator": list a bucket's
// objects (versions included, namespace enforced) in pages of up to
// max_entries, and for every object whose name starts with '_' run the
// head-locator check and then the tail-locator check, streaming results
// into the "check_objects" array of Formatter 'f'. remove_bad requires
// fix.
// NOTE(review): the declarations of bucket_id/prefix/delim/marker/count,
// the do-loop opening, error returns and section closes are not visible
// in this line-mangled chunk.
1687 int do_check_object_locator(const string
& tenant_name
, const string
& bucket_name
,
1688 bool fix
, bool remove_bad
, Formatter
*f
)
1690 if (remove_bad
&& !fix
) {
1691 cerr
<< "ERROR: can't have remove_bad specified without fix" << std::endl
;
1695 std::unique_ptr
<rgw::sal::Bucket
> bucket
;
1698 f
->open_object_section("bucket");
1699 f
->dump_string("bucket", bucket_name
);
1700 int ret
= init_bucket(nullptr, tenant_name
, bucket_name
, bucket_id
, &bucket
);
1702 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
1707 int max_entries
= 1000;
1712 vector
<rgw_bucket_dir_entry
> result
;
1715 rgw::sal::Bucket::ListParams params
;
1716 rgw::sal::Bucket::ListResults results
;
1718 params
.prefix
= prefix
;
1719 params
.delim
= delim
;
1720 params
.marker
= rgw_obj_key(marker
);
1722 params
.enforce_ns
= true;
1723 params
.list_versions
= true;
1725 f
->open_array_section("check_objects");
1727 ret
= bucket
->list(dpp(), params
, max_entries
- count
, results
, null_yield
);
1729 cerr
<< "ERROR: store->list_objects(): " << cpp_strerror(-ret
) << std::endl
;
1733 count
+= results
.objs
.size();
1735 for (vector
<rgw_bucket_dir_entry
>::iterator iter
= results
.objs
.begin(); iter
!= results
.objs
.end(); ++iter
) {
1736 std::unique_ptr
<rgw::sal::Object
> obj
= bucket
->get_object(iter
->key
);
1738 if (obj
->get_name()[0] == '_') {
1739 ret
= check_obj_locator_underscore(obj
.get(), fix
, remove_bad
, f
);
1742 ret
= check_obj_tail_locator_underscore(bucket
->get_info(), obj
->get_key(), fix
, f
);
1744 cerr
<< "ERROR: check_obj_tail_locator_underscore(): " << cpp_strerror(-ret
) << std::endl
;
1751 } while (results
.is_truncated
&& count
< max_entries
);
/// Search one zonegroup for the id 'remote': if it matches the zonegroup
/// id itself, build a connection to the zonegroup's endpoints; otherwise
/// scan its zones and connect to the matching zone's endpoints. The
/// zonegroup's api_name is used as the region in both cases.
/// NOTE(review): the else branch structure, break and return of 'conn'
/// are not visible in this line-mangled chunk.
1761 static boost::optional
<RGWRESTConn
> get_remote_conn(rgw::sal::RadosStore
* store
,
1762 const RGWZoneGroup
& zonegroup
,
1763 const std::string
& remote
)
1765 boost::optional
<RGWRESTConn
> conn
;
1766 if (remote
== zonegroup
.get_id()) {
1767 conn
.emplace(store
->ctx(), store
->svc()->zone
, remote
, zonegroup
.endpoints
, zonegroup
.api_name
);
1769 for (const auto& z
: zonegroup
.zones
) {
1770 const auto& zone
= z
.second
;
1771 if (remote
== zone
.id
) {
1772 conn
.emplace(store
->ctx(), store
->svc()->zone
, remote
, zone
.endpoints
, zonegroup
.api_name
);
/// Search every zonegroup in the period map for the id 'remote',
/// delegating to the single-zonegroup get_remote_conn() overload; the
/// first zonegroup that yields a connection wins.
/// NOTE(review): the loop's early-exit and the return of 'conn' are not
/// visible in this line-mangled chunk.
1781 static boost::optional
<RGWRESTConn
> get_remote_conn(rgw::sal::RadosStore
* store
,
1782 const RGWPeriodMap
& period_map
,
1783 const std::string
& remote
)
1785 boost::optional
<RGWRESTConn
> conn
;
1786 for (const auto& zg
: period_map
.zonegroups
) {
1787 conn
= get_remote_conn(store
, zg
.second
, remote
);
// Cap (128 KiB) on the response body accepted from the admin REST
// round-trips below (period push/pull); the replies are small JSON docs.
1795 // we expect a very small response
1796 static constexpr size_t MAX_REST_RESPONSE
= 128 * 1024;
// Forward an admin request over an existing RGWRESTConn, capture up to
// MAX_REST_RESPONSE bytes of reply, and parse the reply as JSON into
// 'parser'. Parse failures are reported on cerr.
// NOTE(review): the declaration of 'user', error-check branches and
// return statements are not visible in this line-mangled chunk.
1798 static int send_to_remote_gateway(RGWRESTConn
* conn
, req_info
& info
,
1799 bufferlist
& in_data
, JSONParser
& parser
)
1805 ceph::bufferlist response
;
1807 int ret
= conn
->forward(dpp(), user
, info
, nullptr, MAX_REST_RESPONSE
, &in_data
, &response
, null_yield
);
1809 int parse_ret
= parser
.parse(response
.c_str(), response
.length());
1810 if (parse_ret
< 0) {
1811 cerr
<< "failed to parse response" << std::endl
;
// Send an admin request directly to a URL, signed with the given access
// key/secret (both are mandatory with --url), via RGWRESTSimpleRequest;
// the JSON reply (up to MAX_REST_RESPONSE bytes) is parsed into 'parser'.
// NOTE(review): the declarations of 'key' and 'params', error branches
// and returns are not visible in this line-mangled chunk; the token
// "¶ms" below appears to be a mojibake of "&params" -- confirm
// against the upstream source.
1817 static int send_to_url(const string
& url
,
1818 std::optional
<string
> opt_region
,
1819 const string
& access
,
1820 const string
& secret
, req_info
& info
,
1821 bufferlist
& in_data
, JSONParser
& parser
)
1823 if (access
.empty() || secret
.empty()) {
1824 cerr
<< "An --access-key and --secret must be provided with --url." << std::endl
;
1832 RGWRESTSimpleRequest
req(g_ceph_context
, info
.method
, url
, NULL
, ¶ms
, opt_region
);
1834 bufferlist response
;
1835 int ret
= req
.forward_request(dpp(), key
, info
, MAX_REST_RESPONSE
, &in_data
, &response
, null_yield
);
1837 int parse_ret
= parser
.parse(response
.c_str(), response
.length());
1838 if (parse_ret
< 0) {
1839 cout
<< "failed to parse response" << std::endl
;
// Dispatch helper: when a zone connection is available use
// send_to_remote_gateway(), otherwise fall back to send_to_url() with
// explicit credentials.
// NOTE(review): the 'parser' parameter, the function's opening brace and
// the branch condition are not visible in this line-mangled chunk.
1845 static int send_to_remote_or_url(RGWRESTConn
*conn
, const string
& url
,
1846 std::optional
<string
> opt_region
,
1847 const string
& access
, const string
& secret
,
1848 req_info
& info
, bufferlist
& in_data
,
1852 return send_to_remote_gateway(conn
, info
, in_data
, parser
);
1854 return send_to_url(url
, opt_region
, access
, secret
, info
, in_data
, parser
);
// Commit a staged period. If this zone IS the period's master zone the
// commit happens locally (read the current period, then period.commit()).
// Otherwise the period is POSTed (with an empty id) to
// /admin/realm/period on the master -- via an explicit 'remote' zone id,
// a --url, or defaulting to the new master zone's connection -- and the
// committed period returned by the master is decoded, stored, promoted
// to latest epoch, reflected locally, and announced with
// notify_new_period().
// NOTE(review): this long function is line-mangled; the 'force'
// parameter, 'env'/'bl'/'p' declarations, error-check branches and
// returns are not visible in this chunk. Comments describe the visible
// flow only.
1857 static int commit_period(RGWRealm
& realm
, RGWPeriod
& period
,
1858 string remote
, const string
& url
,
1859 std::optional
<string
> opt_region
,
1860 const string
& access
, const string
& secret
,
1863 auto& master_zone
= period
.get_master_zone();
1864 if (master_zone
.empty()) {
1865 cerr
<< "cannot commit period: period does not have a master zone of a master zonegroup" << std::endl
;
1868 // are we the period's master zone?
1869 if (store
->get_zone()->get_id() == master_zone
) {
1870 // read the current period
1871 RGWPeriod current_period
;
1872 int ret
= current_period
.init(dpp(), g_ceph_context
,
1873 static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, realm
.get_id(),
1876 cerr
<< "Error initializing current period: "
1877 << cpp_strerror(-ret
) << std::endl
;
1880 // the master zone can commit locally
1881 ret
= period
.commit(dpp(), store
, realm
, current_period
, cerr
, null_yield
, force
);
1883 cerr
<< "failed to commit period: " << cpp_strerror(-ret
) << std::endl
;
1888 if (remote
.empty() && url
.empty()) {
1889 // use the new master zone's connection
1890 remote
= master_zone
.id
;
1891 cerr
<< "Sending period to new master zone " << remote
<< std::endl
;
1893 boost::optional
<RGWRESTConn
> conn
;
1894 RGWRESTConn
*remote_conn
= nullptr;
1895 if (!remote
.empty()) {
1896 conn
= get_remote_conn(static_cast<rgw::sal::RadosStore
*>(store
), period
.get_map(), remote
);
1898 cerr
<< "failed to find a zone or zonegroup for remote "
1899 << remote
<< std::endl
;
1902 remote_conn
= &*conn
;
1905 // push period to the master with an empty period id
1906 period
.set_id(string());
1909 req_info
info(g_ceph_context
, &env
);
1910 info
.method
= "POST";
1911 info
.request_uri
= "/admin/realm/period";
1913 // json format into a bufferlist
1914 JSONFormatter
jf(false);
1915 encode_json("period", period
, &jf
);
1920 int ret
= send_to_remote_or_url(remote_conn
, url
, opt_region
, access
, secret
, info
, bl
, p
);
1922 cerr
<< "request failed: " << cpp_strerror(-ret
) << std::endl
;
1924 // did we parse an error message?
1925 auto message
= p
.find_obj("Message");
1927 cerr
<< "Reason: " << message
->get_data() << std::endl
;
1932 // decode the response and store it back
1934 decode_json_obj(period
, &p
);
1935 } catch (const JSONDecoder::err
& e
) {
1936 cout
<< "failed to decode JSON input: " << e
.what() << std::endl
;
1939 if (period
.get_id().empty()) {
1940 cerr
<< "Period commit got back an empty period id" << std::endl
;
1943 // the master zone gave us back the period that it committed, so it's
1944 // safe to save it as our latest epoch
1945 ret
= period
.store_info(dpp(), false, null_yield
);
1947 cerr
<< "Error storing committed period " << period
.get_id() << ": "
1948 << cpp_strerror(ret
) << std::endl
;
1951 ret
= period
.set_latest_epoch(dpp(), null_yield
, period
.get_epoch());
1953 cerr
<< "Error updating period epoch: " << cpp_strerror(ret
) << std::endl
;
1956 ret
= period
.reflect(dpp(), null_yield
);
1958 cerr
<< "Error updating local objects: " << cpp_strerror(ret
) << std::endl
;
1961 realm
.notify_new_period(dpp(), period
, null_yield
);
// Implement "period update": initialize the realm and the requested
// period (optionally at a specific epoch parsed with atoi), regenerate
// the period from the current cluster state with period.update(), store
// it, optionally commit it via commit_period(), and finally print the
// resulting period as JSON on stdout.
// NOTE(review): the declaration of 'epoch', the 'commit' gate around
// commit_period(), error-check branches and returns are not visible in
// this line-mangled chunk.
1965 static int update_period(const string
& realm_id
, const string
& realm_name
,
1966 const string
& period_id
, const string
& period_epoch
,
1967 bool commit
, const string
& remote
, const string
& url
,
1968 std::optional
<string
> opt_region
,
1969 const string
& access
, const string
& secret
,
1970 Formatter
*formatter
, bool force
)
1972 RGWRealm
realm(realm_id
, realm_name
);
1973 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
1975 cerr
<< "Error initializing realm " << cpp_strerror(-ret
) << std::endl
;
1979 if (!period_epoch
.empty()) {
1980 epoch
= atoi(period_epoch
.c_str());
1982 RGWPeriod
period(period_id
, epoch
);
1983 ret
= period
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, realm
.get_id(), null_yield
);
1985 cerr
<< "period init failed: " << cpp_strerror(-ret
) << std::endl
;
1989 ret
= period
.update(dpp(), null_yield
);
1991 // Dropping the error message here, as both the ret codes were handled in
1995 ret
= period
.store_info(dpp(), false, null_yield
);
1997 cerr
<< "failed to store period: " << cpp_strerror(-ret
) << std::endl
;
2001 ret
= commit_period(realm
, period
, remote
, url
, opt_region
, access
, secret
, force
);
2003 cerr
<< "failed to commit period: " << cpp_strerror(-ret
) << std::endl
;
2007 encode_json("period", period
, formatter
);
2008 formatter
->flush(cout
);
2012 static int init_bucket_for_sync(rgw::sal::User
* user
,
2013 const string
& tenant
, const string
& bucket_name
,
2014 const string
& bucket_id
,
2015 std::unique_ptr
<rgw::sal::Bucket
>* bucket
)
2017 int ret
= init_bucket(user
, tenant
, bucket_name
, bucket_id
, bucket
);
2019 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
2026 static int do_period_pull(RGWRESTConn
*remote_conn
, const string
& url
,
2027 std::optional
<string
> opt_region
,
2028 const string
& access_key
, const string
& secret_key
,
2029 const string
& realm_id
, const string
& realm_name
,
2030 const string
& period_id
, const string
& period_epoch
,
2034 req_info
info(g_ceph_context
, &env
);
2035 info
.method
= "GET";
2036 info
.request_uri
= "/admin/realm/period";
2038 map
<string
, string
> ¶ms
= info
.args
.get_params();
2039 if (!realm_id
.empty())
2040 params
["realm_id"] = realm_id
;
2041 if (!realm_name
.empty())
2042 params
["realm_name"] = realm_name
;
2043 if (!period_id
.empty())
2044 params
["period_id"] = period_id
;
2045 if (!period_epoch
.empty())
2046 params
["epoch"] = period_epoch
;
2050 int ret
= send_to_remote_or_url(remote_conn
, url
, opt_region
, access_key
, secret_key
,
2053 cerr
<< "request failed: " << cpp_strerror(-ret
) << std::endl
;
2056 ret
= period
->init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
, false);
2058 cerr
<< "faile to init period " << cpp_strerror(-ret
) << std::endl
;
2062 decode_json_obj(*period
, &p
);
2063 } catch (const JSONDecoder::err
& e
) {
2064 cout
<< "failed to decode JSON input: " << e
.what() << std::endl
;
2067 ret
= period
->store_info(dpp(), false, null_yield
);
2069 cerr
<< "Error storing period " << period
->get_id() << ": " << cpp_strerror(ret
) << std::endl
;
2071 // store latest epoch (ignore errors)
2072 period
->update_latest_epoch(dpp(), period
->get_epoch(), null_yield
);
2076 static int read_current_period_id(rgw::sal::RadosStore
* store
, const std::string
& realm_id
,
2077 const std::string
& realm_name
,
2078 std::string
* period_id
)
2080 RGWRealm
realm(realm_id
, realm_name
);
2081 int ret
= realm
.init(dpp(), g_ceph_context
, store
->svc()->sysobj
, null_yield
);
2083 std::cerr
<< "failed to read realm: " << cpp_strerror(-ret
) << std::endl
;
2086 *period_id
= realm
.get_current_period();
2090 void flush_ss(stringstream
& ss
, list
<string
>& l
)
2092 if (!ss
.str().empty()) {
2093 l
.push_back(ss
.str());
2098 stringstream
& push_ss(stringstream
& ss
, list
<string
>& l
, int tab
= 0)
2102 ss
<< setw(tab
) << "" << setw(1);
2107 static void get_md_sync_status(list
<string
>& status
)
2109 RGWMetaSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->rados
->get_async_processor());
2111 int ret
= sync
.init(dpp());
2113 status
.push_back(string("failed to retrieve sync info: sync.init() failed: ") + cpp_strerror(-ret
));
2117 rgw_meta_sync_status sync_status
;
2118 ret
= sync
.read_sync_status(dpp(), &sync_status
);
2120 status
.push_back(string("failed to read sync status: ") + cpp_strerror(-ret
));
2125 switch (sync_status
.sync_info
.state
) {
2126 case rgw_meta_sync_info::StateInit
:
2127 status_str
= "init";
2129 case rgw_meta_sync_info::StateBuildingFullSyncMaps
:
2130 status_str
= "preparing for full sync";
2132 case rgw_meta_sync_info::StateSync
:
2133 status_str
= "syncing";
2136 status_str
= "unknown";
2139 status
.push_back(status_str
);
2141 uint64_t full_total
= 0;
2142 uint64_t full_complete
= 0;
2146 int total_shards
= 0;
2147 set
<int> shards_behind_set
;
2149 for (auto marker_iter
: sync_status
.sync_markers
) {
2150 full_total
+= marker_iter
.second
.total_entries
;
2152 if (marker_iter
.second
.state
== rgw_meta_sync_marker::SyncState::FullSync
) {
2154 full_complete
+= marker_iter
.second
.pos
;
2155 int shard_id
= marker_iter
.first
;
2156 shards_behind_set
.insert(shard_id
);
2158 full_complete
+= marker_iter
.second
.total_entries
;
2160 if (marker_iter
.second
.state
== rgw_meta_sync_marker::SyncState::IncrementalSync
) {
2166 push_ss(ss
, status
) << "full sync: " << num_full
<< "/" << total_shards
<< " shards";
2169 push_ss(ss
, status
) << "full sync: " << full_total
- full_complete
<< " entries to sync";
2172 push_ss(ss
, status
) << "incremental sync: " << num_inc
<< "/" << total_shards
<< " shards";
2174 map
<int, RGWMetadataLogInfo
> master_shards_info
;
2175 string master_period
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->get_current_period_id();
2177 ret
= sync
.read_master_log_shards_info(dpp(), master_period
, &master_shards_info
);
2179 status
.push_back(string("failed to fetch master sync status: ") + cpp_strerror(-ret
));
2183 map
<int, string
> shards_behind
;
2184 if (sync_status
.sync_info
.period
!= master_period
) {
2185 status
.push_back(string("master is on a different period: master_period=" +
2186 master_period
+ " local_period=" + sync_status
.sync_info
.period
));
2188 for (auto local_iter
: sync_status
.sync_markers
) {
2189 int shard_id
= local_iter
.first
;
2190 auto iter
= master_shards_info
.find(shard_id
);
2192 if (iter
== master_shards_info
.end()) {
2194 derr
<< "ERROR: could not find remote sync shard status for shard_id=" << shard_id
<< dendl
;
2197 auto master_marker
= iter
->second
.marker
;
2198 if (local_iter
.second
.state
== rgw_meta_sync_marker::SyncState::IncrementalSync
&&
2199 master_marker
> local_iter
.second
.marker
) {
2200 shards_behind
[shard_id
] = local_iter
.second
.marker
;
2201 shards_behind_set
.insert(shard_id
);
2206 // fetch remote log entries to determine the oldest change
2207 std::optional
<std::pair
<int, ceph::real_time
>> oldest
;
2208 if (!shards_behind
.empty()) {
2209 map
<int, rgw_mdlog_shard_data
> master_pos
;
2210 ret
= sync
.read_master_log_shards_next(dpp(), sync_status
.sync_info
.period
, shards_behind
, &master_pos
);
2212 derr
<< "ERROR: failed to fetch master next positions (" << cpp_strerror(-ret
) << ")" << dendl
;
2214 for (auto iter
: master_pos
) {
2215 rgw_mdlog_shard_data
& shard_data
= iter
.second
;
2217 if (shard_data
.entries
.empty()) {
2218 // there aren't any entries in this shard, so we're not really behind
2219 shards_behind
.erase(iter
.first
);
2220 shards_behind_set
.erase(iter
.first
);
2222 rgw_mdlog_entry
& entry
= shard_data
.entries
.front();
2224 oldest
.emplace(iter
.first
, entry
.timestamp
);
2225 } else if (!ceph::real_clock::is_zero(entry
.timestamp
) && entry
.timestamp
< oldest
->second
) {
2226 oldest
.emplace(iter
.first
, entry
.timestamp
);
2233 int total_behind
= shards_behind
.size() + (sync_status
.sync_info
.num_shards
- num_inc
);
2234 if (total_behind
== 0) {
2235 push_ss(ss
, status
) << "metadata is caught up with master";
2237 push_ss(ss
, status
) << "metadata is behind on " << total_behind
<< " shards";
2238 push_ss(ss
, status
) << "behind shards: " << "[" << shards_behind_set
<< "]";
2240 push_ss(ss
, status
) << "oldest incremental change not applied: "
2241 << oldest
->second
<< " [" << oldest
->first
<< ']';
2245 flush_ss(ss
, status
);
2248 static void get_data_sync_status(const rgw_zone_id
& source_zone
, list
<string
>& status
, int tab
)
2254 if (!static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->find_zone(source_zone
, &sz
)) {
2255 push_ss(ss
, status
, tab
) << string("zone not found");
2256 flush_ss(ss
, status
);
2260 if (!static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->zone_syncs_from(static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->get_zone(), *sz
)) {
2261 push_ss(ss
, status
, tab
) << string("not syncing from zone");
2262 flush_ss(ss
, status
);
2265 RGWDataSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->rados
->get_async_processor(), source_zone
, nullptr);
2267 int ret
= sync
.init(dpp());
2269 push_ss(ss
, status
, tab
) << string("failed to retrieve sync info: ") + cpp_strerror(-ret
);
2270 flush_ss(ss
, status
);
2274 rgw_data_sync_status sync_status
;
2275 ret
= sync
.read_sync_status(dpp(), &sync_status
);
2276 if (ret
< 0 && ret
!= -ENOENT
) {
2277 push_ss(ss
, status
, tab
) << string("failed read sync status: ") + cpp_strerror(-ret
);
2281 set
<int> recovering_shards
;
2282 ret
= sync
.read_recovering_shards(dpp(), sync_status
.sync_info
.num_shards
, recovering_shards
);
2283 if (ret
< 0 && ret
!= ENOENT
) {
2284 push_ss(ss
, status
, tab
) << string("failed read recovering shards: ") + cpp_strerror(-ret
);
2289 switch (sync_status
.sync_info
.state
) {
2290 case rgw_data_sync_info::StateInit
:
2291 status_str
= "init";
2293 case rgw_data_sync_info::StateBuildingFullSyncMaps
:
2294 status_str
= "preparing for full sync";
2296 case rgw_data_sync_info::StateSync
:
2297 status_str
= "syncing";
2300 status_str
= "unknown";
2303 push_ss(ss
, status
, tab
) << status_str
;
2305 uint64_t full_total
= 0;
2306 uint64_t full_complete
= 0;
2310 int total_shards
= 0;
2311 set
<int> shards_behind_set
;
2313 for (auto marker_iter
: sync_status
.sync_markers
) {
2314 full_total
+= marker_iter
.second
.total_entries
;
2316 if (marker_iter
.second
.state
== rgw_data_sync_marker::SyncState::FullSync
) {
2318 full_complete
+= marker_iter
.second
.pos
;
2319 int shard_id
= marker_iter
.first
;
2320 shards_behind_set
.insert(shard_id
);
2322 full_complete
+= marker_iter
.second
.total_entries
;
2324 if (marker_iter
.second
.state
== rgw_data_sync_marker::SyncState::IncrementalSync
) {
2329 push_ss(ss
, status
, tab
) << "full sync: " << num_full
<< "/" << total_shards
<< " shards";
2332 push_ss(ss
, status
, tab
) << "full sync: " << full_total
- full_complete
<< " buckets to sync";
2335 push_ss(ss
, status
, tab
) << "incremental sync: " << num_inc
<< "/" << total_shards
<< " shards";
2337 map
<int, RGWDataChangesLogInfo
> source_shards_info
;
2339 ret
= sync
.read_source_log_shards_info(dpp(), &source_shards_info
);
2341 push_ss(ss
, status
, tab
) << string("failed to fetch source sync status: ") + cpp_strerror(-ret
);
2345 map
<int, string
> shards_behind
;
2347 for (auto local_iter
: sync_status
.sync_markers
) {
2348 int shard_id
= local_iter
.first
;
2349 auto iter
= source_shards_info
.find(shard_id
);
2351 if (iter
== source_shards_info
.end()) {
2353 derr
<< "ERROR: could not find remote sync shard status for shard_id=" << shard_id
<< dendl
;
2356 auto master_marker
= iter
->second
.marker
;
2357 if (local_iter
.second
.state
== rgw_data_sync_marker::SyncState::IncrementalSync
&&
2358 master_marker
> local_iter
.second
.marker
) {
2359 shards_behind
[shard_id
] = local_iter
.second
.marker
;
2360 shards_behind_set
.insert(shard_id
);
2364 int total_behind
= shards_behind
.size() + (sync_status
.sync_info
.num_shards
- num_inc
);
2365 int total_recovering
= recovering_shards
.size();
2366 if (total_behind
== 0 && total_recovering
== 0) {
2367 push_ss(ss
, status
, tab
) << "data is caught up with source";
2368 } else if (total_behind
> 0) {
2369 push_ss(ss
, status
, tab
) << "data is behind on " << total_behind
<< " shards";
2371 push_ss(ss
, status
, tab
) << "behind shards: " << "[" << shards_behind_set
<< "]" ;
2373 map
<int, rgw_datalog_shard_data
> master_pos
;
2374 ret
= sync
.read_source_log_shards_next(dpp(), shards_behind
, &master_pos
);
2376 derr
<< "ERROR: failed to fetch next positions (" << cpp_strerror(-ret
) << ")" << dendl
;
2378 std::optional
<std::pair
<int, ceph::real_time
>> oldest
;
2380 for (auto iter
: master_pos
) {
2381 rgw_datalog_shard_data
& shard_data
= iter
.second
;
2383 if (!shard_data
.entries
.empty()) {
2384 rgw_datalog_entry
& entry
= shard_data
.entries
.front();
2386 oldest
.emplace(iter
.first
, entry
.timestamp
);
2387 } else if (!ceph::real_clock::is_zero(entry
.timestamp
) && entry
.timestamp
< oldest
->second
) {
2388 oldest
.emplace(iter
.first
, entry
.timestamp
);
2394 push_ss(ss
, status
, tab
) << "oldest incremental change not applied: "
2395 << oldest
->second
<< " [" << oldest
->first
<< ']';
2400 if (total_recovering
> 0) {
2401 push_ss(ss
, status
, tab
) << total_recovering
<< " shards are recovering";
2402 push_ss(ss
, status
, tab
) << "recovering shards: " << "[" << recovering_shards
<< "]";
2405 flush_ss(ss
, status
);
2408 static void tab_dump(const string
& header
, int width
, const list
<string
>& entries
)
2412 for (auto e
: entries
) {
2413 cout
<< std::setw(width
) << s
<< std::setw(1) << " " << e
<< std::endl
;
2419 static void sync_status(Formatter
*formatter
)
2421 const RGWRealm
& realm
= store
->get_zone()->get_realm();
2422 const RGWZoneGroup
& zonegroup
= store
->get_zone()->get_zonegroup();
2423 const RGWZone
& zone
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->get_zone();
2427 cout
<< std::setw(width
) << "realm" << std::setw(1) << " " << realm
.get_id() << " (" << realm
.get_name() << ")" << std::endl
;
2428 cout
<< std::setw(width
) << "zonegroup" << std::setw(1) << " " << zonegroup
.get_id() << " (" << zonegroup
.get_name() << ")" << std::endl
;
2429 cout
<< std::setw(width
) << "zone" << std::setw(1) << " " << zone
.id
<< " (" << zone
.name
<< ")" << std::endl
;
2431 list
<string
> md_status
;
2433 if (store
->is_meta_master()) {
2434 md_status
.push_back("no sync (zone is master)");
2436 get_md_sync_status(md_status
);
2439 tab_dump("metadata sync", width
, md_status
);
2441 list
<string
> data_status
;
2443 auto& zone_conn_map
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->get_zone_conn_map();
2445 for (auto iter
: zone_conn_map
) {
2446 const rgw_zone_id
& source_id
= iter
.first
;
2447 string source_str
= "source: ";
2448 string s
= source_str
+ source_id
.id
;
2450 if (static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->find_zone(source_id
, &sz
)) {
2451 s
+= string(" (") + sz
->name
+ ")";
2453 data_status
.push_back(s
);
2454 get_data_sync_status(source_id
, data_status
, source_str
.size());
2457 tab_dump("data sync", width
, data_status
);
2461 int w
; // indent width
2462 std::string_view header
;
2463 indented(int w
, std::string_view header
= "") : w(w
), header(header
) {}
2465 std::ostream
& operator<<(std::ostream
& out
, const indented
& h
) {
2466 return out
<< std::setw(h
.w
) << h
.header
<< std::setw(1) << ' ';
2469 static int bucket_source_sync_status(const DoutPrefixProvider
*dpp
, rgw::sal::RadosStore
* store
, const RGWZone
& zone
,
2470 const RGWZone
& source
, RGWRESTConn
*conn
,
2471 const RGWBucketInfo
& bucket_info
,
2472 rgw_sync_bucket_pipe pipe
,
2473 int width
, std::ostream
& out
)
2475 out
<< indented
{width
, "source zone"} << source
.id
<< " (" << source
.name
<< ")" << std::endl
;
2477 // syncing from this zone?
2478 if (!zone
.syncs_from(source
.name
)) {
2479 out
<< indented
{width
} << "does not sync from zone\n";
2483 if (!pipe
.source
.bucket
) {
2484 ldpp_dout(dpp
, -1) << __func__
<< "(): missing source bucket" << dendl
;
2488 std::unique_ptr
<rgw::sal::Bucket
> source_bucket
;
2489 int r
= init_bucket(nullptr, *pipe
.source
.bucket
, &source_bucket
);
2491 ldpp_dout(dpp
, -1) << "failed to read source bucket info: " << cpp_strerror(r
) << dendl
;
2495 pipe
.source
.bucket
= source_bucket
->get_key();
2496 pipe
.dest
.bucket
= bucket_info
.bucket
;
2498 std::vector
<rgw_bucket_shard_sync_info
> status
;
2499 r
= rgw_bucket_sync_status(dpp
, store
, pipe
, bucket_info
, &source_bucket
->get_info(), &status
);
2501 ldpp_dout(dpp
, -1) << "failed to read bucket sync status: " << cpp_strerror(r
) << dendl
;
2505 out
<< indented
{width
, "source bucket"} << source_bucket
<< std::endl
;
2509 uint64_t full_complete
= 0;
2510 const size_t total_shards
= status
.size();
2512 using BucketSyncState
= rgw_bucket_shard_sync_info::SyncState
;
2513 for (size_t shard_id
= 0; shard_id
< total_shards
; shard_id
++) {
2514 auto& m
= status
[shard_id
];
2515 if (m
.state
== BucketSyncState::StateFullSync
) {
2517 full_complete
+= m
.full_marker
.count
;
2518 } else if (m
.state
== BucketSyncState::StateIncrementalSync
) {
2523 out
<< indented
{width
} << "full sync: " << num_full
<< "/" << total_shards
<< " shards\n";
2525 out
<< indented
{width
} << "full sync: " << full_complete
<< " objects completed\n";
2527 out
<< indented
{width
} << "incremental sync: " << num_inc
<< "/" << total_shards
<< " shards\n";
2529 BucketIndexShardsManager remote_markers
;
2530 r
= rgw_read_remote_bilog_info(dpp
, conn
, source_bucket
->get_key(), remote_markers
, null_yield
);
2532 ldpp_dout(dpp
, -1) << "failed to read remote log: " << cpp_strerror(r
) << dendl
;
2536 std::set
<int> shards_behind
;
2537 for (auto& r
: remote_markers
.get()) {
2538 auto shard_id
= r
.first
;
2539 auto& m
= status
[shard_id
];
2540 if (r
.second
.empty()) {
2541 continue; // empty bucket index shard
2543 auto pos
= BucketIndexShardsManager::get_shard_marker(m
.inc_marker
.position
);
2544 if (m
.state
!= BucketSyncState::StateIncrementalSync
|| pos
!= r
.second
) {
2545 shards_behind
.insert(shard_id
);
2548 if (!shards_behind
.empty()) {
2549 out
<< indented
{width
} << "bucket is behind on " << shards_behind
.size() << " shards\n";
2550 out
<< indented
{width
} << "behind shards: [" << shards_behind
<< "]\n" ;
2551 } else if (!num_full
) {
2552 out
<< indented
{width
} << "bucket is caught up with source\n";
2557 void encode_json(const char *name
, const RGWBucketSyncFlowManager::pipe_set
& pset
, Formatter
*f
)
2559 Formatter::ObjectSection
top_section(*f
, name
);
2560 Formatter::ArraySection
as(*f
, "entries");
2562 for (auto& pipe_handler
: pset
) {
2563 Formatter::ObjectSection
hs(*f
, "handler");
2564 encode_json("source", pipe_handler
.source
, f
);
2565 encode_json("dest", pipe_handler
.dest
, f
);
2569 static std::vector
<string
> convert_bucket_set_to_str_vec(const std::set
<rgw_bucket
>& bs
)
2571 std::vector
<string
> result
;
2572 result
.reserve(bs
.size());
2573 for (auto& b
: bs
) {
2574 result
.push_back(b
.get_key());
2579 static void get_hint_entities(const std::set
<rgw_zone_id
>& zones
, const std::set
<rgw_bucket
>& buckets
,
2580 std::set
<rgw_sync_bucket_entity
> *hint_entities
)
2582 for (auto& zone_id
: zones
) {
2583 for (auto& b
: buckets
) {
2584 std::unique_ptr
<rgw::sal::Bucket
> hint_bucket
;
2585 int ret
= init_bucket(nullptr, b
, &hint_bucket
);
2587 ldpp_dout(dpp(), 20) << "could not init bucket info for hint bucket=" << b
<< " ... skipping" << dendl
;
2591 hint_entities
->insert(rgw_sync_bucket_entity(zone_id
, hint_bucket
->get_key()));
2596 static rgw_zone_id
resolve_zone_id(const string
& s
)
2601 if (static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->find_zone(s
, &zone
)) {
2602 return rgw_zone_id(s
);
2604 if (static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->find_zone_id_by_name(s
, &result
)) {
2607 return rgw_zone_id(s
);
2610 rgw_zone_id
validate_zone_id(const rgw_zone_id
& zone_id
)
2612 return resolve_zone_id(zone_id
.id
);
2615 static int sync_info(std::optional
<rgw_zone_id
> opt_target_zone
, std::optional
<rgw_bucket
> opt_bucket
, Formatter
*formatter
)
2617 rgw_zone_id zone_id
= opt_target_zone
.value_or(store
->get_zone()->get_id());
2619 auto zone_policy_handler
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->get_sync_policy_handler(zone_id
);
2621 RGWBucketSyncPolicyHandlerRef bucket_handler
;
2623 std::optional
<rgw_bucket
> eff_bucket
= opt_bucket
;
2625 auto handler
= zone_policy_handler
;
2628 std::unique_ptr
<rgw::sal::Bucket
> bucket
;
2630 int ret
= init_bucket(nullptr, *eff_bucket
, &bucket
);
2631 if (ret
< 0 && ret
!= -ENOENT
) {
2632 cerr
<< "ERROR: init_bucket failed: " << cpp_strerror(-ret
) << std::endl
;
2637 rgw::sal::Attrs attrs
= bucket
->get_attrs();
2638 bucket_handler
.reset(handler
->alloc_child(bucket
->get_info(), std::move(attrs
)));
2640 cerr
<< "WARNING: bucket not found, simulating result" << std::endl
;
2641 bucket_handler
.reset(handler
->alloc_child(*eff_bucket
, nullopt
));
2644 ret
= bucket_handler
->init(dpp(), null_yield
);
2646 cerr
<< "ERROR: failed to init bucket sync policy handler: " << cpp_strerror(-ret
) << " (ret=" << ret
<< ")" << std::endl
;
2650 handler
= bucket_handler
;
2653 std::set
<rgw_sync_bucket_pipe
> sources
;
2654 std::set
<rgw_sync_bucket_pipe
> dests
;
2656 handler
->get_pipes(&sources
, &dests
, std::nullopt
);
2658 auto source_hints_vec
= convert_bucket_set_to_str_vec(handler
->get_source_hints());
2659 auto target_hints_vec
= convert_bucket_set_to_str_vec(handler
->get_target_hints());
2661 std::set
<rgw_sync_bucket_pipe
> resolved_sources
;
2662 std::set
<rgw_sync_bucket_pipe
> resolved_dests
;
2664 rgw_sync_bucket_entity
self_entity(zone_id
, opt_bucket
);
2666 set
<rgw_zone_id
> source_zones
;
2667 set
<rgw_zone_id
> target_zones
;
2669 zone_policy_handler
->reflect(dpp(), nullptr, nullptr,
2673 false); /* relaxed: also get all zones that we allow to sync to/from */
2675 std::set
<rgw_sync_bucket_entity
> hint_entities
;
2677 get_hint_entities(source_zones
, handler
->get_source_hints(), &hint_entities
);
2678 get_hint_entities(target_zones
, handler
->get_target_hints(), &hint_entities
);
2680 for (auto& hint_entity
: hint_entities
) {
2681 if (!hint_entity
.zone
||
2682 !hint_entity
.bucket
) {
2683 continue; /* shouldn't really happen */
2686 auto zid
= validate_zone_id(*hint_entity
.zone
);
2687 auto& hint_bucket
= *hint_entity
.bucket
;
2689 RGWBucketSyncPolicyHandlerRef hint_bucket_handler
;
2690 int r
= store
->get_sync_policy_handler(dpp(), zid
, hint_bucket
, &hint_bucket_handler
, null_yield
);
2692 ldpp_dout(dpp(), 20) << "could not get bucket sync policy handler for hint bucket=" << hint_bucket
<< " ... skipping" << dendl
;
2696 hint_bucket_handler
->get_pipes(&resolved_dests
,
2698 self_entity
); /* flipping resolved dests and sources as these are
2699 relative to the remote entity */
2703 Formatter::ObjectSection
os(*formatter
, "result");
2704 encode_json("sources", sources
, formatter
);
2705 encode_json("dests", dests
, formatter
);
2707 Formatter::ObjectSection
hints_section(*formatter
, "hints");
2708 encode_json("sources", source_hints_vec
, formatter
);
2709 encode_json("dests", target_hints_vec
, formatter
);
2712 Formatter::ObjectSection
resolved_hints_section(*formatter
, "resolved-hints-1");
2713 encode_json("sources", resolved_sources
, formatter
);
2714 encode_json("dests", resolved_dests
, formatter
);
2717 Formatter::ObjectSection
resolved_hints_section(*formatter
, "resolved-hints");
2718 encode_json("sources", handler
->get_resolved_source_hints(), formatter
);
2719 encode_json("dests", handler
->get_resolved_dest_hints(), formatter
);
2723 formatter
->flush(cout
);
2728 static int bucket_sync_info(rgw::sal::RadosStore
* store
, const RGWBucketInfo
& info
,
2731 const RGWRealm
& realm
= store
->get_zone()->get_realm();
2732 const RGWZoneGroup
& zonegroup
= store
->get_zone()->get_zonegroup();
2733 const RGWZone
& zone
= store
->svc()->zone
->get_zone();
2734 constexpr int width
= 15;
2736 out
<< indented
{width
, "realm"} << realm
.get_id() << " (" << realm
.get_name() << ")\n";
2737 out
<< indented
{width
, "zonegroup"} << zonegroup
.get_id() << " (" << zonegroup
.get_name() << ")\n";
2738 out
<< indented
{width
, "zone"} << zone
.id
<< " (" << zone
.name
<< ")\n";
2739 out
<< indented
{width
, "bucket"} << info
.bucket
<< "\n\n";
2741 if (!static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->bucket
->bucket_imports_data(info
.bucket
, null_yield
, dpp())) {
2742 out
<< "Sync is disabled for bucket " << info
.bucket
.name
<< '\n';
2746 RGWBucketSyncPolicyHandlerRef handler
;
2748 int r
= store
->get_sync_policy_handler(dpp(), std::nullopt
, info
.bucket
, &handler
, null_yield
);
2750 ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info
.bucket
<< "): r=" << r
<< ": " << cpp_strerror(-r
) << dendl
;
2754 auto& sources
= handler
->get_sources();
2756 for (auto& m
: sources
) {
2757 auto& zone
= m
.first
;
2758 out
<< indented
{width
, "source zone"} << zone
<< std::endl
;
2759 for (auto& pipe_handler
: m
.second
) {
2760 out
<< indented
{width
, "bucket"} << *pipe_handler
.source
.bucket
<< std::endl
;
2767 static int bucket_sync_status(rgw::sal::RadosStore
* store
, const RGWBucketInfo
& info
,
2768 const rgw_zone_id
& source_zone_id
,
2769 std::optional
<rgw_bucket
>& opt_source_bucket
,
2772 const RGWRealm
& realm
= store
->get_zone()->get_realm();
2773 const RGWZoneGroup
& zonegroup
= store
->get_zone()->get_zonegroup();
2774 const RGWZone
& zone
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->get_zone();
2775 constexpr int width
= 15;
2777 out
<< indented
{width
, "realm"} << realm
.get_id() << " (" << realm
.get_name() << ")\n";
2778 out
<< indented
{width
, "zonegroup"} << zonegroup
.get_id() << " (" << zonegroup
.get_name() << ")\n";
2779 out
<< indented
{width
, "zone"} << zone
.id
<< " (" << zone
.name
<< ")\n";
2780 out
<< indented
{width
, "bucket"} << info
.bucket
<< "\n\n";
2782 if (!static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->bucket
->bucket_imports_data(info
.bucket
, null_yield
, dpp())) {
2783 out
<< "Sync is disabled for bucket " << info
.bucket
.name
<< " or bucket has no sync sources" << std::endl
;
2787 RGWBucketSyncPolicyHandlerRef handler
;
2789 int r
= store
->get_sync_policy_handler(dpp(), std::nullopt
, info
.bucket
, &handler
, null_yield
);
2791 ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info
.bucket
<< "): r=" << r
<< ": " << cpp_strerror(-r
) << dendl
;
2795 auto sources
= handler
->get_all_sources();
2797 auto& zone_conn_map
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->get_zone_conn_map();
2798 set
<rgw_zone_id
> zone_ids
;
2800 if (!source_zone_id
.empty()) {
2801 auto z
= zonegroup
.zones
.find(source_zone_id
);
2802 if (z
== zonegroup
.zones
.end()) {
2803 ldpp_dout(dpp(), -1) << "Source zone not found in zonegroup "
2804 << zonegroup
.get_name() << dendl
;
2807 auto c
= zone_conn_map
.find(source_zone_id
);
2808 if (c
== zone_conn_map
.end()) {
2809 ldpp_dout(dpp(), -1) << "No connection to zone " << z
->second
.name
<< dendl
;
2812 zone_ids
.insert(source_zone_id
);
2814 for (const auto& entry
: zonegroup
.zones
) {
2815 zone_ids
.insert(entry
.second
.id
);
2819 for (auto& zone_id
: zone_ids
) {
2820 auto z
= zonegroup
.zones
.find(zone_id
.id
);
2821 if (z
== zonegroup
.zones
.end()) { /* should't happen */
2824 auto c
= zone_conn_map
.find(zone_id
.id
);
2825 if (c
== zone_conn_map
.end()) { /* should't happen */
2829 for (auto& entry
: sources
) {
2830 auto& pipe
= entry
.second
;
2831 if (opt_source_bucket
&&
2832 pipe
.source
.bucket
!= opt_source_bucket
) {
2835 if (pipe
.source
.zone
.value_or(rgw_zone_id()) == z
->second
.id
) {
2836 bucket_source_sync_status(dpp(), store
, zone
, z
->second
,
2847 static void parse_tier_config_param(const string
& s
, map
<string
, string
, ltstr_nocase
>& out
)
2855 confs
.push_back(cur_conf
);
2862 } else if (c
== '}') {
2867 if (!cur_conf
.empty()) {
2868 confs
.push_back(cur_conf
);
2871 for (auto c
: confs
) {
2872 ssize_t pos
= c
.find("=");
2876 out
[c
.substr(0, pos
)] = c
.substr(pos
+ 1);
2881 static int check_pool_support_omap(const rgw_pool
& pool
)
2883 librados::IoCtx io_ctx
;
2884 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->get_rados_handle()->ioctx_create(pool
.to_str().c_str(), io_ctx
);
2886 // the pool may not exist at this moment, we have no way to check if it supports omap.
2890 ret
= io_ctx
.omap_clear("__omap_test_not_exist_oid__");
2891 if (ret
== -EOPNOTSUPP
) {
2899 int check_reshard_bucket_params(rgw::sal::RadosStore
* store
,
2900 const string
& bucket_name
,
2901 const string
& tenant
,
2902 const string
& bucket_id
,
2903 bool num_shards_specified
,
2905 int yes_i_really_mean_it
,
2906 std::unique_ptr
<rgw::sal::Bucket
>* bucket
)
2908 if (bucket_name
.empty()) {
2909 cerr
<< "ERROR: bucket not specified" << std::endl
;
2913 if (!num_shards_specified
) {
2914 cerr
<< "ERROR: --num-shards not specified" << std::endl
;
2918 if (num_shards
> (int)static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->get_max_bucket_shards()) {
2919 cerr
<< "ERROR: num_shards too high, max value: " << static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->get_max_bucket_shards() << std::endl
;
2923 if (num_shards
< 0) {
2924 cerr
<< "ERROR: num_shards must be non-negative integer" << std::endl
;
2928 int ret
= init_bucket(nullptr, tenant
, bucket_name
, bucket_id
, bucket
);
2930 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
2934 if ((*bucket
)->get_info().reshard_status
!= cls_rgw_reshard_status::NOT_RESHARDING
) {
2935 // if in_progress or done then we have an old BucketInfo
2936 cerr
<< "ERROR: the bucket is currently undergoing resharding and "
2937 "cannot be added to the reshard list at this time" << std::endl
;
2941 int num_source_shards
= ((*bucket
)->get_info().layout
.current_index
.layout
.normal
.num_shards
> 0 ? (*bucket
)->get_info().layout
.current_index
.layout
.normal
.num_shards
: 1);
2943 if (num_shards
<= num_source_shards
&& !yes_i_really_mean_it
) {
2944 cerr
<< "num shards is less or equal to current shards count" << std::endl
2945 << "do you really mean it? (requires --yes-i-really-mean-it)" << std::endl
;
2951 static int scan_totp(CephContext
*cct
, ceph::real_time
& now
, rados::cls::otp::otp_info_t
& totp
, vector
<string
>& pins
,
2954 #define MAX_TOTP_SKEW_HOURS (24 * 7)
2955 time_t start_time
= ceph::real_clock::to_time_t(now
);
2956 time_t time_ofs
= 0, time_ofs_abs
= 0;
2957 time_t step_size
= totp
.step_size
;
2958 if (step_size
== 0) {
2959 step_size
= OATH_TOTP_DEFAULT_TIME_STEP_SIZE
;
2964 uint32_t max_skew
= MAX_TOTP_SKEW_HOURS
* 3600;
2966 while (time_ofs_abs
< max_skew
) {
2967 int rc
= oath_totp_validate2(totp
.seed_bin
.c_str(), totp
.seed_bin
.length(),
2974 if (rc
!= OATH_INVALID_OTP
) {
2975 rc
= oath_totp_validate2(totp
.seed_bin
.c_str(), totp
.seed_bin
.length(),
2978 time_ofs
- step_size
, /* smaller time_ofs moves time forward */
2982 if (rc
!= OATH_INVALID_OTP
) {
2983 *pofs
= time_ofs
- step_size
+ step_size
* totp
.window
/ 2;
2984 ldpp_dout(dpp(), 20) << "found at time=" << start_time
- time_ofs
<< " time_ofs=" << time_ofs
<< dendl
;
2989 time_ofs_abs
= (++count
) * step_size
;
2990 time_ofs
= sign
* time_ofs_abs
;
2996 static int trim_sync_error_log(int shard_id
, const string
& marker
, int delay_ms
)
2998 auto oid
= RGWSyncErrorLogger::get_shard_oid(RGW_SYNC_ERROR_LOG_SHARD_PREFIX
,
3000 // call cls_log_trim() until it returns -ENODATA
3002 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->timelog
.trim(dpp(), oid
, {}, {}, {}, marker
, nullptr,
3004 if (ret
== -ENODATA
) {
3011 std::this_thread::sleep_for(std::chrono::milliseconds(delay_ms
));
3017 const string
& get_tier_type(rgw::sal::RadosStore
* store
) {
3018 return store
->svc()->zone
->get_zone().tier_type
;
3021 static bool symmetrical_flow_opt(const string
& opt
)
3023 return (opt
== "symmetrical" || opt
== "symmetric");
3026 static bool directional_flow_opt(const string
& opt
)
3028 return (opt
== "directional" || opt
== "direction");
3032 static bool require_opt(std::optional
<T
> opt
, bool extra_check
= true)
3034 if (!opt
|| !extra_check
) {
3041 static bool require_non_empty_opt(std::optional
<T
> opt
, bool extra_check
= true)
3043 if (!opt
|| opt
->empty() || !extra_check
) {
3050 static void show_result(T
& obj
,
3051 Formatter
*formatter
,
3054 encode_json("obj", obj
, formatter
);
3056 formatter
->flush(cout
);
3059 void init_optional_bucket(std::optional
<rgw_bucket
>& opt_bucket
,
3060 std::optional
<string
>& opt_tenant
,
3061 std::optional
<string
>& opt_bucket_name
,
3062 std::optional
<string
>& opt_bucket_id
)
3064 if (opt_tenant
|| opt_bucket_name
|| opt_bucket_id
) {
3065 opt_bucket
.emplace();
3067 opt_bucket
->tenant
= *opt_tenant
;
3069 if (opt_bucket_name
) {
3070 opt_bucket
->name
= *opt_bucket_name
;
3072 if (opt_bucket_id
) {
3073 opt_bucket
->bucket_id
= *opt_bucket_id
;
3078 class SyncPolicyContext
3080 RGWZoneGroup zonegroup
;
3082 std::optional
<rgw_bucket
> b
;
3083 std::unique_ptr
<rgw::sal::Bucket
> bucket
;
3085 rgw_sync_policy_info
*policy
{nullptr};
3087 std::optional
<rgw_user
> owner
;
3090 SyncPolicyContext(const string
& zonegroup_id
,
3091 const string
& zonegroup_name
,
3092 std::optional
<rgw_bucket
> _bucket
) : zonegroup(zonegroup_id
, zonegroup_name
),
3096 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
3098 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
3103 policy
= &zonegroup
.sync_policy
;
3107 ret
= init_bucket(nullptr, *b
, &bucket
);
3109 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
3113 owner
= bucket
->get_info().owner
;
3115 if (!bucket
->get_info().sync_policy
) {
3116 rgw_sync_policy_info new_policy
;
3117 bucket
->get_info().set_sync_policy(std::move(new_policy
));
3120 policy
= &(*bucket
->get_info().sync_policy
);
3125 int write_policy() {
3127 int ret
= zonegroup
.update(dpp(), null_yield
);
3129 cerr
<< "failed to update zonegroup: " << cpp_strerror(-ret
) << std::endl
;
3135 int ret
= bucket
->put_info(dpp(), false, real_time());
3137 cerr
<< "failed to store bucket info: " << cpp_strerror(-ret
) << std::endl
;
3144 rgw_sync_policy_info
& get_policy() {
3148 std::optional
<rgw_user
>& get_owner() {
3153 void resolve_zone_id_opt(std::optional
<string
>& zone_name
, std::optional
<rgw_zone_id
>& zone_id
)
3155 if (!zone_name
|| zone_id
) {
3159 if (!static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->find_zone_id_by_name(*zone_name
, &(*zone_id
))) {
3160 cerr
<< "WARNING: cannot find source zone id for name=" << *zone_name
<< std::endl
;
3161 zone_id
= rgw_zone_id(*zone_name
);
3164 void resolve_zone_ids_opt(std::optional
<vector
<string
> >& names
, std::optional
<vector
<rgw_zone_id
> >& ids
)
3166 if (!names
|| ids
) {
3170 for (auto& name
: *names
) {
3172 if (!static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->find_zone_id_by_name(name
, &zid
)) {
3173 cerr
<< "WARNING: cannot find source zone id for name=" << name
<< std::endl
;
3174 zid
= rgw_zone_id(name
);
3176 ids
->push_back(zid
);
3180 static vector
<rgw_zone_id
> zone_ids_from_str(const string
& val
)
3182 vector
<rgw_zone_id
> result
;
3184 get_str_vec(val
, v
);
3186 result
.push_back(rgw_zone_id(z
));
3191 class JSONFormatter_PrettyZone
: public JSONFormatter
{
3192 class Handler
: public JSONEncodeFilter::Handler
<rgw_zone_id
> {
3193 void encode_json(const char *name
, const void *pval
, ceph::Formatter
*f
) const override
{
3194 auto zone_id
= *(static_cast<const rgw_zone_id
*>(pval
));
3197 if (static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->find_zone(zone_id
, &zone
)) {
3198 zone_name
= zone
->name
;
3200 cerr
<< "WARNING: cannot find zone name for id=" << zone_id
<< std::endl
;
3201 zone_name
= zone_id
.id
;
3204 ::encode_json(name
, zone_name
, f
);
3206 } zone_id_type_handler
;
3208 JSONEncodeFilter encode_filter
;
3210 JSONFormatter_PrettyZone(bool pretty_format
) : JSONFormatter(pretty_format
) {
3211 encode_filter
.register_type(&zone_id_type_handler
);
3214 void *get_external_feature_handler(const std::string
& feature
) override
{
3215 if (feature
!= "JSONEncodeFilter") {
3218 return &encode_filter
;
3222 static int search_entities_by_zone(rgw_zone_id zone_id
,
3225 RGWZoneGroup
*pzonegroup
,
3230 auto& found
= *pfound
;
3232 list
<string
> realms
;
3233 int r
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->list_realms(dpp(), realms
);
3235 cerr
<< "failed to list realms: " << cpp_strerror(-r
) << std::endl
;
3239 for (auto& realm_name
: realms
) {
3242 RGWRealm
realm(realm_id
, realm_name
);
3243 r
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
3245 cerr
<< "WARNING: can't open realm " << realm_name
<< ": " << cpp_strerror(-r
) << " ... skipping" << std::endl
;
3249 r
= realm
.find_zone(dpp(), zone_id
, pperiod
,
3250 pzonegroup
, &found
, null_yield
);
3261 static int try_to_resolve_local_zone(string
& zone_id
, string
& zone_name
)
3263 /* try to read zone info */
3264 RGWZoneParams
zone(zone_id
, zone_name
);
3265 int r
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
3267 ldpp_dout(dpp(), 20) << __func__
<< "(): local zone not found (id=" << zone_id
<< ", name= " << zone_name
<< ")" << dendl
;
3272 ldpp_dout(dpp(), 0) << __func__
<< "(): unable to read zone (id=" << zone_id
<< ", name= " << zone_name
<< "): " << cpp_strerror(-r
) << dendl
;
3277 zone_id
= zone
.get_id();
3278 zone_name
= zone
.get_name();
3283 static void check_set_consistent(const string
& resolved_param
,
3285 const string
& param_name
)
3287 if (!param
.empty() && param
!= resolved_param
) {
3288 ldpp_dout(dpp(), 5) << "WARNING: " << param_name
<< " resolve mismatch. (param=" << param
<< ", resolved=" << resolved_param
<< ")" << dendl
;
3292 param
= resolved_param
;
3293 ldpp_dout(dpp(), 20) << __func__
<< "(): resolved param: " << param_name
<< ": " << param
<< dendl
;
3297 static int try_to_resolve_local_entities(string
& realm_id
, string
& realm_name
,
3298 string
& zonegroup_id
, string
& zonegroup_name
,
3299 string
& zone_id
, string
& zone_name
)
3302 * Try to figure out realm, zonegroup, and zone entities, based on provided params and local zone.
3304 * First read the local zone info (for zone id/name). Then search existing realm and period
3305 * configuration and if found, update (but don't override) passed params.
3309 ldpp_dout(dpp(), 20) << __func__
<< "(): before: realm_id=" << realm_id
<< " realm_name=" << realm_name
<< " zonegroup_id=" << zonegroup_id
<< " zonegroup_name=" << zonegroup_name
<< " zone_id=" << zone_id
<< " zone_name=" << zone_name
<< dendl
;
3310 int r
= try_to_resolve_local_zone(zone_id
, zone_name
);
3312 /* this local zone doesn't exist, abort */
3319 if (zone_id
.empty()) {
3320 /* not sure it's possible, but let's abort */
3327 RGWZoneGroup zonegroup
;
3328 r
= search_entities_by_zone(zone_id
, &realm
, &period
, &zonegroup
, &found
);
3330 ldpp_dout(dpp(), 0) << "ERROR: error when searching for realm id (r=" << r
<< "), ignoring" << dendl
;
3338 check_set_consistent(realm
.get_id(), realm_id
, "realm id (--realm-id)");
3339 check_set_consistent(realm
.get_name(), realm_name
, "realm name (--rgw-realm)");
3340 check_set_consistent(zonegroup
.get_id(), zonegroup_id
, "zonegroup id (--zonegroup-id)");
3341 check_set_consistent(zonegroup
.get_name(), zonegroup_name
, "zonegroup name (--rgw-zonegroup)");
3343 ldpp_dout(dpp(), 20) << __func__
<< "(): after: realm_id=" << realm_id
<< " realm_name=" << realm_name
<< " zonegroup_id=" << zonegroup_id
<< " zonegroup_name=" << zonegroup_name
<< " zone_id=" << zone_id
<< " zone_name=" << zone_name
<< dendl
;
3348 static bool empty_opt(std::optional
<string
>& os
)
3350 return (!os
|| os
->empty());
3353 static string
safe_opt(std::optional
<string
>& os
)
3355 return os
.value_or(string());
3358 void init_realm_param(CephContext
*cct
, string
& var
, std::optional
<string
>& opt_var
, const string
& conf_name
)
3360 var
= cct
->_conf
.get_val
<string
>(conf_name
);
3366 int main(int argc
, const char **argv
)
3368 auto args
= argv_to_vec(argc
, argv
);
3370 cerr
<< argv
[0] << ": -h or --help for usage" << std::endl
;
3373 if (ceph_argparse_need_usage(args
)) {
3378 auto cct
= global_init(NULL
, args
, CEPH_ENTITY_TYPE_CLIENT
,
3379 CODE_ENVIRONMENT_UTILITY
, 0);
3381 // for region -> zonegroup conversion (must happen before common_init_finish())
3382 if (!g_conf()->rgw_region
.empty() && g_conf()->rgw_zonegroup
.empty()) {
3383 g_conf().set_val_or_die("rgw_zonegroup", g_conf()->rgw_region
.c_str());
3386 rgw_user user_id_arg
;
3387 std::unique_ptr
<rgw::sal::User
> user
;
3390 rgw_user new_user_id
;
3391 std::string access_key
, secret_key
, user_email
, display_name
;
3392 std::string bucket_name
, pool_name
, object
;
3394 std::string date
, subuser
, access
, format
;
3395 std::string start_date
, end_date
;
3396 std::string key_type_str
;
3397 std::string period_id
, period_epoch
, remote
, url
;
3398 std::optional
<string
> opt_region
;
3399 std::string master_zone
;
3400 std::string realm_name
, realm_id
, realm_new_name
;
3401 std::optional
<string
> opt_realm_name
, opt_realm_id
;
3402 std::string zone_name
, zone_id
, zone_new_name
;
3403 std::optional
<string
> opt_zone_name
, opt_zone_id
;
3404 std::string zonegroup_name
, zonegroup_id
, zonegroup_new_name
;
3405 std::optional
<string
> opt_zonegroup_name
, opt_zonegroup_id
;
3406 std::string api_name
;
3407 std::string role_name
, path
, assume_role_doc
, policy_name
, perm_policy_doc
, path_prefix
;
3408 std::string redirect_zone
;
3409 bool redirect_zone_set
= false;
3410 list
<string
> endpoints
;
3412 int sync_from_all_specified
= false;
3413 bool sync_from_all
= false;
3414 list
<string
> sync_from
;
3415 list
<string
> sync_from_rm
;
3417 int set_default
= 0;
3418 bool is_master
= false;
3419 bool is_master_set
= false;
3421 bool read_only
= false;
3422 int is_read_only_set
= false;
3424 int staging
= false;
3425 int key_type
= KEY_TYPE_UNDEFINED
;
3426 std::unique_ptr
<rgw::sal::Bucket
> bucket
;
3427 uint32_t perm_mask
= 0;
3429 OPT opt_cmd
= OPT::NO_CMD
;
3430 int gen_access_key
= 0;
3431 int gen_secret_key
= 0;
3432 bool set_perm
= false;
3433 bool set_temp_url_key
= false;
3434 map
<int, string
> temp_url_keys
;
3436 string new_bucket_name
;
3437 std::unique_ptr
<Formatter
> formatter
;
3438 std::unique_ptr
<Formatter
> zone_formatter
;
3439 int purge_data
= false;
3440 int pretty_format
= false;
3441 int show_log_entries
= true;
3442 int show_log_sum
= true;
3443 int skip_zero_entries
= false; // log show
3444 int purge_keys
= false;
3445 int yes_i_really_mean_it
= false;
3446 int delete_child_objects
= false;
3448 int remove_bad
= false;
3449 int check_head_obj_locator
= false;
3450 int max_buckets
= -1;
3451 bool max_buckets_specified
= false;
3452 map
<string
, bool> categories
;
3454 int check_objects
= false;
3455 RGWBucketAdminOpState bucket_op
;
3457 string metadata_key
;
3458 RGWObjVersionTracker objv_tracker
;
3460 string start_marker
;
3462 int max_entries
= -1;
3463 bool max_entries_specified
= false;
3465 bool admin_specified
= false;
3467 bool system_specified
= false;
3469 bool specified_shard_id
= false;
3474 string ratelimit_scope
;
3475 string object_version
;
3476 string placement_id
;
3477 std::optional
<string
> opt_storage_class
;
3479 list
<string
> tags_add
;
3480 list
<string
> tags_rm
;
3482 int64_t max_objects
= -1;
3483 int64_t max_size
= -1;
3484 int64_t max_read_ops
= 0;
3485 int64_t max_write_ops
= 0;
3486 int64_t max_read_bytes
= 0;
3487 int64_t max_write_bytes
= 0;
3488 bool have_max_objects
= false;
3489 bool have_max_size
= false;
3490 bool have_max_write_ops
= false;
3491 bool have_max_read_ops
= false;
3492 bool have_max_write_bytes
= false;
3493 bool have_max_read_bytes
= false;
3494 int include_all
= false;
3495 int allow_unordered
= false;
3497 int sync_stats
= false;
3498 int reset_stats
= false;
3499 int bypass_gc
= false;
3500 int warnings_only
= false;
3501 int inconsistent_index
= false;
3503 int verbose
= false;
3505 int extra_info
= false;
3507 uint64_t min_rewrite_size
= 4 * 1024 * 1024;
3508 uint64_t max_rewrite_size
= ULLONG_MAX
;
3509 uint64_t min_rewrite_stripe_size
= 0;
3511 BIIndexType bi_index_type
= BIIndexType::Plain
;
3512 std::optional
<log_type
> opt_log_type
;
3516 bool num_shards_specified
= false;
3517 std::optional
<int> bucket_index_max_shards
;
3518 int max_concurrent_ios
= 32;
3519 uint64_t orphan_stale_secs
= (24 * 3600);
3523 std::ostringstream errs
;
3526 string source_zone_name
;
3527 rgw_zone_id source_zone
; /* zone id */
3530 bool tier_type_specified
= false;
3532 map
<string
, string
, ltstr_nocase
> tier_config_add
;
3533 map
<string
, string
, ltstr_nocase
> tier_config_rm
;
3535 boost::optional
<string
> index_pool
;
3536 boost::optional
<string
> data_pool
;
3537 boost::optional
<string
> data_extra_pool
;
3538 rgw::BucketIndexType placement_index_type
= rgw::BucketIndexType::Normal
;
3539 bool index_type_specified
= false;
3541 boost::optional
<std::string
> compression_type
;
3545 string totp_seed_type
= "hex";
3546 vector
<string
> totp_pin
;
3547 int totp_seconds
= 0;
3548 int totp_window
= 0;
3549 int trim_delay_ms
= 0;
3555 std::optional
<std::string
> str_script_ctx
;
3556 std::optional
<std::string
> script_package
;
3557 int allow_compilation
= false;
3559 std::optional
<string
> opt_group_id
;
3560 std::optional
<string
> opt_status
;
3561 std::optional
<string
> opt_flow_type
;
3562 std::optional
<vector
<string
> > opt_zone_names
;
3563 std::optional
<vector
<rgw_zone_id
> > opt_zone_ids
;
3564 std::optional
<string
> opt_flow_id
;
3565 std::optional
<string
> opt_source_zone_name
;
3566 std::optional
<rgw_zone_id
> opt_source_zone_id
;
3567 std::optional
<string
> opt_dest_zone_name
;
3568 std::optional
<rgw_zone_id
> opt_dest_zone_id
;
3569 std::optional
<vector
<string
> > opt_source_zone_names
;
3570 std::optional
<vector
<rgw_zone_id
> > opt_source_zone_ids
;
3571 std::optional
<vector
<string
> > opt_dest_zone_names
;
3572 std::optional
<vector
<rgw_zone_id
> > opt_dest_zone_ids
;
3573 std::optional
<string
> opt_pipe_id
;
3574 std::optional
<rgw_bucket
> opt_bucket
;
3575 std::optional
<string
> opt_tenant
;
3576 std::optional
<string
> opt_bucket_name
;
3577 std::optional
<string
> opt_bucket_id
;
3578 std::optional
<rgw_bucket
> opt_source_bucket
;
3579 std::optional
<string
> opt_source_tenant
;
3580 std::optional
<string
> opt_source_bucket_name
;
3581 std::optional
<string
> opt_source_bucket_id
;
3582 std::optional
<rgw_bucket
> opt_dest_bucket
;
3583 std::optional
<string
> opt_dest_tenant
;
3584 std::optional
<string
> opt_dest_bucket_name
;
3585 std::optional
<string
> opt_dest_bucket_id
;
3586 std::optional
<string
> opt_effective_zone_name
;
3587 std::optional
<rgw_zone_id
> opt_effective_zone_id
;
3589 std::optional
<string
> opt_prefix
;
3590 std::optional
<string
> opt_prefix_rm
;
3592 std::optional
<int> opt_priority
;
3593 std::optional
<string
> opt_mode
;
3594 std::optional
<rgw_user
> opt_dest_owner
;
3595 ceph::timespan opt_retry_delay_ms
= std::chrono::milliseconds(2000);
3596 ceph::timespan opt_timeout_sec
= std::chrono::seconds(60);
3598 SimpleCmd
cmd(all_cmds
, cmd_aliases
);
3599 bool raw_storage_op
= false;
3601 std::optional
<std::string
> rgw_obj_fs
; // radoslist field separator
3603 init_realm_param(cct
.get(), realm_id
, opt_realm_id
, "rgw_realm_id");
3604 init_realm_param(cct
.get(), zonegroup_id
, opt_zonegroup_id
, "rgw_zonegroup_id");
3605 init_realm_param(cct
.get(), zone_id
, opt_zone_id
, "rgw_zone_id");
3607 for (std::vector
<const char*>::iterator i
= args
.begin(); i
!= args
.end(); ) {
3608 if (ceph_argparse_double_dash(args
, i
)) {
3610 } else if (ceph_argparse_witharg(args
, i
, &val
, "-i", "--uid", (char*)NULL
)) {
3611 user_id_arg
.from_str(val
);
3612 if (user_id_arg
.empty()) {
3613 cerr
<< "no value for uid" << std::endl
;
3616 } else if (ceph_argparse_witharg(args
, i
, &val
, "--new-uid", (char*)NULL
)) {
3617 new_user_id
.from_str(val
);
3618 } else if (ceph_argparse_witharg(args
, i
, &val
, "--tenant", (char*)NULL
)) {
3621 } else if (ceph_argparse_witharg(args
, i
, &val
, "--user_ns", (char*)NULL
)) {
3623 } else if (ceph_argparse_witharg(args
, i
, &val
, "--access-key", (char*)NULL
)) {
3625 } else if (ceph_argparse_witharg(args
, i
, &val
, "--subuser", (char*)NULL
)) {
3627 } else if (ceph_argparse_witharg(args
, i
, &val
, "--secret", "--secret-key", (char*)NULL
)) {
3629 } else if (ceph_argparse_witharg(args
, i
, &val
, "-e", "--email", (char*)NULL
)) {
3631 } else if (ceph_argparse_witharg(args
, i
, &val
, "-n", "--display-name", (char*)NULL
)) {
3633 } else if (ceph_argparse_witharg(args
, i
, &val
, "-b", "--bucket", (char*)NULL
)) {
3635 opt_bucket_name
= val
;
3636 } else if (ceph_argparse_witharg(args
, i
, &val
, "-p", "--pool", (char*)NULL
)) {
3638 pool
= rgw_pool(pool_name
);
3639 } else if (ceph_argparse_witharg(args
, i
, &val
, "-o", "--object", (char*)NULL
)) {
3641 } else if (ceph_argparse_witharg(args
, i
, &val
, "--object-version", (char*)NULL
)) {
3642 object_version
= val
;
3643 } else if (ceph_argparse_witharg(args
, i
, &val
, "--client-id", (char*)NULL
)) {
3645 } else if (ceph_argparse_witharg(args
, i
, &val
, "--op-id", (char*)NULL
)) {
3647 } else if (ceph_argparse_witharg(args
, i
, &val
, "--op-mask", (char*)NULL
)) {
3649 } else if (ceph_argparse_witharg(args
, i
, &val
, "--key-type", (char*)NULL
)) {
3651 if (key_type_str
.compare("swift") == 0) {
3652 key_type
= KEY_TYPE_SWIFT
;
3653 } else if (key_type_str
.compare("s3") == 0) {
3654 key_type
= KEY_TYPE_S3
;
3656 cerr
<< "bad key type: " << key_type_str
<< std::endl
;
3659 } else if (ceph_argparse_witharg(args
, i
, &val
, "--job-id", (char*)NULL
)) {
3661 } else if (ceph_argparse_binary_flag(args
, i
, &gen_access_key
, NULL
, "--gen-access-key", (char*)NULL
)) {
3663 } else if (ceph_argparse_binary_flag(args
, i
, &gen_secret_key
, NULL
, "--gen-secret", (char*)NULL
)) {
3665 } else if (ceph_argparse_binary_flag(args
, i
, &show_log_entries
, NULL
, "--show-log-entries", (char*)NULL
)) {
3667 } else if (ceph_argparse_binary_flag(args
, i
, &show_log_sum
, NULL
, "--show-log-sum", (char*)NULL
)) {
3669 } else if (ceph_argparse_binary_flag(args
, i
, &skip_zero_entries
, NULL
, "--skip-zero-entries", (char*)NULL
)) {
3671 } else if (ceph_argparse_binary_flag(args
, i
, &admin
, NULL
, "--admin", (char*)NULL
)) {
3672 admin_specified
= true;
3673 } else if (ceph_argparse_binary_flag(args
, i
, &system
, NULL
, "--system", (char*)NULL
)) {
3674 system_specified
= true;
3675 } else if (ceph_argparse_binary_flag(args
, i
, &verbose
, NULL
, "--verbose", (char*)NULL
)) {
3677 } else if (ceph_argparse_binary_flag(args
, i
, &staging
, NULL
, "--staging", (char*)NULL
)) {
3679 } else if (ceph_argparse_binary_flag(args
, i
, &commit
, NULL
, "--commit", (char*)NULL
)) {
3681 } else if (ceph_argparse_witharg(args
, i
, &val
, "--min-rewrite-size", (char*)NULL
)) {
3682 min_rewrite_size
= (uint64_t)atoll(val
.c_str());
3683 } else if (ceph_argparse_witharg(args
, i
, &val
, "--max-rewrite-size", (char*)NULL
)) {
3684 max_rewrite_size
= (uint64_t)atoll(val
.c_str());
3685 } else if (ceph_argparse_witharg(args
, i
, &val
, "--min-rewrite-stripe-size", (char*)NULL
)) {
3686 min_rewrite_stripe_size
= (uint64_t)atoll(val
.c_str());
3687 } else if (ceph_argparse_witharg(args
, i
, &val
, "--max-buckets", (char*)NULL
)) {
3688 max_buckets
= (int)strict_strtol(val
.c_str(), 10, &err
);
3690 cerr
<< "ERROR: failed to parse max buckets: " << err
<< std::endl
;
3693 max_buckets_specified
= true;
3694 } else if (ceph_argparse_witharg(args
, i
, &val
, "--max-entries", (char*)NULL
)) {
3695 max_entries
= (int)strict_strtol(val
.c_str(), 10, &err
);
3696 max_entries_specified
= true;
3698 cerr
<< "ERROR: failed to parse max entries: " << err
<< std::endl
;
3701 } else if (ceph_argparse_witharg(args
, i
, &val
, "--max-size", (char*)NULL
)) {
3702 max_size
= strict_iec_cast
<long long>(val
, &err
);
3704 cerr
<< "ERROR: failed to parse max size: " << err
<< std::endl
;
3707 have_max_size
= true;
3708 } else if (ceph_argparse_witharg(args
, i
, &val
, "--max-objects", (char*)NULL
)) {
3709 max_objects
= (int64_t)strict_strtoll(val
.c_str(), 10, &err
);
3711 cerr
<< "ERROR: failed to parse max objects: " << err
<< std::endl
;
3714 have_max_objects
= true;
3715 } else if (ceph_argparse_witharg(args
, i
, &val
, "--max-read-ops", (char*)NULL
)) {
3716 max_read_ops
= (int64_t)strict_strtoll(val
.c_str(), 10, &err
);
3718 cerr
<< "ERROR: failed to parse max read requests: " << err
<< std::endl
;
3721 have_max_read_ops
= true;
3722 } else if (ceph_argparse_witharg(args
, i
, &val
, "--max-write-ops", (char*)NULL
)) {
3723 max_write_ops
= (int64_t)strict_strtoll(val
.c_str(), 10, &err
);
3725 cerr
<< "ERROR: failed to parse max write requests: " << err
<< std::endl
;
3728 have_max_write_ops
= true;
3729 } else if (ceph_argparse_witharg(args
, i
, &val
, "--max-read-bytes", (char*)NULL
)) {
3730 max_read_bytes
= (int64_t)strict_strtoll(val
.c_str(), 10, &err
);
3732 cerr
<< "ERROR: failed to parse max read bytes: " << err
<< std::endl
;
3735 have_max_read_bytes
= true;
3736 } else if (ceph_argparse_witharg(args
, i
, &val
, "--max-write-bytes", (char*)NULL
)) {
3737 max_write_bytes
= (int64_t)strict_strtoll(val
.c_str(), 10, &err
);
3739 cerr
<< "ERROR: failed to parse max write bytes: " << err
<< std::endl
;
3742 have_max_write_bytes
= true;
3743 } else if (ceph_argparse_witharg(args
, i
, &val
, "--date", "--time", (char*)NULL
)) {
3745 if (end_date
.empty())
3747 } else if (ceph_argparse_witharg(args
, i
, &val
, "--start-date", "--start-time", (char*)NULL
)) {
3749 } else if (ceph_argparse_witharg(args
, i
, &val
, "--end-date", "--end-time", (char*)NULL
)) {
3751 } else if (ceph_argparse_witharg(args
, i
, &val
, "--num-shards", (char*)NULL
)) {
3752 num_shards
= (int)strict_strtol(val
.c_str(), 10, &err
);
3754 cerr
<< "ERROR: failed to parse num shards: " << err
<< std::endl
;
3757 num_shards_specified
= true;
3758 } else if (ceph_argparse_witharg(args
, i
, &val
, "--bucket-index-max-shards", (char*)NULL
)) {
3759 bucket_index_max_shards
= (int)strict_strtol(val
.c_str(), 10, &err
);
3761 cerr
<< "ERROR: failed to parse bucket-index-max-shards: " << err
<< std::endl
;
3764 } else if (ceph_argparse_witharg(args
, i
, &val
, "--max-concurrent-ios", (char*)NULL
)) {
3765 max_concurrent_ios
= (int)strict_strtol(val
.c_str(), 10, &err
);
3767 cerr
<< "ERROR: failed to parse max concurrent ios: " << err
<< std::endl
;
3770 } else if (ceph_argparse_witharg(args
, i
, &val
, "--orphan-stale-secs", (char*)NULL
)) {
3771 orphan_stale_secs
= (uint64_t)strict_strtoll(val
.c_str(), 10, &err
);
3773 cerr
<< "ERROR: failed to parse orphan stale secs: " << err
<< std::endl
;
3776 } else if (ceph_argparse_witharg(args
, i
, &val
, "--shard-id", (char*)NULL
)) {
3777 shard_id
= (int)strict_strtol(val
.c_str(), 10, &err
);
3779 cerr
<< "ERROR: failed to parse shard id: " << err
<< std::endl
;
3782 specified_shard_id
= true;
3783 } else if (ceph_argparse_witharg(args
, i
, &val
, "--access", (char*)NULL
)) {
3785 perm_mask
= rgw_str_to_perm(access
.c_str());
3787 } else if (ceph_argparse_witharg(args
, i
, &val
, "--temp-url-key", (char*)NULL
)) {
3788 temp_url_keys
[0] = val
;
3789 set_temp_url_key
= true;
3790 } else if (ceph_argparse_witharg(args
, i
, &val
, "--temp-url-key2", "--temp-url-key-2", (char*)NULL
)) {
3791 temp_url_keys
[1] = val
;
3792 set_temp_url_key
= true;
3793 } else if (ceph_argparse_witharg(args
, i
, &val
, "--bucket-id", (char*)NULL
)) {
3795 opt_bucket_id
= val
;
3796 if (bucket_id
.empty()) {
3797 cerr
<< "no value for bucket-id" << std::endl
;
3800 } else if (ceph_argparse_witharg(args
, i
, &val
, "--bucket-new-name", (char*)NULL
)) {
3801 new_bucket_name
= val
;
3802 } else if (ceph_argparse_witharg(args
, i
, &val
, "--format", (char*)NULL
)) {
3804 } else if (ceph_argparse_witharg(args
, i
, &val
, "--categories", (char*)NULL
)) {
3805 string cat_str
= val
;
3806 list
<string
> cat_list
;
3807 list
<string
>::iterator iter
;
3808 get_str_list(cat_str
, cat_list
);
3809 for (iter
= cat_list
.begin(); iter
!= cat_list
.end(); ++iter
) {
3810 categories
[*iter
] = true;
3812 } else if (ceph_argparse_binary_flag(args
, i
, &delete_child_objects
, NULL
, "--purge-objects", (char*)NULL
)) {
3814 } else if (ceph_argparse_binary_flag(args
, i
, &pretty_format
, NULL
, "--pretty-format", (char*)NULL
)) {
3816 } else if (ceph_argparse_binary_flag(args
, i
, &purge_data
, NULL
, "--purge-data", (char*)NULL
)) {
3817 delete_child_objects
= purge_data
;
3818 } else if (ceph_argparse_binary_flag(args
, i
, &purge_keys
, NULL
, "--purge-keys", (char*)NULL
)) {
3820 } else if (ceph_argparse_binary_flag(args
, i
, &yes_i_really_mean_it
, NULL
, "--yes-i-really-mean-it", (char*)NULL
)) {
3822 } else if (ceph_argparse_binary_flag(args
, i
, &fix
, NULL
, "--fix", (char*)NULL
)) {
3824 } else if (ceph_argparse_binary_flag(args
, i
, &remove_bad
, NULL
, "--remove-bad", (char*)NULL
)) {
3826 } else if (ceph_argparse_binary_flag(args
, i
, &check_head_obj_locator
, NULL
, "--check-head-obj-locator", (char*)NULL
)) {
3828 } else if (ceph_argparse_binary_flag(args
, i
, &check_objects
, NULL
, "--check-objects", (char*)NULL
)) {
3830 } else if (ceph_argparse_binary_flag(args
, i
, &sync_stats
, NULL
, "--sync-stats", (char*)NULL
)) {
3832 } else if (ceph_argparse_binary_flag(args
, i
, &reset_stats
, NULL
, "--reset-stats", (char*)NULL
)) {
3834 } else if (ceph_argparse_binary_flag(args
, i
, &include_all
, NULL
, "--include-all", (char*)NULL
)) {
3836 } else if (ceph_argparse_binary_flag(args
, i
, &allow_unordered
, NULL
, "--allow-unordered", (char*)NULL
)) {
3838 } else if (ceph_argparse_binary_flag(args
, i
, &extra_info
, NULL
, "--extra-info", (char*)NULL
)) {
3840 } else if (ceph_argparse_binary_flag(args
, i
, &bypass_gc
, NULL
, "--bypass-gc", (char*)NULL
)) {
3842 } else if (ceph_argparse_binary_flag(args
, i
, &warnings_only
, NULL
, "--warnings-only", (char*)NULL
)) {
3844 } else if (ceph_argparse_binary_flag(args
, i
, &inconsistent_index
, NULL
, "--inconsistent-index", (char*)NULL
)) {
3846 } else if (ceph_argparse_witharg(args
, i
, &val
, "--caps", (char*)NULL
)) {
3848 } else if (ceph_argparse_witharg(args
, i
, &val
, "--infile", (char*)NULL
)) {
3850 } else if (ceph_argparse_witharg(args
, i
, &val
, "--metadata-key", (char*)NULL
)) {
3852 } else if (ceph_argparse_witharg(args
, i
, &val
, "--marker", (char*)NULL
)) {
3854 } else if (ceph_argparse_witharg(args
, i
, &val
, "--start-marker", (char*)NULL
)) {
3856 } else if (ceph_argparse_witharg(args
, i
, &val
, "--end-marker", (char*)NULL
)) {
3858 } else if (ceph_argparse_witharg(args
, i
, &val
, "--quota-scope", (char*)NULL
)) {
3860 } else if (ceph_argparse_witharg(args
, i
, &val
, "--ratelimit-scope", (char*)NULL
)) {
3861 ratelimit_scope
= val
;
3862 } else if (ceph_argparse_witharg(args
, i
, &val
, "--index-type", (char*)NULL
)) {
3863 string index_type_str
= val
;
3864 bi_index_type
= get_bi_index_type(index_type_str
);
3865 if (bi_index_type
== BIIndexType::Invalid
) {
3866 cerr
<< "ERROR: invalid bucket index entry type" << std::endl
;
3869 } else if (ceph_argparse_witharg(args
, i
, &val
, "--log-type", (char*)NULL
)) {
3870 string log_type_str
= val
;
3871 auto l
= get_log_type(log_type_str
);
3872 if (l
== static_cast<log_type
>(0xff)) {
3873 cerr
<< "ERROR: invalid log type" << std::endl
;
3877 } else if (ceph_argparse_binary_flag(args
, i
, &is_master_int
, NULL
, "--master", (char*)NULL
)) {
3878 is_master
= (bool)is_master_int
;
3879 is_master_set
= true;
3880 } else if (ceph_argparse_binary_flag(args
, i
, &set_default
, NULL
, "--default", (char*)NULL
)) {
3882 } else if (ceph_argparse_witharg(args
, i
, &val
, "--redirect-zone", (char*)NULL
)) {
3883 redirect_zone
= val
;
3884 redirect_zone_set
= true;
3885 } else if (ceph_argparse_binary_flag(args
, i
, &read_only_int
, NULL
, "--read-only", (char*)NULL
)) {
3886 read_only
= (bool)read_only_int
;
3887 is_read_only_set
= true;
3888 } else if (ceph_argparse_witharg(args
, i
, &val
, "--master-zone", (char*)NULL
)) {
3890 } else if (ceph_argparse_witharg(args
, i
, &val
, "--period", (char*)NULL
)) {
3892 } else if (ceph_argparse_witharg(args
, i
, &val
, "--epoch", (char*)NULL
)) {
3894 } else if (ceph_argparse_witharg(args
, i
, &val
, "--remote", (char*)NULL
)) {
3896 } else if (ceph_argparse_witharg(args
, i
, &val
, "--url", (char*)NULL
)) {
3898 } else if (ceph_argparse_witharg(args
, i
, &val
, "--region", (char*)NULL
)) {
3900 } else if (ceph_argparse_witharg(args
, i
, &val
, "--realm-id", (char*)NULL
)) {
3903 g_conf().set_val("rgw_realm_id", val
);
3904 } else if (ceph_argparse_witharg(args
, i
, &val
, "--realm-new-name", (char*)NULL
)) {
3905 realm_new_name
= val
;
3906 } else if (ceph_argparse_witharg(args
, i
, &val
, "--zonegroup-id", (char*)NULL
)) {
3908 opt_zonegroup_id
= val
;
3909 g_conf().set_val("rgw_zonegroup_id", val
);
3910 } else if (ceph_argparse_witharg(args
, i
, &val
, "--zonegroup-new-name", (char*)NULL
)) {
3911 zonegroup_new_name
= val
;
3912 } else if (ceph_argparse_witharg(args
, i
, &val
, "--placement-id", (char*)NULL
)) {
3914 } else if (ceph_argparse_witharg(args
, i
, &val
, "--storage-class", (char*)NULL
)) {
3915 opt_storage_class
= val
;
3916 } else if (ceph_argparse_witharg(args
, i
, &val
, "--tags", (char*)NULL
)) {
3917 get_str_list(val
, ",", tags
);
3918 } else if (ceph_argparse_witharg(args
, i
, &val
, "--tags-add", (char*)NULL
)) {
3919 get_str_list(val
, ",", tags_add
);
3920 } else if (ceph_argparse_witharg(args
, i
, &val
, "--tags-rm", (char*)NULL
)) {
3921 get_str_list(val
, ",", tags_rm
);
3922 } else if (ceph_argparse_witharg(args
, i
, &val
, "--api-name", (char*)NULL
)) {
3924 } else if (ceph_argparse_witharg(args
, i
, &val
, "--zone-id", (char*)NULL
)) {
3927 g_conf().set_val("rgw_zone_id", val
);
3928 } else if (ceph_argparse_witharg(args
, i
, &val
, "--zone-new-name", (char*)NULL
)) {
3929 zone_new_name
= val
;
3930 } else if (ceph_argparse_witharg(args
, i
, &val
, "--endpoints", (char*)NULL
)) {
3931 get_str_list(val
, endpoints
);
3932 } else if (ceph_argparse_witharg(args
, i
, &val
, "--sync-from", (char*)NULL
)) {
3933 get_str_list(val
, sync_from
);
3934 } else if (ceph_argparse_witharg(args
, i
, &val
, "--sync-from-rm", (char*)NULL
)) {
3935 get_str_list(val
, sync_from_rm
);
3936 } else if (ceph_argparse_binary_flag(args
, i
, &tmp_int
, NULL
, "--sync-from-all", (char*)NULL
)) {
3937 sync_from_all
= (bool)tmp_int
;
3938 sync_from_all_specified
= true;
3939 } else if (ceph_argparse_witharg(args
, i
, &val
, "--source-zone", (char*)NULL
)) {
3940 source_zone_name
= val
;
3941 opt_source_zone_name
= val
;
3942 } else if (ceph_argparse_witharg(args
, i
, &val
, "--source-zone-id", (char*)NULL
)) {
3943 opt_source_zone_id
= val
;
3944 } else if (ceph_argparse_witharg(args
, i
, &val
, "--dest-zone", (char*)NULL
)) {
3945 opt_dest_zone_name
= val
;
3946 } else if (ceph_argparse_witharg(args
, i
, &val
, "--dest-zone-id", (char*)NULL
)) {
3947 opt_dest_zone_id
= val
;
3948 } else if (ceph_argparse_witharg(args
, i
, &val
, "--tier-type", (char*)NULL
)) {
3950 tier_type_specified
= true;
3951 } else if (ceph_argparse_witharg(args
, i
, &val
, "--tier-config", (char*)NULL
)) {
3952 parse_tier_config_param(val
, tier_config_add
);
3953 } else if (ceph_argparse_witharg(args
, i
, &val
, "--tier-config-rm", (char*)NULL
)) {
3954 parse_tier_config_param(val
, tier_config_rm
);
3955 } else if (ceph_argparse_witharg(args
, i
, &val
, "--index-pool", (char*)NULL
)) {
3957 } else if (ceph_argparse_witharg(args
, i
, &val
, "--data-pool", (char*)NULL
)) {
3959 } else if (ceph_argparse_witharg(args
, i
, &val
, "--data-extra-pool", (char*)NULL
)) {
3960 data_extra_pool
= val
;
3961 } else if (ceph_argparse_witharg(args
, i
, &val
, "--placement-index-type", (char*)NULL
)) {
3962 if (val
== "normal") {
3963 placement_index_type
= rgw::BucketIndexType::Normal
;
3964 } else if (val
== "indexless") {
3965 placement_index_type
= rgw::BucketIndexType::Indexless
;
3967 placement_index_type
= (rgw::BucketIndexType
)strict_strtol(val
.c_str(), 10, &err
);
3969 cerr
<< "ERROR: failed to parse index type index: " << err
<< std::endl
;
3973 index_type_specified
= true;
3974 } else if (ceph_argparse_witharg(args
, i
, &val
, "--compression", (char*)NULL
)) {
3975 compression_type
= val
;
3976 } else if (ceph_argparse_witharg(args
, i
, &val
, "--role-name", (char*)NULL
)) {
3978 } else if (ceph_argparse_witharg(args
, i
, &val
, "--path", (char*)NULL
)) {
3980 } else if (ceph_argparse_witharg(args
, i
, &val
, "--assume-role-policy-doc", (char*)NULL
)) {
3981 assume_role_doc
= val
;
3982 } else if (ceph_argparse_witharg(args
, i
, &val
, "--policy-name", (char*)NULL
)) {
3984 } else if (ceph_argparse_witharg(args
, i
, &val
, "--policy-doc", (char*)NULL
)) {
3985 perm_policy_doc
= val
;
3986 } else if (ceph_argparse_witharg(args
, i
, &val
, "--path-prefix", (char*)NULL
)) {
3988 } else if (ceph_argparse_witharg(args
, i
, &val
, "--totp-serial", (char*)NULL
)) {
3990 } else if (ceph_argparse_witharg(args
, i
, &val
, "--totp-pin", (char*)NULL
)) {
3991 totp_pin
.push_back(val
);
3992 } else if (ceph_argparse_witharg(args
, i
, &val
, "--totp-seed", (char*)NULL
)) {
3994 } else if (ceph_argparse_witharg(args
, i
, &val
, "--totp-seed-type", (char*)NULL
)) {
3995 totp_seed_type
= val
;
3996 } else if (ceph_argparse_witharg(args
, i
, &val
, "--totp-seconds", (char*)NULL
)) {
3997 totp_seconds
= atoi(val
.c_str());
3998 } else if (ceph_argparse_witharg(args
, i
, &val
, "--totp-window", (char*)NULL
)) {
3999 totp_window
= atoi(val
.c_str());
4000 } else if (ceph_argparse_witharg(args
, i
, &val
, "--trim-delay-ms", (char*)NULL
)) {
4001 trim_delay_ms
= atoi(val
.c_str());
4002 } else if (ceph_argparse_witharg(args
, i
, &val
, "--topic", (char*)NULL
)) {
4004 } else if (ceph_argparse_witharg(args
, i
, &val
, "--subscription", (char*)NULL
)) {
4006 } else if (ceph_argparse_witharg(args
, i
, &val
, "--event-id", (char*)NULL
)) {
4008 } else if (ceph_argparse_witharg(args
, i
, &val
, "--group-id", (char*)NULL
)) {
4010 } else if (ceph_argparse_witharg(args
, i
, &val
, "--status", (char*)NULL
)) {
4012 } else if (ceph_argparse_witharg(args
, i
, &val
, "--flow-type", (char*)NULL
)) {
4013 opt_flow_type
= val
;
4014 } else if (ceph_argparse_witharg(args
, i
, &val
, "--zones", "--zone-names", (char*)NULL
)) {
4016 get_str_vec(val
, v
);
4017 opt_zone_names
= std::move(v
);
4018 } else if (ceph_argparse_witharg(args
, i
, &val
, "--zone-ids", (char*)NULL
)) {
4019 opt_zone_ids
= zone_ids_from_str(val
);
4020 } else if (ceph_argparse_witharg(args
, i
, &val
, "--source-zones", "--source-zone-names", (char*)NULL
)) {
4022 get_str_vec(val
, v
);
4023 opt_source_zone_names
= std::move(v
);
4024 } else if (ceph_argparse_witharg(args
, i
, &val
, "--source-zone-ids", (char*)NULL
)) {
4025 opt_source_zone_ids
= zone_ids_from_str(val
);
4026 } else if (ceph_argparse_witharg(args
, i
, &val
, "--dest-zones", "--dest-zone-names", (char*)NULL
)) {
4028 get_str_vec(val
, v
);
4029 opt_dest_zone_names
= std::move(v
);
4030 } else if (ceph_argparse_witharg(args
, i
, &val
, "--dest-zone-ids", (char*)NULL
)) {
4031 opt_dest_zone_ids
= zone_ids_from_str(val
);
4032 } else if (ceph_argparse_witharg(args
, i
, &val
, "--flow-id", (char*)NULL
)) {
4034 } else if (ceph_argparse_witharg(args
, i
, &val
, "--pipe-id", (char*)NULL
)) {
4036 } else if (ceph_argparse_witharg(args
, i
, &val
, "--source-tenant", (char*)NULL
)) {
4037 opt_source_tenant
= val
;
4038 } else if (ceph_argparse_witharg(args
, i
, &val
, "--source-bucket", (char*)NULL
)) {
4039 opt_source_bucket_name
= val
;
4040 } else if (ceph_argparse_witharg(args
, i
, &val
, "--source-bucket-id", (char*)NULL
)) {
4041 opt_source_bucket_id
= val
;
4042 } else if (ceph_argparse_witharg(args
, i
, &val
, "--dest-tenant", (char*)NULL
)) {
4043 opt_dest_tenant
= val
;
4044 } else if (ceph_argparse_witharg(args
, i
, &val
, "--dest-bucket", (char*)NULL
)) {
4045 opt_dest_bucket_name
= val
;
4046 } else if (ceph_argparse_witharg(args
, i
, &val
, "--dest-bucket-id", (char*)NULL
)) {
4047 opt_dest_bucket_id
= val
;
4048 } else if (ceph_argparse_witharg(args
, i
, &val
, "--effective-zone-name", "--effective-zone", (char*)NULL
)) {
4049 opt_effective_zone_name
= val
;
4050 } else if (ceph_argparse_witharg(args
, i
, &val
, "--effective-zone-id", (char*)NULL
)) {
4051 opt_effective_zone_id
= rgw_zone_id(val
);
4052 } else if (ceph_argparse_witharg(args
, i
, &val
, "--prefix", (char*)NULL
)) {
4054 } else if (ceph_argparse_witharg(args
, i
, &val
, "--prefix-rm", (char*)NULL
)) {
4055 opt_prefix_rm
= val
;
4056 } else if (ceph_argparse_witharg(args
, i
, &val
, "--priority", (char*)NULL
)) {
4057 opt_priority
= atoi(val
.c_str());
4058 } else if (ceph_argparse_witharg(args
, i
, &val
, "--mode", (char*)NULL
)) {
4060 } else if (ceph_argparse_witharg(args
, i
, &val
, "--dest-owner", (char*)NULL
)) {
4061 opt_dest_owner
.emplace(val
);
4062 opt_dest_owner
= val
;
4063 } else if (ceph_argparse_witharg(args
, i
, &val
, "--retry-delay-ms", (char*)NULL
)) {
4064 opt_retry_delay_ms
= std::chrono::milliseconds(atoi(val
.c_str()));
4065 } else if (ceph_argparse_witharg(args
, i
, &val
, "--timeout-sec", (char*)NULL
)) {
4066 opt_timeout_sec
= std::chrono::seconds(atoi(val
.c_str()));
4067 } else if (ceph_argparse_binary_flag(args
, i
, &detail
, NULL
, "--detail", (char*)NULL
)) {
4069 } else if (ceph_argparse_witharg(args
, i
, &val
, "--context", (char*)NULL
)) {
4070 str_script_ctx
= val
;
4071 } else if (ceph_argparse_witharg(args
, i
, &val
, "--package", (char*)NULL
)) {
4072 script_package
= val
;
4073 } else if (ceph_argparse_binary_flag(args
, i
, &allow_compilation
, NULL
, "--allow-compilation", (char*)NULL
)) {
4075 } else if (ceph_argparse_witharg(args
, i
, &val
, "--rgw-obj-fs", (char*)NULL
)) {
4077 } else if (strncmp(*i
, "-", 1) == 0) {
4078 cerr
<< "ERROR: invalid flag " << *i
<< std::endl
;
4085 /* common_init_finish needs to be called after g_conf().set_val() */
4086 common_init_finish(g_ceph_context
);
4093 std::vector
<string
> extra_args
;
4094 std::vector
<string
> expected
;
4098 if (!cmd
.find_command(args
, &_opt_cmd
, &extra_args
, &err
, &expected
)) {
4099 if (!expected
.empty()) {
4100 cerr
<< err
<< std::endl
;
4101 cerr
<< "Expected one of the following:" << std::endl
;
4102 for (auto& exp
: expected
) {
4103 if (exp
== "*" || exp
== "[*]") {
4106 cerr
<< " " << exp
<< std::endl
;
4109 cerr
<< "Command not found:";
4110 for (auto& arg
: args
) {
4118 opt_cmd
= std::any_cast
<OPT
>(_opt_cmd
);
4120 /* some commands may have an optional extra param */
4121 if (!extra_args
.empty()) {
4123 case OPT::METADATA_GET
:
4124 case OPT::METADATA_PUT
:
4125 case OPT::METADATA_RM
:
4126 case OPT::METADATA_LIST
:
4127 metadata_key
= extra_args
[0];
4134 // not a raw op if 'period update' needs to commit to master
4135 bool raw_period_update
= opt_cmd
== OPT::PERIOD_UPDATE
&& !commit
;
4136 // not a raw op if 'period pull' needs to read zone/period configuration
4137 bool raw_period_pull
= opt_cmd
== OPT::PERIOD_PULL
&& !url
.empty();
4139 std::set
<OPT
> raw_storage_ops_list
= {OPT::ZONEGROUP_ADD
, OPT::ZONEGROUP_CREATE
,
4140 OPT::ZONEGROUP_DELETE
,
4141 OPT::ZONEGROUP_GET
, OPT::ZONEGROUP_LIST
,
4142 OPT::ZONEGROUP_SET
, OPT::ZONEGROUP_DEFAULT
,
4143 OPT::ZONEGROUP_RENAME
, OPT::ZONEGROUP_MODIFY
,
4144 OPT::ZONEGROUP_REMOVE
,
4145 OPT::ZONEGROUP_PLACEMENT_ADD
, OPT::ZONEGROUP_PLACEMENT_RM
,
4146 OPT::ZONEGROUP_PLACEMENT_MODIFY
, OPT::ZONEGROUP_PLACEMENT_LIST
,
4147 OPT::ZONEGROUP_PLACEMENT_GET
,
4148 OPT::ZONEGROUP_PLACEMENT_DEFAULT
,
4149 OPT::ZONE_CREATE
, OPT::ZONE_DELETE
,
4150 OPT::ZONE_GET
, OPT::ZONE_SET
, OPT::ZONE_RENAME
,
4151 OPT::ZONE_LIST
, OPT::ZONE_MODIFY
, OPT::ZONE_DEFAULT
,
4152 OPT::ZONE_PLACEMENT_ADD
, OPT::ZONE_PLACEMENT_RM
,
4153 OPT::ZONE_PLACEMENT_MODIFY
, OPT::ZONE_PLACEMENT_LIST
,
4154 OPT::ZONE_PLACEMENT_GET
,
4156 OPT::PERIOD_DELETE
, OPT::PERIOD_GET
,
4157 OPT::PERIOD_GET_CURRENT
, OPT::PERIOD_LIST
,
4158 OPT::GLOBAL_QUOTA_GET
, OPT::GLOBAL_QUOTA_SET
,
4159 OPT::GLOBAL_QUOTA_ENABLE
, OPT::GLOBAL_QUOTA_DISABLE
,
4160 OPT::GLOBAL_RATELIMIT_GET
, OPT::GLOBAL_RATELIMIT_SET
,
4161 OPT::GLOBAL_RATELIMIT_ENABLE
, OPT::GLOBAL_RATELIMIT_DISABLE
,
4162 OPT::REALM_DELETE
, OPT::REALM_GET
, OPT::REALM_LIST
,
4163 OPT::REALM_LIST_PERIODS
,
4164 OPT::REALM_GET_DEFAULT
,
4165 OPT::REALM_RENAME
, OPT::REALM_SET
,
4166 OPT::REALM_DEFAULT
, OPT::REALM_PULL
};
4168 std::set
<OPT
> readonly_ops_list
= {
4172 OPT::BUCKET_LIMIT_CHECK
,
4174 OPT::BUCKET_SYNC_CHECKPOINT
,
4175 OPT::BUCKET_SYNC_INFO
,
4176 OPT::BUCKET_SYNC_STATUS
,
4177 OPT::BUCKET_SYNC_MARKERS
,
4188 OPT::ORPHANS_LIST_JOBS
,
4190 OPT::ZONEGROUP_LIST
,
4191 OPT::ZONEGROUP_PLACEMENT_LIST
,
4192 OPT::ZONEGROUP_PLACEMENT_GET
,
4195 OPT::ZONE_PLACEMENT_LIST
,
4196 OPT::ZONE_PLACEMENT_GET
,
4199 OPT::METADATA_SYNC_STATUS
,
4202 OPT::SYNC_ERROR_LIST
,
4203 OPT::SYNC_GROUP_GET
,
4204 OPT::SYNC_POLICY_GET
,
4207 OPT::DATA_SYNC_STATUS
,
4209 OPT::DATALOG_STATUS
,
4211 OPT::REALM_GET_DEFAULT
,
4213 OPT::REALM_LIST_PERIODS
,
4215 OPT::PERIOD_GET_CURRENT
,
4217 OPT::GLOBAL_QUOTA_GET
,
4218 OPT::GLOBAL_RATELIMIT_GET
,
4223 OPT::ROLE_POLICY_LIST
,
4224 OPT::ROLE_POLICY_GET
,
4226 OPT::RESHARD_STATUS
,
4227 OPT::PUBSUB_TOPICS_LIST
,
4228 OPT::PUBSUB_TOPIC_GET
,
4229 OPT::PUBSUB_SUB_GET
,
4230 OPT::PUBSUB_SUB_PULL
,
4234 std::set
<OPT
> gc_ops_list
= {
4238 OPT::BUCKET_RM
, // --purge-objects
4239 OPT::USER_RM
, // --purge-data
4240 OPT::OBJECTS_EXPIRE
,
4241 OPT::OBJECTS_EXPIRE_STALE_RM
,
4243 OPT::BUCKET_SYNC_RUN
,
4247 raw_storage_op
= (raw_storage_ops_list
.find(opt_cmd
) != raw_storage_ops_list
.end() ||
4248 raw_period_update
|| raw_period_pull
);
4249 bool need_cache
= readonly_ops_list
.find(opt_cmd
) == readonly_ops_list
.end();
4250 bool need_gc
= (gc_ops_list
.find(opt_cmd
) != gc_ops_list
.end()) && !bypass_gc
;
4252 if (raw_storage_op
) {
4253 store
= StoreManager::get_raw_storage(dpp(), g_ceph_context
, "rados");
4255 store
= StoreManager::get_storage(dpp(), g_ceph_context
, "rados", false, false, false,
4257 need_cache
&& g_conf()->rgw_cache_enabled
, need_gc
);
4260 cerr
<< "couldn't init storage provider" << std::endl
;
4264 /* Needs to be after the store is initialized. Note, user could be empty here. */
4265 user
= store
->get_user(user_id_arg
);
4267 init_optional_bucket(opt_bucket
, opt_tenant
,
4268 opt_bucket_name
, opt_bucket_id
);
4269 init_optional_bucket(opt_source_bucket
, opt_source_tenant
,
4270 opt_source_bucket_name
, opt_source_bucket_id
);
4271 init_optional_bucket(opt_dest_bucket
, opt_dest_tenant
,
4272 opt_dest_bucket_name
, opt_dest_bucket_id
);
4274 if (tenant
.empty()) {
4275 tenant
= user
->get_tenant();
4277 if (rgw::sal::User::empty(user
) && opt_cmd
!= OPT::ROLE_CREATE
4278 && opt_cmd
!= OPT::ROLE_DELETE
4279 && opt_cmd
!= OPT::ROLE_GET
4280 && opt_cmd
!= OPT::ROLE_MODIFY
4281 && opt_cmd
!= OPT::ROLE_LIST
4282 && opt_cmd
!= OPT::ROLE_POLICY_PUT
4283 && opt_cmd
!= OPT::ROLE_POLICY_LIST
4284 && opt_cmd
!= OPT::ROLE_POLICY_GET
4285 && opt_cmd
!= OPT::ROLE_POLICY_DELETE
4286 && opt_cmd
!= OPT::RESHARD_ADD
4287 && opt_cmd
!= OPT::RESHARD_CANCEL
4288 && opt_cmd
!= OPT::RESHARD_STATUS
) {
4289 cerr
<< "ERROR: --tenant is set, but there's no user ID" << std::endl
;
4292 user
->set_tenant(tenant
);
4294 if (user_ns
.empty()) {
4295 user_ns
= user
->get_id().ns
;
4297 user
->set_ns(user_ns
);
4300 if (!new_user_id
.empty() && !tenant
.empty()) {
4301 new_user_id
.tenant
= tenant
;
4304 /* check key parameter conflict */
4305 if ((!access_key
.empty()) && gen_access_key
) {
4306 cerr
<< "ERROR: key parameter conflict, --access-key & --gen-access-key" << std::endl
;
4309 if ((!secret_key
.empty()) && gen_secret_key
) {
4310 cerr
<< "ERROR: key parameter conflict, --secret & --gen-secret" << std::endl
;
4315 // default to pretty json
4316 if (format
.empty()) {
4318 pretty_format
= true;
4321 if (format
== "xml")
4322 formatter
= make_unique
<XMLFormatter
>(new XMLFormatter(pretty_format
));
4323 else if (format
== "json")
4324 formatter
= make_unique
<JSONFormatter
>(new JSONFormatter(pretty_format
));
4326 cerr
<< "unrecognized format: " << format
<< std::endl
;
4330 zone_formatter
= std::make_unique
<JSONFormatter_PrettyZone
>(pretty_format
);
4332 realm_name
= g_conf()->rgw_realm
;
4333 zone_name
= g_conf()->rgw_zone
;
4334 zonegroup_name
= g_conf()->rgw_zonegroup
;
4336 if (!realm_name
.empty()) {
4337 opt_realm_name
= realm_name
;
4340 if (!zone_name
.empty()) {
4341 opt_zone_name
= zone_name
;
4344 if (!zonegroup_name
.empty()) {
4345 opt_zonegroup_name
= zonegroup_name
;
4348 RGWStreamFlusher
stream_flusher(formatter
.get(), cout
);
4350 RGWUserAdminOpState
user_op(store
);
4351 if (!user_email
.empty()) {
4352 user_op
.user_email_specified
=true;
4355 if (!source_zone_name
.empty()) {
4356 if (!static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->find_zone_id_by_name(source_zone_name
, &source_zone
)) {
4357 cerr
<< "WARNING: cannot find source zone id for name=" << source_zone_name
<< std::endl
;
4358 source_zone
= source_zone_name
;
4362 rgw_http_client_init(g_ceph_context
);
4364 struct rgw_curl_setup
{
4366 rgw::curl::setup_curl(boost::none
);
4369 rgw::curl::cleanup_curl();
4375 StoreDestructor
store_destructor(static_cast<rgw::sal::RadosStore
*>(store
));
4377 if (raw_storage_op
) {
4378 try_to_resolve_local_entities(realm_id
, realm_name
,
4379 zonegroup_id
, zonegroup_name
,
4380 zone_id
, zone_name
);
4384 case OPT::PERIOD_DELETE
:
4386 if (period_id
.empty()) {
4387 cerr
<< "missing period id" << std::endl
;
4390 RGWPeriod
period(period_id
);
4391 int ret
= period
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4393 cerr
<< "period.init failed: " << cpp_strerror(-ret
) << std::endl
;
4396 ret
= period
.delete_obj(dpp(), null_yield
);
4398 cerr
<< "ERROR: couldn't delete period: " << cpp_strerror(-ret
) << std::endl
;
4404 case OPT::PERIOD_GET
:
4407 if (!period_epoch
.empty()) {
4408 epoch
= atoi(period_epoch
.c_str());
4411 RGWRealm
realm(realm_id
, realm_name
);
4412 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4414 cerr
<< "Error initializing realm " << cpp_strerror(-ret
) << std::endl
;
4417 realm_id
= realm
.get_id();
4418 realm_name
= realm
.get_name();
4419 period_id
= RGWPeriod::get_staging_id(realm_id
);
4422 RGWPeriod
period(period_id
, epoch
);
4423 int ret
= period
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, realm_id
,
4424 null_yield
, realm_name
);
4426 cerr
<< "period init failed: " << cpp_strerror(-ret
) << std::endl
;
4429 encode_json("period", period
, formatter
.get());
4430 formatter
->flush(cout
);
4433 case OPT::PERIOD_GET_CURRENT
:
4435 int ret
= read_current_period_id(static_cast<rgw::sal::RadosStore
*>(store
), realm_id
, realm_name
, &period_id
);
4439 formatter
->open_object_section("period_get_current");
4440 encode_json("current_period", period_id
, formatter
.get());
4441 formatter
->close_section();
4442 formatter
->flush(cout
);
4445 case OPT::PERIOD_LIST
:
4447 list
<string
> periods
;
4448 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->list_periods(dpp(), periods
);
4450 cerr
<< "failed to list periods: " << cpp_strerror(-ret
) << std::endl
;
4453 formatter
->open_object_section("periods_list");
4454 encode_json("periods", periods
, formatter
.get());
4455 formatter
->close_section();
4456 formatter
->flush(cout
);
4459 case OPT::PERIOD_UPDATE
:
4461 int ret
= update_period(realm_id
, realm_name
, period_id
, period_epoch
,
4462 commit
, remote
, url
, opt_region
,
4463 access_key
, secret_key
,
4464 formatter
.get(), yes_i_really_mean_it
);
4470 case OPT::PERIOD_PULL
:
4472 boost::optional
<RGWRESTConn
> conn
;
4473 RGWRESTConn
*remote_conn
= nullptr;
4475 // load current period for endpoints
4476 RGWRealm
realm(realm_id
, realm_name
);
4477 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4479 cerr
<< "failed to init realm: " << cpp_strerror(-ret
) << std::endl
;
4482 RGWPeriod
current_period(realm
.get_current_period());
4483 ret
= current_period
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4485 cerr
<< "failed to init current period: " << cpp_strerror(-ret
) << std::endl
;
4488 if (remote
.empty()) {
4489 // use realm master zone as remote
4490 remote
= current_period
.get_master_zone().id
;
4492 conn
= get_remote_conn(static_cast<rgw::sal::RadosStore
*>(store
), current_period
.get_map(), remote
);
4494 cerr
<< "failed to find a zone or zonegroup for remote "
4495 << remote
<< std::endl
;
4498 remote_conn
= &*conn
;
4502 int ret
= do_period_pull(remote_conn
, url
, opt_region
,
4503 access_key
, secret_key
,
4504 realm_id
, realm_name
, period_id
, period_epoch
,
4507 cerr
<< "period pull failed: " << cpp_strerror(-ret
) << std::endl
;
4511 encode_json("period", period
, formatter
.get());
4512 formatter
->flush(cout
);
4515 case OPT::GLOBAL_RATELIMIT_GET
:
4516 case OPT::GLOBAL_RATELIMIT_SET
:
4517 case OPT::GLOBAL_RATELIMIT_ENABLE
:
4518 case OPT::GLOBAL_RATELIMIT_DISABLE
:
4520 if (realm_id
.empty()) {
4521 RGWRealm
realm(g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
);
4522 if (!realm_name
.empty()) {
4523 // look up realm_id for the given realm_name
4524 int ret
= realm
.read_id(dpp(), realm_name
, realm_id
, null_yield
);
4526 cerr
<< "ERROR: failed to read realm for " << realm_name
4527 << ": " << cpp_strerror(-ret
) << std::endl
;
4531 // use default realm_id when none is given
4532 int ret
= realm
.read_default_id(dpp(), realm_id
, null_yield
);
4533 if (ret
< 0 && ret
!= -ENOENT
) { // on ENOENT, use empty realm_id
4534 cerr
<< "ERROR: failed to read default realm: "
4535 << cpp_strerror(-ret
) << std::endl
;
4541 RGWPeriodConfig period_config
;
4542 int ret
= period_config
.read(dpp(), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, realm_id
, null_yield
);
4543 if (ret
< 0 && ret
!= -ENOENT
) {
4544 cerr
<< "ERROR: failed to read period config: "
4545 << cpp_strerror(-ret
) << std::endl
;
4548 bool ratelimit_configured
= true;
4549 formatter
->open_object_section("period_config");
4550 if (ratelimit_scope
== "bucket") {
4551 ratelimit_configured
= set_ratelimit_info(period_config
.bucket_ratelimit
, opt_cmd
,
4552 max_read_ops
, max_write_ops
,
4553 max_read_bytes
, max_write_bytes
,
4554 have_max_read_ops
, have_max_write_ops
,
4555 have_max_read_bytes
, have_max_write_bytes
);
4556 encode_json("bucket_ratelimit", period_config
.bucket_ratelimit
, formatter
.get());
4557 } else if (ratelimit_scope
== "user") {
4558 ratelimit_configured
= set_ratelimit_info(period_config
.user_ratelimit
, opt_cmd
,
4559 max_read_ops
, max_write_ops
,
4560 max_read_bytes
, max_write_bytes
,
4561 have_max_read_ops
, have_max_write_ops
,
4562 have_max_read_bytes
, have_max_write_bytes
);
4563 encode_json("user_ratelimit", period_config
.user_ratelimit
, formatter
.get());
4564 } else if (ratelimit_scope
== "anonymous") {
4565 ratelimit_configured
= set_ratelimit_info(period_config
.anon_ratelimit
, opt_cmd
,
4566 max_read_ops
, max_write_ops
,
4567 max_read_bytes
, max_write_bytes
,
4568 have_max_read_ops
, have_max_write_ops
,
4569 have_max_read_bytes
, have_max_write_bytes
);
4570 encode_json("anonymous_ratelimit", period_config
.anon_ratelimit
, formatter
.get());
4571 } else if (ratelimit_scope
.empty() && opt_cmd
== OPT::GLOBAL_RATELIMIT_GET
) {
4572 // if no scope is given for GET, print both
4573 encode_json("bucket_ratelimit", period_config
.bucket_ratelimit
, formatter
.get());
4574 encode_json("user_ratelimit", period_config
.user_ratelimit
, formatter
.get());
4575 encode_json("anonymous_ratelimit", period_config
.anon_ratelimit
, formatter
.get());
4577 cerr
<< "ERROR: invalid rate limit scope specification. Please specify "
4578 "either --ratelimit-scope=bucket, or --ratelimit-scope=user or --ratelimit-scope=anonymous" << std::endl
;
4581 if (!ratelimit_configured
) {
4582 cerr
<< "ERROR: no rate limit values have been specified" << std::endl
;
4586 formatter
->close_section();
4588 if (opt_cmd
!= OPT::GLOBAL_RATELIMIT_GET
) {
4589 // write the modified period config
4590 ret
= period_config
.write(dpp(), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, realm_id
, null_yield
);
4592 cerr
<< "ERROR: failed to write period config: "
4593 << cpp_strerror(-ret
) << std::endl
;
4596 if (!realm_id
.empty()) {
4597 cout
<< "Global ratelimit changes saved. Use 'period update' to apply "
4598 "them to the staging period, and 'period commit' to commit the "
4599 "new period." << std::endl
;
4601 cout
<< "Global ratelimit changes saved. They will take effect as "
4602 "the gateways are restarted." << std::endl
;
4606 formatter
->flush(cout
);
4609 case OPT::GLOBAL_QUOTA_GET
:
4610 case OPT::GLOBAL_QUOTA_SET
:
4611 case OPT::GLOBAL_QUOTA_ENABLE
:
4612 case OPT::GLOBAL_QUOTA_DISABLE
:
4614 if (realm_id
.empty()) {
4615 RGWRealm
realm(g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
);
4616 if (!realm_name
.empty()) {
4617 // look up realm_id for the given realm_name
4618 int ret
= realm
.read_id(dpp(), realm_name
, realm_id
, null_yield
);
4620 cerr
<< "ERROR: failed to read realm for " << realm_name
4621 << ": " << cpp_strerror(-ret
) << std::endl
;
4625 // use default realm_id when none is given
4626 int ret
= realm
.read_default_id(dpp(), realm_id
, null_yield
);
4627 if (ret
< 0 && ret
!= -ENOENT
) { // on ENOENT, use empty realm_id
4628 cerr
<< "ERROR: failed to read default realm: "
4629 << cpp_strerror(-ret
) << std::endl
;
4635 RGWPeriodConfig period_config
;
4636 int ret
= period_config
.read(dpp(), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, realm_id
, null_yield
);
4637 if (ret
< 0 && ret
!= -ENOENT
) {
4638 cerr
<< "ERROR: failed to read period config: "
4639 << cpp_strerror(-ret
) << std::endl
;
4643 formatter
->open_object_section("period_config");
4644 if (quota_scope
== "bucket") {
4645 set_quota_info(period_config
.bucket_quota
, opt_cmd
,
4646 max_size
, max_objects
,
4647 have_max_size
, have_max_objects
);
4648 encode_json("bucket quota", period_config
.bucket_quota
, formatter
.get());
4649 } else if (quota_scope
== "user") {
4650 set_quota_info(period_config
.user_quota
, opt_cmd
,
4651 max_size
, max_objects
,
4652 have_max_size
, have_max_objects
);
4653 encode_json("user quota", period_config
.user_quota
, formatter
.get());
4654 } else if (quota_scope
.empty() && opt_cmd
== OPT::GLOBAL_QUOTA_GET
) {
4655 // if no scope is given for GET, print both
4656 encode_json("bucket quota", period_config
.bucket_quota
, formatter
.get());
4657 encode_json("user quota", period_config
.user_quota
, formatter
.get());
4659 cerr
<< "ERROR: invalid quota scope specification. Please specify "
4660 "either --quota-scope=bucket, or --quota-scope=user" << std::endl
;
4663 formatter
->close_section();
4665 if (opt_cmd
!= OPT::GLOBAL_QUOTA_GET
) {
4666 // write the modified period config
4667 ret
= period_config
.write(dpp(), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, realm_id
, null_yield
);
4669 cerr
<< "ERROR: failed to write period config: "
4670 << cpp_strerror(-ret
) << std::endl
;
4673 if (!realm_id
.empty()) {
4674 cout
<< "Global quota changes saved. Use 'period update' to apply "
4675 "them to the staging period, and 'period commit' to commit the "
4676 "new period." << std::endl
;
4678 cout
<< "Global quota changes saved. They will take effect as "
4679 "the gateways are restarted." << std::endl
;
4683 formatter
->flush(cout
);
4686 case OPT::REALM_CREATE
:
4688 if (realm_name
.empty()) {
4689 cerr
<< "missing realm name" << std::endl
;
4693 RGWRealm
realm(realm_name
, g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
);
4694 int ret
= realm
.create(dpp(), null_yield
);
4696 cerr
<< "ERROR: couldn't create realm " << realm_name
<< ": " << cpp_strerror(-ret
) << std::endl
;
4701 ret
= realm
.set_as_default(dpp(), null_yield
);
4703 cerr
<< "failed to set realm " << realm_name
<< " as default: " << cpp_strerror(-ret
) << std::endl
;
4707 encode_json("realm", realm
, formatter
.get());
4708 formatter
->flush(cout
);
4711 case OPT::REALM_DELETE
:
4713 if (empty_opt(opt_realm_name
) && empty_opt(opt_realm_id
)) {
4714 cerr
<< "missing realm name or id" << std::endl
;
4717 RGWRealm
realm(safe_opt(opt_realm_id
), safe_opt(opt_realm_name
));
4718 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4720 cerr
<< "realm.init failed: " << cpp_strerror(-ret
) << std::endl
;
4723 ret
= realm
.delete_obj(dpp(), null_yield
);
4725 cerr
<< "ERROR: couldn't : " << cpp_strerror(-ret
) << std::endl
;
4731 case OPT::REALM_GET
:
4733 RGWRealm
realm(realm_id
, realm_name
);
4734 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4736 if (ret
== -ENOENT
&& realm_name
.empty() && realm_id
.empty()) {
4737 cerr
<< "missing realm name or id, or default realm not found" << std::endl
;
4739 cerr
<< "realm.init failed: " << cpp_strerror(-ret
) << std::endl
;
4743 encode_json("realm", realm
, formatter
.get());
4744 formatter
->flush(cout
);
4747 case OPT::REALM_GET_DEFAULT
:
4749 RGWRealm
realm(g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
);
4751 int ret
= realm
.read_default_id(dpp(), default_id
, null_yield
);
4752 if (ret
== -ENOENT
) {
4753 cout
<< "No default realm is set" << std::endl
;
4755 } else if (ret
< 0) {
4756 cerr
<< "Error reading default realm: " << cpp_strerror(-ret
) << std::endl
;
4759 cout
<< "default realm: " << default_id
<< std::endl
;
4762 case OPT::REALM_LIST
:
4764 RGWRealm
realm(g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
);
4766 int ret
= realm
.read_default_id(dpp(), default_id
, null_yield
);
4767 if (ret
< 0 && ret
!= -ENOENT
) {
4768 cerr
<< "could not determine default realm: " << cpp_strerror(-ret
) << std::endl
;
4770 list
<string
> realms
;
4771 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->list_realms(dpp(), realms
);
4773 cerr
<< "failed to list realms: " << cpp_strerror(-ret
) << std::endl
;
4776 formatter
->open_object_section("realms_list");
4777 encode_json("default_info", default_id
, formatter
.get());
4778 encode_json("realms", realms
, formatter
.get());
4779 formatter
->close_section();
4780 formatter
->flush(cout
);
4783 case OPT::REALM_LIST_PERIODS
:
4785 int ret
= read_current_period_id(static_cast<rgw::sal::RadosStore
*>(store
), realm_id
, realm_name
, &period_id
);
4789 list
<string
> periods
;
4790 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->list_periods(dpp(), period_id
, periods
, null_yield
);
4792 cerr
<< "list periods failed: " << cpp_strerror(-ret
) << std::endl
;
4795 formatter
->open_object_section("realm_periods_list");
4796 encode_json("current_period", period_id
, formatter
.get());
4797 encode_json("periods", periods
, formatter
.get());
4798 formatter
->close_section();
4799 formatter
->flush(cout
);
4803 case OPT::REALM_RENAME
:
4805 RGWRealm
realm(realm_id
, realm_name
);
4806 if (realm_new_name
.empty()) {
4807 cerr
<< "missing realm new name" << std::endl
;
4810 if (realm_name
.empty() && realm_id
.empty()) {
4811 cerr
<< "missing realm name or id" << std::endl
;
4814 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4816 cerr
<< "realm.init failed: " << cpp_strerror(-ret
) << std::endl
;
4819 ret
= realm
.rename(dpp(), realm_new_name
, null_yield
);
4821 cerr
<< "realm.rename failed: " << cpp_strerror(-ret
) << std::endl
;
4824 cout
<< "Realm name updated. Note that this change only applies to "
4825 "the current cluster, so this command must be run separately "
4826 "on each of the realm's other clusters." << std::endl
;
4829 case OPT::REALM_SET
:
4831 if (realm_id
.empty() && realm_name
.empty()) {
4832 cerr
<< "no realm name or id provided" << std::endl
;
4835 RGWRealm
realm(realm_id
, realm_name
);
4836 bool new_realm
= false;
4837 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4838 if (ret
< 0 && ret
!= -ENOENT
) {
4839 cerr
<< "failed to init realm: " << cpp_strerror(-ret
) << std::endl
;
4841 } else if (ret
== -ENOENT
) {
4844 ret
= read_decode_json(infile
, realm
);
4848 if (!realm_name
.empty() && realm
.get_name() != realm_name
) {
4849 cerr
<< "mismatch between --rgw-realm " << realm_name
<< " and json input file name " <<
4850 realm
.get_name() << std::endl
;
4855 cout
<< "clearing period and epoch for new realm" << std::endl
;
4856 realm
.clear_current_period_and_epoch();
4857 ret
= realm
.create(dpp(), null_yield
);
4859 cerr
<< "ERROR: couldn't create new realm: " << cpp_strerror(-ret
) << std::endl
;
4863 ret
= realm
.update(dpp(), null_yield
);
4865 cerr
<< "ERROR: couldn't store realm info: " << cpp_strerror(-ret
) << std::endl
;
4871 ret
= realm
.set_as_default(dpp(), null_yield
);
4873 cerr
<< "failed to set realm " << realm_name
<< " as default: " << cpp_strerror(-ret
) << std::endl
;
4876 encode_json("realm", realm
, formatter
.get());
4877 formatter
->flush(cout
);
4881 case OPT::REALM_DEFAULT
:
4883 RGWRealm
realm(realm_id
, realm_name
);
4884 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4886 cerr
<< "failed to init realm: " << cpp_strerror(-ret
) << std::endl
;
4889 ret
= realm
.set_as_default(dpp(), null_yield
);
4891 cerr
<< "failed to set realm as default: " << cpp_strerror(-ret
) << std::endl
;
4896 case OPT::REALM_PULL
:
4899 cerr
<< "A --url must be provided." << std::endl
;
4903 req_info
info(g_ceph_context
, &env
);
4904 info
.method
= "GET";
4905 info
.request_uri
= "/admin/realm";
4907 map
<string
, string
> ¶ms
= info
.args
.get_params();
4908 if (!realm_id
.empty())
4909 params
["id"] = realm_id
;
4910 if (!realm_name
.empty())
4911 params
["name"] = realm_name
;
4915 int ret
= send_to_url(url
, opt_region
, access_key
, secret_key
, info
, bl
, p
);
4917 cerr
<< "request failed: " << cpp_strerror(-ret
) << std::endl
;
4918 if (ret
== -EACCES
) {
4919 cerr
<< "If the realm has been changed on the master zone, the "
4920 "master zone's gateway may need to be restarted to recognize "
4921 "this user." << std::endl
;
4926 realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
, false);
4928 decode_json_obj(realm
, &p
);
4929 } catch (const JSONDecoder::err
& e
) {
4930 cerr
<< "failed to decode JSON response: " << e
.what() << std::endl
;
4934 auto& current_period
= realm
.get_current_period();
4935 if (!current_period
.empty()) {
4936 // pull the latest epoch of the realm's current period
4937 ret
= do_period_pull(nullptr, url
, opt_region
,
4938 access_key
, secret_key
,
4939 realm_id
, realm_name
, current_period
, "",
4942 cerr
<< "could not fetch period " << current_period
<< std::endl
;
4946 ret
= realm
.create(dpp(), null_yield
, false);
4947 if (ret
< 0 && ret
!= -EEXIST
) {
4948 cerr
<< "Error storing realm " << realm
.get_id() << ": "
4949 << cpp_strerror(ret
) << std::endl
;
4951 } else if (ret
==-EEXIST
) {
4952 ret
= realm
.update(dpp(), null_yield
);
4954 cerr
<< "Error storing realm " << realm
.get_id() << ": "
4955 << cpp_strerror(ret
) << std::endl
;
4960 ret
= realm
.set_as_default(dpp(), null_yield
);
4962 cerr
<< "failed to set realm " << realm_name
<< " as default: " << cpp_strerror(-ret
) << std::endl
;
4966 encode_json("realm", realm
, formatter
.get());
4967 formatter
->flush(cout
);
4971 case OPT::ZONEGROUP_ADD
:
4973 if (zonegroup_id
.empty() && zonegroup_name
.empty()) {
4974 cerr
<< "no zonegroup name or id provided" << std::endl
;
4978 RGWZoneGroup
zonegroup(zonegroup_id
,zonegroup_name
);
4979 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4981 cerr
<< "failed to initialize zonegroup " << zonegroup_name
<< " id " << zonegroup_id
<< ": "
4982 << cpp_strerror(-ret
) << std::endl
;
4985 RGWZoneParams
zone(zone_id
, zone_name
);
4986 ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
4988 cerr
<< "unable to initialize zone: " << cpp_strerror(-ret
) << std::endl
;
4991 if (zone
.realm_id
!= zonegroup
.realm_id
) {
4992 zone
.realm_id
= zonegroup
.realm_id
;
4993 ret
= zone
.update(dpp(), null_yield
);
4995 cerr
<< "failed to save zone info: " << cpp_strerror(-ret
) << std::endl
;
5000 string
*ptier_type
= (tier_type_specified
? &tier_type
: nullptr);
5002 for (auto a
: tier_config_add
) {
5003 int r
= zone
.tier_config
.set(a
.first
, a
.second
);
5005 cerr
<< "ERROR: failed to set configurable: " << a
<< std::endl
;
5010 bool *psync_from_all
= (sync_from_all_specified
? &sync_from_all
: nullptr);
5011 string
*predirect_zone
= (redirect_zone_set
? &redirect_zone
: nullptr);
5013 ret
= zonegroup
.add_zone(dpp(), zone
,
5014 (is_master_set
? &is_master
: NULL
),
5015 (is_read_only_set
? &read_only
: NULL
),
5016 endpoints
, ptier_type
,
5017 psync_from_all
, sync_from
, sync_from_rm
,
5018 predirect_zone
, bucket_index_max_shards
,
5019 static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sync_modules
->get_manager(),
5022 cerr
<< "failed to add zone " << zone_name
<< " to zonegroup " << zonegroup
.get_name() << ": "
5023 << cpp_strerror(-ret
) << std::endl
;
5027 encode_json("zonegroup", zonegroup
, formatter
.get());
5028 formatter
->flush(cout
);
5031 case OPT::ZONEGROUP_CREATE
:
5033 if (zonegroup_name
.empty()) {
5034 cerr
<< "Missing zonegroup name" << std::endl
;
5037 RGWRealm
realm(realm_id
, realm_name
);
5038 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5040 cerr
<< "failed to init realm: " << cpp_strerror(-ret
) << std::endl
;
5044 RGWZoneGroup
zonegroup(zonegroup_name
, is_master
, g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, realm
.get_id(), endpoints
);
5045 zonegroup
.api_name
= (api_name
.empty() ? zonegroup_name
: api_name
);
5046 ret
= zonegroup
.create(dpp(), null_yield
);
5048 cerr
<< "failed to create zonegroup " << zonegroup_name
<< ": " << cpp_strerror(-ret
) << std::endl
;
5053 ret
= zonegroup
.set_as_default(dpp(), null_yield
);
5055 cerr
<< "failed to set zonegroup " << zonegroup_name
<< " as default: " << cpp_strerror(-ret
) << std::endl
;
5059 encode_json("zonegroup", zonegroup
, formatter
.get());
5060 formatter
->flush(cout
);
5063 case OPT::ZONEGROUP_DEFAULT
:
5065 if (zonegroup_id
.empty() && zonegroup_name
.empty()) {
5066 cerr
<< "no zonegroup name or id provided" << std::endl
;
5070 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5071 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5073 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5077 ret
= zonegroup
.set_as_default(dpp(), null_yield
);
5079 cerr
<< "failed to set zonegroup as default: " << cpp_strerror(-ret
) << std::endl
;
5084 case OPT::ZONEGROUP_DELETE
:
5086 if (empty_opt(opt_zonegroup_id
) && empty_opt(opt_zonegroup_name
)) {
5087 cerr
<< "no zonegroup name or id provided" << std::endl
;
5090 RGWZoneGroup
zonegroup(safe_opt(opt_zonegroup_id
), safe_opt(opt_zonegroup_name
));
5091 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
,
5094 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5097 ret
= zonegroup
.delete_obj(dpp(), null_yield
);
5099 cerr
<< "ERROR: couldn't delete zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5104 case OPT::ZONEGROUP_GET
:
5106 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5107 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5109 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5113 encode_json("zonegroup", zonegroup
, formatter
.get());
5114 formatter
->flush(cout
);
5117 case OPT::ZONEGROUP_LIST
:
5119 RGWZoneGroup zonegroup
;
5120 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
,
5123 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5127 list
<string
> zonegroups
;
5128 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->list_zonegroups(dpp(), zonegroups
);
5130 cerr
<< "failed to list zonegroups: " << cpp_strerror(-ret
) << std::endl
;
5133 string default_zonegroup
;
5134 ret
= zonegroup
.read_default_id(dpp(), default_zonegroup
, null_yield
);
5135 if (ret
< 0 && ret
!= -ENOENT
) {
5136 cerr
<< "could not determine default zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5138 formatter
->open_object_section("zonegroups_list");
5139 encode_json("default_info", default_zonegroup
, formatter
.get());
5140 encode_json("zonegroups", zonegroups
, formatter
.get());
5141 formatter
->close_section();
5142 formatter
->flush(cout
);
5145 case OPT::ZONEGROUP_MODIFY
:
5147 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5148 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5150 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5154 bool need_update
= false;
5156 if (!master_zone
.empty()) {
5157 zonegroup
.master_zone
= master_zone
;
5161 if (is_master_set
) {
5162 zonegroup
.update_master(dpp(), is_master
, null_yield
);
5166 if (!endpoints
.empty()) {
5167 zonegroup
.endpoints
= endpoints
;
5171 if (!api_name
.empty()) {
5172 zonegroup
.api_name
= api_name
;
5176 if (!realm_id
.empty()) {
5177 zonegroup
.realm_id
= realm_id
;
5179 } else if (!realm_name
.empty()) {
5180 // get realm id from name
5181 RGWRealm realm
{g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
};
5182 ret
= realm
.read_id(dpp(), realm_name
, zonegroup
.realm_id
, null_yield
);
5184 cerr
<< "failed to find realm by name " << realm_name
<< std::endl
;
5190 if (bucket_index_max_shards
) {
5191 for (auto& [name
, zone
] : zonegroup
.zones
) {
5192 zone
.bucket_index_max_shards
= *bucket_index_max_shards
;
5198 ret
= zonegroup
.update(dpp(), null_yield
);
5200 cerr
<< "failed to update zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5206 ret
= zonegroup
.set_as_default(dpp(), null_yield
);
5208 cerr
<< "failed to set zonegroup " << zonegroup_name
<< " as default: " << cpp_strerror(-ret
) << std::endl
;
5212 encode_json("zonegroup", zonegroup
, formatter
.get());
5213 formatter
->flush(cout
);
5216 case OPT::ZONEGROUP_SET
:
5218 RGWRealm
realm(realm_id
, realm_name
);
5219 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5220 bool default_realm_not_exist
= (ret
== -ENOENT
&& realm_id
.empty() && realm_name
.empty());
5222 if (ret
< 0 && !default_realm_not_exist
) {
5223 cerr
<< "failed to init realm: " << cpp_strerror(-ret
) << std::endl
;
5227 RGWZoneGroup zonegroup
;
5228 ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
,
5231 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5234 ret
= read_decode_json(infile
, zonegroup
);
5238 if (zonegroup
.realm_id
.empty() && !default_realm_not_exist
) {
5239 zonegroup
.realm_id
= realm
.get_id();
5241 ret
= zonegroup
.create(dpp(), null_yield
);
5242 if (ret
< 0 && ret
!= -EEXIST
) {
5243 cerr
<< "ERROR: couldn't create zonegroup info: " << cpp_strerror(-ret
) << std::endl
;
5245 } else if (ret
== -EEXIST
) {
5246 ret
= zonegroup
.update(dpp(), null_yield
);
5248 cerr
<< "ERROR: couldn't store zonegroup info: " << cpp_strerror(-ret
) << std::endl
;
5254 ret
= zonegroup
.set_as_default(dpp(), null_yield
);
5256 cerr
<< "failed to set zonegroup " << zonegroup_name
<< " as default: " << cpp_strerror(-ret
) << std::endl
;
5260 encode_json("zonegroup", zonegroup
, formatter
.get());
5261 formatter
->flush(cout
);
5264 case OPT::ZONEGROUP_REMOVE
:
5266 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5267 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5269 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5273 if (zone_id
.empty()) {
5274 if (zone_name
.empty()) {
5275 cerr
<< "no --zone-id or --rgw-zone name provided" << std::endl
;
5278 // look up zone id by name
5279 for (auto& z
: zonegroup
.zones
) {
5280 if (zone_name
== z
.second
.name
) {
5281 zone_id
= z
.second
.id
;
5285 if (zone_id
.empty()) {
5286 cerr
<< "zone name " << zone_name
<< " not found in zonegroup "
5287 << zonegroup
.get_name() << std::endl
;
5292 ret
= zonegroup
.remove_zone(dpp(), zone_id
, null_yield
);
5294 cerr
<< "failed to remove zone: " << cpp_strerror(-ret
) << std::endl
;
5298 encode_json("zonegroup", zonegroup
, formatter
.get());
5299 formatter
->flush(cout
);
5302 case OPT::ZONEGROUP_RENAME
:
5304 if (zonegroup_new_name
.empty()) {
5305 cerr
<< " missing zonegroup new name" << std::endl
;
5308 if (zonegroup_id
.empty() && zonegroup_name
.empty()) {
5309 cerr
<< "no zonegroup name or id provided" << std::endl
;
5312 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5313 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5315 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5318 ret
= zonegroup
.rename(dpp(), zonegroup_new_name
, null_yield
);
5320 cerr
<< "failed to rename zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5325 case OPT::ZONEGROUP_PLACEMENT_LIST
:
5327 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5328 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
,
5331 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5335 encode_json("placement_targets", zonegroup
.placement_targets
, formatter
.get());
5336 formatter
->flush(cout
);
5339 case OPT::ZONEGROUP_PLACEMENT_GET
:
5341 if (placement_id
.empty()) {
5342 cerr
<< "ERROR: --placement-id not specified" << std::endl
;
5346 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5347 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5349 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5353 auto p
= zonegroup
.placement_targets
.find(placement_id
);
5354 if (p
== zonegroup
.placement_targets
.end()) {
5355 cerr
<< "failed to find a zonegroup placement target named '" << placement_id
<< "'" << std::endl
;
5358 encode_json("placement_targets", p
->second
, formatter
.get());
5359 formatter
->flush(cout
);
5362 case OPT::ZONEGROUP_PLACEMENT_ADD
:
5363 case OPT::ZONEGROUP_PLACEMENT_MODIFY
:
5364 case OPT::ZONEGROUP_PLACEMENT_RM
:
5365 case OPT::ZONEGROUP_PLACEMENT_DEFAULT
:
5367 if (placement_id
.empty()) {
5368 cerr
<< "ERROR: --placement-id not specified" << std::endl
;
5372 rgw_placement_rule rule
;
5373 rule
.from_str(placement_id
);
5375 if (!rule
.storage_class
.empty() && opt_storage_class
&&
5376 rule
.storage_class
!= *opt_storage_class
) {
5377 cerr
<< "ERROR: provided contradicting storage class configuration" << std::endl
;
5379 } else if (rule
.storage_class
.empty()) {
5380 rule
.storage_class
= opt_storage_class
.value_or(string());
5383 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5384 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5386 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5390 if (opt_cmd
== OPT::ZONEGROUP_PLACEMENT_ADD
||
5391 opt_cmd
== OPT::ZONEGROUP_PLACEMENT_MODIFY
) {
5392 RGWZoneGroupPlacementTarget
& target
= zonegroup
.placement_targets
[placement_id
];
5393 if (!tags
.empty()) {
5394 target
.tags
.clear();
5395 for (auto& t
: tags
) {
5396 target
.tags
.insert(t
);
5400 target
.name
= placement_id
;
5401 for (auto& t
: tags_rm
) {
5402 target
.tags
.erase(t
);
5404 for (auto& t
: tags_add
) {
5405 target
.tags
.insert(t
);
5407 target
.storage_classes
.insert(rule
.get_storage_class());
5410 bool tier_class
= false;
5411 std::string storage_class
= rule
.get_storage_class();
5412 RGWZoneGroupPlacementTier t
{storage_class
};
5413 RGWZoneGroupPlacementTier
*pt
= &t
;
5415 auto ptiter
= target
.tier_targets
.find(storage_class
);
5416 if (ptiter
!= target
.tier_targets
.end()) {
5417 pt
= &ptiter
->second
;
5419 } else if (tier_type_specified
) {
5420 if (tier_type
== "cloud-s3") {
5421 /* we support only cloud-s3 tier-type for now.
5422 * Once set cant be reset. */
5424 pt
->tier_type
= tier_type
;
5425 pt
->storage_class
= storage_class
;
5427 cerr
<< "ERROR: Invalid tier-type specified" << std::endl
;
5433 if (tier_config_add
.size() > 0) {
5434 JSONFormattable tconfig
;
5435 for (auto add
: tier_config_add
) {
5436 int r
= tconfig
.set(add
.first
, add
.second
);
5438 cerr
<< "ERROR: failed to set configurable: " << add
<< std::endl
;
5442 int r
= pt
->update_params(tconfig
);
5444 cerr
<< "ERROR: failed to update tier_config options"<< std::endl
;
5447 if (tier_config_rm
.size() > 0) {
5448 JSONFormattable tconfig
;
5449 for (auto add
: tier_config_rm
) {
5450 int r
= tconfig
.set(add
.first
, add
.second
);
5452 cerr
<< "ERROR: failed to set configurable: " << add
<< std::endl
;
5456 int r
= pt
->clear_params(tconfig
);
5458 cerr
<< "ERROR: failed to update tier_config options"<< std::endl
;
5462 target
.tier_targets
.emplace(std::make_pair(storage_class
, *pt
));
5465 } else if (opt_cmd
== OPT::ZONEGROUP_PLACEMENT_RM
) {
5466 if (!opt_storage_class
|| opt_storage_class
->empty()) {
5467 zonegroup
.placement_targets
.erase(placement_id
);
5469 auto iter
= zonegroup
.placement_targets
.find(placement_id
);
5470 if (iter
!= zonegroup
.placement_targets
.end()) {
5471 RGWZoneGroupPlacementTarget
& info
= zonegroup
.placement_targets
[placement_id
];
5472 info
.storage_classes
.erase(*opt_storage_class
);
5474 auto ptiter
= info
.tier_targets
.find(*opt_storage_class
);
5475 if (ptiter
!= info
.tier_targets
.end()) {
5476 info
.tier_targets
.erase(ptiter
);
5480 } else if (opt_cmd
== OPT::ZONEGROUP_PLACEMENT_DEFAULT
) {
5481 if (!zonegroup
.placement_targets
.count(placement_id
)) {
5482 cerr
<< "failed to find a zonegroup placement target named '"
5483 << placement_id
<< "'" << std::endl
;
5486 zonegroup
.default_placement
= rule
;
5489 zonegroup
.post_process_params(dpp(), null_yield
);
5490 ret
= zonegroup
.update(dpp(), null_yield
);
5492 cerr
<< "failed to update zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5496 encode_json("placement_targets", zonegroup
.placement_targets
, formatter
.get());
5497 formatter
->flush(cout
);
5500 case OPT::ZONE_CREATE
:
5502 if (zone_name
.empty()) {
5503 cerr
<< "zone name not provided" << std::endl
;
5507 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5508 /* if the user didn't provide zonegroup info , create stand alone zone */
5509 if (!zonegroup_id
.empty() || !zonegroup_name
.empty()) {
5510 ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5512 cerr
<< "unable to initialize zonegroup " << zonegroup_name
<< ": " << cpp_strerror(-ret
) << std::endl
;
5515 if (realm_id
.empty() && realm_name
.empty()) {
5516 realm_id
= zonegroup
.realm_id
;
5520 RGWZoneParams
zone(zone_id
, zone_name
);
5521 ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
, false);
5523 cerr
<< "unable to initialize zone: " << cpp_strerror(-ret
) << std::endl
;
5527 zone
.system_key
.id
= access_key
;
5528 zone
.system_key
.key
= secret_key
;
5529 zone
.realm_id
= realm_id
;
5530 for (auto a
: tier_config_add
) {
5531 int r
= zone
.tier_config
.set(a
.first
, a
.second
);
5533 cerr
<< "ERROR: failed to set configurable: " << a
<< std::endl
;
5538 ret
= zone
.create(dpp(), null_yield
);
5540 cerr
<< "failed to create zone " << zone_name
<< ": " << cpp_strerror(-ret
) << std::endl
;
5544 if (!zonegroup_id
.empty() || !zonegroup_name
.empty()) {
5545 string
*ptier_type
= (tier_type_specified
? &tier_type
: nullptr);
5546 bool *psync_from_all
= (sync_from_all_specified
? &sync_from_all
: nullptr);
5547 string
*predirect_zone
= (redirect_zone_set
? &redirect_zone
: nullptr);
5548 ret
= zonegroup
.add_zone(dpp(), zone
,
5549 (is_master_set
? &is_master
: NULL
),
5550 (is_read_only_set
? &read_only
: NULL
),
5554 sync_from
, sync_from_rm
,
5555 predirect_zone
, bucket_index_max_shards
,
5556 static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sync_modules
->get_manager(),
5559 cerr
<< "failed to add zone " << zone_name
<< " to zonegroup " << zonegroup
.get_name()
5560 << ": " << cpp_strerror(-ret
) << std::endl
;
5566 ret
= zone
.set_as_default(dpp(), null_yield
);
5568 cerr
<< "failed to set zone " << zone_name
<< " as default: " << cpp_strerror(-ret
) << std::endl
;
5572 encode_json("zone", zone
, formatter
.get());
5573 formatter
->flush(cout
);
5576 case OPT::ZONE_DEFAULT
:
5578 if (zone_id
.empty() && zone_name
.empty()) {
5579 cerr
<< "no zone name or id provided" << std::endl
;
5582 RGWZoneParams
zone(zone_id
, zone_name
);
5583 int ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5585 cerr
<< "unable to initialize zone: " << cpp_strerror(-ret
) << std::endl
;
5588 ret
= zone
.set_as_default(dpp(), null_yield
);
5590 cerr
<< "failed to set zone as default: " << cpp_strerror(-ret
) << std::endl
;
5595 case OPT::ZONE_DELETE
:
5597 if (empty_opt(opt_zone_id
) && empty_opt(opt_zone_name
)) {
5598 cerr
<< "no zone name or id provided" << std::endl
;
5601 RGWZoneParams
zone(safe_opt(opt_zone_id
), safe_opt(opt_zone_name
));
5602 int ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5604 cerr
<< "unable to initialize zone: " << cpp_strerror(-ret
) << std::endl
;
5608 list
<string
> zonegroups
;
5609 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->list_zonegroups(dpp(), zonegroups
);
5611 cerr
<< "failed to list zonegroups: " << cpp_strerror(-ret
) << std::endl
;
5615 for (list
<string
>::iterator iter
= zonegroups
.begin(); iter
!= zonegroups
.end(); ++iter
) {
5616 RGWZoneGroup
zonegroup(string(), *iter
);
5617 int ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5619 cerr
<< "WARNING: failed to initialize zonegroup " << zonegroup_name
<< std::endl
;
5622 ret
= zonegroup
.remove_zone(dpp(), zone
.get_id(), null_yield
);
5623 if (ret
< 0 && ret
!= -ENOENT
) {
5624 cerr
<< "failed to remove zone " << zone
.get_name() << " from zonegroup " << zonegroup
.get_name() << ": "
5625 << cpp_strerror(-ret
) << std::endl
;
5629 ret
= zone
.delete_obj(dpp(), null_yield
);
5631 cerr
<< "failed to delete zone " << zone
.get_name() << ": " << cpp_strerror(-ret
) << std::endl
;
5638 RGWZoneParams
zone(zone_id
, zone_name
);
5639 int ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5641 cerr
<< "unable to initialize zone: " << cpp_strerror(-ret
) << std::endl
;
5644 encode_json("zone", zone
, formatter
.get());
5645 formatter
->flush(cout
);
5650 RGWZoneParams
zone(zone_name
);
5651 int ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
,
5657 ret
= zone
.read(dpp(), null_yield
);
5658 if (ret
< 0 && ret
!= -ENOENT
) {
5659 cerr
<< "zone.read() returned ret=" << ret
<< std::endl
;
5663 string orig_id
= zone
.get_id();
5665 ret
= read_decode_json(infile
, zone
);
5670 if(zone
.realm_id
.empty()) {
5671 RGWRealm
realm(realm_id
, realm_name
);
5672 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5673 if (ret
< 0 && ret
!= -ENOENT
) {
5674 cerr
<< "failed to init realm: " << cpp_strerror(-ret
) << std::endl
;
5677 zone
.realm_id
= realm
.get_id();
5680 if( !zone_name
.empty() && !zone
.get_name().empty() && zone
.get_name() != zone_name
) {
5681 cerr
<< "Error: zone name " << zone_name
<< " is different than the zone name " << zone
.get_name() << " in the provided json " << std::endl
;
5685 if (zone
.get_name().empty()) {
5686 zone
.set_name(zone_name
);
5687 if (zone
.get_name().empty()) {
5688 cerr
<< "no zone name specified" << std::endl
;
5693 zone_name
= zone
.get_name();
5695 if (zone
.get_id().empty()) {
5696 zone
.set_id(orig_id
);
5699 if (zone
.get_id().empty()) {
5700 cerr
<< "no zone name id the json provided, assuming old format" << std::endl
;
5701 if (zone_name
.empty()) {
5702 cerr
<< "missing zone name" << std::endl
;
5705 zone
.set_name(zone_name
);
5706 zone
.set_id(zone_name
);
5709 cerr
<< "zone id " << zone
.get_id();
5710 ret
= zone
.fix_pool_names(dpp(), null_yield
);
5712 cerr
<< "ERROR: couldn't fix zone: " << cpp_strerror(-ret
) << std::endl
;
5715 ret
= zone
.write(dpp(), false, null_yield
);
5717 cerr
<< "ERROR: couldn't create zone: " << cpp_strerror(-ret
) << std::endl
;
5722 ret
= zone
.set_as_default(dpp(), null_yield
);
5724 cerr
<< "failed to set zone " << zone_name
<< " as default: " << cpp_strerror(-ret
) << std::endl
;
5728 encode_json("zone", zone
, formatter
.get());
5729 formatter
->flush(cout
);
5732 case OPT::ZONE_LIST
:
5735 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->list_zones(dpp(), zones
);
5737 cerr
<< "failed to list zones: " << cpp_strerror(-ret
) << std::endl
;
5742 ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
, false);
5744 cerr
<< "failed to init zone: " << cpp_strerror(-ret
) << std::endl
;
5747 string default_zone
;
5748 ret
= zone
.read_default_id(dpp(), default_zone
, null_yield
);
5749 if (ret
< 0 && ret
!= -ENOENT
) {
5750 cerr
<< "could not determine default zone: " << cpp_strerror(-ret
) << std::endl
;
5752 formatter
->open_object_section("zones_list");
5753 encode_json("default_info", default_zone
, formatter
.get());
5754 encode_json("zones", zones
, formatter
.get());
5755 formatter
->close_section();
5756 formatter
->flush(cout
);
5759 case OPT::ZONE_MODIFY
:
5761 RGWZoneParams
zone(zone_id
, zone_name
);
5762 int ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5764 cerr
<< "failed to init zone: " << cpp_strerror(-ret
) << std::endl
;
5768 bool need_zone_update
= false;
5769 if (!access_key
.empty()) {
5770 zone
.system_key
.id
= access_key
;
5771 need_zone_update
= true;
5774 if (!secret_key
.empty()) {
5775 zone
.system_key
.key
= secret_key
;
5776 need_zone_update
= true;
5779 if (!realm_id
.empty()) {
5780 zone
.realm_id
= realm_id
;
5781 need_zone_update
= true;
5782 } else if (!realm_name
.empty()) {
5783 // get realm id from name
5784 RGWRealm realm
{g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
};
5785 ret
= realm
.read_id(dpp(), realm_name
, zone
.realm_id
, null_yield
);
5787 cerr
<< "failed to find realm by name " << realm_name
<< std::endl
;
5790 need_zone_update
= true;
5793 if (tier_config_add
.size() > 0) {
5794 for (auto add
: tier_config_add
) {
5795 int r
= zone
.tier_config
.set(add
.first
, add
.second
);
5797 cerr
<< "ERROR: failed to set configurable: " << add
<< std::endl
;
5801 need_zone_update
= true;
5804 for (auto rm
: tier_config_rm
) {
5805 if (!rm
.first
.empty()) { /* otherwise will remove the entire config */
5806 zone
.tier_config
.erase(rm
.first
);
5807 need_zone_update
= true;
5811 if (need_zone_update
) {
5812 ret
= zone
.update(dpp(), null_yield
);
5814 cerr
<< "failed to save zone info: " << cpp_strerror(-ret
) << std::endl
;
5819 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5820 ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5822 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5825 string
*ptier_type
= (tier_type_specified
? &tier_type
: nullptr);
5827 bool *psync_from_all
= (sync_from_all_specified
? &sync_from_all
: nullptr);
5828 string
*predirect_zone
= (redirect_zone_set
? &redirect_zone
: nullptr);
5830 ret
= zonegroup
.add_zone(dpp(), zone
,
5831 (is_master_set
? &is_master
: NULL
),
5832 (is_read_only_set
? &read_only
: NULL
),
5833 endpoints
, ptier_type
,
5834 psync_from_all
, sync_from
, sync_from_rm
,
5835 predirect_zone
, bucket_index_max_shards
,
5836 static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sync_modules
->get_manager(),
5839 cerr
<< "failed to update zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5843 ret
= zonegroup
.update(dpp(), null_yield
);
5845 cerr
<< "failed to update zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5850 ret
= zone
.set_as_default(dpp(), null_yield
);
5852 cerr
<< "failed to set zone " << zone_name
<< " as default: " << cpp_strerror(-ret
) << std::endl
;
5856 encode_json("zone", zone
, formatter
.get());
5857 formatter
->flush(cout
);
5860 case OPT::ZONE_RENAME
:
5862 if (zone_new_name
.empty()) {
5863 cerr
<< " missing zone new name" << std::endl
;
5866 if (zone_id
.empty() && zone_name
.empty()) {
5867 cerr
<< "no zone name or id provided" << std::endl
;
5870 RGWZoneParams
zone(zone_id
,zone_name
);
5871 int ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5873 cerr
<< "unable to initialize zone: " << cpp_strerror(-ret
) << std::endl
;
5876 ret
= zone
.rename(dpp(), zone_new_name
, null_yield
);
5878 cerr
<< "failed to rename zone " << zone_name
<< " to " << zone_new_name
<< ": " << cpp_strerror(-ret
)
5882 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5883 ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5885 cerr
<< "WARNING: failed to initialize zonegroup " << zonegroup_name
<< std::endl
;
5887 ret
= zonegroup
.rename_zone(dpp(), zone
, null_yield
);
5889 cerr
<< "Error in zonegroup rename for " << zone_name
<< ": " << cpp_strerror(-ret
) << std::endl
;
5895 case OPT::ZONE_PLACEMENT_ADD
:
5896 case OPT::ZONE_PLACEMENT_MODIFY
:
5897 case OPT::ZONE_PLACEMENT_RM
:
5899 if (placement_id
.empty()) {
5900 cerr
<< "ERROR: --placement-id not specified" << std::endl
;
5903 // validate compression type
5904 if (compression_type
&& *compression_type
!= "random"
5905 && !Compressor::get_comp_alg_type(*compression_type
)) {
5906 std::cerr
<< "Unrecognized compression type" << std::endl
;
5910 RGWZoneParams
zone(zone_id
, zone_name
);
5911 int ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5913 cerr
<< "failed to init zone: " << cpp_strerror(-ret
) << std::endl
;
5917 if (opt_cmd
== OPT::ZONE_PLACEMENT_ADD
||
5918 opt_cmd
== OPT::ZONE_PLACEMENT_MODIFY
) {
5919 RGWZoneGroup
zonegroup(zonegroup_id
, zonegroup_name
);
5920 ret
= zonegroup
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
5922 cerr
<< "failed to init zonegroup: " << cpp_strerror(-ret
) << std::endl
;
5926 auto ptiter
= zonegroup
.placement_targets
.find(placement_id
);
5927 if (ptiter
== zonegroup
.placement_targets
.end()) {
5928 cerr
<< "ERROR: placement id '" << placement_id
<< "' is not configured in zonegroup placement targets" << std::endl
;
5932 string storage_class
= rgw_placement_rule::get_canonical_storage_class(opt_storage_class
.value_or(string()));
5933 if (ptiter
->second
.storage_classes
.find(storage_class
) == ptiter
->second
.storage_classes
.end()) {
5934 cerr
<< "ERROR: storage class '" << storage_class
<< "' is not defined in zonegroup '" << placement_id
<< "' placement target" << std::endl
;
5937 if (ptiter
->second
.tier_targets
.find(storage_class
) != ptiter
->second
.tier_targets
.end()) {
5938 cerr
<< "ERROR: storage class '" << storage_class
<< "' is of tier type in zonegroup '" << placement_id
<< "' placement target" << std::endl
;
5942 RGWZonePlacementInfo
& info
= zone
.placement_pools
[placement_id
];
5944 string opt_index_pool
= index_pool
.value_or(string());
5945 string opt_data_pool
= data_pool
.value_or(string());
5947 if (!opt_index_pool
.empty()) {
5948 info
.index_pool
= opt_index_pool
;
5951 if (info
.index_pool
.empty()) {
5952 cerr
<< "ERROR: index pool not configured, need to specify --index-pool" << std::endl
;
5956 if (opt_data_pool
.empty()) {
5957 const RGWZoneStorageClass
*porig_sc
{nullptr};
5958 if (info
.storage_classes
.find(storage_class
, &porig_sc
)) {
5959 if (porig_sc
->data_pool
) {
5960 opt_data_pool
= porig_sc
->data_pool
->to_str();
5963 if (opt_data_pool
.empty()) {
5964 cerr
<< "ERROR: data pool not configured, need to specify --data-pool" << std::endl
;
5969 rgw_pool dp
= opt_data_pool
;
5970 info
.storage_classes
.set_storage_class(storage_class
, &dp
, compression_type
.get_ptr());
5972 if (data_extra_pool
) {
5973 info
.data_extra_pool
= *data_extra_pool
;
5975 if (index_type_specified
) {
5976 info
.index_type
= placement_index_type
;
5979 ret
= check_pool_support_omap(info
.get_data_extra_pool());
5981 cerr
<< "ERROR: the data extra (non-ec) pool '" << info
.get_data_extra_pool()
5982 << "' does not support omap" << std::endl
;
5985 } else if (opt_cmd
== OPT::ZONE_PLACEMENT_RM
) {
5986 if (!opt_storage_class
||
5987 opt_storage_class
->empty()) {
5988 zone
.placement_pools
.erase(placement_id
);
5990 auto iter
= zone
.placement_pools
.find(placement_id
);
5991 if (iter
!= zone
.placement_pools
.end()) {
5992 RGWZonePlacementInfo
& info
= zone
.placement_pools
[placement_id
];
5993 info
.storage_classes
.remove_storage_class(*opt_storage_class
);
5998 ret
= zone
.update(dpp(), null_yield
);
6000 cerr
<< "failed to save zone info: " << cpp_strerror(-ret
) << std::endl
;
6004 encode_json("zone", zone
, formatter
.get());
6005 formatter
->flush(cout
);
6008 case OPT::ZONE_PLACEMENT_LIST
:
6010 RGWZoneParams
zone(zone_id
, zone_name
);
6011 int ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
6013 cerr
<< "unable to initialize zone: " << cpp_strerror(-ret
) << std::endl
;
6016 encode_json("placement_pools", zone
.placement_pools
, formatter
.get());
6017 formatter
->flush(cout
);
6020 case OPT::ZONE_PLACEMENT_GET
:
6022 if (placement_id
.empty()) {
6023 cerr
<< "ERROR: --placement-id not specified" << std::endl
;
6027 RGWZoneParams
zone(zone_id
, zone_name
);
6028 int ret
= zone
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
6030 cerr
<< "unable to initialize zone: " << cpp_strerror(-ret
) << std::endl
;
6033 auto p
= zone
.placement_pools
.find(placement_id
);
6034 if (p
== zone
.placement_pools
.end()) {
6035 cerr
<< "ERROR: zone placement target '" << placement_id
<< "' not found" << std::endl
;
6038 encode_json("placement_pools", p
->second
, formatter
.get());
6039 formatter
->flush(cout
);
6047 resolve_zone_id_opt(opt_effective_zone_name
, opt_effective_zone_id
);
6048 resolve_zone_id_opt(opt_source_zone_name
, opt_source_zone_id
);
6049 resolve_zone_id_opt(opt_dest_zone_name
, opt_dest_zone_id
);
6050 resolve_zone_ids_opt(opt_zone_names
, opt_zone_ids
);
6051 resolve_zone_ids_opt(opt_source_zone_names
, opt_source_zone_ids
);
6052 resolve_zone_ids_opt(opt_dest_zone_names
, opt_dest_zone_ids
);
6054 bool non_master_cmd
= (!store
->is_meta_master() && !yes_i_really_mean_it
);
6055 std::set
<OPT
> non_master_ops_list
= {OPT::USER_CREATE
, OPT::USER_RM
,
6056 OPT::USER_MODIFY
, OPT::USER_ENABLE
,
6057 OPT::USER_SUSPEND
, OPT::SUBUSER_CREATE
,
6058 OPT::SUBUSER_MODIFY
, OPT::SUBUSER_RM
,
6059 OPT::BUCKET_LINK
, OPT::BUCKET_UNLINK
,
6060 OPT::BUCKET_RESHARD
, OPT::BUCKET_RM
,
6061 OPT::BUCKET_CHOWN
, OPT::METADATA_PUT
,
6062 OPT::METADATA_RM
, OPT::RESHARD_CANCEL
,
6063 OPT::RESHARD_ADD
, OPT::MFA_CREATE
,
6064 OPT::MFA_REMOVE
, OPT::MFA_RESYNC
,
6065 OPT::CAPS_ADD
, OPT::CAPS_RM
};
6067 bool print_warning_message
= (non_master_ops_list
.find(opt_cmd
) != non_master_ops_list
.end() &&
6070 if (print_warning_message
) {
6071 cerr
<< "Please run the command on master zone. Performing this operation on non-master zone leads to inconsistent metadata between zones" << std::endl
;
6072 cerr
<< "Are you sure you want to go ahead? (requires --yes-i-really-mean-it)" << std::endl
;
6076 if (!rgw::sal::User::empty(user
)) {
6077 user_op
.set_user_id(user
->get_id());
6078 bucket_op
.set_user_id(user
->get_id());
6081 if (!display_name
.empty())
6082 user_op
.set_display_name(display_name
);
6084 if (!user_email
.empty())
6085 user_op
.set_user_email(user_email
);
6087 if (!rgw::sal::User::empty(user
)) {
6088 user_op
.set_new_user_id(new_user_id
);
6091 if (!access_key
.empty())
6092 user_op
.set_access_key(access_key
);
6094 if (!secret_key
.empty())
6095 user_op
.set_secret_key(secret_key
);
6097 if (!subuser
.empty())
6098 user_op
.set_subuser(subuser
);
6101 user_op
.set_caps(caps
);
6103 user_op
.set_purge_data(purge_data
);
6106 user_op
.set_purge_keys();
6109 user_op
.set_generate_key();
6112 user_op
.set_gen_secret(); // assume that a key pair should be created
6114 if (max_buckets_specified
)
6115 user_op
.set_max_buckets(max_buckets
);
6117 if (admin_specified
)
6118 user_op
.set_admin(admin
);
6120 if (system_specified
)
6121 user_op
.set_system(system
);
6124 user_op
.set_perm(perm_mask
);
6126 if (set_temp_url_key
) {
6127 map
<int, string
>::iterator iter
= temp_url_keys
.begin();
6128 for (; iter
!= temp_url_keys
.end(); ++iter
) {
6129 user_op
.set_temp_url_key(iter
->second
, iter
->first
);
6133 if (!op_mask_str
.empty()) {
6135 int ret
= rgw_parse_op_type_list(op_mask_str
, &op_mask
);
6137 cerr
<< "failed to parse op_mask: " << cpp_strerror(-ret
) << std::endl
;
6141 user_op
.set_op_mask(op_mask
);
6144 if (key_type
!= KEY_TYPE_UNDEFINED
)
6145 user_op
.set_key_type(key_type
);
6147 // set suspension operation parameters
6148 if (opt_cmd
== OPT::USER_ENABLE
)
6149 user_op
.set_suspension(false);
6150 else if (opt_cmd
== OPT::USER_SUSPEND
)
6151 user_op
.set_suspension(true);
6153 if (!placement_id
.empty() ||
6154 (opt_storage_class
&& !opt_storage_class
->empty())) {
6155 rgw_placement_rule target_rule
;
6156 target_rule
.name
= placement_id
;
6157 target_rule
.storage_class
= *opt_storage_class
;
6158 if (!store
->get_zone()->get_params().valid_placement(target_rule
)) {
6159 cerr
<< "NOTICE: invalid dest placement: " << target_rule
.to_str() << std::endl
;
6162 user_op
.set_default_placement(target_rule
);
6165 if (!tags
.empty()) {
6166 user_op
.set_placement_tags(tags
);
6169 // RGWUser to use for user operations
6172 if (!(rgw::sal::User::empty(user
) && access_key
.empty()) || !subuser
.empty()) {
6173 ret
= ruser
.init(dpp(), store
, user_op
, null_yield
);
6175 cerr
<< "user.init failed: " << cpp_strerror(-ret
) << std::endl
;
6180 /* populate bucket operation */
6181 bucket_op
.set_bucket_name(bucket_name
);
6182 bucket_op
.set_object(object
);
6183 bucket_op
.set_check_objects(check_objects
);
6184 bucket_op
.set_delete_children(delete_child_objects
);
6185 bucket_op
.set_fix_index(fix
);
6186 bucket_op
.set_max_aio(max_concurrent_ios
);
6188 // required to gather errors from operations
6189 std::string err_msg
;
6191 bool output_user_info
= true;
6194 case OPT::USER_INFO
:
6195 if (rgw::sal::User::empty(user
) && access_key
.empty()) {
6196 cerr
<< "ERROR: --uid or --access-key required" << std::endl
;
6200 case OPT::USER_CREATE
:
6201 if (!user_op
.has_existing_user()) {
6202 user_op
.set_generate_key(); // generate a new key by default
6204 ret
= ruser
.add(dpp(), user_op
, null_yield
, &err_msg
);
6206 cerr
<< "could not create user: " << err_msg
<< std::endl
;
6207 if (ret
== -ERR_INVALID_TENANT_NAME
)
6212 if (!subuser
.empty()) {
6213 ret
= ruser
.subusers
.add(dpp(),user_op
, null_yield
, &err_msg
);
6215 cerr
<< "could not create subuser: " << err_msg
<< std::endl
;
6221 ret
= ruser
.remove(dpp(), user_op
, null_yield
, &err_msg
);
6223 cerr
<< "could not remove user: " << err_msg
<< std::endl
;
6227 output_user_info
= false;
6229 case OPT::USER_RENAME
:
6230 if (yes_i_really_mean_it
) {
6231 user_op
.set_overwrite_new_user(true);
6233 ret
= ruser
.rename(user_op
, null_yield
, dpp(), &err_msg
);
6235 if (ret
== -EEXIST
) {
6236 err_msg
+= ". to overwrite this user, add --yes-i-really-mean-it";
6238 cerr
<< "could not rename user: " << err_msg
<< std::endl
;
6243 case OPT::USER_ENABLE
:
6244 case OPT::USER_SUSPEND
:
6245 case OPT::USER_MODIFY
:
6246 ret
= ruser
.modify(dpp(), user_op
, null_yield
, &err_msg
);
6248 cerr
<< "could not modify user: " << err_msg
<< std::endl
;
6253 case OPT::SUBUSER_CREATE
:
6254 ret
= ruser
.subusers
.add(dpp(), user_op
, null_yield
, &err_msg
);
6256 cerr
<< "could not create subuser: " << err_msg
<< std::endl
;
6261 case OPT::SUBUSER_MODIFY
:
6262 ret
= ruser
.subusers
.modify(dpp(), user_op
, null_yield
, &err_msg
);
6264 cerr
<< "could not modify subuser: " << err_msg
<< std::endl
;
6269 case OPT::SUBUSER_RM
:
6270 ret
= ruser
.subusers
.remove(dpp(), user_op
, null_yield
, &err_msg
);
6272 cerr
<< "could not remove subuser: " << err_msg
<< std::endl
;
6278 ret
= ruser
.caps
.add(dpp(), user_op
, null_yield
, &err_msg
);
6280 cerr
<< "could not add caps: " << err_msg
<< std::endl
;
6286 ret
= ruser
.caps
.remove(dpp(), user_op
, null_yield
, &err_msg
);
6288 cerr
<< "could not remove caps: " << err_msg
<< std::endl
;
6293 case OPT::KEY_CREATE
:
6294 ret
= ruser
.keys
.add(dpp(), user_op
, null_yield
, &err_msg
);
6296 cerr
<< "could not create key: " << err_msg
<< std::endl
;
6302 ret
= ruser
.keys
.remove(dpp(), user_op
, null_yield
, &err_msg
);
6304 cerr
<< "could not remove key: " << err_msg
<< std::endl
;
6308 case OPT::PERIOD_PUSH
:
6311 req_info
info(g_ceph_context
, &env
);
6312 info
.method
= "POST";
6313 info
.request_uri
= "/admin/realm/period";
6315 map
<string
, string
> ¶ms
= info
.args
.get_params();
6316 if (!realm_id
.empty())
6317 params
["realm_id"] = realm_id
;
6318 if (!realm_name
.empty())
6319 params
["realm_name"] = realm_name
;
6320 if (!period_id
.empty())
6321 params
["period_id"] = period_id
;
6322 if (!period_epoch
.empty())
6323 params
["epoch"] = period_epoch
;
6326 RGWPeriod
period(period_id
);
6327 int ret
= period
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
6329 cerr
<< "period init failed: " << cpp_strerror(-ret
) << std::endl
;
6332 // json format into a bufferlist
6333 JSONFormatter
jf(false);
6334 encode_json("period", period
, &jf
);
6339 ret
= send_to_remote_or_url(nullptr, url
, opt_region
,
6340 access_key
, secret_key
,
6343 cerr
<< "request failed: " << cpp_strerror(-ret
) << std::endl
;
6348 case OPT::PERIOD_UPDATE
:
6350 int ret
= update_period(realm_id
, realm_name
, period_id
, period_epoch
,
6351 commit
, remote
, url
, opt_region
,
6352 access_key
, secret_key
,
6353 formatter
.get(), yes_i_really_mean_it
);
6359 case OPT::PERIOD_COMMIT
:
6361 // read realm and staging period
6362 RGWRealm
realm(realm_id
, realm_name
);
6363 int ret
= realm
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, null_yield
);
6365 cerr
<< "Error initializing realm: " << cpp_strerror(-ret
) << std::endl
;
6368 RGWPeriod
period(RGWPeriod::get_staging_id(realm
.get_id()), 1);
6369 ret
= period
.init(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sysobj
, realm
.get_id(), null_yield
);
6371 cerr
<< "period init failed: " << cpp_strerror(-ret
) << std::endl
;
6374 ret
= commit_period(realm
, period
, remote
, url
, opt_region
, access_key
, secret_key
,
6375 yes_i_really_mean_it
);
6377 cerr
<< "failed to commit period: " << cpp_strerror(-ret
) << std::endl
;
6381 encode_json("period", period
, formatter
.get());
6382 formatter
->flush(cout
);
6385 case OPT::ROLE_CREATE
:
6387 if (role_name
.empty()) {
6388 cerr
<< "ERROR: role name is empty" << std::endl
;
6392 if (assume_role_doc
.empty()) {
6393 cerr
<< "ERROR: assume role policy document is empty" << std::endl
;
6396 bufferlist bl
= bufferlist::static_from_string(assume_role_doc
);
6398 const rgw::IAM::Policy
p(g_ceph_context
, tenant
, bl
);
6399 } catch (rgw::IAM::PolicyParseException
& e
) {
6400 cerr
<< "failed to parse policy: " << e
.what() << std::endl
;
6403 std::unique_ptr
<rgw::sal::RGWRole
> role
= store
->get_role(role_name
, tenant
, path
, assume_role_doc
);
6404 ret
= role
->create(dpp(), true, null_yield
);
6408 show_role_info(role
.get(), formatter
.get());
6411 case OPT::ROLE_DELETE
:
6413 if (role_name
.empty()) {
6414 cerr
<< "ERROR: empty role name" << std::endl
;
6417 std::unique_ptr
<rgw::sal::RGWRole
> role
= store
->get_role(role_name
, tenant
);
6418 ret
= role
->delete_obj(dpp(), null_yield
);
6422 cout
<< "role: " << role_name
<< " successfully deleted" << std::endl
;
6427 if (role_name
.empty()) {
6428 cerr
<< "ERROR: empty role name" << std::endl
;
6431 std::unique_ptr
<rgw::sal::RGWRole
> role
= store
->get_role(role_name
, tenant
);
6432 ret
= role
->get(dpp(), null_yield
);
6436 show_role_info(role
.get(), formatter
.get());
6439 case OPT::ROLE_MODIFY
:
6441 if (role_name
.empty()) {
6442 cerr
<< "ERROR: role name is empty" << std::endl
;
6446 if (assume_role_doc
.empty()) {
6447 cerr
<< "ERROR: assume role policy document is empty" << std::endl
;
6451 bufferlist bl
= bufferlist::static_from_string(assume_role_doc
);
6453 const rgw::IAM::Policy
p(g_ceph_context
, tenant
, bl
);
6454 } catch (rgw::IAM::PolicyParseException
& e
) {
6455 cerr
<< "failed to parse policy: " << e
.what() << std::endl
;
6459 std::unique_ptr
<rgw::sal::RGWRole
> role
= store
->get_role(role_name
, tenant
);
6460 ret
= role
->get(dpp(), null_yield
);
6464 role
->update_trust_policy(assume_role_doc
);
6465 ret
= role
->update(dpp(), null_yield
);
6469 cout
<< "Assume role policy document updated successfully for role: " << role_name
<< std::endl
;
6472 case OPT::ROLE_LIST
:
6474 vector
<std::unique_ptr
<rgw::sal::RGWRole
>> result
;
6475 ret
= store
->get_roles(dpp(), null_yield
, path_prefix
, tenant
, result
);
6479 show_roles_info(result
, formatter
.get());
6482 case OPT::ROLE_POLICY_PUT
:
6484 if (role_name
.empty()) {
6485 cerr
<< "role name is empty" << std::endl
;
6489 if (policy_name
.empty()) {
6490 cerr
<< "policy name is empty" << std::endl
;
6494 if (perm_policy_doc
.empty()) {
6495 cerr
<< "permission policy document is empty" << std::endl
;
6499 bufferlist bl
= bufferlist::static_from_string(perm_policy_doc
);
6501 const rgw::IAM::Policy
p(g_ceph_context
, tenant
, bl
);
6502 } catch (rgw::IAM::PolicyParseException
& e
) {
6503 cerr
<< "failed to parse perm policy: " << e
.what() << std::endl
;
6507 std::unique_ptr
<rgw::sal::RGWRole
> role
= store
->get_role(role_name
, tenant
);
6508 ret
= role
->get(dpp(), null_yield
);
6512 role
->set_perm_policy(policy_name
, perm_policy_doc
);
6513 ret
= role
->update(dpp(), null_yield
);
6517 cout
<< "Permission policy attached successfully" << std::endl
;
6520 case OPT::ROLE_POLICY_LIST
:
6522 if (role_name
.empty()) {
6523 cerr
<< "ERROR: Role name is empty" << std::endl
;
6526 std::unique_ptr
<rgw::sal::RGWRole
> role
= store
->get_role(role_name
, tenant
);
6527 ret
= role
->get(dpp(), null_yield
);
6531 std::vector
<string
> policy_names
= role
->get_role_policy_names();
6532 show_policy_names(policy_names
, formatter
.get());
6535 case OPT::ROLE_POLICY_GET
:
6537 if (role_name
.empty()) {
6538 cerr
<< "ERROR: role name is empty" << std::endl
;
6542 if (policy_name
.empty()) {
6543 cerr
<< "ERROR: policy name is empty" << std::endl
;
6546 std::unique_ptr
<rgw::sal::RGWRole
> role
= store
->get_role(role_name
, tenant
);
6547 int ret
= role
->get(dpp(), null_yield
);
6552 ret
= role
->get_role_policy(dpp(), policy_name
, perm_policy
);
6556 show_perm_policy(perm_policy
, formatter
.get());
6559 case OPT::ROLE_POLICY_DELETE
:
6561 if (role_name
.empty()) {
6562 cerr
<< "ERROR: role name is empty" << std::endl
;
6566 if (policy_name
.empty()) {
6567 cerr
<< "ERROR: policy name is empty" << std::endl
;
6570 std::unique_ptr
<rgw::sal::RGWRole
> role
= store
->get_role(role_name
, tenant
);
6571 ret
= role
->get(dpp(), null_yield
);
6575 ret
= role
->delete_policy(dpp(), policy_name
);
6579 ret
= role
->update(dpp(), null_yield
);
6583 cout
<< "Policy: " << policy_name
<< " successfully deleted for role: "
6584 << role_name
<< std::endl
;
6588 output_user_info
= false;
6591 // output the result of a user operation
6592 if (output_user_info
) {
6593 ret
= ruser
.info(info
, &err_msg
);
6595 cerr
<< "could not fetch user info: " << err_msg
<< std::endl
;
6598 show_user_info(info
, formatter
.get());
6601 if (opt_cmd
== OPT::POLICY
) {
6602 if (format
== "xml") {
6603 int ret
= RGWBucketAdminOp::dump_s3_policy(store
, bucket_op
, cout
, dpp());
6605 cerr
<< "ERROR: failed to get policy: " << cpp_strerror(-ret
) << std::endl
;
6609 int ret
= RGWBucketAdminOp::get_policy(store
, bucket_op
, stream_flusher
, dpp());
6611 cerr
<< "ERROR: failed to get policy: " << cpp_strerror(-ret
) << std::endl
;
6617 if (opt_cmd
== OPT::BUCKET_LIMIT_CHECK
) {
6619 std::list
<std::string
> user_ids
;
6620 metadata_key
= "user";
6625 if (!rgw::sal::User::empty(user
)) {
6626 user_ids
.push_back(user
->get_id().id
);
6628 RGWBucketAdminOp::limit_check(store
, bucket_op
, user_ids
, stream_flusher
,
6629 null_yield
, dpp(), warnings_only
);
6631 /* list users in groups of max-keys, then perform user-bucket
6632 * limit-check on each group */
6633 ret
= store
->meta_list_keys_init(dpp(), metadata_key
, string(), &handle
);
6635 cerr
<< "ERROR: buckets limit check can't get user metadata_key: "
6636 << cpp_strerror(-ret
) << std::endl
;
6641 ret
= store
->meta_list_keys_next(dpp(), handle
, max
, user_ids
,
6643 if (ret
< 0 && ret
!= -ENOENT
) {
6644 cerr
<< "ERROR: buckets limit check lists_keys_next(): "
6645 << cpp_strerror(-ret
) << std::endl
;
6648 /* ok, do the limit checks for this group */
6650 RGWBucketAdminOp::limit_check(store
, bucket_op
, user_ids
, stream_flusher
,
6651 null_yield
, dpp(), warnings_only
);
6656 } while (truncated
);
6657 store
->meta_list_keys_complete(handle
);
6660 } /* OPT::BUCKET_LIMIT_CHECK */
6662 if (opt_cmd
== OPT::BUCKETS_LIST
) {
6663 if (bucket_name
.empty()) {
6664 if (!rgw::sal::User::empty(user
)) {
6665 if (!user_op
.has_existing_user()) {
6666 cerr
<< "ERROR: could not find user: " << user
<< std::endl
;
6670 RGWBucketAdminOp::info(store
, bucket_op
, stream_flusher
, null_yield
, dpp());
6672 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
6674 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
6677 formatter
->open_array_section("entries");
6681 static constexpr int MAX_PAGINATE_SIZE
= 10000;
6682 static constexpr int DEFAULT_MAX_ENTRIES
= 1000;
6684 if (max_entries
< 0) {
6685 max_entries
= DEFAULT_MAX_ENTRIES
;
6687 const int paginate_size
= std::min(max_entries
, MAX_PAGINATE_SIZE
);
6693 rgw::sal::Bucket::ListParams params
;
6694 rgw::sal::Bucket::ListResults results
;
6696 params
.prefix
= prefix
;
6697 params
.delim
= delim
;
6698 params
.marker
= rgw_obj_key(marker
);
6700 params
.enforce_ns
= false;
6701 params
.list_versions
= true;
6702 params
.allow_unordered
= bool(allow_unordered
);
6705 const int remaining
= max_entries
- count
;
6706 ret
= bucket
->list(dpp(), params
, std::min(remaining
, paginate_size
), results
,
6709 cerr
<< "ERROR: store->list_objects(): " << cpp_strerror(-ret
) << std::endl
;
6713 count
+= results
.objs
.size();
6715 for (const auto& entry
: results
.objs
) {
6716 encode_json("entry", entry
, formatter
.get());
6718 formatter
->flush(cout
);
6719 } while (results
.is_truncated
&& count
< max_entries
);
6721 formatter
->close_section();
6722 formatter
->flush(cout
);
6723 } /* have bucket_name */
6724 } /* OPT::BUCKETS_LIST */
6726 if (opt_cmd
== OPT::BUCKET_RADOS_LIST
) {
6727 RGWRadosList
lister(static_cast<rgw::sal::RadosStore
*>(store
),
6728 max_concurrent_ios
, orphan_stale_secs
, tenant
);
6730 lister
.set_field_separator(*rgw_obj_fs
);
6733 if (bucket_name
.empty()) {
6734 ret
= lister
.run(dpp());
6736 ret
= lister
.run(dpp(), bucket_name
);
6741 "ERROR: bucket radoslist failed to finish before " <<
6742 "encountering error: " << cpp_strerror(-ret
) << std::endl
;
6743 std::cerr
<< "************************************"
6744 "************************************" << std::endl
;
6745 std::cerr
<< "WARNING: THE RESULTS ARE NOT RELIABLE AND SHOULD NOT " <<
6746 "BE USED IN DELETING ORPHANS" << std::endl
;
6747 std::cerr
<< "************************************"
6748 "************************************" << std::endl
;
6753 if (opt_cmd
== OPT::BUCKET_STATS
) {
6754 if (bucket_name
.empty() && !bucket_id
.empty()) {
6756 if (!rgw_find_bucket_by_id(dpp(), store
->ctx(), store
, marker
, bucket_id
, &bucket
)) {
6757 cerr
<< "failure: no such bucket id" << std::endl
;
6760 bucket_op
.set_tenant(bucket
.tenant
);
6761 bucket_op
.set_bucket_name(bucket
.name
);
6763 bucket_op
.set_fetch_stats(true);
6765 int r
= RGWBucketAdminOp::info(store
, bucket_op
, stream_flusher
, null_yield
, dpp());
6767 cerr
<< "failure: " << cpp_strerror(-r
) << ": " << err
<< std::endl
;
6772 if (opt_cmd
== OPT::BUCKET_LINK
) {
6773 bucket_op
.set_bucket_id(bucket_id
);
6774 bucket_op
.set_new_bucket_name(new_bucket_name
);
6776 int r
= RGWBucketAdminOp::link(store
, bucket_op
, dpp(), &err
);
6778 cerr
<< "failure: " << cpp_strerror(-r
) << ": " << err
<< std::endl
;
6783 if (opt_cmd
== OPT::BUCKET_UNLINK
) {
6784 int r
= RGWBucketAdminOp::unlink(store
, bucket_op
, dpp());
6786 cerr
<< "failure: " << cpp_strerror(-r
) << std::endl
;
6791 if (opt_cmd
== OPT::BUCKET_CHOWN
) {
6792 if (bucket_name
.empty()) {
6793 cerr
<< "ERROR: bucket name not specified" << std::endl
;
6797 bucket_op
.set_bucket_name(bucket_name
);
6798 bucket_op
.set_new_bucket_name(new_bucket_name
);
6802 int r
= RGWBucketAdminOp::chown(store
, bucket_op
, marker
, dpp(), &err
);
6804 cerr
<< "failure: " << cpp_strerror(-r
) << ": " << err
<< std::endl
;
6809 if (opt_cmd
== OPT::LOG_LIST
) {
6811 if (date
.size() && date
.size() != 10) {
6812 cerr
<< "bad date format for '" << date
<< "', expect YYYY-MM-DD" << std::endl
;
6817 formatter
->open_array_section("logs");
6819 int r
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->log_list_init(dpp(), date
, &h
);
6824 cerr
<< "log list: error " << r
<< std::endl
;
6829 int r
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->log_list_next(h
, &name
);
6833 cerr
<< "log list: error " << r
<< std::endl
;
6836 formatter
->dump_string("object", name
);
6839 formatter
->close_section();
6840 formatter
->flush(cout
);
6844 if (opt_cmd
== OPT::LOG_SHOW
|| opt_cmd
== OPT::LOG_RM
) {
6845 if (object
.empty() && (date
.empty() || bucket_name
.empty() || bucket_id
.empty())) {
6846 cerr
<< "specify an object or a date, bucket and bucket-id" << std::endl
;
6851 if (!object
.empty()) {
6861 if (opt_cmd
== OPT::LOG_SHOW
) {
6864 int r
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->log_show_init(dpp(), oid
, &h
);
6866 cerr
<< "error opening log " << oid
<< ": " << cpp_strerror(-r
) << std::endl
;
6871 formatter
->open_object_section("log");
6873 struct rgw_log_entry entry
;
6875 // peek at first entry to get bucket metadata
6876 r
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->log_show_next(dpp(), h
, &entry
);
6878 cerr
<< "error reading log " << oid
<< ": " << cpp_strerror(-r
) << std::endl
;
6881 formatter
->dump_string("bucket_id", entry
.bucket_id
);
6882 formatter
->dump_string("bucket_owner", entry
.bucket_owner
.to_str());
6883 formatter
->dump_string("bucket", entry
.bucket
);
6885 uint64_t agg_time
= 0;
6886 uint64_t agg_bytes_sent
= 0;
6887 uint64_t agg_bytes_received
= 0;
6888 uint64_t total_entries
= 0;
6890 if (show_log_entries
)
6891 formatter
->open_array_section("log_entries");
6894 using namespace std::chrono
;
6895 uint64_t total_time
= duration_cast
<milliseconds
>(entry
.total_time
).count();
6897 agg_time
+= total_time
;
6898 agg_bytes_sent
+= entry
.bytes_sent
;
6899 agg_bytes_received
+= entry
.bytes_received
;
6902 if (skip_zero_entries
&& entry
.bytes_sent
== 0 &&
6903 entry
.bytes_received
== 0)
6906 if (show_log_entries
) {
6908 rgw_format_ops_log_entry(entry
, formatter
.get());
6909 formatter
->flush(cout
);
6912 r
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->log_show_next(dpp(), h
, &entry
);
6916 cerr
<< "error reading log " << oid
<< ": " << cpp_strerror(-r
) << std::endl
;
6919 if (show_log_entries
)
6920 formatter
->close_section();
6923 formatter
->open_object_section("log_sum");
6924 formatter
->dump_int("bytes_sent", agg_bytes_sent
);
6925 formatter
->dump_int("bytes_received", agg_bytes_received
);
6926 formatter
->dump_int("total_time", agg_time
);
6927 formatter
->dump_int("total_entries", total_entries
);
6928 formatter
->close_section();
6930 formatter
->close_section();
6931 formatter
->flush(cout
);
6934 if (opt_cmd
== OPT::LOG_RM
) {
6935 int r
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->log_remove(dpp(), oid
);
6937 cerr
<< "error removing log " << oid
<< ": " << cpp_strerror(-r
) << std::endl
;
6943 if (opt_cmd
== OPT::POOL_ADD
) {
6944 if (pool_name
.empty()) {
6945 cerr
<< "need to specify pool to add!" << std::endl
;
6949 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->add_bucket_placement(dpp(), pool
, null_yield
);
6951 cerr
<< "failed to add bucket placement: " << cpp_strerror(-ret
) << std::endl
;
6954 if (opt_cmd
== OPT::POOL_RM
) {
6955 if (pool_name
.empty()) {
6956 cerr
<< "need to specify pool to remove!" << std::endl
;
6960 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->remove_bucket_placement(dpp(), pool
, null_yield
);
6962 cerr
<< "failed to remove bucket placement: " << cpp_strerror(-ret
) << std::endl
;
6965 if (opt_cmd
== OPT::POOLS_LIST
) {
6966 set
<rgw_pool
> pools
;
6967 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->list_placement_set(dpp(), pools
, null_yield
);
6969 cerr
<< "could not list placement set: " << cpp_strerror(-ret
) << std::endl
;
6973 formatter
->open_array_section("pools");
6974 for (auto siter
= pools
.begin(); siter
!= pools
.end(); ++siter
) {
6975 formatter
->open_object_section("pool");
6976 formatter
->dump_string("name", siter
->to_str());
6977 formatter
->close_section();
6979 formatter
->close_section();
6980 formatter
->flush(cout
);
6984 if (opt_cmd
== OPT::USAGE_SHOW
) {
6985 uint64_t start_epoch
= 0;
6986 uint64_t end_epoch
= (uint64_t)-1;
6990 if (!start_date
.empty()) {
6991 ret
= utime_t::parse_date(start_date
, &start_epoch
, NULL
);
6993 cerr
<< "ERROR: failed to parse start date" << std::endl
;
6997 if (!end_date
.empty()) {
6998 ret
= utime_t::parse_date(end_date
, &end_epoch
, NULL
);
7000 cerr
<< "ERROR: failed to parse end date" << std::endl
;
7006 if (!bucket_name
.empty()) {
7007 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7009 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7013 ret
= RGWUsage::show(dpp(), store
, user
.get(), bucket
.get(), start_epoch
,
7014 end_epoch
, show_log_entries
, show_log_sum
, &categories
,
7017 cerr
<< "ERROR: failed to show usage" << std::endl
;
7022 if (opt_cmd
== OPT::USAGE_TRIM
) {
7023 if (rgw::sal::User::empty(user
) && bucket_name
.empty() &&
7024 start_date
.empty() && end_date
.empty() && !yes_i_really_mean_it
) {
7025 cerr
<< "usage trim without user/date/bucket specified will remove *all* users data" << std::endl
;
7026 cerr
<< "do you really mean it? (requires --yes-i-really-mean-it)" << std::endl
;
7030 uint64_t start_epoch
= 0;
7031 uint64_t end_epoch
= (uint64_t)-1;
7034 if (!start_date
.empty()) {
7035 ret
= utime_t::parse_date(start_date
, &start_epoch
, NULL
);
7037 cerr
<< "ERROR: failed to parse start date" << std::endl
;
7042 if (!end_date
.empty()) {
7043 ret
= utime_t::parse_date(end_date
, &end_epoch
, NULL
);
7045 cerr
<< "ERROR: failed to parse end date" << std::endl
;
7050 if (!bucket_name
.empty()) {
7051 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7053 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7057 ret
= RGWUsage::trim(dpp(), store
, user
.get(), bucket
.get(), start_epoch
, end_epoch
);
7059 cerr
<< "ERROR: read_usage() returned ret=" << ret
<< std::endl
;
7064 if (opt_cmd
== OPT::USAGE_CLEAR
) {
7065 if (!yes_i_really_mean_it
) {
7066 cerr
<< "usage clear would remove *all* users usage data for all time" << std::endl
;
7067 cerr
<< "do you really mean it? (requires --yes-i-really-mean-it)" << std::endl
;
7071 ret
= RGWUsage::clear(dpp(), store
);
7078 if (opt_cmd
== OPT::OLH_GET
|| opt_cmd
== OPT::OLH_READLOG
) {
7079 if (bucket_name
.empty()) {
7080 cerr
<< "ERROR: bucket not specified" << std::endl
;
7083 if (object
.empty()) {
7084 cerr
<< "ERROR: object not specified" << std::endl
;
7089 if (opt_cmd
== OPT::OLH_GET
) {
7090 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7092 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7096 rgw_obj
obj(bucket
->get_key(), object
);
7097 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->get_olh(dpp(), bucket
->get_info(), obj
, &olh
);
7099 cerr
<< "ERROR: failed reading olh: " << cpp_strerror(-ret
) << std::endl
;
7102 encode_json("olh", olh
, formatter
.get());
7103 formatter
->flush(cout
);
7106 if (opt_cmd
== OPT::OLH_READLOG
) {
7107 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7109 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7112 map
<uint64_t, vector
<rgw_bucket_olh_log_entry
> > log
;
7115 RGWObjectCtx
rctx(store
);
7116 std::unique_ptr
<rgw::sal::Object
> obj
= bucket
->get_object(object
);
7120 ret
= obj
->get_obj_state(dpp(), &rctx
, &state
, null_yield
);
7125 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->bucket_index_read_olh_log(dpp(), bucket
->get_info(), *state
, obj
->get_obj(), 0, &log
, &is_truncated
);
7127 cerr
<< "ERROR: failed reading olh: " << cpp_strerror(-ret
) << std::endl
;
7130 formatter
->open_object_section("result");
7131 encode_json("is_truncated", is_truncated
, formatter
.get());
7132 encode_json("log", log
, formatter
.get());
7133 formatter
->close_section();
7134 formatter
->flush(cout
);
7137 if (opt_cmd
== OPT::BI_GET
) {
7138 if (bucket_name
.empty()) {
7139 cerr
<< "ERROR: bucket name not specified" << std::endl
;
7142 if (object
.empty()) {
7143 cerr
<< "ERROR: object not specified" << std::endl
;
7146 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7148 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7151 rgw_obj
obj(bucket
->get_key(), object
);
7152 if (!object_version
.empty()) {
7153 obj
.key
.set_instance(object_version
);
7156 rgw_cls_bi_entry entry
;
7158 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->bi_get(dpp(), bucket
->get_info(), obj
, bi_index_type
, &entry
);
7160 cerr
<< "ERROR: bi_get(): " << cpp_strerror(-ret
) << std::endl
;
7164 encode_json("entry", entry
, formatter
.get());
7165 formatter
->flush(cout
);
7168 if (opt_cmd
== OPT::BI_PUT
) {
7169 if (bucket_name
.empty()) {
7170 cerr
<< "ERROR: bucket name not specified" << std::endl
;
7173 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7175 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7179 rgw_cls_bi_entry entry
;
7180 cls_rgw_obj_key key
;
7181 ret
= read_decode_json(infile
, entry
, &key
);
7186 rgw_obj
obj(bucket
->get_key(), key
);
7188 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->bi_put(dpp(), bucket
->get_key(), obj
, entry
);
7190 cerr
<< "ERROR: bi_put(): " << cpp_strerror(-ret
) << std::endl
;
7195 if (opt_cmd
== OPT::BI_LIST
) {
7196 if (bucket_name
.empty()) {
7197 cerr
<< "ERROR: bucket name not specified" << std::endl
;
7200 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7202 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7206 list
<rgw_cls_bi_entry
> entries
;
7208 if (max_entries
< 0) {
7212 int max_shards
= (bucket
->get_info().layout
.current_index
.layout
.normal
.num_shards
> 0 ? bucket
->get_info().layout
.current_index
.layout
.normal
.num_shards
: 1);
7214 formatter
->open_array_section("entries");
7216 int i
= (specified_shard_id
? shard_id
: 0);
7217 for (; i
< max_shards
; i
++) {
7218 RGWRados::BucketShard
bs(static_cast<rgw::sal::RadosStore
*>(store
)->getRados());
7219 int shard_id
= (bucket
->get_info().layout
.current_index
.layout
.normal
.num_shards
> 0 ? i
: -1);
7221 int ret
= bs
.init(bucket
->get_key(), shard_id
, bucket
->get_info().layout
.current_index
, nullptr /* no RGWBucketInfo */, dpp());
7225 cerr
<< "ERROR: bs.init(bucket=" << bucket
<< ", shard=" << shard_id
<< "): " << cpp_strerror(-ret
) << std::endl
;
7231 // if object is specified, we use that as a filter to only retrieve some some entries
7232 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->bi_list(bs
, object
, marker
, max_entries
, &entries
, &is_truncated
);
7234 cerr
<< "ERROR: bi_list(): " << cpp_strerror(-ret
) << std::endl
;
7238 list
<rgw_cls_bi_entry
>::iterator iter
;
7239 for (iter
= entries
.begin(); iter
!= entries
.end(); ++iter
) {
7240 rgw_cls_bi_entry
& entry
= *iter
;
7241 encode_json("entry", entry
, formatter
.get());
7244 formatter
->flush(cout
);
7245 } while (is_truncated
);
7246 formatter
->flush(cout
);
7248 if (specified_shard_id
)
7251 formatter
->close_section();
7252 formatter
->flush(cout
);
7255 if (opt_cmd
== OPT::BI_PURGE
) {
7256 if (bucket_name
.empty()) {
7257 cerr
<< "ERROR: bucket name not specified" << std::endl
;
7260 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7262 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7266 std::unique_ptr
<rgw::sal::Bucket
> cur_bucket
;
7267 ret
= init_bucket(user
.get(), tenant
, bucket_name
, string(), &cur_bucket
);
7268 if (ret
== -ENOENT
) {
7269 // no bucket entrypoint
7270 } else if (ret
< 0) {
7271 cerr
<< "ERROR: could not init current bucket info for bucket_name=" << bucket_name
<< ": " << cpp_strerror(-ret
) << std::endl
;
7273 } else if (cur_bucket
->get_bucket_id() == bucket
->get_bucket_id() &&
7274 !yes_i_really_mean_it
) {
7275 cerr
<< "specified bucket instance points to a current bucket instance" << std::endl
;
7276 cerr
<< "do you really mean it? (requires --yes-i-really-mean-it)" << std::endl
;
7280 ret
= bucket
->purge_instance(dpp());
7286 if (opt_cmd
== OPT::OBJECT_PUT
) {
7287 if (bucket_name
.empty()) {
7288 cerr
<< "ERROR: bucket not specified" << std::endl
;
7291 if (object
.empty()) {
7292 cerr
<< "ERROR: object not specified" << std::endl
;
7296 RGWDataAccess
data_access(store
);
7297 rgw_obj_key
key(object
, object_version
);
7299 RGWDataAccess::BucketRef b
;
7300 RGWDataAccess::ObjectRef obj
;
7302 int ret
= data_access
.get_bucket(dpp(), tenant
, bucket_name
, bucket_id
, &b
, null_yield
);
7304 cerr
<< "ERROR: failed to init bucket: " << cpp_strerror(-ret
) << std::endl
;
7308 ret
= b
->get_object(key
, &obj
);
7310 cerr
<< "ERROR: failed to get object: " << cpp_strerror(-ret
) << std::endl
;
7315 ret
= read_input(infile
, bl
);
7317 cerr
<< "ERROR: failed to read input: " << cpp_strerror(-ret
) << std::endl
;
7320 map
<string
, bufferlist
> attrs
;
7321 ret
= obj
->put(bl
, attrs
, dpp(), null_yield
);
7323 cerr
<< "ERROR: put object returned error: " << cpp_strerror(-ret
) << std::endl
;
7327 if (opt_cmd
== OPT::OBJECT_RM
) {
7328 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7330 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7333 rgw_obj_key
key(object
, object_version
);
7334 ret
= rgw_remove_object(dpp(), store
, bucket
.get(), key
);
7337 cerr
<< "ERROR: object remove returned: " << cpp_strerror(-ret
) << std::endl
;
7342 if (opt_cmd
== OPT::OBJECT_REWRITE
) {
7343 if (bucket_name
.empty()) {
7344 cerr
<< "ERROR: bucket not specified" << std::endl
;
7347 if (object
.empty()) {
7348 cerr
<< "ERROR: object not specified" << std::endl
;
7352 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7354 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7358 std::unique_ptr
<rgw::sal::Object
> obj
= bucket
->get_object(object
);
7359 obj
->set_instance(object_version
);
7360 bool need_rewrite
= true;
7361 if (min_rewrite_stripe_size
> 0) {
7362 ret
= check_min_obj_stripe_size(store
, obj
.get(), min_rewrite_stripe_size
, &need_rewrite
);
7364 ldpp_dout(dpp(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << ret
<< dendl
;
7368 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->rewrite_obj(obj
.get(), dpp(), null_yield
);
7370 cerr
<< "ERROR: object rewrite returned: " << cpp_strerror(-ret
) << std::endl
;
7374 ldpp_dout(dpp(), 20) << "skipped object" << dendl
;
7378 if (opt_cmd
== OPT::OBJECTS_EXPIRE
) {
7379 if (!static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->process_expire_objects(dpp())) {
7380 cerr
<< "ERROR: process_expire_objects() processing returned error." << std::endl
;
7385 if (opt_cmd
== OPT::OBJECTS_EXPIRE_STALE_LIST
) {
7386 ret
= RGWBucketAdminOp::fix_obj_expiry(store
, bucket_op
, stream_flusher
, dpp(), true);
7388 cerr
<< "ERROR: listing returned " << cpp_strerror(-ret
) << std::endl
;
7393 if (opt_cmd
== OPT::OBJECTS_EXPIRE_STALE_RM
) {
7394 ret
= RGWBucketAdminOp::fix_obj_expiry(store
, bucket_op
, stream_flusher
, dpp(), false);
7396 cerr
<< "ERROR: removing returned " << cpp_strerror(-ret
) << std::endl
;
7401 if (opt_cmd
== OPT::BUCKET_REWRITE
) {
7402 if (bucket_name
.empty()) {
7403 cerr
<< "ERROR: bucket not specified" << std::endl
;
7407 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7409 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7413 uint64_t start_epoch
= 0;
7414 uint64_t end_epoch
= 0;
7416 if (!end_date
.empty()) {
7417 int ret
= utime_t::parse_date(end_date
, &end_epoch
, NULL
);
7419 cerr
<< "ERROR: failed to parse end date" << std::endl
;
7423 if (!start_date
.empty()) {
7424 int ret
= utime_t::parse_date(start_date
, &start_epoch
, NULL
);
7426 cerr
<< "ERROR: failed to parse start date" << std::endl
;
7431 bool is_truncated
= true;
7432 bool cls_filtered
= true;
7434 rgw_obj_index_key marker
;
7435 string empty_prefix
;
7436 string empty_delimiter
;
7438 formatter
->open_object_section("result");
7439 formatter
->dump_string("bucket", bucket_name
);
7440 formatter
->open_array_section("objects");
7442 constexpr uint32_t NUM_ENTRIES
= 1000;
7443 uint16_t expansion_factor
= 1;
7444 while (is_truncated
) {
7445 RGWRados::ent_map_t result
;
7446 result
.reserve(NUM_ENTRIES
);
7448 int r
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->cls_bucket_list_ordered(
7449 dpp(), bucket
->get_info(), RGW_NO_SHARD
,
7450 marker
, empty_prefix
, empty_delimiter
,
7451 NUM_ENTRIES
, true, expansion_factor
,
7452 result
, &is_truncated
, &cls_filtered
, &marker
,
7454 rgw_bucket_object_check_filter
);
7455 if (r
< 0 && r
!= -ENOENT
) {
7456 cerr
<< "ERROR: failed operation r=" << r
<< std::endl
;
7457 } else if (r
== -ENOENT
) {
7461 if (result
.size() < NUM_ENTRIES
/ 8) {
7463 } else if (result
.size() > NUM_ENTRIES
* 7 / 8 &&
7464 expansion_factor
> 1) {
7468 for (auto iter
= result
.begin(); iter
!= result
.end(); ++iter
) {
7469 rgw_obj_key key
= iter
->second
.key
;
7470 rgw_bucket_dir_entry
& entry
= iter
->second
;
7472 formatter
->open_object_section("object");
7473 formatter
->dump_string("name", key
.name
);
7474 formatter
->dump_string("instance", key
.instance
);
7475 formatter
->dump_int("size", entry
.meta
.size
);
7476 utime_t
ut(entry
.meta
.mtime
);
7477 ut
.gmtime(formatter
->dump_stream("mtime"));
7479 if ((entry
.meta
.size
< min_rewrite_size
) ||
7480 (entry
.meta
.size
> max_rewrite_size
) ||
7481 (start_epoch
> 0 && start_epoch
> (uint64_t)ut
.sec()) ||
7482 (end_epoch
> 0 && end_epoch
< (uint64_t)ut
.sec())) {
7483 formatter
->dump_string("status", "Skipped");
7485 std::unique_ptr
<rgw::sal::Object
> obj
= bucket
->get_object(key
);
7487 bool need_rewrite
= true;
7488 if (min_rewrite_stripe_size
> 0) {
7489 r
= check_min_obj_stripe_size(store
, obj
.get(), min_rewrite_stripe_size
, &need_rewrite
);
7491 ldpp_dout(dpp(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << r
<< dendl
;
7494 if (!need_rewrite
) {
7495 formatter
->dump_string("status", "Skipped");
7497 r
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->rewrite_obj(obj
.get(), dpp(), null_yield
);
7499 formatter
->dump_string("status", "Success");
7501 formatter
->dump_string("status", cpp_strerror(-r
));
7505 formatter
->dump_int("flags", entry
.flags
);
7507 formatter
->close_section();
7508 formatter
->flush(cout
);
7511 formatter
->close_section();
7512 formatter
->close_section();
7513 formatter
->flush(cout
);
7516 if (opt_cmd
== OPT::BUCKET_RESHARD
) {
7517 int ret
= check_reshard_bucket_params(static_cast<rgw::sal::RadosStore
*>(store
),
7521 num_shards_specified
,
7523 yes_i_really_mean_it
,
7529 RGWBucketReshard
br(static_cast<rgw::sal::RadosStore
*>(store
), bucket
->get_info(), bucket
->get_attrs(), nullptr /* no callback */);
7531 #define DEFAULT_RESHARD_MAX_ENTRIES 1000
7532 if (max_entries
< 1) {
7533 max_entries
= DEFAULT_RESHARD_MAX_ENTRIES
;
7536 return br
.execute(num_shards
, max_entries
, dpp(),
7537 verbose
, &cout
, formatter
.get());
7540 if (opt_cmd
== OPT::RESHARD_ADD
) {
7541 int ret
= check_reshard_bucket_params(static_cast<rgw::sal::RadosStore
*>(store
),
7545 num_shards_specified
,
7547 yes_i_really_mean_it
,
7553 int num_source_shards
= (bucket
->get_info().layout
.current_index
.layout
.normal
.num_shards
> 0 ? bucket
->get_info().layout
.current_index
.layout
.normal
.num_shards
: 1);
7555 RGWReshard
reshard(static_cast<rgw::sal::RadosStore
*>(store
), dpp());
7556 cls_rgw_reshard_entry entry
;
7557 entry
.time
= real_clock::now();
7558 entry
.tenant
= tenant
;
7559 entry
.bucket_name
= bucket_name
;
7560 entry
.bucket_id
= bucket
->get_info().bucket
.bucket_id
;
7561 entry
.old_num_shards
= num_source_shards
;
7562 entry
.new_num_shards
= num_shards
;
7564 return reshard
.add(dpp(), entry
);
7567 if (opt_cmd
== OPT::RESHARD_LIST
) {
7570 if (max_entries
< 0) {
7575 store
->ctx()->_conf
.get_val
<uint64_t>("rgw_reshard_num_logs");
7577 RGWReshard
reshard(static_cast<rgw::sal::RadosStore
*>(store
), dpp());
7579 formatter
->open_array_section("reshard");
7580 for (int i
= 0; i
< num_logshards
; i
++) {
7581 bool is_truncated
= true;
7584 std::list
<cls_rgw_reshard_entry
> entries
;
7585 ret
= reshard
.list(dpp(), i
, marker
, max_entries
- count
, entries
, &is_truncated
);
7587 cerr
<< "Error listing resharding buckets: " << cpp_strerror(-ret
) << std::endl
;
7590 for (const auto& entry
: entries
) {
7591 encode_json("entry", entry
, formatter
.get());
7594 entries
.crbegin()->get_key(&marker
); // last entry's key becomes marker
7596 count
+= entries
.size();
7597 formatter
->flush(cout
);
7598 } while (is_truncated
&& count
< max_entries
);
7600 if (count
>= max_entries
) {
7605 formatter
->close_section();
7606 formatter
->flush(cout
);
7611 if (opt_cmd
== OPT::RESHARD_STATUS
) {
7612 if (bucket_name
.empty()) {
7613 cerr
<< "ERROR: bucket not specified" << std::endl
;
7617 ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7619 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7623 RGWBucketReshard
br(static_cast<rgw::sal::RadosStore
*>(store
), bucket
->get_info(), bucket
->get_attrs(), nullptr /* no callback */);
7624 list
<cls_rgw_bucket_instance_entry
> status
;
7625 int r
= br
.get_status(dpp(), &status
);
7627 cerr
<< "ERROR: could not get resharding status for bucket " <<
7628 bucket_name
<< std::endl
;
7632 show_reshard_status(status
, formatter
.get());
7635 if (opt_cmd
== OPT::RESHARD_PROCESS
) {
7636 RGWReshard
reshard(static_cast<rgw::sal::RadosStore
*>(store
), true, &cout
);
7638 int ret
= reshard
.process_all_logshards(dpp());
7640 cerr
<< "ERROR: failed to process reshard logs, error=" << cpp_strerror(-ret
) << std::endl
;
7645 if (opt_cmd
== OPT::RESHARD_CANCEL
) {
7646 if (bucket_name
.empty()) {
7647 cerr
<< "ERROR: bucket not specified" << std::endl
;
7651 bool bucket_initable
= true;
7652 ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7654 if (yes_i_really_mean_it
) {
7655 bucket_initable
= false;
7657 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) <<
7658 "; if you want to cancel the reshard request nonetheless, please "
7659 "use the --yes-i-really-mean-it option" << std::endl
;
7664 if (bucket_initable
) {
7665 // we did not encounter an error, so let's work with the bucket
7666 RGWBucketReshard
br(static_cast<rgw::sal::RadosStore
*>(store
), bucket
->get_info(), bucket
->get_attrs(),
7667 nullptr /* no callback */);
7668 int ret
= br
.cancel(dpp());
7670 if (ret
== -EBUSY
) {
7671 cerr
<< "There is ongoing resharding, please retry after " <<
7672 store
->ctx()->_conf
.get_val
<uint64_t>(
7673 "rgw_reshard_bucket_lock_duration") <<
7674 " seconds " << std::endl
;
7676 cerr
<< "Error canceling bucket " << bucket_name
<<
7677 " resharding: " << cpp_strerror(-ret
) << std::endl
;
7683 RGWReshard
reshard(static_cast<rgw::sal::RadosStore
*>(store
), dpp());
7685 cls_rgw_reshard_entry entry
;
7686 entry
.tenant
= tenant
;
7687 entry
.bucket_name
= bucket_name
;
7688 //entry.bucket_id = bucket_id;
7690 ret
= reshard
.remove(dpp(), entry
);
7691 if (ret
< 0 && ret
!= -ENOENT
) {
7692 cerr
<< "Error in updating reshard log with bucket " <<
7693 bucket_name
<< ": " << cpp_strerror(-ret
) << std::endl
;
7696 } // OPT_RESHARD_CANCEL
7698 if (opt_cmd
== OPT::OBJECT_UNLINK
) {
7699 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7701 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7704 list
<rgw_obj_index_key
> oid_list
;
7705 rgw_obj_key
key(object
, object_version
);
7706 rgw_obj_index_key index_key
;
7707 key
.get_index_key(&index_key
);
7708 oid_list
.push_back(index_key
);
7709 ret
= bucket
->remove_objs_from_index(dpp(), oid_list
);
7711 cerr
<< "ERROR: remove_obj_from_index() returned error: " << cpp_strerror(-ret
) << std::endl
;
7716 if (opt_cmd
== OPT::OBJECT_STAT
) {
7717 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7719 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7722 std::unique_ptr
<rgw::sal::Object
> obj
= bucket
->get_object(object
);
7723 obj
->set_instance(object_version
);
7725 RGWObjectCtx
obj_ctx(store
);
7727 ret
= obj
->get_obj_attrs(&obj_ctx
, null_yield
, dpp());
7729 cerr
<< "ERROR: failed to stat object, returned error: " << cpp_strerror(-ret
) << std::endl
;
7732 formatter
->open_object_section("object_metadata");
7733 formatter
->dump_string("name", object
);
7734 formatter
->dump_unsigned("size", obj
->get_obj_size());
7736 map
<string
, bufferlist
>::iterator iter
;
7737 map
<string
, bufferlist
> other_attrs
;
7738 for (iter
= obj
->get_attrs().begin(); iter
!= obj
->get_attrs().end(); ++iter
) {
7739 bufferlist
& bl
= iter
->second
;
7740 bool handled
= false;
7741 if (iter
->first
== RGW_ATTR_MANIFEST
) {
7742 handled
= decode_dump
<RGWObjManifest
>("manifest", bl
, formatter
.get());
7743 } else if (iter
->first
== RGW_ATTR_ACL
) {
7744 handled
= decode_dump
<RGWAccessControlPolicy
>("policy", bl
, formatter
.get());
7745 } else if (iter
->first
== RGW_ATTR_ID_TAG
) {
7746 handled
= dump_string("tag", bl
, formatter
.get());
7747 } else if (iter
->first
== RGW_ATTR_ETAG
) {
7748 handled
= dump_string("etag", bl
, formatter
.get());
7749 } else if (iter
->first
== RGW_ATTR_COMPRESSION
) {
7750 handled
= decode_dump
<RGWCompressionInfo
>("compression", bl
, formatter
.get());
7751 } else if (iter
->first
== RGW_ATTR_DELETE_AT
) {
7752 handled
= decode_dump
<utime_t
>("delete_at", bl
, formatter
.get());
7756 other_attrs
[iter
->first
] = bl
;
7759 formatter
->open_object_section("attrs");
7760 for (iter
= other_attrs
.begin(); iter
!= other_attrs
.end(); ++iter
) {
7761 dump_string(iter
->first
.c_str(), iter
->second
, formatter
.get());
7763 formatter
->close_section();
7764 formatter
->close_section();
7765 formatter
->flush(cout
);
7768 if (opt_cmd
== OPT::BUCKET_CHECK
) {
7769 if (check_head_obj_locator
) {
7770 if (bucket_name
.empty()) {
7771 cerr
<< "ERROR: need to specify bucket name" << std::endl
;
7774 do_check_object_locator(tenant
, bucket_name
, fix
, remove_bad
, formatter
.get());
7776 RGWBucketAdminOp::check_index(store
, bucket_op
, stream_flusher
, null_yield
, dpp());
7780 if (opt_cmd
== OPT::BUCKET_RM
) {
7781 if (!inconsistent_index
) {
7782 RGWBucketAdminOp::remove_bucket(store
, bucket_op
, null_yield
, dpp(), bypass_gc
, true);
7784 if (!yes_i_really_mean_it
) {
7785 cerr
<< "using --inconsistent_index can corrupt the bucket index " << std::endl
7786 << "do you really mean it? (requires --yes-i-really-mean-it)" << std::endl
;
7789 RGWBucketAdminOp::remove_bucket(store
, bucket_op
, null_yield
, dpp(), bypass_gc
, false);
7793 if (opt_cmd
== OPT::GC_LIST
) {
7796 bool processing_queue
= false;
7797 formatter
->open_array_section("entries");
7800 list
<cls_rgw_gc_obj_info
> result
;
7801 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->list_gc_objs(&index
, marker
, 1000, !include_all
, result
, &truncated
, processing_queue
);
7803 cerr
<< "ERROR: failed to list objs: " << cpp_strerror(-ret
) << std::endl
;
7808 list
<cls_rgw_gc_obj_info
>::iterator iter
;
7809 for (iter
= result
.begin(); iter
!= result
.end(); ++iter
) {
7810 cls_rgw_gc_obj_info
& info
= *iter
;
7811 formatter
->open_object_section("chain_info");
7812 formatter
->dump_string("tag", info
.tag
);
7813 formatter
->dump_stream("time") << info
.time
;
7814 formatter
->open_array_section("objs");
7815 list
<cls_rgw_obj
>::iterator liter
;
7816 cls_rgw_obj_chain
& chain
= info
.chain
;
7817 for (liter
= chain
.objs
.begin(); liter
!= chain
.objs
.end(); ++liter
) {
7818 cls_rgw_obj
& obj
= *liter
;
7819 encode_json("obj", obj
, formatter
.get());
7821 formatter
->close_section(); // objs
7822 formatter
->close_section(); // obj_chain
7823 formatter
->flush(cout
);
7825 } while (truncated
);
7826 formatter
->close_section();
7827 formatter
->flush(cout
);
7830 if (opt_cmd
== OPT::GC_PROCESS
) {
7831 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->process_gc(!include_all
);
7833 cerr
<< "ERROR: gc processing returned error: " << cpp_strerror(-ret
) << std::endl
;
7838 if (opt_cmd
== OPT::LC_LIST
) {
7839 formatter
->open_array_section("lifecycle_list");
7840 vector
<rgw::sal::Lifecycle::LCEntry
> bucket_lc_map
;
7843 #define MAX_LC_LIST_ENTRIES 100
7844 if (max_entries
< 0) {
7845 max_entries
= MAX_LC_LIST_ENTRIES
;
7848 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->list_lc_progress(marker
, max_entries
,
7849 bucket_lc_map
, index
);
7851 cerr
<< "ERROR: failed to list objs: " << cpp_strerror(-ret
)
7855 for (const auto& entry
: bucket_lc_map
) {
7856 formatter
->open_object_section("bucket_lc_info");
7857 formatter
->dump_string("bucket", entry
.bucket
);
7859 time_t t
{time_t(entry
.start_time
)};
7861 exp_buf
, sizeof(exp_buf
),
7862 "%a, %d %b %Y %T %Z", std::gmtime(&t
))) {
7863 formatter
->dump_string("started", exp_buf
);
7865 string lc_status
= LC_STATUS
[entry
.status
];
7866 formatter
->dump_string("status", lc_status
);
7867 formatter
->close_section(); // objs
7868 formatter
->flush(cout
);
7870 } while (!bucket_lc_map
.empty());
7872 formatter
->close_section(); //lifecycle list
7873 formatter
->flush(cout
);
7877 if (opt_cmd
== OPT::LC_GET
) {
7878 if (bucket_name
.empty()) {
7879 cerr
<< "ERROR: bucket not specified" << std::endl
;
7883 RGWLifecycleConfiguration config
;
7884 ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
7886 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
7890 auto aiter
= bucket
->get_attrs().find(RGW_ATTR_LC
);
7891 if (aiter
== bucket
->get_attrs().end()) {
7895 bufferlist::const_iterator iter
{&aiter
->second
};
7897 config
.decode(iter
);
7898 } catch (const buffer::error
& e
) {
7899 cerr
<< "ERROR: decode life cycle config failed" << std::endl
;
7903 encode_json("result", config
, formatter
.get());
7904 formatter
->flush(cout
);
7907 if (opt_cmd
== OPT::LC_PROCESS
) {
7908 if ((! bucket_name
.empty()) ||
7909 (! bucket_id
.empty())) {
7910 int ret
= init_bucket(nullptr, tenant
, bucket_name
, bucket_id
, &bucket
);
7912 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
)
7919 static_cast<rgw::sal::RadosStore
*>(store
)->getRados()->process_lc(bucket
);
7921 cerr
<< "ERROR: lc processing returned error: " << cpp_strerror(-ret
) << std::endl
;
7926 if (opt_cmd
== OPT::LC_RESHARD_FIX
) {
7927 ret
= RGWBucketAdminOp::fix_lc_shards(store
, bucket_op
, stream_flusher
, dpp());
7929 cerr
<< "ERROR: fixing lc shards: " << cpp_strerror(-ret
) << std::endl
;
7934 if (opt_cmd
== OPT::ORPHANS_FIND
) {
7935 if (!yes_i_really_mean_it
) {
7936 cerr
<< "this command is now deprecated; please consider using the rgw-orphan-list tool; "
7937 << "accidental removal of active objects cannot be reversed; "
7938 << "do you really mean it? (requires --yes-i-really-mean-it)"
7942 cerr
<< "IMPORTANT: this command is now deprecated; please consider using the rgw-orphan-list tool"
7946 RGWOrphanSearch
search(static_cast<rgw::sal::RadosStore
*>(store
), max_concurrent_ios
, orphan_stale_secs
);
7948 if (job_id
.empty()) {
7949 cerr
<< "ERROR: --job-id not specified" << std::endl
;
7952 if (pool_name
.empty()) {
7953 cerr
<< "ERROR: --pool not specified" << std::endl
;
7957 RGWOrphanSearchInfo info
;
7960 info
.job_name
= job_id
;
7961 info
.num_shards
= num_shards
;
7963 int ret
= search
.init(dpp(), job_id
, &info
, detail
);
7965 cerr
<< "could not init search, ret=" << ret
<< std::endl
;
7968 ret
= search
.run(dpp());
7974 if (opt_cmd
== OPT::ORPHANS_FINISH
) {
7975 if (!yes_i_really_mean_it
) {
7976 cerr
<< "this command is now deprecated; please consider using the rgw-orphan-list tool; "
7977 << "accidental removal of active objects cannot be reversed; "
7978 << "do you really mean it? (requires --yes-i-really-mean-it)"
7982 cerr
<< "IMPORTANT: this command is now deprecated; please consider using the rgw-orphan-list tool"
7986 RGWOrphanSearch
search(static_cast<rgw::sal::RadosStore
*>(store
), max_concurrent_ios
, orphan_stale_secs
);
7988 if (job_id
.empty()) {
7989 cerr
<< "ERROR: --job-id not specified" << std::endl
;
7992 int ret
= search
.init(dpp(), job_id
, NULL
);
7994 if (ret
== -ENOENT
) {
7995 cerr
<< "job not found" << std::endl
;
7999 ret
= search
.finish();
8005 if (opt_cmd
== OPT::ORPHANS_LIST_JOBS
){
8006 if (!yes_i_really_mean_it
) {
8007 cerr
<< "this command is now deprecated; please consider using the rgw-orphan-list tool; "
8008 << "do you really mean it? (requires --yes-i-really-mean-it)"
8012 cerr
<< "IMPORTANT: this command is now deprecated; please consider using the rgw-orphan-list tool"
8016 RGWOrphanStore
orphan_store(static_cast<rgw::sal::RadosStore
*>(store
));
8017 int ret
= orphan_store
.init(dpp());
8019 cerr
<< "connection to cluster failed!" << std::endl
;
8023 map
<string
,RGWOrphanSearchState
> m
;
8024 ret
= orphan_store
.list_jobs(m
);
8026 cerr
<< "job list failed" << std::endl
;
8029 formatter
->open_array_section("entries");
8030 for (const auto &it
: m
){
8032 formatter
->dump_string("job-id",it
.first
);
8034 encode_json("orphan_search_state", it
.second
, formatter
.get());
8037 formatter
->close_section();
8038 formatter
->flush(cout
);
8041 if (opt_cmd
== OPT::USER_CHECK
) {
8042 check_bad_user_bucket_mapping(store
, user
.get(), fix
, null_yield
, dpp());
8045 if (opt_cmd
== OPT::USER_STATS
) {
8046 if (rgw::sal::User::empty(user
)) {
8047 cerr
<< "ERROR: uid not specified" << std::endl
;
8051 if (!bucket_name
.empty()) {
8052 cerr
<< "ERROR: --reset-stats does not work on buckets and "
8053 "bucket specified" << std::endl
;
8057 cerr
<< "ERROR: sync-stats includes the reset-stats functionality, "
8058 "so at most one of the two should be specified" << std::endl
;
8061 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->user
->reset_stats(dpp(), user
->get_id(), null_yield
);
8063 cerr
<< "ERROR: could not reset user stats: " << cpp_strerror(-ret
) <<
8070 if (!bucket_name
.empty()) {
8071 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
8073 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
8076 ret
= bucket
->sync_user_stats(dpp(), null_yield
);
8078 cerr
<< "ERROR: could not sync bucket stats: " <<
8079 cpp_strerror(-ret
) << std::endl
;
8083 int ret
= rgw_user_sync_all_stats(dpp(), store
, user
.get(), null_yield
);
8085 cerr
<< "ERROR: could not sync user stats: " <<
8086 cpp_strerror(-ret
) << std::endl
;
8092 constexpr bool omit_utilized_stats
= false;
8093 RGWStorageStats
stats(omit_utilized_stats
);
8094 ceph::real_time last_stats_sync
;
8095 ceph::real_time last_stats_update
;
8096 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->user
->read_stats(dpp(), user
->get_id(), &stats
, null_yield
,
8098 &last_stats_update
);
8100 if (ret
== -ENOENT
) { /* in case of ENOENT */
8101 cerr
<< "User has not been initialized or user does not exist" << std::endl
;
8103 cerr
<< "ERROR: can't read user: " << cpp_strerror(ret
) << std::endl
;
8110 Formatter::ObjectSection
os(*formatter
, "result");
8111 encode_json("stats", stats
, formatter
.get());
8112 utime_t
last_sync_ut(last_stats_sync
);
8113 encode_json("last_stats_sync", last_sync_ut
, formatter
.get());
8114 utime_t
last_update_ut(last_stats_update
);
8115 encode_json("last_stats_update", last_update_ut
, formatter
.get());
8117 formatter
->flush(cout
);
8120 if (opt_cmd
== OPT::METADATA_GET
) {
8121 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->meta
.mgr
->get(metadata_key
, formatter
.get(), null_yield
, dpp());
8123 cerr
<< "ERROR: can't get key: " << cpp_strerror(-ret
) << std::endl
;
8127 formatter
->flush(cout
);
8130 if (opt_cmd
== OPT::METADATA_PUT
) {
8132 int ret
= read_input(infile
, bl
);
8134 cerr
<< "ERROR: failed to read input: " << cpp_strerror(-ret
) << std::endl
;
8137 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->meta
.mgr
->put(metadata_key
, bl
, null_yield
, dpp(), RGWMDLogSyncType::APPLY_ALWAYS
, false);
8139 cerr
<< "ERROR: can't put key: " << cpp_strerror(-ret
) << std::endl
;
8144 if (opt_cmd
== OPT::METADATA_RM
) {
8145 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->meta
.mgr
->remove(metadata_key
, null_yield
, dpp());
8147 cerr
<< "ERROR: can't remove key: " << cpp_strerror(-ret
) << std::endl
;
8152 if (opt_cmd
== OPT::METADATA_LIST
|| opt_cmd
== OPT::USER_LIST
) {
8153 if (opt_cmd
== OPT::USER_LIST
) {
8154 metadata_key
= "user";
8158 int ret
= store
->meta_list_keys_init(dpp(), metadata_key
, marker
, &handle
);
8160 cerr
<< "ERROR: can't get key: " << cpp_strerror(-ret
) << std::endl
;
8167 if (max_entries_specified
) {
8168 formatter
->open_object_section("result");
8170 formatter
->open_array_section("keys");
8175 left
= (max_entries_specified
? max_entries
- count
: max
);
8176 ret
= store
->meta_list_keys_next(dpp(), handle
, left
, keys
, &truncated
);
8177 if (ret
< 0 && ret
!= -ENOENT
) {
8178 cerr
<< "ERROR: lists_keys_next(): " << cpp_strerror(-ret
) << std::endl
;
8180 } if (ret
!= -ENOENT
) {
8181 for (list
<string
>::iterator iter
= keys
.begin(); iter
!= keys
.end(); ++iter
) {
8182 formatter
->dump_string("key", *iter
);
8185 formatter
->flush(cout
);
8187 } while (truncated
&& left
> 0);
8189 formatter
->close_section();
8191 if (max_entries_specified
) {
8192 encode_json("truncated", truncated
, formatter
.get());
8193 encode_json("count", count
, formatter
.get());
8195 encode_json("marker", store
->meta_get_marker(handle
), formatter
.get());
8197 formatter
->close_section();
8199 formatter
->flush(cout
);
8201 store
->meta_list_keys_complete(handle
);
8204 if (opt_cmd
== OPT::MDLOG_LIST
) {
8205 if (!start_date
.empty()) {
8206 std::cerr
<< "start-date not allowed." << std::endl
;
8209 if (!end_date
.empty()) {
8210 std::cerr
<< "end-date not allowed." << std::endl
;
8213 if (!end_marker
.empty()) {
8214 std::cerr
<< "end-marker not allowed." << std::endl
;
8217 if (!start_marker
.empty()) {
8218 if (marker
.empty()) {
8219 marker
= start_marker
;
8221 std::cerr
<< "start-marker and marker not both allowed." << std::endl
;
8226 int i
= (specified_shard_id
? shard_id
: 0);
8228 if (period_id
.empty()) {
8229 int ret
= read_current_period_id(static_cast<rgw::sal::RadosStore
*>(store
), realm_id
, realm_name
, &period_id
);
8233 std::cerr
<< "No --period given, using current period="
8234 << period_id
<< std::endl
;
8236 RGWMetadataLog
*meta_log
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->mdlog
->get_log(period_id
);
8238 formatter
->open_array_section("entries");
8239 for (; i
< g_ceph_context
->_conf
->rgw_md_log_max_shards
; i
++) {
8241 list
<cls_log_entry
> entries
;
8243 meta_log
->init_list_entries(i
, {}, {}, marker
, &handle
);
8246 int ret
= meta_log
->list_entries(dpp(), handle
, 1000, entries
, NULL
, &truncated
);
8248 cerr
<< "ERROR: meta_log->list_entries(): " << cpp_strerror(-ret
) << std::endl
;
8252 for (list
<cls_log_entry
>::iterator iter
= entries
.begin(); iter
!= entries
.end(); ++iter
) {
8253 cls_log_entry
& entry
= *iter
;
8254 static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->meta
.mgr
->dump_log_entry(entry
, formatter
.get());
8256 formatter
->flush(cout
);
8257 } while (truncated
);
8259 meta_log
->complete_list_entries(handle
);
8261 if (specified_shard_id
)
8266 formatter
->close_section();
8267 formatter
->flush(cout
);
8270 if (opt_cmd
== OPT::MDLOG_STATUS
) {
8271 int i
= (specified_shard_id
? shard_id
: 0);
8273 if (period_id
.empty()) {
8274 int ret
= read_current_period_id(static_cast<rgw::sal::RadosStore
*>(store
), realm_id
, realm_name
, &period_id
);
8278 std::cerr
<< "No --period given, using current period="
8279 << period_id
<< std::endl
;
8281 RGWMetadataLog
*meta_log
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->mdlog
->get_log(period_id
);
8283 formatter
->open_array_section("entries");
8285 for (; i
< g_ceph_context
->_conf
->rgw_md_log_max_shards
; i
++) {
8286 RGWMetadataLogInfo info
;
8287 meta_log
->get_info(dpp(), i
, &info
);
8289 ::encode_json("info", info
, formatter
.get());
8291 if (specified_shard_id
)
8296 formatter
->close_section();
8297 formatter
->flush(cout
);
8300 if (opt_cmd
== OPT::MDLOG_AUTOTRIM
) {
8301 // need a full history for purging old mdlog periods
8302 static_cast<rgw::sal::RadosStore
*>(store
)->svc()->mdlog
->init_oldest_log_period(null_yield
, dpp());
8304 RGWCoroutinesManager
crs(store
->ctx(), store
->get_cr_registry());
8305 RGWHTTPManager
http(store
->ctx(), crs
.get_completion_mgr());
8306 int ret
= http
.start();
8308 cerr
<< "failed to initialize http client with " << cpp_strerror(ret
) << std::endl
;
8312 auto num_shards
= g_conf()->rgw_md_log_max_shards
;
8313 auto mltcr
= create_admin_meta_log_trim_cr(
8314 dpp(), static_cast<rgw::sal::RadosStore
*>(store
), &http
, num_shards
);
8316 cerr
<< "Cluster misconfigured! Unable to trim." << std::endl
;
8319 ret
= crs
.run(dpp(), mltcr
);
8321 cerr
<< "automated mdlog trim failed with " << cpp_strerror(ret
) << std::endl
;
8326 if (opt_cmd
== OPT::MDLOG_TRIM
) {
8327 if (!start_date
.empty()) {
8328 std::cerr
<< "start-date not allowed." << std::endl
;
8331 if (!end_date
.empty()) {
8332 std::cerr
<< "end-date not allowed." << std::endl
;
8335 if (!start_marker
.empty()) {
8336 std::cerr
<< "start-marker not allowed." << std::endl
;
8339 if (!end_marker
.empty()) {
8340 if (marker
.empty()) {
8341 marker
= end_marker
;
8343 std::cerr
<< "end-marker and marker not both allowed." << std::endl
;
8348 if (!specified_shard_id
) {
8349 cerr
<< "ERROR: shard-id must be specified for trim operation" << std::endl
;
8353 if (marker
.empty()) {
8354 cerr
<< "ERROR: marker must be specified for trim operation" << std::endl
;
8358 if (period_id
.empty()) {
8359 std::cerr
<< "missing --period argument" << std::endl
;
8362 RGWMetadataLog
*meta_log
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->mdlog
->get_log(period_id
);
8364 // trim until -ENODATA
8366 ret
= meta_log
->trim(dpp(), shard_id
, {}, {}, {}, marker
);
8368 if (ret
< 0 && ret
!= -ENODATA
) {
8369 cerr
<< "ERROR: meta_log->trim(): " << cpp_strerror(-ret
) << std::endl
;
8374 if (opt_cmd
== OPT::SYNC_INFO
) {
8375 sync_info(opt_effective_zone_id
, opt_bucket
, zone_formatter
.get());
8378 if (opt_cmd
== OPT::SYNC_STATUS
) {
8379 sync_status(formatter
.get());
8382 if (opt_cmd
== OPT::METADATA_SYNC_STATUS
) {
8383 RGWMetaSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->rados
->get_async_processor());
8385 int ret
= sync
.init(dpp());
8387 cerr
<< "ERROR: sync.init() returned ret=" << ret
<< std::endl
;
8391 rgw_meta_sync_status sync_status
;
8392 ret
= sync
.read_sync_status(dpp(), &sync_status
);
8394 cerr
<< "ERROR: sync.read_sync_status() returned ret=" << ret
<< std::endl
;
8398 formatter
->open_object_section("summary");
8399 encode_json("sync_status", sync_status
, formatter
.get());
8401 uint64_t full_total
= 0;
8402 uint64_t full_complete
= 0;
8404 for (auto marker_iter
: sync_status
.sync_markers
) {
8405 full_total
+= marker_iter
.second
.total_entries
;
8406 if (marker_iter
.second
.state
== rgw_meta_sync_marker::SyncState::FullSync
) {
8407 full_complete
+= marker_iter
.second
.pos
;
8409 full_complete
+= marker_iter
.second
.total_entries
;
8413 formatter
->open_object_section("full_sync");
8414 encode_json("total", full_total
, formatter
.get());
8415 encode_json("complete", full_complete
, formatter
.get());
8416 formatter
->close_section();
8417 formatter
->close_section();
8419 formatter
->flush(cout
);
8423 if (opt_cmd
== OPT::METADATA_SYNC_INIT
) {
8424 RGWMetaSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->rados
->get_async_processor());
8426 int ret
= sync
.init(dpp());
8428 cerr
<< "ERROR: sync.init() returned ret=" << ret
<< std::endl
;
8431 ret
= sync
.init_sync_status(dpp());
8433 cerr
<< "ERROR: sync.init_sync_status() returned ret=" << ret
<< std::endl
;
8439 if (opt_cmd
== OPT::METADATA_SYNC_RUN
) {
8440 RGWMetaSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->rados
->get_async_processor());
8442 int ret
= sync
.init(dpp());
8444 cerr
<< "ERROR: sync.init() returned ret=" << ret
<< std::endl
;
8448 ret
= sync
.run(dpp(), null_yield
);
8450 cerr
<< "ERROR: sync.run() returned ret=" << ret
<< std::endl
;
8455 if (opt_cmd
== OPT::DATA_SYNC_STATUS
) {
8456 if (source_zone
.empty()) {
8457 cerr
<< "ERROR: source zone not specified" << std::endl
;
8460 RGWDataSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->rados
->get_async_processor(), source_zone
, nullptr);
8462 int ret
= sync
.init(dpp());
8464 cerr
<< "ERROR: sync.init() returned ret=" << ret
<< std::endl
;
8468 rgw_data_sync_status sync_status
;
8469 if (specified_shard_id
) {
8470 set
<string
> pending_buckets
;
8471 set
<string
> recovering_buckets
;
8472 rgw_data_sync_marker sync_marker
;
8473 ret
= sync
.read_shard_status(dpp(), shard_id
, pending_buckets
, recovering_buckets
, &sync_marker
,
8474 max_entries_specified
? max_entries
: 20);
8475 if (ret
< 0 && ret
!= -ENOENT
) {
8476 cerr
<< "ERROR: sync.read_shard_status() returned ret=" << ret
<< std::endl
;
8479 formatter
->open_object_section("summary");
8480 encode_json("shard_id", shard_id
, formatter
.get());
8481 encode_json("marker", sync_marker
, formatter
.get());
8482 encode_json("pending_buckets", pending_buckets
, formatter
.get());
8483 encode_json("recovering_buckets", recovering_buckets
, formatter
.get());
8484 formatter
->close_section();
8485 formatter
->flush(cout
);
8487 ret
= sync
.read_sync_status(dpp(), &sync_status
);
8488 if (ret
< 0 && ret
!= -ENOENT
) {
8489 cerr
<< "ERROR: sync.read_sync_status() returned ret=" << ret
<< std::endl
;
8493 formatter
->open_object_section("summary");
8494 encode_json("sync_status", sync_status
, formatter
.get());
8496 uint64_t full_total
= 0;
8497 uint64_t full_complete
= 0;
8499 for (auto marker_iter
: sync_status
.sync_markers
) {
8500 full_total
+= marker_iter
.second
.total_entries
;
8501 if (marker_iter
.second
.state
== rgw_meta_sync_marker::SyncState::FullSync
) {
8502 full_complete
+= marker_iter
.second
.pos
;
8504 full_complete
+= marker_iter
.second
.total_entries
;
8508 formatter
->open_object_section("full_sync");
8509 encode_json("total", full_total
, formatter
.get());
8510 encode_json("complete", full_complete
, formatter
.get());
8511 formatter
->close_section();
8512 formatter
->close_section();
8514 formatter
->flush(cout
);
8518 if (opt_cmd
== OPT::DATA_SYNC_INIT
) {
8519 if (source_zone
.empty()) {
8520 cerr
<< "ERROR: source zone not specified" << std::endl
;
8524 RGWDataSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->rados
->get_async_processor(), source_zone
, nullptr);
8526 int ret
= sync
.init(dpp());
8528 cerr
<< "ERROR: sync.init() returned ret=" << ret
<< std::endl
;
8532 ret
= sync
.init_sync_status(dpp());
8534 cerr
<< "ERROR: sync.init_sync_status() returned ret=" << ret
<< std::endl
;
8539 if (opt_cmd
== OPT::DATA_SYNC_RUN
) {
8540 if (source_zone
.empty()) {
8541 cerr
<< "ERROR: source zone not specified" << std::endl
;
8545 RGWSyncModuleInstanceRef sync_module
;
8546 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->sync_modules
->get_manager()->create_instance(dpp(), g_ceph_context
, static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->get_zone().tier_type
,
8547 store
->get_zone()->get_params().tier_config
, &sync_module
);
8549 ldpp_dout(dpp(), -1) << "ERROR: failed to init sync module instance, ret=" << ret
<< dendl
;
8553 RGWDataSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), static_cast<rgw::sal::RadosStore
*>(store
)->svc()->rados
->get_async_processor(), source_zone
, nullptr, sync_module
);
8555 ret
= sync
.init(dpp());
8557 cerr
<< "ERROR: sync.init() returned ret=" << ret
<< std::endl
;
8561 ret
= sync
.run(dpp());
8563 cerr
<< "ERROR: sync.run() returned ret=" << ret
<< std::endl
;
8568 if (opt_cmd
== OPT::BUCKET_SYNC_INIT
) {
8569 if (source_zone
.empty()) {
8570 cerr
<< "ERROR: source zone not specified" << std::endl
;
8573 if (bucket_name
.empty()) {
8574 cerr
<< "ERROR: bucket not specified" << std::endl
;
8577 int ret
= init_bucket_for_sync(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
8581 auto opt_sb
= opt_source_bucket
;
8582 if (opt_sb
&& opt_sb
->bucket_id
.empty()) {
8584 std::unique_ptr
<rgw::sal::Bucket
> sbuck
;
8585 int ret
= init_bucket_for_sync(user
.get(), opt_sb
->tenant
, opt_sb
->name
, sbid
, &sbuck
);
8589 opt_sb
= sbuck
->get_key();
8592 RGWBucketPipeSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), source_zone
, opt_sb
, bucket
->get_key());
8594 ret
= sync
.init(dpp());
8596 cerr
<< "ERROR: sync.init() returned ret=" << ret
<< std::endl
;
8599 ret
= sync
.init_sync_status(dpp());
8601 cerr
<< "ERROR: sync.init_sync_status() returned ret=" << ret
<< std::endl
;
8606 if (opt_cmd
== OPT::BUCKET_SYNC_CHECKPOINT
) {
8607 std::optional
<rgw_zone_id
> opt_source_zone
;
8608 if (!source_zone
.empty()) {
8609 opt_source_zone
= source_zone
;
8611 if (bucket_name
.empty()) {
8612 cerr
<< "ERROR: bucket not specified" << std::endl
;
8615 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
8620 if (!static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->bucket
->bucket_imports_data(bucket
->get_key(), null_yield
, dpp())) {
8621 std::cout
<< "Sync is disabled for bucket " << bucket_name
<< std::endl
;
8625 RGWBucketSyncPolicyHandlerRef handler
;
8626 ret
= store
->get_sync_policy_handler(dpp(), std::nullopt
, bucket
->get_key(), &handler
, null_yield
);
8628 std::cerr
<< "ERROR: failed to get policy handler for bucket ("
8629 << bucket
<< "): r=" << ret
<< ": " << cpp_strerror(-ret
) << std::endl
;
8633 auto timeout_at
= ceph::coarse_mono_clock::now() + opt_timeout_sec
;
8634 ret
= rgw_bucket_sync_checkpoint(dpp(), static_cast<rgw::sal::RadosStore
*>(store
), *handler
, bucket
->get_info(),
8635 opt_source_zone
, opt_source_bucket
,
8636 opt_retry_delay_ms
, timeout_at
);
8638 ldpp_dout(dpp(), -1) << "bucket sync checkpoint failed: " << cpp_strerror(ret
) << dendl
;
8643 if ((opt_cmd
== OPT::BUCKET_SYNC_DISABLE
) || (opt_cmd
== OPT::BUCKET_SYNC_ENABLE
)) {
8644 if (bucket_name
.empty()) {
8645 cerr
<< "ERROR: bucket not specified" << std::endl
;
8648 if (opt_cmd
== OPT::BUCKET_SYNC_DISABLE
) {
8649 bucket_op
.set_sync_bucket(false);
8651 bucket_op
.set_sync_bucket(true);
8653 bucket_op
.set_tenant(tenant
);
8655 ret
= RGWBucketAdminOp::sync_bucket(store
, bucket_op
, dpp(), &err_msg
);
8657 cerr
<< err_msg
<< std::endl
;
8662 if (opt_cmd
== OPT::BUCKET_SYNC_INFO
) {
8663 if (bucket_name
.empty()) {
8664 cerr
<< "ERROR: bucket not specified" << std::endl
;
8667 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
8671 bucket_sync_info(static_cast<rgw::sal::RadosStore
*>(store
), bucket
->get_info(), std::cout
);
8674 if (opt_cmd
== OPT::BUCKET_SYNC_STATUS
) {
8675 if (bucket_name
.empty()) {
8676 cerr
<< "ERROR: bucket not specified" << std::endl
;
8679 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
8683 bucket_sync_status(static_cast<rgw::sal::RadosStore
*>(store
), bucket
->get_info(), source_zone
, opt_source_bucket
, std::cout
);
8686 if (opt_cmd
== OPT::BUCKET_SYNC_MARKERS
) {
8687 if (source_zone
.empty()) {
8688 cerr
<< "ERROR: source zone not specified" << std::endl
;
8691 if (bucket_name
.empty()) {
8692 cerr
<< "ERROR: bucket not specified" << std::endl
;
8695 int ret
= init_bucket_for_sync(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
8699 RGWBucketPipeSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), source_zone
, opt_source_bucket
, bucket
->get_key());
8701 ret
= sync
.init(dpp());
8703 cerr
<< "ERROR: sync.init() returned ret=" << ret
<< std::endl
;
8706 ret
= sync
.read_sync_status(dpp());
8708 cerr
<< "ERROR: sync.read_sync_status() returned ret=" << ret
<< std::endl
;
8712 map
<int, rgw_bucket_shard_sync_info
>& sync_status
= sync
.get_sync_status();
8714 encode_json("sync_status", sync_status
, formatter
.get());
8715 formatter
->flush(cout
);
8718 if (opt_cmd
== OPT::BUCKET_SYNC_RUN
) {
8719 if (source_zone
.empty()) {
8720 cerr
<< "ERROR: source zone not specified" << std::endl
;
8723 if (bucket_name
.empty()) {
8724 cerr
<< "ERROR: bucket not specified" << std::endl
;
8727 int ret
= init_bucket_for_sync(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
8731 RGWBucketPipeSyncStatusManager
sync(static_cast<rgw::sal::RadosStore
*>(store
), source_zone
, opt_source_bucket
, bucket
->get_key());
8733 ret
= sync
.init(dpp());
8735 cerr
<< "ERROR: sync.init() returned ret=" << ret
<< std::endl
;
8739 ret
= sync
.run(dpp());
8741 cerr
<< "ERROR: sync.run() returned ret=" << ret
<< std::endl
;
8746 if (opt_cmd
== OPT::BILOG_LIST
) {
8747 if (bucket_name
.empty()) {
8748 cerr
<< "ERROR: bucket not specified" << std::endl
;
8751 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
8753 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
8756 formatter
->open_array_section("entries");
8759 if (max_entries
< 0)
8763 list
<rgw_bi_log_entry
> entries
;
8764 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->bilog_rados
->log_list(dpp(), bucket
->get_info(), shard_id
, marker
, max_entries
- count
, entries
, &truncated
);
8766 cerr
<< "ERROR: list_bi_log_entries(): " << cpp_strerror(-ret
) << std::endl
;
8770 count
+= entries
.size();
8772 for (list
<rgw_bi_log_entry
>::iterator iter
= entries
.begin(); iter
!= entries
.end(); ++iter
) {
8773 rgw_bi_log_entry
& entry
= *iter
;
8774 encode_json("entry", entry
, formatter
.get());
8778 formatter
->flush(cout
);
8779 } while (truncated
&& count
< max_entries
);
8781 formatter
->close_section();
8782 formatter
->flush(cout
);
8785 if (opt_cmd
== OPT::SYNC_ERROR_LIST
) {
8786 if (max_entries
< 0) {
8789 if (!start_date
.empty()) {
8790 std::cerr
<< "start-date not allowed." << std::endl
;
8793 if (!end_date
.empty()) {
8794 std::cerr
<< "end-date not allowed." << std::endl
;
8797 if (!end_marker
.empty()) {
8798 std::cerr
<< "end-marker not allowed." << std::endl
;
8801 if (!start_marker
.empty()) {
8802 if (marker
.empty()) {
8803 marker
= start_marker
;
8805 std::cerr
<< "start-marker and marker not both allowed." << std::endl
;
8816 formatter
->open_array_section("entries");
8818 for (; shard_id
< ERROR_LOGGER_SHARDS
; ++shard_id
) {
8819 formatter
->open_object_section("shard");
8820 encode_json("shard_id", shard_id
, formatter
.get());
8821 formatter
->open_array_section("entries");
8824 string oid
= RGWSyncErrorLogger::get_shard_oid(RGW_SYNC_ERROR_LOG_SHARD_PREFIX
, shard_id
);
8827 list
<cls_log_entry
> entries
;
8828 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->timelog
.list(dpp(), oid
, {}, {}, max_entries
- count
, entries
, marker
, &marker
, &truncated
,
8830 if (ret
== -ENOENT
) {
8834 cerr
<< "ERROR: svc.cls->timelog.list(): " << cpp_strerror(-ret
) << std::endl
;
8838 count
+= entries
.size();
8840 for (auto& cls_entry
: entries
) {
8841 rgw_sync_error_info log_entry
;
8843 auto iter
= cls_entry
.data
.cbegin();
8845 decode(log_entry
, iter
);
8846 } catch (buffer::error
& err
) {
8847 cerr
<< "ERROR: failed to decode log entry" << std::endl
;
8850 formatter
->open_object_section("entry");
8851 encode_json("id", cls_entry
.id
, formatter
.get());
8852 encode_json("section", cls_entry
.section
, formatter
.get());
8853 encode_json("name", cls_entry
.name
, formatter
.get());
8854 encode_json("timestamp", cls_entry
.timestamp
, formatter
.get());
8855 encode_json("info", log_entry
, formatter
.get());
8856 formatter
->close_section();
8857 formatter
->flush(cout
);
8859 } while (truncated
&& count
< max_entries
);
8861 formatter
->close_section();
8862 formatter
->close_section();
8864 if (specified_shard_id
) {
8869 formatter
->close_section();
8870 formatter
->flush(cout
);
8873 if (opt_cmd
== OPT::SYNC_ERROR_TRIM
) {
8874 if (!start_date
.empty()) {
8875 std::cerr
<< "start-date not allowed." << std::endl
;
8878 if (!end_date
.empty()) {
8879 std::cerr
<< "end-date not allowed." << std::endl
;
8882 if (!start_marker
.empty()) {
8883 std::cerr
<< "end-date not allowed." << std::endl
;
8886 if (!end_marker
.empty()) {
8887 std::cerr
<< "end-date not allowed." << std::endl
;
8895 for (; shard_id
< ERROR_LOGGER_SHARDS
; ++shard_id
) {
8896 ret
= trim_sync_error_log(shard_id
, marker
, trim_delay_ms
);
8898 cerr
<< "ERROR: sync error trim: " << cpp_strerror(-ret
) << std::endl
;
8901 if (specified_shard_id
) {
8907 if (opt_cmd
== OPT::SYNC_GROUP_CREATE
||
8908 opt_cmd
== OPT::SYNC_GROUP_MODIFY
) {
8909 CHECK_TRUE(require_opt(opt_group_id
), "ERROR: --group-id not specified", EINVAL
);
8910 CHECK_TRUE(require_opt(opt_status
), "ERROR: --status is not specified (options: forbidden, allowed, enabled)", EINVAL
);
8912 SyncPolicyContext
sync_policy_ctx(zonegroup_id
, zonegroup_name
, opt_bucket
);
8913 ret
= sync_policy_ctx
.init();
8917 auto& sync_policy
= sync_policy_ctx
.get_policy();
8919 if (opt_cmd
== OPT::SYNC_GROUP_MODIFY
) {
8920 auto iter
= sync_policy
.groups
.find(*opt_group_id
);
8921 if (iter
== sync_policy
.groups
.end()) {
8922 cerr
<< "ERROR: could not find group '" << *opt_group_id
<< "'" << std::endl
;
8927 auto& group
= sync_policy
.groups
[*opt_group_id
];
8928 group
.id
= *opt_group_id
;
8931 if (!group
.set_status(*opt_status
)) {
8932 cerr
<< "ERROR: unrecognized status (options: forbidden, allowed, enabled)" << std::endl
;
8937 ret
= sync_policy_ctx
.write_policy();
8942 show_result(sync_policy
, zone_formatter
.get(), cout
);
8945 if (opt_cmd
== OPT::SYNC_GROUP_GET
) {
8946 SyncPolicyContext
sync_policy_ctx(zonegroup_id
, zonegroup_name
, opt_bucket
);
8947 ret
= sync_policy_ctx
.init();
8951 auto& sync_policy
= sync_policy_ctx
.get_policy();
8953 auto& groups
= sync_policy
.groups
;
8955 if (!opt_group_id
) {
8956 show_result(groups
, zone_formatter
.get(), cout
);
8958 auto iter
= sync_policy
.groups
.find(*opt_group_id
);
8959 if (iter
== sync_policy
.groups
.end()) {
8960 cerr
<< "ERROR: could not find group '" << *opt_group_id
<< "'" << std::endl
;
8964 show_result(iter
->second
, zone_formatter
.get(), cout
);
8968 if (opt_cmd
== OPT::SYNC_GROUP_REMOVE
) {
8969 CHECK_TRUE(require_opt(opt_group_id
), "ERROR: --group-id not specified", EINVAL
);
8971 SyncPolicyContext
sync_policy_ctx(zonegroup_id
, zonegroup_name
, opt_bucket
);
8972 ret
= sync_policy_ctx
.init();
8976 auto& sync_policy
= sync_policy_ctx
.get_policy();
8978 sync_policy
.groups
.erase(*opt_group_id
);
8980 ret
= sync_policy_ctx
.write_policy();
8986 Formatter::ObjectSection
os(*zone_formatter
.get(), "result");
8987 encode_json("sync_policy", sync_policy
, zone_formatter
.get());
8990 zone_formatter
->flush(cout
);
8993 if (opt_cmd
== OPT::SYNC_GROUP_FLOW_CREATE
) {
8994 CHECK_TRUE(require_opt(opt_group_id
), "ERROR: --group-id not specified", EINVAL
);
8995 CHECK_TRUE(require_opt(opt_flow_id
), "ERROR: --flow-id not specified", EINVAL
);
8996 CHECK_TRUE(require_opt(opt_flow_type
),
8997 "ERROR: --flow-type not specified (options: symmetrical, directional)", EINVAL
);
8998 CHECK_TRUE((symmetrical_flow_opt(*opt_flow_type
) ||
8999 directional_flow_opt(*opt_flow_type
)),
9000 "ERROR: --flow-type invalid (options: symmetrical, directional)", EINVAL
);
9002 SyncPolicyContext
sync_policy_ctx(zonegroup_id
, zonegroup_name
, opt_bucket
);
9003 ret
= sync_policy_ctx
.init();
9007 auto& sync_policy
= sync_policy_ctx
.get_policy();
9009 auto iter
= sync_policy
.groups
.find(*opt_group_id
);
9010 if (iter
== sync_policy
.groups
.end()) {
9011 cerr
<< "ERROR: could not find group '" << *opt_group_id
<< "'" << std::endl
;
9015 auto& group
= iter
->second
;
9017 if (symmetrical_flow_opt(*opt_flow_type
)) {
9018 CHECK_TRUE(require_non_empty_opt(opt_zone_ids
), "ERROR: --zones not provided for symmetrical flow, or is empty", EINVAL
);
9020 rgw_sync_symmetric_group
*flow_group
;
9022 group
.data_flow
.find_or_create_symmetrical(*opt_flow_id
, &flow_group
);
9024 for (auto& z
: *opt_zone_ids
) {
9025 flow_group
->zones
.insert(z
);
9027 } else { /* directional */
9028 CHECK_TRUE(require_non_empty_opt(opt_source_zone_id
), "ERROR: --source-zone not provided for directional flow rule, or is empty", EINVAL
);
9029 CHECK_TRUE(require_non_empty_opt(opt_dest_zone_id
), "ERROR: --dest-zone not provided for directional flow rule, or is empty", EINVAL
);
9031 rgw_sync_directional_rule
*flow_rule
;
9033 group
.data_flow
.find_or_create_directional(*opt_source_zone_id
, *opt_dest_zone_id
, &flow_rule
);
9036 ret
= sync_policy_ctx
.write_policy();
9041 show_result(sync_policy
, zone_formatter
.get(), cout
);
9044 if (opt_cmd
== OPT::SYNC_GROUP_FLOW_REMOVE
) {
9045 CHECK_TRUE(require_opt(opt_group_id
), "ERROR: --group-id not specified", EINVAL
);
9046 CHECK_TRUE(require_opt(opt_flow_id
), "ERROR: --flow-id not specified", EINVAL
);
9047 CHECK_TRUE(require_opt(opt_flow_type
),
9048 "ERROR: --flow-type not specified (options: symmetrical, directional)", EINVAL
);
9049 CHECK_TRUE((symmetrical_flow_opt(*opt_flow_type
) ||
9050 directional_flow_opt(*opt_flow_type
)),
9051 "ERROR: --flow-type invalid (options: symmetrical, directional)", EINVAL
);
9053 SyncPolicyContext
sync_policy_ctx(zonegroup_id
, zonegroup_name
, opt_bucket
);
9054 ret
= sync_policy_ctx
.init();
9058 auto& sync_policy
= sync_policy_ctx
.get_policy();
9060 auto iter
= sync_policy
.groups
.find(*opt_group_id
);
9061 if (iter
== sync_policy
.groups
.end()) {
9062 cerr
<< "ERROR: could not find group '" << *opt_group_id
<< "'" << std::endl
;
9066 auto& group
= iter
->second
;
9068 if (symmetrical_flow_opt(*opt_flow_type
)) {
9069 group
.data_flow
.remove_symmetrical(*opt_flow_id
, opt_zone_ids
);
9070 } else { /* directional */
9071 CHECK_TRUE(require_non_empty_opt(opt_source_zone_id
), "ERROR: --source-zone not provided for directional flow rule, or is empty", EINVAL
);
9072 CHECK_TRUE(require_non_empty_opt(opt_dest_zone_id
), "ERROR: --dest-zone not provided for directional flow rule, or is empty", EINVAL
);
9074 group
.data_flow
.remove_directional(*opt_source_zone_id
, *opt_dest_zone_id
);
9077 ret
= sync_policy_ctx
.write_policy();
9082 show_result(sync_policy
, zone_formatter
.get(), cout
);
9085 if (opt_cmd
== OPT::SYNC_GROUP_PIPE_CREATE
||
9086 opt_cmd
== OPT::SYNC_GROUP_PIPE_MODIFY
) {
9087 CHECK_TRUE(require_opt(opt_group_id
), "ERROR: --group-id not specified", EINVAL
);
9088 CHECK_TRUE(require_opt(opt_pipe_id
), "ERROR: --pipe-id not specified", EINVAL
);
9089 if (opt_cmd
== OPT::SYNC_GROUP_PIPE_CREATE
) {
9090 CHECK_TRUE(require_non_empty_opt(opt_source_zone_ids
), "ERROR: --source-zones not provided or is empty; should be list of zones or '*'", EINVAL
);
9091 CHECK_TRUE(require_non_empty_opt(opt_dest_zone_ids
), "ERROR: --dest-zones not provided or is empty; should be list of zones or '*'", EINVAL
);
9094 SyncPolicyContext
sync_policy_ctx(zonegroup_id
, zonegroup_name
, opt_bucket
);
9095 ret
= sync_policy_ctx
.init();
9099 auto& sync_policy
= sync_policy_ctx
.get_policy();
9101 auto iter
= sync_policy
.groups
.find(*opt_group_id
);
9102 if (iter
== sync_policy
.groups
.end()) {
9103 cerr
<< "ERROR: could not find group '" << *opt_group_id
<< "'" << std::endl
;
9107 auto& group
= iter
->second
;
9109 rgw_sync_bucket_pipes
*pipe
;
9111 if (opt_cmd
== OPT::SYNC_GROUP_PIPE_CREATE
) {
9112 group
.find_pipe(*opt_pipe_id
, true, &pipe
);
9114 if (!group
.find_pipe(*opt_pipe_id
, false, &pipe
)) {
9115 cerr
<< "ERROR: could not find pipe '" << *opt_pipe_id
<< "'" << std::endl
;
9120 pipe
->source
.add_zones(*opt_source_zone_ids
);
9121 pipe
->source
.set_bucket(opt_source_tenant
,
9122 opt_source_bucket_name
,
9123 opt_source_bucket_id
);
9124 pipe
->dest
.add_zones(*opt_dest_zone_ids
);
9125 pipe
->dest
.set_bucket(opt_dest_tenant
,
9126 opt_dest_bucket_name
,
9127 opt_dest_bucket_id
);
9129 pipe
->params
.source
.filter
.set_prefix(opt_prefix
, !!opt_prefix_rm
);
9130 pipe
->params
.source
.filter
.set_tags(tags_add
, tags_rm
);
9131 if (opt_dest_owner
) {
9132 pipe
->params
.dest
.set_owner(*opt_dest_owner
);
9134 if (opt_storage_class
) {
9135 pipe
->params
.dest
.set_storage_class(*opt_storage_class
);
9138 pipe
->params
.priority
= *opt_priority
;
9141 if (*opt_mode
== "system") {
9142 pipe
->params
.mode
= rgw_sync_pipe_params::MODE_SYSTEM
;
9143 } else if (*opt_mode
== "user") {
9144 pipe
->params
.mode
= rgw_sync_pipe_params::MODE_USER
;
9146 cerr
<< "ERROR: bad mode value: should be one of the following: system, user" << std::endl
;
9151 if (!rgw::sal::User::empty(user
)) {
9152 pipe
->params
.user
= user
->get_id();
9153 } else if (pipe
->params
.user
.empty()) {
9154 auto owner
= sync_policy_ctx
.get_owner();
9156 pipe
->params
.user
= *owner
;
9160 ret
= sync_policy_ctx
.write_policy();
9165 show_result(sync_policy
, zone_formatter
.get(), cout
);
9168 if (opt_cmd
== OPT::SYNC_GROUP_PIPE_REMOVE
) {
9169 CHECK_TRUE(require_opt(opt_group_id
), "ERROR: --group-id not specified", EINVAL
);
9170 CHECK_TRUE(require_opt(opt_pipe_id
), "ERROR: --pipe-id not specified", EINVAL
);
9172 SyncPolicyContext
sync_policy_ctx(zonegroup_id
, zonegroup_name
, opt_bucket
);
9173 ret
= sync_policy_ctx
.init();
9177 auto& sync_policy
= sync_policy_ctx
.get_policy();
9179 auto iter
= sync_policy
.groups
.find(*opt_group_id
);
9180 if (iter
== sync_policy
.groups
.end()) {
9181 cerr
<< "ERROR: could not find group '" << *opt_group_id
<< "'" << std::endl
;
9185 auto& group
= iter
->second
;
9187 rgw_sync_bucket_pipes
*pipe
;
9189 if (!group
.find_pipe(*opt_pipe_id
, false, &pipe
)) {
9190 cerr
<< "ERROR: could not find pipe '" << *opt_pipe_id
<< "'" << std::endl
;
9194 if (opt_source_zone_ids
) {
9195 pipe
->source
.remove_zones(*opt_source_zone_ids
);
9198 pipe
->source
.remove_bucket(opt_source_tenant
,
9199 opt_source_bucket_name
,
9200 opt_source_bucket_id
);
9201 if (opt_dest_zone_ids
) {
9202 pipe
->dest
.remove_zones(*opt_dest_zone_ids
);
9204 pipe
->dest
.remove_bucket(opt_dest_tenant
,
9205 opt_dest_bucket_name
,
9206 opt_dest_bucket_id
);
9208 if (!(opt_source_zone_ids
||
9209 opt_source_tenant
||
9210 opt_source_bucket
||
9211 opt_source_bucket_id
||
9212 opt_dest_zone_ids
||
9215 opt_dest_bucket_id
)) {
9216 group
.remove_pipe(*opt_pipe_id
);
9219 ret
= sync_policy_ctx
.write_policy();
9224 show_result(sync_policy
, zone_formatter
.get(), cout
);
9227 if (opt_cmd
== OPT::SYNC_POLICY_GET
) {
9228 SyncPolicyContext
sync_policy_ctx(zonegroup_id
, zonegroup_name
, opt_bucket
);
9229 ret
= sync_policy_ctx
.init();
9233 auto& sync_policy
= sync_policy_ctx
.get_policy();
9235 show_result(sync_policy
, zone_formatter
.get(), cout
);
9238 if (opt_cmd
== OPT::BILOG_TRIM
) {
9239 if (bucket_name
.empty()) {
9240 cerr
<< "ERROR: bucket not specified" << std::endl
;
9243 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
9245 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
9248 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->bilog_rados
->log_trim(dpp(), bucket
->get_info(), shard_id
, start_marker
, end_marker
);
9250 cerr
<< "ERROR: trim_bi_log_entries(): " << cpp_strerror(-ret
) << std::endl
;
9255 if (opt_cmd
== OPT::BILOG_STATUS
) {
9256 if (bucket_name
.empty()) {
9257 cerr
<< "ERROR: bucket not specified" << std::endl
;
9260 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
9262 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
9265 map
<int, string
> markers
;
9266 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->bilog_rados
->get_log_status(dpp(), bucket
->get_info(), shard_id
,
9267 &markers
, null_yield
);
9269 cerr
<< "ERROR: get_bi_log_status(): " << cpp_strerror(-ret
) << std::endl
;
9272 formatter
->open_object_section("entries");
9273 encode_json("markers", markers
, formatter
.get());
9274 formatter
->close_section();
9275 formatter
->flush(cout
);
9278 if (opt_cmd
== OPT::BILOG_AUTOTRIM
) {
9279 RGWCoroutinesManager
crs(store
->ctx(), store
->get_cr_registry());
9280 RGWHTTPManager
http(store
->ctx(), crs
.get_completion_mgr());
9281 int ret
= http
.start();
9283 cerr
<< "failed to initialize http client with " << cpp_strerror(ret
) << std::endl
;
9287 rgw::BucketTrimConfig config
;
9288 configure_bucket_trim(store
->ctx(), config
);
9290 rgw::BucketTrimManager
trim(static_cast<rgw::sal::RadosStore
*>(store
), config
);
9293 cerr
<< "trim manager init failed with " << cpp_strerror(ret
) << std::endl
;
9296 ret
= crs
.run(dpp(), trim
.create_admin_bucket_trim_cr(&http
));
9298 cerr
<< "automated bilog trim failed with " << cpp_strerror(ret
) << std::endl
;
9303 if (opt_cmd
== OPT::DATALOG_LIST
) {
9304 formatter
->open_array_section("entries");
9307 if (max_entries
< 0)
9309 if (!start_date
.empty()) {
9310 std::cerr
<< "start-date not allowed." << std::endl
;
9313 if (!end_date
.empty()) {
9314 std::cerr
<< "end-date not allowed." << std::endl
;
9317 if (!end_marker
.empty()) {
9318 std::cerr
<< "end-marker not allowed." << std::endl
;
9321 if (!start_marker
.empty()) {
9322 if (marker
.empty()) {
9323 marker
= start_marker
;
9325 std::cerr
<< "start-marker and marker not both allowed." << std::endl
;
9330 auto datalog_svc
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->datalog_rados
;
9331 RGWDataChangesLog::LogMarker log_marker
;
9334 std::vector
<rgw_data_change_log_entry
> entries
;
9335 if (specified_shard_id
) {
9336 ret
= datalog_svc
->list_entries(dpp(), shard_id
, max_entries
- count
,
9338 &marker
, &truncated
);
9340 ret
= datalog_svc
->list_entries(dpp(), max_entries
- count
, entries
,
9341 log_marker
, &truncated
);
9344 cerr
<< "ERROR: datalog_svc->list_entries(): " << cpp_strerror(-ret
) << std::endl
;
9348 count
+= entries
.size();
9350 for (const auto& entry
: entries
) {
9352 encode_json("entry", entry
.entry
, formatter
.get());
9354 encode_json("entry", entry
, formatter
.get());
9357 formatter
.get()->flush(cout
);
9358 } while (truncated
&& count
< max_entries
);
9360 formatter
->close_section();
9361 formatter
->flush(cout
);
9364 if (opt_cmd
== OPT::DATALOG_STATUS
) {
9365 int i
= (specified_shard_id
? shard_id
: 0);
9367 formatter
->open_array_section("entries");
9368 for (; i
< g_ceph_context
->_conf
->rgw_data_log_num_shards
; i
++) {
9369 list
<cls_log_entry
> entries
;
9371 RGWDataChangesLogInfo info
;
9372 static_cast<rgw::sal::RadosStore
*>(store
)->svc()->datalog_rados
->get_info(dpp(), i
, &info
);
9374 ::encode_json("info", info
, formatter
.get());
9376 if (specified_shard_id
)
9380 formatter
->close_section();
9381 formatter
->flush(cout
);
9384 if (opt_cmd
== OPT::DATALOG_AUTOTRIM
) {
9385 RGWCoroutinesManager
crs(store
->ctx(), store
->get_cr_registry());
9386 RGWHTTPManager
http(store
->ctx(), crs
.get_completion_mgr());
9387 int ret
= http
.start();
9389 cerr
<< "failed to initialize http client with " << cpp_strerror(ret
) << std::endl
;
9393 auto num_shards
= g_conf()->rgw_data_log_num_shards
;
9394 std::vector
<std::string
> markers(num_shards
);
9395 ret
= crs
.run(dpp(), create_admin_data_log_trim_cr(dpp(), static_cast<rgw::sal::RadosStore
*>(store
), &http
, num_shards
, markers
));
9397 cerr
<< "automated datalog trim failed with " << cpp_strerror(ret
) << std::endl
;
9402 if (opt_cmd
== OPT::DATALOG_TRIM
) {
9403 if (!start_date
.empty()) {
9404 std::cerr
<< "start-date not allowed." << std::endl
;
9407 if (!end_date
.empty()) {
9408 std::cerr
<< "end-date not allowed." << std::endl
;
9411 if (!start_marker
.empty()) {
9412 std::cerr
<< "start-marker not allowed." << std::endl
;
9415 if (!end_marker
.empty()) {
9416 if (marker
.empty()) {
9417 marker
= end_marker
;
9419 std::cerr
<< "end-marker and marker not both allowed." << std::endl
;
9424 if (!specified_shard_id
) {
9425 cerr
<< "ERROR: requires a --shard-id" << std::endl
;
9429 if (marker
.empty()) {
9430 cerr
<< "ERROR: requires a --marker" << std::endl
;
9434 auto datalog
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->datalog_rados
;
9435 ret
= datalog
->trim_entries(dpp(), shard_id
, marker
);
9437 if (ret
< 0 && ret
!= -ENODATA
) {
9438 cerr
<< "ERROR: trim_entries(): " << cpp_strerror(-ret
) << std::endl
;
9443 if (opt_cmd
== OPT::DATALOG_TYPE
) {
9444 if (!opt_log_type
) {
9445 std::cerr
<< "log-type not specified." << std::endl
;
9448 auto datalog
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->datalog_rados
;
9449 ret
= datalog
->change_format(dpp(), *opt_log_type
, null_yield
);
9451 cerr
<< "ERROR: change_format(): " << cpp_strerror(-ret
) << std::endl
;
9456 if (opt_cmd
== OPT::DATALOG_PRUNE
) {
9457 auto datalog
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->datalog_rados
;
9458 std::optional
<uint64_t> through
;
9459 ret
= datalog
->trim_generations(dpp(), through
);
9462 cerr
<< "ERROR: trim_generations(): " << cpp_strerror(-ret
) << std::endl
;
9467 std::cout
<< "Pruned " << *through
<< " empty generations." << std::endl
;
9469 std::cout
<< "No empty generations." << std::endl
;
9473 bool quota_op
= (opt_cmd
== OPT::QUOTA_SET
|| opt_cmd
== OPT::QUOTA_ENABLE
|| opt_cmd
== OPT::QUOTA_DISABLE
);
9476 if (bucket_name
.empty() && rgw::sal::User::empty(user
)) {
9477 cerr
<< "ERROR: bucket name or uid is required for quota operation" << std::endl
;
9481 if (!bucket_name
.empty()) {
9482 if (!quota_scope
.empty() && quota_scope
!= "bucket") {
9483 cerr
<< "ERROR: invalid quota scope specification." << std::endl
;
9486 set_bucket_quota(store
, opt_cmd
, tenant
, bucket_name
,
9487 max_size
, max_objects
, have_max_size
, have_max_objects
);
9488 } else if (!rgw::sal::User::empty(user
)) {
9489 if (quota_scope
== "bucket") {
9490 return set_user_bucket_quota(opt_cmd
, ruser
, user_op
, max_size
, max_objects
, have_max_size
, have_max_objects
);
9491 } else if (quota_scope
== "user") {
9492 return set_user_quota(opt_cmd
, ruser
, user_op
, max_size
, max_objects
, have_max_size
, have_max_objects
);
9494 cerr
<< "ERROR: invalid quota scope specification. Please specify either --quota-scope=bucket, or --quota-scope=user" << std::endl
;
9500 bool ratelimit_op_set
= (opt_cmd
== OPT::RATELIMIT_SET
|| opt_cmd
== OPT::RATELIMIT_ENABLE
|| opt_cmd
== OPT::RATELIMIT_DISABLE
);
9501 bool ratelimit_op_get
= opt_cmd
== OPT::RATELIMIT_GET
;
9502 if (ratelimit_op_set
) {
9503 if (bucket_name
.empty() && rgw::sal::User::empty(user
)) {
9504 cerr
<< "ERROR: bucket name or uid is required for ratelimit operation" << std::endl
;
9508 if (!bucket_name
.empty()) {
9509 if (!ratelimit_scope
.empty() && ratelimit_scope
!= "bucket") {
9510 cerr
<< "ERROR: invalid ratelimit scope specification. (bucket scope is not bucket but bucket has been specified)" << std::endl
;
9513 return set_bucket_ratelimit(store
, opt_cmd
, tenant
, bucket_name
,
9514 max_read_ops
, max_write_ops
,
9515 max_read_bytes
, max_write_bytes
,
9516 have_max_read_ops
, have_max_write_ops
,
9517 have_max_read_bytes
, have_max_write_bytes
);
9518 } else if (!rgw::sal::User::empty(user
)) {
9519 } if (ratelimit_scope
== "user") {
9520 return set_user_ratelimit(opt_cmd
, user
, max_read_ops
, max_write_ops
,
9521 max_read_bytes
, max_write_bytes
,
9522 have_max_read_ops
, have_max_write_ops
,
9523 have_max_read_bytes
, have_max_write_bytes
);
9525 cerr
<< "ERROR: invalid ratelimit scope specification. Please specify either --ratelimit-scope=bucket, or --ratelimit-scope=user" << std::endl
;
9530 if (ratelimit_op_get
) {
9531 if (bucket_name
.empty() && rgw::sal::User::empty(user
)) {
9532 cerr
<< "ERROR: bucket name or uid is required for ratelimit operation" << std::endl
;
9536 if (!bucket_name
.empty()) {
9537 if (!ratelimit_scope
.empty() && ratelimit_scope
!= "bucket") {
9538 cerr
<< "ERROR: invalid ratelimit scope specification. (bucket scope is not bucket but bucket has been specified)" << std::endl
;
9541 return show_bucket_ratelimit(store
, tenant
, bucket_name
, formatter
.get());
9542 } else if (!rgw::sal::User::empty(user
)) {
9543 } if (ratelimit_scope
== "user") {
9544 return show_user_ratelimit(user
, formatter
.get());
9546 cerr
<< "ERROR: invalid ratelimit scope specification. Please specify either --ratelimit-scope=bucket, or --ratelimit-scope=user" << std::endl
;
9551 if (opt_cmd
== OPT::MFA_CREATE
) {
9552 rados::cls::otp::otp_info_t config
;
9554 if (rgw::sal::User::empty(user
)) {
9555 cerr
<< "ERROR: user id was not provided (via --uid)" << std::endl
;
9559 if (totp_serial
.empty()) {
9560 cerr
<< "ERROR: TOTP device serial number was not provided (via --totp-serial)" << std::endl
;
9564 if (totp_seed
.empty()) {
9565 cerr
<< "ERROR: TOTP device seed was not provided (via --totp-seed)" << std::endl
;
9570 rados::cls::otp::SeedType seed_type
;
9571 if (totp_seed_type
== "hex") {
9572 seed_type
= rados::cls::otp::OTP_SEED_HEX
;
9573 } else if (totp_seed_type
== "base32") {
9574 seed_type
= rados::cls::otp::OTP_SEED_BASE32
;
9576 cerr
<< "ERROR: invalid seed type: " << totp_seed_type
<< std::endl
;
9580 config
.id
= totp_serial
;
9581 config
.seed
= totp_seed
;
9582 config
.seed_type
= seed_type
;
9584 if (totp_seconds
> 0) {
9585 config
.step_size
= totp_seconds
;
9588 if (totp_window
> 0) {
9589 config
.window
= totp_window
;
9592 real_time mtime
= real_clock::now();
9593 string oid
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->mfa
.get_mfa_oid(user
->get_id());
9595 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->meta
.mgr
->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user
->get_id()),
9596 mtime
, &objv_tracker
,
9600 return static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->mfa
.create_mfa(dpp(), user
->get_id(), config
, &objv_tracker
, mtime
, null_yield
);
9603 cerr
<< "MFA creation failed, error: " << cpp_strerror(-ret
) << std::endl
;
9607 RGWUserInfo
& user_info
= user_op
.get_user_info();
9608 user_info
.mfa_ids
.insert(totp_serial
);
9609 user_op
.set_mfa_ids(user_info
.mfa_ids
);
9611 ret
= ruser
.modify(dpp(), user_op
, null_yield
, &err
);
9613 cerr
<< "ERROR: failed storing user info, error: " << err
<< std::endl
;
9618 if (opt_cmd
== OPT::MFA_REMOVE
) {
9619 if (rgw::sal::User::empty(user
)) {
9620 cerr
<< "ERROR: user id was not provided (via --uid)" << std::endl
;
9624 if (totp_serial
.empty()) {
9625 cerr
<< "ERROR: TOTP device serial number was not provided (via --totp-serial)" << std::endl
;
9629 real_time mtime
= real_clock::now();
9631 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->meta
.mgr
->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user
->get_id()),
9632 mtime
, &objv_tracker
,
9636 return static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->mfa
.remove_mfa(dpp(), user
->get_id(), totp_serial
, &objv_tracker
, mtime
, null_yield
);
9639 cerr
<< "MFA removal failed, error: " << cpp_strerror(-ret
) << std::endl
;
9643 RGWUserInfo
& user_info
= user_op
.get_user_info();
9644 user_info
.mfa_ids
.erase(totp_serial
);
9645 user_op
.set_mfa_ids(user_info
.mfa_ids
);
9647 ret
= ruser
.modify(dpp(), user_op
, null_yield
, &err
);
9649 cerr
<< "ERROR: failed storing user info, error: " << err
<< std::endl
;
9654 if (opt_cmd
== OPT::MFA_GET
) {
9655 if (rgw::sal::User::empty(user
)) {
9656 cerr
<< "ERROR: user id was not provided (via --uid)" << std::endl
;
9660 if (totp_serial
.empty()) {
9661 cerr
<< "ERROR: TOTP device serial number was not provided (via --totp-serial)" << std::endl
;
9665 rados::cls::otp::otp_info_t result
;
9666 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->mfa
.get_mfa(dpp(), user
->get_id(), totp_serial
, &result
, null_yield
);
9668 if (ret
== -ENOENT
|| ret
== -ENODATA
) {
9669 cerr
<< "MFA serial id not found" << std::endl
;
9671 cerr
<< "MFA retrieval failed, error: " << cpp_strerror(-ret
) << std::endl
;
9675 formatter
->open_object_section("result");
9676 encode_json("entry", result
, formatter
.get());
9677 formatter
->close_section();
9678 formatter
->flush(cout
);
9681 if (opt_cmd
== OPT::MFA_LIST
) {
9682 if (rgw::sal::User::empty(user
)) {
9683 cerr
<< "ERROR: user id was not provided (via --uid)" << std::endl
;
9687 list
<rados::cls::otp::otp_info_t
> result
;
9688 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->mfa
.list_mfa(dpp(), user
->get_id(), &result
, null_yield
);
9690 cerr
<< "MFA listing failed, error: " << cpp_strerror(-ret
) << std::endl
;
9693 formatter
->open_object_section("result");
9694 encode_json("entries", result
, formatter
.get());
9695 formatter
->close_section();
9696 formatter
->flush(cout
);
9699 if (opt_cmd
== OPT::MFA_CHECK
) {
9700 if (rgw::sal::User::empty(user
)) {
9701 cerr
<< "ERROR: user id was not provided (via --uid)" << std::endl
;
9705 if (totp_serial
.empty()) {
9706 cerr
<< "ERROR: TOTP device serial number was not provided (via --totp-serial)" << std::endl
;
9710 if (totp_pin
.empty()) {
9711 cerr
<< "ERROR: TOTP device serial number was not provided (via --totp-pin)" << std::endl
;
9715 list
<rados::cls::otp::otp_info_t
> result
;
9716 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->mfa
.check_mfa(dpp(), user
->get_id(), totp_serial
, totp_pin
.front(), null_yield
);
9718 cerr
<< "MFA check failed, error: " << cpp_strerror(-ret
) << std::endl
;
9722 cout
<< "ok" << std::endl
;
9725 if (opt_cmd
== OPT::MFA_RESYNC
) {
9726 if (rgw::sal::User::empty(user
)) {
9727 cerr
<< "ERROR: user id was not provided (via --uid)" << std::endl
;
9731 if (totp_serial
.empty()) {
9732 cerr
<< "ERROR: TOTP device serial number was not provided (via --totp-serial)" << std::endl
;
9736 if (totp_pin
.size() != 2) {
9737 cerr
<< "ERROR: missing two --totp-pin params (--totp-pin=<first> --totp-pin=<second>)" << std::endl
;
9741 rados::cls::otp::otp_info_t config
;
9742 int ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->mfa
.get_mfa(dpp(), user
->get_id(), totp_serial
, &config
, null_yield
);
9744 if (ret
== -ENOENT
|| ret
== -ENODATA
) {
9745 cerr
<< "MFA serial id not found" << std::endl
;
9747 cerr
<< "MFA retrieval failed, error: " << cpp_strerror(-ret
) << std::endl
;
9752 ceph::real_time now
;
9754 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->mfa
.otp_get_current_time(dpp(), user
->get_id(), &now
, null_yield
);
9756 cerr
<< "ERROR: failed to fetch current time from osd: " << cpp_strerror(-ret
) << std::endl
;
9761 ret
= scan_totp(store
->ctx(), now
, config
, totp_pin
, &time_ofs
);
9763 if (ret
== -ENOENT
) {
9764 cerr
<< "failed to resync, TOTP values not found in range" << std::endl
;
9766 cerr
<< "ERROR: failed to scan for TOTP values: " << cpp_strerror(-ret
) << std::endl
;
9771 config
.time_ofs
= time_ofs
;
9773 /* now update the backend */
9774 real_time mtime
= real_clock::now();
9776 ret
= static_cast<rgw::sal::RadosStore
*>(store
)->ctl()->meta
.mgr
->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user
->get_id()),
9777 mtime
, &objv_tracker
,
9781 return static_cast<rgw::sal::RadosStore
*>(store
)->svc()->cls
->mfa
.create_mfa(dpp(), user
->get_id(), config
, &objv_tracker
, mtime
, null_yield
);
9784 cerr
<< "MFA update failed, error: " << cpp_strerror(-ret
) << std::endl
;
9790 if (opt_cmd
== OPT::RESHARD_STALE_INSTANCES_LIST
) {
9791 if (!static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->can_reshard() && !yes_i_really_mean_it
) {
9792 cerr
<< "Resharding disabled in a multisite env, stale instances unlikely from resharding" << std::endl
;
9793 cerr
<< "These instances may not be safe to delete." << std::endl
;
9794 cerr
<< "Use --yes-i-really-mean-it to force displaying these instances." << std::endl
;
9798 ret
= RGWBucketAdminOp::list_stale_instances(store
, bucket_op
, stream_flusher
, dpp());
9800 cerr
<< "ERROR: listing stale instances" << cpp_strerror(-ret
) << std::endl
;
9804 if (opt_cmd
== OPT::RESHARD_STALE_INSTANCES_DELETE
) {
9805 if (!static_cast<rgw::sal::RadosStore
*>(store
)->svc()->zone
->can_reshard()) {
9806 cerr
<< "Resharding disabled in a multisite env. Stale instances are not safe to be deleted." << std::endl
;
9810 ret
= RGWBucketAdminOp::clear_stale_instances(store
, bucket_op
, stream_flusher
, dpp());
9812 cerr
<< "ERROR: deleting stale instances" << cpp_strerror(-ret
) << std::endl
;
9816 if (opt_cmd
== OPT::PUBSUB_TOPICS_LIST
) {
9818 RGWPubSub
ps(static_cast<rgw::sal::RadosStore
*>(store
), tenant
);
9820 if (!bucket_name
.empty()) {
9821 rgw_pubsub_bucket_topics result
;
9822 int ret
= init_bucket(user
.get(), tenant
, bucket_name
, bucket_id
, &bucket
);
9824 cerr
<< "ERROR: could not init bucket: " << cpp_strerror(-ret
) << std::endl
;
9828 auto b
= ps
.get_bucket(bucket
->get_key());
9829 ret
= b
->get_topics(&result
);
9831 cerr
<< "ERROR: could not get topics: " << cpp_strerror(-ret
) << std::endl
;
9834 encode_json("result", result
, formatter
.get());
9836 rgw_pubsub_topics result
;
9837 int ret
= ps
.get_topics(&result
);
9839 cerr
<< "ERROR: could not get topics: " << cpp_strerror(-ret
) << std::endl
;
9842 encode_json("result", result
, formatter
.get());
9844 formatter
->flush(cout
);
9847 if (opt_cmd
== OPT::PUBSUB_TOPIC_GET
) {
9848 if (topic_name
.empty()) {
9849 cerr
<< "ERROR: topic name was not provided (via --topic)" << std::endl
;
9853 RGWPubSub
ps(static_cast<rgw::sal::RadosStore
*>(store
), tenant
);
9855 rgw_pubsub_topic_subs topic
;
9856 ret
= ps
.get_topic(topic_name
, &topic
);
9858 cerr
<< "ERROR: could not get topic: " << cpp_strerror(-ret
) << std::endl
;
9861 encode_json("topic", topic
, formatter
.get());
9862 formatter
->flush(cout
);
9865 if (opt_cmd
== OPT::PUBSUB_TOPIC_RM
) {
9866 if (topic_name
.empty()) {
9867 cerr
<< "ERROR: topic name was not provided (via --topic)" << std::endl
;
9871 RGWPubSub
ps(static_cast<rgw::sal::RadosStore
*>(store
), tenant
);
9873 ret
= ps
.remove_topic(dpp(), topic_name
, null_yield
);
9875 cerr
<< "ERROR: could not remove topic: " << cpp_strerror(-ret
) << std::endl
;
9880 if (opt_cmd
== OPT::PUBSUB_SUB_GET
) {
9881 if (get_tier_type(static_cast<rgw::sal::RadosStore
*>(store
)) != "pubsub") {
9882 cerr
<< "ERROR: only pubsub tier type supports this command" << std::endl
;
9885 if (sub_name
.empty()) {
9886 cerr
<< "ERROR: subscription name was not provided (via --subscription)" << std::endl
;
9890 RGWPubSub
ps(static_cast<rgw::sal::RadosStore
*>(store
), tenant
);
9892 rgw_pubsub_sub_config sub_conf
;
9894 auto sub
= ps
.get_sub(sub_name
);
9895 ret
= sub
->get_conf(&sub_conf
);
9897 cerr
<< "ERROR: could not get subscription info: " << cpp_strerror(-ret
) << std::endl
;
9900 encode_json("sub", sub_conf
, formatter
.get());
9901 formatter
->flush(cout
);
9904 if (opt_cmd
== OPT::PUBSUB_SUB_RM
) {
9905 if (get_tier_type(static_cast<rgw::sal::RadosStore
*>(store
)) != "pubsub") {
9906 cerr
<< "ERROR: only pubsub tier type supports this command" << std::endl
;
9909 if (sub_name
.empty()) {
9910 cerr
<< "ERROR: subscription name was not provided (via --subscription)" << std::endl
;
9914 RGWPubSub
ps(static_cast<rgw::sal::RadosStore
*>(store
), tenant
);
9916 auto sub
= ps
.get_sub(sub_name
);
9917 ret
= sub
->unsubscribe(dpp(), topic_name
, null_yield
);
9919 cerr
<< "ERROR: could not get subscription info: " << cpp_strerror(-ret
) << std::endl
;
9924 if (opt_cmd
== OPT::PUBSUB_SUB_PULL
) {
9925 if (get_tier_type(static_cast<rgw::sal::RadosStore
*>(store
)) != "pubsub") {
9926 cerr
<< "ERROR: only pubsub tier type supports this command" << std::endl
;
9929 if (sub_name
.empty()) {
9930 cerr
<< "ERROR: subscription name was not provided (via --subscription)" << std::endl
;
9934 RGWPubSub
ps(static_cast<rgw::sal::RadosStore
*>(store
), tenant
);
9936 if (!max_entries_specified
) {
9937 max_entries
= RGWPubSub::Sub::DEFAULT_MAX_EVENTS
;
9939 auto sub
= ps
.get_sub_with_events(sub_name
);
9940 ret
= sub
->list_events(dpp(), marker
, max_entries
);
9942 cerr
<< "ERROR: could not list events: " << cpp_strerror(-ret
) << std::endl
;
9945 encode_json("result", *sub
, formatter
.get());
9946 formatter
->flush(cout
);
9949 if (opt_cmd
== OPT::PUBSUB_EVENT_RM
) {
9950 if (get_tier_type(static_cast<rgw::sal::RadosStore
*>(store
)) != "pubsub") {
9951 cerr
<< "ERROR: only pubsub tier type supports this command" << std::endl
;
9954 if (sub_name
.empty()) {
9955 cerr
<< "ERROR: subscription name was not provided (via --subscription)" << std::endl
;
9958 if (event_id
.empty()) {
9959 cerr
<< "ERROR: event id was not provided (via --event-id)" << std::endl
;
9963 RGWPubSub
ps(static_cast<rgw::sal::RadosStore
*>(store
), tenant
);
9965 auto sub
= ps
.get_sub_with_events(sub_name
);
9966 ret
= sub
->remove_event(dpp(), event_id
);
9968 cerr
<< "ERROR: could not remove event: " << cpp_strerror(-ret
) << std::endl
;
9973 if (opt_cmd
== OPT::SCRIPT_PUT
) {
9974 if (!str_script_ctx
) {
9975 cerr
<< "ERROR: context was not provided (via --context)" << std::endl
;
9978 if (infile
.empty()) {
9979 cerr
<< "ERROR: infile was not provided (via --infile)" << std::endl
;
9983 auto rc
= read_input(infile
, bl
);
9985 cerr
<< "ERROR: failed to read script: '" << infile
<< "'. error: " << rc
<< std::endl
;
9988 const std::string script
= bl
.to_str();
9989 std::string err_msg
;
9990 if (!rgw::lua::verify(script
, err_msg
)) {
9991 cerr
<< "ERROR: script: '" << infile
<< "' has error: " << std::endl
<< err_msg
<< std::endl
;
9994 const rgw::lua::context script_ctx
= rgw::lua::to_context(*str_script_ctx
);
9995 if (script_ctx
== rgw::lua::context::none
) {
9996 cerr
<< "ERROR: invalid script context: " << *str_script_ctx
<< ". must be one of: preRequest, postRequest" << std::endl
;
9999 rc
= rgw::lua::write_script(dpp(), store
, tenant
, null_yield
, script_ctx
, script
);
10001 cerr
<< "ERROR: failed to put script. error: " << rc
<< std::endl
;
10006 if (opt_cmd
== OPT::SCRIPT_GET
) {
10007 if (!str_script_ctx
) {
10008 cerr
<< "ERROR: context was not provided (via --context)" << std::endl
;
10011 const rgw::lua::context script_ctx
= rgw::lua::to_context(*str_script_ctx
);
10012 if (script_ctx
== rgw::lua::context::none
) {
10013 cerr
<< "ERROR: invalid script context: " << *str_script_ctx
<< ". must be one of: preRequest, postRequest" << std::endl
;
10016 std::string script
;
10017 const auto rc
= rgw::lua::read_script(dpp(), store
, tenant
, null_yield
, script_ctx
, script
);
10018 if (rc
== -ENOENT
) {
10019 std::cout
<< "no script exists for context: " << *str_script_ctx
<<
10020 (tenant
.empty() ? "" : (" in tenant: " + tenant
)) << std::endl
;
10021 } else if (rc
< 0) {
10022 cerr
<< "ERROR: failed to read script. error: " << rc
<< std::endl
;
10025 std::cout
<< script
<< std::endl
;
10029 if (opt_cmd
== OPT::SCRIPT_RM
) {
10030 if (!str_script_ctx
) {
10031 cerr
<< "ERROR: context was not provided (via --context)" << std::endl
;
10034 const rgw::lua::context script_ctx
= rgw::lua::to_context(*str_script_ctx
);
10035 if (script_ctx
== rgw::lua::context::none
) {
10036 cerr
<< "ERROR: invalid script context: " << *str_script_ctx
<< ". must be one of: preRequest, postRequest" << std::endl
;
10039 const auto rc
= rgw::lua::delete_script(dpp(), store
, tenant
, null_yield
, script_ctx
);
10041 cerr
<< "ERROR: failed to remove script. error: " << rc
<< std::endl
;
10046 if (opt_cmd
== OPT::SCRIPT_PACKAGE_ADD
) {
10047 #ifdef WITH_RADOSGW_LUA_PACKAGES
10048 if (!script_package
) {
10049 cerr
<< "ERROR: lua package name was not provided (via --package)" << std::endl
;
10052 const auto rc
= rgw::lua::add_package(dpp(), static_cast<rgw::sal::RadosStore
*>(store
), null_yield
, *script_package
, bool(allow_compilation
));
10054 cerr
<< "ERROR: failed to add lua package: " << script_package
<< " .error: " << rc
<< std::endl
;
10058 cerr
<< "ERROR: adding lua packages is not permitted" << std::endl
;
10063 if (opt_cmd
== OPT::SCRIPT_PACKAGE_RM
) {
10064 #ifdef WITH_RADOSGW_LUA_PACKAGES
10065 if (!script_package
) {
10066 cerr
<< "ERROR: lua package name was not provided (via --package)" << std::endl
;
10069 const auto rc
= rgw::lua::remove_package(dpp(), static_cast<rgw::sal::RadosStore
*>(store
), null_yield
, *script_package
);
10070 if (rc
== -ENOENT
) {
10071 cerr
<< "WARNING: package " << script_package
<< " did not exists or already removed" << std::endl
;
10075 cerr
<< "ERROR: failed to remove lua package: " << script_package
<< " .error: " << rc
<< std::endl
;
10079 cerr
<< "ERROR: removing lua packages in not permitted" << std::endl
;
10084 if (opt_cmd
== OPT::SCRIPT_PACKAGE_LIST
) {
10085 #ifdef WITH_RADOSGW_LUA_PACKAGES
10086 rgw::lua::packages_t packages
;
10087 const auto rc
= rgw::lua::list_packages(dpp(), static_cast<rgw::sal::RadosStore
*>(store
), null_yield
, packages
);
10088 if (rc
== -ENOENT
) {
10089 std::cout
<< "no lua packages in allowlist" << std::endl
;
10090 } else if (rc
< 0) {
10091 cerr
<< "ERROR: failed to read lua packages allowlist. error: " << rc
<< std::endl
;
10094 for (const auto& package
: packages
) {
10095 std::cout
<< package
<< std::endl
;
10099 cerr
<< "ERROR: listing lua packages in not permitted" << std::endl
;