]> git.proxmox.com Git - rustc.git/blob - vendor/base64/src/encode.rs
New upstream version 1.52.1+dfsg1
[rustc.git] / vendor / base64 / src / encode.rs
1 use crate::Config;
2 #[cfg(any(feature = "alloc", feature = "std", test))]
3 use crate::{chunked_encoder, STANDARD};
4 #[cfg(any(feature = "alloc", feature = "std", test))]
5 use alloc::{string::String, vec};
6 use core::convert::TryInto;
7
///Encode arbitrary octets as base64.
///Returns a String.
///Convenience for `encode_config(input, base64::STANDARD);`.
///
///# Panics
///
///Panics if the encoded length of `input` would overflow `usize`
///(propagated from `encode_config`'s buffer-size calculation).
///
///# Example
///
///```rust
///extern crate base64;
///
///fn main() {
///    let b64 = base64::encode(b"hello world");
///    println!("{}", b64);
///}
///```
#[cfg(any(feature = "alloc", feature = "std", test))]
pub fn encode<T: ?Sized + AsRef<[u8]>>(input: &T) -> String {
    encode_config(input, STANDARD)
}
26
///Encode arbitrary octets as base64 using the supplied `Config`.
///Returns a String.
///
///# Panics
///
///Panics if the encoded length of `input` would overflow `usize`.
///
///# Example
///
///```rust
///extern crate base64;
///
///fn main() {
///    let b64 = base64::encode_config(b"hello world~", base64::STANDARD);
///    println!("{}", b64);
///
///    let b64_url = base64::encode_config(b"hello internet~", base64::URL_SAFE);
///    println!("{}", b64_url);
///}
///```
#[cfg(any(feature = "alloc", feature = "std", test))]
pub fn encode_config<T: ?Sized + AsRef<[u8]>>(input: &T, config: Config) -> String {
    let input_bytes = input.as_ref();

    // Size the buffer exactly; encoding will fill every byte of it.
    let size = match encoded_size(input_bytes.len(), config) {
        Some(n) => n,
        None => panic!("integer overflow when calculating buffer size"),
    };
    let mut buf = vec![0; size];

    let written = encode_config_slice(input_bytes, config, &mut buf[..]);
    debug_assert_eq!(written, buf.len());

    // base64 output is pure ASCII, so this conversion cannot actually fail.
    String::from_utf8(buf).expect("Invalid UTF8")
}
55
///Encode arbitrary octets as base64.
///Writes into the supplied output buffer, which will grow the buffer if needed.
///
///# Example
///
///```rust
///extern crate base64;
///
///fn main() {
///    let mut buf = String::new();
///    base64::encode_config_buf(b"hello world~", base64::STANDARD, &mut buf);
///    println!("{}", buf);
///
///    buf.clear();
///    base64::encode_config_buf(b"hello internet~", base64::URL_SAFE, &mut buf);
///    println!("{}", buf);
///}
///```
#[cfg(any(feature = "alloc", feature = "std", test))]
pub fn encode_config_buf<T: ?Sized + AsRef<[u8]>>(input: &T, config: Config, buf: &mut String) {
    let bytes = input.as_ref();

    // Stream the encoded output directly into the caller's String.
    let mut sink = chunked_encoder::StringSink::new(buf);
    chunked_encoder::ChunkedEncoder::new(config)
        .encode(bytes, &mut sink)
        .expect("Writing to a String shouldn't fail")
}
87
88 /// Encode arbitrary octets as base64.
89 /// Writes into the supplied output buffer.
90 ///
91 /// This is useful if you wish to avoid allocation entirely (e.g. encoding into a stack-resident
92 /// or statically-allocated buffer).
93 ///
94 /// # Panics
95 ///
96 /// If `output` is too small to hold the encoded version of `input`, a panic will result.
97 ///
98 /// # Example
99 ///
100 /// ```rust
101 /// extern crate base64;
102 ///
103 /// fn main() {
104 /// let s = b"hello internet!";
105 /// let mut buf = Vec::new();
106 /// // make sure we'll have a slice big enough for base64 + padding
107 /// buf.resize(s.len() * 4 / 3 + 4, 0);
108 ///
109 /// let bytes_written = base64::encode_config_slice(s,
110 /// base64::STANDARD, &mut buf);
111 ///
112 /// // shorten our vec down to just what was written
113 /// buf.resize(bytes_written, 0);
114 ///
115 /// assert_eq!(s, base64::decode(&buf).unwrap().as_slice());
116 /// }
117 /// ```
118 pub fn encode_config_slice<T: ?Sized + AsRef<[u8]>>(
119 input: &T,
120 config: Config,
121 output: &mut [u8],
122 ) -> usize {
123 let input_bytes = input.as_ref();
124
125 let encoded_size = encoded_size(input_bytes.len(), config)
126 .expect("usize overflow when calculating buffer size");
127
128 let mut b64_output = &mut output[0..encoded_size];
129
130 encode_with_padding(&input_bytes, config, encoded_size, &mut b64_output);
131
132 encoded_size
133 }
134
135 /// B64-encode and pad (if configured).
136 ///
137 /// This helper exists to avoid recalculating encoded_size, which is relatively expensive on short
138 /// inputs.
139 ///
140 /// `encoded_size` is the encoded size calculated for `input`.
141 ///
142 /// `output` must be of size `encoded_size`.
143 ///
144 /// All bytes in `output` will be written to since it is exactly the size of the output.
145 fn encode_with_padding(input: &[u8], config: Config, encoded_size: usize, output: &mut [u8]) {
146 debug_assert_eq!(encoded_size, output.len());
147
148 let b64_bytes_written = encode_to_slice(input, output, config.char_set.encode_table());
149
150 let padding_bytes = if config.pad {
151 add_padding(input.len(), &mut output[b64_bytes_written..])
152 } else {
153 0
154 };
155
156 let encoded_bytes = b64_bytes_written
157 .checked_add(padding_bytes)
158 .expect("usize overflow when calculating b64 length");
159
160 debug_assert_eq!(encoded_size, encoded_bytes);
161 }
162
/// Read the first 8 bytes of `s` as a big-endian u64.
///
/// Panics (via slice indexing) if `s` is shorter than 8 bytes.
#[inline]
fn read_u64(s: &[u8]) -> u64 {
    let first_eight: [u8; 8] = s[..8].try_into().unwrap();
    u64::from_be_bytes(first_eight)
}
167
/// Encode input bytes to utf8 base64 bytes. Does not pad.
/// `output` must be long enough to hold the encoded `input` without padding.
/// Returns the number of bytes written.
///
/// Panics (via slice indexing) if `output` is too short for the unpadded encoding.
#[inline]
pub fn encode_to_slice(input: &[u8], output: &mut [u8], encode_table: &[u8; 64]) -> usize {
    let mut input_index: usize = 0;

    // The fast loop encodes BLOCKS_PER_FAST_LOOP blocks of 6 input bytes -> 8 output bytes
    // per iteration.
    const BLOCKS_PER_FAST_LOOP: usize = 4;
    const LOW_SIX_BITS: u64 = 0x3F;

    // we read 8 bytes at a time (u64) but only actually consume 6 of those bytes. Thus, we need
    // 2 trailing bytes to be available to read..
    let last_fast_index = input.len().saturating_sub(BLOCKS_PER_FAST_LOOP * 6 + 2);
    let mut output_index = 0;

    if last_fast_index > 0 {
        while input_index <= last_fast_index {
            // Major performance wins from letting the optimizer do the bounds check once, mostly
            // on the output side
            let input_chunk = &input[input_index..(input_index + (BLOCKS_PER_FAST_LOOP * 6 + 2))];
            let output_chunk = &mut output[output_index..(output_index + BLOCKS_PER_FAST_LOOP * 8)];

            // Hand-unrolling for 32 vs 16 or 8 bytes produces yields performance about equivalent
            // to unsafe pointer code on a Xeon E5-1650v3. 64 byte unrolling was slightly better for
            // large inputs but significantly worse for 50-byte input, unsurprisingly. I suspect
            // that it's a not uncommon use case to encode smallish chunks of data (e.g. a 64-byte
            // SHA-512 digest), so it would be nice if that fit in the unrolled loop at least once.
            // Plus, single-digit percentage performance differences might well be quite different
            // on different hardware.

            // The 6 payload bytes occupy the top 48 bits of the big-endian u64, so the eight
            // 6-bit groups start at bit offsets 58, 52, 46, 40, 34, 28, 22, 16.
            let input_u64 = read_u64(&input_chunk[0..]);

            output_chunk[0] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
            output_chunk[1] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
            output_chunk[2] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
            output_chunk[3] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
            output_chunk[4] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
            output_chunk[5] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
            output_chunk[6] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
            output_chunk[7] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

            // Each subsequent read starts 6 bytes later; the 2 trailing bytes of each u64 read
            // overlap the next block and are re-read there.
            let input_u64 = read_u64(&input_chunk[6..]);

            output_chunk[8] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
            output_chunk[9] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
            output_chunk[10] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
            output_chunk[11] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
            output_chunk[12] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
            output_chunk[13] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
            output_chunk[14] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
            output_chunk[15] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

            let input_u64 = read_u64(&input_chunk[12..]);

            output_chunk[16] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
            output_chunk[17] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
            output_chunk[18] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
            output_chunk[19] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
            output_chunk[20] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
            output_chunk[21] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
            output_chunk[22] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
            output_chunk[23] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

            let input_u64 = read_u64(&input_chunk[18..]);

            output_chunk[24] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
            output_chunk[25] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
            output_chunk[26] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
            output_chunk[27] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
            output_chunk[28] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
            output_chunk[29] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
            output_chunk[30] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
            output_chunk[31] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

            output_index += BLOCKS_PER_FAST_LOOP * 8;
            input_index += BLOCKS_PER_FAST_LOOP * 6;
        }
    }

    // Encode what's left after the fast loop.

    const LOW_SIX_BITS_U8: u8 = 0x3F;

    let rem = input.len() % 3;
    let start_of_rem = input.len() - rem;

    // start at the first index not handled by fast loop, which may be 0.

    // Scalar loop: 3 input bytes -> 4 output chars per iteration.
    while input_index < start_of_rem {
        let input_chunk = &input[input_index..(input_index + 3)];
        let output_chunk = &mut output[output_index..(output_index + 4)];

        output_chunk[0] = encode_table[(input_chunk[0] >> 2) as usize];
        output_chunk[1] =
            encode_table[((input_chunk[0] << 4 | input_chunk[1] >> 4) & LOW_SIX_BITS_U8) as usize];
        output_chunk[2] =
            encode_table[((input_chunk[1] << 2 | input_chunk[2] >> 6) & LOW_SIX_BITS_U8) as usize];
        output_chunk[3] = encode_table[(input_chunk[2] & LOW_SIX_BITS_U8) as usize];

        input_index += 3;
        output_index += 4;
    }

    // Final partial chunk: 2 leftover bytes -> 3 chars, 1 leftover byte -> 2 chars.
    // Padding, if configured, is appended by the caller (see `add_padding`).
    if rem == 2 {
        output[output_index] = encode_table[(input[start_of_rem] >> 2) as usize];
        output[output_index + 1] = encode_table[((input[start_of_rem] << 4
            | input[start_of_rem + 1] >> 4)
            & LOW_SIX_BITS_U8) as usize];
        output[output_index + 2] =
            encode_table[((input[start_of_rem + 1] << 2) & LOW_SIX_BITS_U8) as usize];
        output_index += 3;
    } else if rem == 1 {
        output[output_index] = encode_table[(input[start_of_rem] >> 2) as usize];
        output[output_index + 1] =
            encode_table[((input[start_of_rem] << 4) & LOW_SIX_BITS_U8) as usize];
        output_index += 2;
    }

    output_index
}
288
289 /// calculate the base64 encoded string size, including padding if appropriate
290 pub fn encoded_size(bytes_len: usize, config: Config) -> Option<usize> {
291 let rem = bytes_len % 3;
292
293 let complete_input_chunks = bytes_len / 3;
294 let complete_chunk_output = complete_input_chunks.checked_mul(4);
295
296 if rem > 0 {
297 if config.pad {
298 complete_chunk_output.and_then(|c| c.checked_add(4))
299 } else {
300 let encoded_rem = match rem {
301 1 => 2,
302 2 => 3,
303 _ => unreachable!("Impossible remainder"),
304 };
305 complete_chunk_output.and_then(|c| c.checked_add(encoded_rem))
306 }
307 } else {
308 complete_chunk_output
309 }
310 }
311
/// Write padding characters.
/// `output` is the slice where padding should be written, of length at least 2.
///
/// Returns the number of padding bytes written.
///
/// Panics (via slice indexing) if `output` is shorter than the required padding.
pub fn add_padding(input_len: usize, output: &mut [u8]) -> usize {
    // 0 pad bytes for a full final chunk, 2 for one leftover byte, 1 for two.
    let pad_len = (3 - input_len % 3) % 3;

    for i in 0..pad_len {
        output[i] = b'=';
    }

    pad_len
}
326
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        decode::decode_config_buf,
        tests::{assert_encode_sanity, random_config},
        Config, STANDARD, URL_SAFE_NO_PAD,
    };

    use rand::{
        distributions::{Distribution, Uniform},
        FromEntropy, Rng,
    };
    use std;
    use std::str;

    // Padded output length is always rounded up to a multiple of 4.
    #[test]
    fn encoded_size_correct_standard() {
        assert_encoded_length(0, 0, STANDARD);

        assert_encoded_length(1, 4, STANDARD);
        assert_encoded_length(2, 4, STANDARD);
        assert_encoded_length(3, 4, STANDARD);

        assert_encoded_length(4, 8, STANDARD);
        assert_encoded_length(5, 8, STANDARD);
        assert_encoded_length(6, 8, STANDARD);

        assert_encoded_length(7, 12, STANDARD);
        assert_encoded_length(8, 12, STANDARD);
        assert_encoded_length(9, 12, STANDARD);

        assert_encoded_length(54, 72, STANDARD);

        assert_encoded_length(55, 76, STANDARD);
        assert_encoded_length(56, 76, STANDARD);
        assert_encoded_length(57, 76, STANDARD);

        assert_encoded_length(58, 80, STANDARD);
    }

    // Unpadded output length is exactly ceil(4n/3).
    #[test]
    fn encoded_size_correct_no_pad() {
        assert_encoded_length(0, 0, URL_SAFE_NO_PAD);

        assert_encoded_length(1, 2, URL_SAFE_NO_PAD);
        assert_encoded_length(2, 3, URL_SAFE_NO_PAD);
        assert_encoded_length(3, 4, URL_SAFE_NO_PAD);

        assert_encoded_length(4, 6, URL_SAFE_NO_PAD);
        assert_encoded_length(5, 7, URL_SAFE_NO_PAD);
        assert_encoded_length(6, 8, URL_SAFE_NO_PAD);

        assert_encoded_length(7, 10, URL_SAFE_NO_PAD);
        assert_encoded_length(8, 11, URL_SAFE_NO_PAD);
        assert_encoded_length(9, 12, URL_SAFE_NO_PAD);

        assert_encoded_length(54, 72, URL_SAFE_NO_PAD);

        assert_encoded_length(55, 74, URL_SAFE_NO_PAD);
        assert_encoded_length(56, 75, URL_SAFE_NO_PAD);
        assert_encoded_length(57, 76, URL_SAFE_NO_PAD);

        assert_encoded_length(58, 78, URL_SAFE_NO_PAD);
    }

    // The size calculation must report overflow rather than wrap.
    #[test]
    fn encoded_size_overflow() {
        assert_eq!(None, encoded_size(std::usize::MAX, STANDARD));
    }

    // encode_config_buf appends to the buffer; existing contents must be untouched.
    #[test]
    fn encode_config_buf_into_nonempty_buffer_doesnt_clobber_prefix() {
        let mut orig_data = Vec::new();
        let mut prefix = String::new();
        let mut encoded_data_no_prefix = String::new();
        let mut encoded_data_with_prefix = String::new();
        let mut decoded = Vec::new();

        let prefix_len_range = Uniform::new(0, 1000);
        let input_len_range = Uniform::new(0, 1000);

        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..10_000 {
            orig_data.clear();
            prefix.clear();
            encoded_data_no_prefix.clear();
            encoded_data_with_prefix.clear();
            decoded.clear();

            let input_len = input_len_range.sample(&mut rng);

            for _ in 0..input_len {
                orig_data.push(rng.gen());
            }

            let prefix_len = prefix_len_range.sample(&mut rng);
            for _ in 0..prefix_len {
                // getting convenient random single-byte printable chars that aren't base64 is
                // annoying
                prefix.push('#');
            }
            encoded_data_with_prefix.push_str(&prefix);

            let config = random_config(&mut rng);
            encode_config_buf(&orig_data, config, &mut encoded_data_no_prefix);
            encode_config_buf(&orig_data, config, &mut encoded_data_with_prefix);

            assert_eq!(
                encoded_data_no_prefix.len() + prefix_len,
                encoded_data_with_prefix.len()
            );
            assert_encode_sanity(&encoded_data_no_prefix, config, input_len);
            assert_encode_sanity(&encoded_data_with_prefix[prefix_len..], config, input_len);

            // append plain encode onto prefix (a shared borrow is all push_str needs)
            prefix.push_str(&encoded_data_no_prefix);

            assert_eq!(prefix, encoded_data_with_prefix);

            decode_config_buf(&encoded_data_no_prefix, config, &mut decoded).unwrap();
            assert_eq!(orig_data, decoded);
        }
    }

    // encode_config_slice must write only the first encoded_size bytes of the output.
    #[test]
    fn encode_config_slice_into_nonempty_buffer_doesnt_clobber_suffix() {
        let mut orig_data = Vec::new();
        let mut encoded_data = Vec::new();
        let mut encoded_data_original_state = Vec::new();
        let mut decoded = Vec::new();

        let input_len_range = Uniform::new(0, 1000);

        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..10_000 {
            orig_data.clear();
            encoded_data.clear();
            encoded_data_original_state.clear();
            decoded.clear();

            let input_len = input_len_range.sample(&mut rng);

            for _ in 0..input_len {
                orig_data.push(rng.gen());
            }

            // plenty of existing garbage in the encoded buffer
            for _ in 0..10 * input_len {
                encoded_data.push(rng.gen());
            }

            encoded_data_original_state.extend_from_slice(&encoded_data);

            let config = random_config(&mut rng);

            let encoded_size = encoded_size(input_len, config).unwrap();

            assert_eq!(
                encoded_size,
                encode_config_slice(&orig_data, config, &mut encoded_data)
            );

            assert_encode_sanity(
                std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
                config,
                input_len,
            );

            assert_eq!(
                &encoded_data[encoded_size..],
                &encoded_data_original_state[encoded_size..]
            );

            decode_config_buf(&encoded_data[0..encoded_size], config, &mut decoded).unwrap();
            assert_eq!(orig_data, decoded);
        }
    }

    // A buffer of exactly encoded_size bytes is sufficient (no over-read/over-write).
    #[test]
    fn encode_config_slice_fits_into_precisely_sized_slice() {
        let mut orig_data = Vec::new();
        let mut encoded_data = Vec::new();
        let mut decoded = Vec::new();

        let input_len_range = Uniform::new(0, 1000);

        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..10_000 {
            orig_data.clear();
            encoded_data.clear();
            decoded.clear();

            let input_len = input_len_range.sample(&mut rng);

            for _ in 0..input_len {
                orig_data.push(rng.gen());
            }

            let config = random_config(&mut rng);

            let encoded_size = encoded_size(input_len, config).unwrap();

            encoded_data.resize(encoded_size, 0);

            assert_eq!(
                encoded_size,
                encode_config_slice(&orig_data, config, &mut encoded_data)
            );

            assert_encode_sanity(
                std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
                config,
                input_len,
            );

            decode_config_buf(&encoded_data[0..encoded_size], config, &mut decoded).unwrap();
            assert_eq!(orig_data, decoded);
        }
    }

    // Raw (unpadded) encoding must emit valid UTF-8 and leave trailing bytes alone.
    #[test]
    fn encode_to_slice_random_valid_utf8() {
        let mut input = Vec::new();
        let mut output = Vec::new();

        let input_len_range = Uniform::new(0, 1000);

        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..10_000 {
            input.clear();
            output.clear();

            let input_len = input_len_range.sample(&mut rng);

            for _ in 0..input_len {
                input.push(rng.gen());
            }

            let config = random_config(&mut rng);

            // fill up the output buffer with garbage
            let encoded_size = encoded_size(input_len, config).unwrap();
            for _ in 0..encoded_size {
                output.push(rng.gen());
            }

            let orig_output_buf = output.to_vec();

            let bytes_written =
                encode_to_slice(&input, &mut output, config.char_set.encode_table());

            // make sure the part beyond bytes_written is the same garbage it was before
            assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);

            // make sure the encoded bytes are UTF-8
            let _ = str::from_utf8(&output[0..bytes_written]).unwrap();
        }
    }

    // Padded encoding must also emit valid UTF-8 and write exactly encoded_size bytes.
    #[test]
    fn encode_with_padding_random_valid_utf8() {
        let mut input = Vec::new();
        let mut output = Vec::new();

        let input_len_range = Uniform::new(0, 1000);

        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..10_000 {
            input.clear();
            output.clear();

            let input_len = input_len_range.sample(&mut rng);

            for _ in 0..input_len {
                input.push(rng.gen());
            }

            let config = random_config(&mut rng);

            // fill up the output buffer with garbage
            let encoded_size = encoded_size(input_len, config).unwrap();
            for _ in 0..encoded_size + 1000 {
                output.push(rng.gen());
            }

            let orig_output_buf = output.to_vec();

            encode_with_padding(&input, config, encoded_size, &mut output[0..encoded_size]);

            // make sure the part beyond b64 is the same garbage it was before
            assert_eq!(orig_output_buf[encoded_size..], output[encoded_size..]);

            // make sure the encoded bytes are UTF-8
            let _ = str::from_utf8(&output[0..encoded_size]).unwrap();
        }
    }

    // add_padding must write only the pad bytes and nothing past them.
    #[test]
    fn add_padding_random_valid_utf8() {
        let mut output = Vec::new();

        let mut rng = rand::rngs::SmallRng::from_entropy();

        // cover our bases for length % 3
        for input_len in 0..10 {
            output.clear();

            // fill output with random
            for _ in 0..10 {
                output.push(rng.gen());
            }

            let orig_output_buf = output.to_vec();

            let bytes_written = add_padding(input_len, &mut output);

            // make sure the part beyond bytes_written is the same garbage it was before
            assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);

            // make sure the encoded bytes are UTF-8
            let _ = str::from_utf8(&output[0..bytes_written]).unwrap();
        }
    }

    // Check encoded_size against a round-trip encode of random data of the given length.
    fn assert_encoded_length(input_len: usize, encoded_len: usize, config: Config) {
        assert_eq!(encoded_len, encoded_size(input_len, config).unwrap());

        let mut bytes: Vec<u8> = Vec::new();
        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..input_len {
            bytes.push(rng.gen());
        }

        let encoded = encode_config(&bytes, config);
        assert_encode_sanity(&encoded, config, input_len);

        assert_eq!(encoded_len, encoded.len());
    }
}